From c185f9ce6521685d676b696ec699694b84bd8da5 Mon Sep 17 00:00:00 2001
From: Sergey Timoshin
Date: Sun, 5 Jan 2025 13:44:35 +0000
Subject: [PATCH 01/27] forester batch ops refactored

---
 forester-utils/src/instructions.rs     | 6 +-----
 forester/src/batch_processor/common.rs | 6 +++---
 forester/tests/batched_address_test.rs | 4 ++--
 3 files changed, 6 insertions(+), 10 deletions(-)

diff --git a/forester-utils/src/instructions.rs b/forester-utils/src/instructions.rs
index c96d49f49..60cdedb05 100644
--- a/forester-utils/src/instructions.rs
+++ b/forester-utils/src/instructions.rs
@@ -22,7 +22,7 @@ use light_prover_client::{
 };
 use light_utils::bigint::bigint_to_be_bytes_array;
 use light_verifier::CompressedProof;
-use log::{error, info};
+use log::error;
 use reqwest::Client;
 use solana_sdk::pubkey::Pubkey;
 use thiserror::Error;
@@ -268,8 +268,6 @@ pub async fn create_append_batch_ix_data<R: RpcConnection, I: Indexer<R>>(
         .await
         .unwrap();
 
-    info!("Leaves: {:?}", leaves);
-
     let (old_leaves, merkle_proofs) = {
         let mut old_leaves = vec![];
         let mut merkle_proofs = vec![];
@@ -284,8 +282,6 @@ pub async fn create_append_batch_ix_data<R: RpcConnection, I: Indexer<R>>(
         (old_leaves, merkle_proofs)
     };
 
-    info!("Old leaves: {:?}", old_leaves);
-
     let (proof, new_root) = {
         let circuit_inputs =
             get_batch_append_with_proofs_inputs::<{ DEFAULT_BATCH_STATE_TREE_HEIGHT as usize }>(
diff --git a/forester/src/batch_processor/common.rs b/forester/src/batch_processor/common.rs
index dda238d9b..89e5d2d95 100644
--- a/forester/src/batch_processor/common.rs
+++ b/forester/src/batch_processor/common.rs
@@ -107,7 +107,7 @@ impl<R: RpcConnection, I: Indexer<R>> BatchProcessor<R, I> {
         };
 
         Self::calculate_completion_from_tree(account.data.as_mut_slice())
-    }
+    }
 
     async fn get_output_queue_completion(&self, rpc: &mut R) -> f64 {
         let mut account = match rpc.get_account(self.context.output_queue).await {
             Ok(acc) => acc,
             Err(_) => return 0.0,
         };
 
         Self::calculate_completion_from_queue(account.data.as_mut_slice())
-    }
+    }
 
     fn calculate_completion_from_tree(data: &mut [u8]) -> f64 {
         let tree = match BatchedMerkleTreeAccount::state_tree_from_bytes_mut(data) {
@@ -166,7 +166,7 @@ impl<R: RpcConnection, I: Indexer<R>> BatchProcessor<R, I> {
         let (_, zkp_batch_size) = self.get_num_inserted_zkps(&mut rpc).await?;
         state::perform_nullify(&self.context, &mut rpc).await?;
         Ok(zkp_batch_size)
-    }
+    }
 
     async fn get_num_inserted_zkps(&self, rpc: &mut R) -> Result<(u64, usize)> {
         let (num_inserted_zkps, zkp_batch_size) = {
diff --git a/forester/tests/batched_address_test.rs b/forester/tests/batched_address_test.rs
index ba4ff36fe..ab6b3317c 100644
--- a/forester/tests/batched_address_test.rs
+++ b/forester/tests/batched_address_test.rs
@@ -143,7 +143,7 @@ async fn test_address_batched() {
     println!("Creating new address batch tree...");
     let merkle_tree_keypair = Keypair::new();
-    env.indexer
+    env.indexer
         .add_address_merkle_tree(
             &mut env.rpc,
             &merkle_tree_keypair,
             None,
             2,
         )
-        .await;
+        .await;
     env_accounts.batch_address_merkle_tree = merkle_tree_keypair.pubkey();
 
     let address_trees: Vec = env

From c9547c15b43e2283e774fa145f807ad0848132f6 Mon Sep 17 00:00:00 2001
From: Sergey Timoshin
Date: Mon, 6 Jan 2025 18:01:30 +0000
Subject: [PATCH 02/27] refactor batch processing workflows

---
 forester/src/batch_processor/common.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/forester/src/batch_processor/common.rs b/forester/src/batch_processor/common.rs
index 89e5d2d95..b2304ebb1 100644
--- a/forester/src/batch_processor/common.rs
+++ b/forester/src/batch_processor/common.rs
@@ -53,7 +53,7 @@ impl<R: RpcConnection, I: Indexer<R>> BatchProcessor<R, I> {
             BatchReadyState::ReadyForNullify => self.process_state_nullify().await,
             BatchReadyState::NotReady => Ok(0),
         }
-    }
+    }
 
     async fn verify_batch_ready(&self) -> BatchReadyState {
         let mut rpc = match self.context.rpc_pool.get_connection().await {
@@ -152,14 +152,14 @@ impl<R: RpcConnection, I: Indexer<R>> BatchProcessor<R, I> {
         let remaining = total - batch.get_num_inserted_zkps();
         remaining as f64 / total as f64
-    }
+    }
 
     async fn process_state_append(&self) -> Result<usize> {
         let mut rpc = self.context.rpc_pool.get_connection().await?;
         let (num_inserted_zkps, zkp_batch_size) = self.get_num_inserted_zkps(&mut rpc).await?;
         state::perform_append(&self.context, &mut rpc, num_inserted_zkps).await?;
         Ok(zkp_batch_size)
-    }
+    }
 
     async fn process_state_nullify(&self) -> Result<usize> {
         let mut rpc = self.context.rpc_pool.get_connection().await?;

From cd6219f55fb367ec26a578a3e61183383c91ea45 Mon Sep 17 00:00:00 2001
From: Sergey Timoshin
Date: Tue, 7 Jan 2025 07:49:33 +0000
Subject: [PATCH 03/27] switched the verification order of input and output
 queues to prioritize output batch readiness

---
 forester/src/batch_processor/common.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/forester/src/batch_processor/common.rs b/forester/src/batch_processor/common.rs
index b2304ebb1..94de5f9f0 100644
--- a/forester/src/batch_processor/common.rs
+++ b/forester/src/batch_processor/common.rs
@@ -53,7 +53,7 @@ impl<R: RpcConnection, I: Indexer<R>> BatchProcessor<R, I> {
             BatchReadyState::ReadyForNullify => self.process_state_nullify().await,
             BatchReadyState::NotReady => Ok(0),
         }
-    }
+    }
 
     async fn verify_batch_ready(&self) -> BatchReadyState {
         let mut rpc = match self.context.rpc_pool.get_connection().await {
@@ -159,14 +159,14 @@ impl<R: RpcConnection, I: Indexer<R>> BatchProcessor<R, I> {
         let (num_inserted_zkps, zkp_batch_size) = self.get_num_inserted_zkps(&mut rpc).await?;
         state::perform_append(&self.context, &mut rpc, num_inserted_zkps).await?;
         Ok(zkp_batch_size)
-    }
+    }
 
     async fn process_state_nullify(&self) -> Result<usize> {
         let mut rpc = self.context.rpc_pool.get_connection().await?;
         let (_, zkp_batch_size) = self.get_num_inserted_zkps(&mut rpc).await?;
         state::perform_nullify(&self.context, &mut rpc).await?;
         Ok(zkp_batch_size)
-    }
+    }
 
     async fn get_num_inserted_zkps(&self, rpc: &mut R) -> Result<(u64, usize)> {
         let (num_inserted_zkps, zkp_batch_size) = {
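
NOTE (illustrative, not part of the patch): patch 03 only states that the
readiness check now inspects the output queue before the input queue. A
minimal sketch of the resulting dispatch order, assuming the completion
helpers used by BatchProcessor above; `classify` and `threshold` are
hypothetical names for this sketch, not part of the forester codebase:

    enum BatchReadyState { ReadyForAppend, ReadyForNullify, NotReady }

    // Output-queue fullness (append work) is consulted first, so pending
    // appends win when both queues are past the threshold.
    fn classify(output_completion: f64, input_completion: f64, threshold: f64) -> BatchReadyState {
        if output_completion >= threshold {
            BatchReadyState::ReadyForAppend
        } else if input_completion >= threshold {
            BatchReadyState::ReadyForNullify
        } else {
            BatchReadyState::NotReady
        }
    }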
a/forester/tests/batched_state_test.rs b/forester/tests/batched_state_test.rs index d19aa0898..7bbc83021 100644 --- a/forester/tests/batched_state_test.rs +++ b/forester/tests/batched_state_test.rs @@ -361,4 +361,4 @@ async fn test_state_batched() { .send(()) .expect("Failed to send shutdown signal"); service_handle.await.unwrap().unwrap(); -} +} \ No newline at end of file From 388ea6366514a64e3953a60fcc74fefdf5d87b3e Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Tue, 7 Jan 2025 19:01:35 +0000 Subject: [PATCH 05/27] refactor imports --- forester-utils/src/instructions.rs | 1 - forester/tests/batched_state_test.rs | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/forester-utils/src/instructions.rs b/forester-utils/src/instructions.rs index fe013c7fc..c96d49f49 100644 --- a/forester-utils/src/instructions.rs +++ b/forester-utils/src/instructions.rs @@ -286,7 +286,6 @@ pub async fn create_append_batch_ix_data>( info!("Old leaves: {:?}", old_leaves); - let (proof, new_root) = { let circuit_inputs = get_batch_append_with_proofs_inputs::<{ DEFAULT_BATCH_STATE_TREE_HEIGHT as usize }>( diff --git a/forester/tests/batched_state_test.rs b/forester/tests/batched_state_test.rs index 7bbc83021..d19aa0898 100644 --- a/forester/tests/batched_state_test.rs +++ b/forester/tests/batched_state_test.rs @@ -361,4 +361,4 @@ async fn test_state_batched() { .send(()) .expect("Failed to send shutdown signal"); service_handle.await.unwrap().unwrap(); -} \ No newline at end of file +} From 386421ca2450397b05c49cf195ee17f9da255a2c Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Wed, 8 Jan 2025 23:58:58 +0000 Subject: [PATCH 06/27] add conversion utilities and refactor indexer code --- Cargo.lock | 3 + .../src/address_merkle_tree_config.rs | 3 +- forester-utils/src/indexer/mod.rs | 685 ++-- forester-utils/src/instructions.rs | 12 +- forester-utils/src/registry.rs | 3 +- forester/Cargo.toml | 3 +- forester/src/batch_processor/address.rs | 13 +- forester/src/batch_processor/common.rs | 9 +- forester/src/batch_processor/mod.rs | 6 +- forester/src/batch_processor/state.rs | 39 +- forester/src/epoch_manager.rs | 10 +- forester/src/indexer_type.rs | 293 ++ forester/src/lib.rs | 8 +- forester/src/photon_indexer.rs | 39 +- forester/src/rollover/mod.rs | 4 +- forester/src/rollover/operations.rs | 91 +- forester/src/send_transaction.rs | 3 +- program-tests/utils/Cargo.toml | 1 + program-tests/utils/src/conversions.rs | 239 ++ program-tests/utils/src/e2e_test_env.rs | 31 +- program-tests/utils/src/indexer/mod.rs | 4 +- .../utils/src/indexer/test_indexer.rs | 3558 ++++++++--------- program-tests/utils/src/lib.rs | 3 +- program-tests/utils/src/spl.rs | 46 +- program-tests/utils/src/system_program.rs | 8 +- program-tests/utils/src/test_forester.rs | 2 +- sdk-libs/client/src/indexer/mod.rs | 225 +- .../client/src/photon_rpc/photon_client.rs | 4 +- sdk-libs/program-test/Cargo.toml | 1 + .../program-test/src/indexer/extensions.rs | 108 + sdk-libs/program-test/src/indexer/mod.rs | 6 + .../program-test/src/indexer/test_indexer.rs | 1887 +++++++++ sdk-libs/program-test/src/indexer/utils.rs | 416 ++ sdk-libs/program-test/src/lib.rs | 1 + .../program-test/src/test_batch_forester.rs | 3 +- sdk-libs/program-test/src/test_indexer.rs | 1328 +++--- sdk-libs/sdk/src/proof.rs | 8 + 37 files changed, 6117 insertions(+), 2986 deletions(-) create mode 100644 forester/src/indexer_type.rs create mode 100644 program-tests/utils/src/conversions.rs create mode 100644 sdk-libs/program-test/src/indexer/extensions.rs create 
mode 100644 sdk-libs/program-test/src/indexer/mod.rs create mode 100644 sdk-libs/program-test/src/indexer/test_indexer.rs create mode 100644 sdk-libs/program-test/src/indexer/utils.rs diff --git a/Cargo.lock b/Cargo.lock index 36a461b04..483f6da9e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1964,6 +1964,7 @@ dependencies = [ "light-program-test", "light-prover-client", "light-registry", + "light-sdk", "light-system-program", "light-test-utils", "photon-api", @@ -3080,6 +3081,7 @@ dependencies = [ "light-compressed-token", "light-hasher", "light-indexed-merkle-tree", + "light-merkle-tree-metadata", "light-merkle-tree-reference", "light-prover-client", "light-registry", @@ -3237,6 +3239,7 @@ dependencies = [ "light-program-test", "light-prover-client", "light-registry", + "light-sdk", "light-system-program", "light-utils 1.1.0", "light-verifier", diff --git a/forester-utils/src/address_merkle_tree_config.rs b/forester-utils/src/address_merkle_tree_config.rs index 3c37c5a51..ac190d0af 100644 --- a/forester-utils/src/address_merkle_tree_config.rs +++ b/forester-utils/src/address_merkle_tree_config.rs @@ -8,10 +8,9 @@ use light_client::rpc::RpcConnection; use light_hasher::{Discriminator as LightDiscriminator, Poseidon}; use num_traits::Zero; use solana_sdk::pubkey::Pubkey; - +use light_client::indexer::{AddressMerkleTreeAccounts, StateMerkleTreeAccounts}; use crate::{ get_concurrent_merkle_tree, get_hash_set, get_indexed_merkle_tree, - indexer::{AddressMerkleTreeAccounts, StateMerkleTreeAccounts}, AccountZeroCopy, }; diff --git a/forester-utils/src/indexer/mod.rs b/forester-utils/src/indexer/mod.rs index 854633fbf..e2b309a84 100644 --- a/forester-utils/src/indexer/mod.rs +++ b/forester-utils/src/indexer/mod.rs @@ -1,328 +1,357 @@ -use std::fmt::Debug; - -use account_compression::initialize_address_merkle_tree::{ - Error as AccountCompressionError, Pubkey, -}; -use async_trait::async_trait; -use light_client::rpc::RpcConnection; -use light_compressed_token::TokenData; -use light_hash_set::HashSetError; -use light_hasher::Poseidon; -use light_indexed_merkle_tree::{ - array::{IndexedArray, IndexedElement}, - reference::IndexedMerkleTree, -}; -use light_merkle_tree_reference::MerkleTree; -use light_system_program::{ - invoke::processor::CompressedProof, - sdk::{compressed_account::CompressedAccountWithMerkleContext, event::PublicTransactionEvent}, -}; -use num_bigint::BigUint; -use photon_api::apis::{default_api::GetCompressedAccountProofPostError, Error as PhotonApiError}; -use solana_sdk::signature::Keypair; -use thiserror::Error; - -#[derive(Debug, Clone)] -pub struct TokenDataWithContext { - pub token_data: TokenData, - pub compressed_account: CompressedAccountWithMerkleContext, -} - -#[derive(Debug, Default)] -pub struct BatchedTreeProofRpcResult { - pub proof: Option, - // If none -> proof by index, else included in zkp - pub root_indices: Vec>, - pub address_root_indices: Vec, -} - -#[derive(Debug, Default)] -pub struct ProofRpcResult { - pub proof: CompressedProof, - pub root_indices: Vec>, - pub address_root_indices: Vec, -} - -#[derive(Debug, Clone, Copy, Ord, PartialOrd, Eq, PartialEq)] -pub struct StateMerkleTreeAccounts { - pub merkle_tree: Pubkey, - pub nullifier_queue: Pubkey, - pub cpi_context: Pubkey, -} - -#[derive(Debug, Clone, Copy)] -pub struct AddressMerkleTreeAccounts { - pub merkle_tree: Pubkey, - pub queue: Pubkey, -} - -#[derive(Debug, Clone)] -pub struct StateMerkleTreeBundle { - pub rollover_fee: i64, - pub merkle_tree: Box>, - pub accounts: StateMerkleTreeAccounts, - 
pub version: u64, - pub output_queue_elements: Vec<[u8; 32]>, - /// leaf index, leaf, tx hash - pub input_leaf_indices: Vec<(u32, [u8; 32], [u8; 32])>, -} - -#[derive(Debug, Clone)] -pub struct AddressMerkleTreeBundle { - pub rollover_fee: i64, - pub merkle_tree: Box>, - pub indexed_array: Box>, - pub accounts: AddressMerkleTreeAccounts, - pub queue_elements: Vec<[u8; 32]>, -} - -pub struct ProofOfLeaf { - pub leaf: [u8; 32], - pub proof: Vec<[u8; 32]>, -} - -#[async_trait] -pub trait Indexer: Sync + Send + Debug + 'static { - /// Returns queue elements from the queue with the given pubkey. For input - /// queues account compression program does not store queue elements in the - /// account data but only emits these in the public transaction event. The - /// indexer needs the queue elements to create batch update proofs. - async fn get_queue_elements( - &self, - pubkey: [u8; 32], - batch: u64, - start_offset: u64, - end_offset: u64, - ) -> Result, IndexerError>; - - fn get_proof_by_index(&mut self, _merkle_tree_pubkey: Pubkey, _index: u64) -> ProofOfLeaf { - unimplemented!("get_proof_by_index not implemented") - } - - fn get_proofs_by_indices( - &mut self, - _merkle_tree_pubkey: Pubkey, - _indices: &[u64], - ) -> Vec { - unimplemented!("get_proof_by_index not implemented") - } - - fn get_leaf_indices_tx_hashes( - &mut self, - _merkle_tree_pubkey: Pubkey, - _zkp_batch_size: usize, - ) -> Vec<(u32, [u8; 32], [u8; 32])> { - unimplemented!(); - } - - async fn get_subtrees( - &self, - merkle_tree_pubkey: [u8; 32], - ) -> Result, IndexerError>; - - async fn get_multiple_compressed_account_proofs( - &self, - hashes: Vec, - ) -> Result, IndexerError>; - - async fn get_rpc_compressed_accounts_by_owner( - &self, - owner: &Pubkey, - ) -> Result, IndexerError>; - - async fn get_multiple_new_address_proofs( - &self, - merkle_tree_pubkey: [u8; 32], - addresses: Vec<[u8; 32]>, - ) -> Result>, IndexerError>; - - async fn get_multiple_new_address_proofs_full( - &self, - merkle_tree_pubkey: [u8; 32], - addresses: Vec<[u8; 32]>, - ) -> Result>, IndexerError>; - - fn account_nullified(&mut self, _merkle_tree_pubkey: Pubkey, _account_hash: &str) {} - - fn address_tree_updated( - &mut self, - _merkle_tree_pubkey: Pubkey, - _context: &NewAddressProofWithContext<16>, - ) { - } - - fn get_state_merkle_tree_accounts(&self, _pubkeys: &[Pubkey]) -> Vec { - unimplemented!() - } - - fn add_event_and_compressed_accounts( - &mut self, - _slot: u64, - _event: &PublicTransactionEvent, - ) -> ( - Vec, - Vec, - ) { - unimplemented!() - } - - fn get_state_merkle_trees(&self) -> &Vec { - unimplemented!() - } - - fn get_state_merkle_trees_mut(&mut self) -> &mut Vec { - unimplemented!() - } - - fn get_address_merkle_trees(&self) -> &Vec { - unimplemented!() - } - - fn get_address_merkle_trees_mut(&mut self) -> &mut Vec { - unimplemented!() - } - - fn get_token_compressed_accounts(&self) -> &Vec { - unimplemented!() - } - - fn get_payer(&self) -> &Keypair { - unimplemented!() - } - - fn get_group_pda(&self) -> &Pubkey { - unimplemented!() - } - - async fn create_proof_for_compressed_accounts( - &mut self, - _compressed_accounts: Option>, - _state_merkle_tree_pubkeys: Option>, - _new_addresses: Option<&[[u8; 32]]>, - _address_merkle_tree_pubkeys: Option>, - _rpc: &mut R, - ) -> ProofRpcResult { - unimplemented!() - } - - async fn create_proof_for_compressed_accounts2( - &mut self, - _compressed_accounts: Option>, - _state_merkle_tree_pubkeys: Option>, - _new_addresses: Option<&[[u8; 32]]>, - _address_merkle_tree_pubkeys: Option>, 
- _rpc: &mut R, - ) -> BatchedTreeProofRpcResult { - unimplemented!() - } - - fn add_address_merkle_tree_accounts( - &mut self, - _merkle_tree_keypair: &Keypair, - _queue_keypair: &Keypair, - _owning_program_id: Option, - ) -> AddressMerkleTreeAccounts { - unimplemented!() - } - - fn get_compressed_accounts_by_owner( - &self, - _owner: &Pubkey, - ) -> Vec { - unimplemented!() - } - - fn get_compressed_token_accounts_by_owner(&self, _owner: &Pubkey) -> Vec { - unimplemented!() - } - - fn add_state_bundle(&mut self, _state_bundle: StateMerkleTreeBundle) { - unimplemented!() - } - - async fn update_test_indexer_after_append( - &mut self, - _rpc: &mut R, - _merkle_tree_pubkey: Pubkey, - _output_queue_pubkey: Pubkey, - _num_inserted_zkps: u64, - ) { - unimplemented!() - } - - async fn update_test_indexer_after_nullification( - &mut self, - _rpc: &mut R, - _merkle_tree_pubkey: Pubkey, - _batch_index: usize, - ) { - unimplemented!() - } - - async fn finalize_batched_address_tree_update( - &mut self, - _rpc: &mut R, - _merkle_tree_pubkey: Pubkey, - ) { - unimplemented!() - } -} - -#[derive(Debug, Clone)] -pub struct MerkleProof { - pub hash: String, - pub leaf_index: u64, - pub merkle_tree: String, - pub proof: Vec<[u8; 32]>, - pub root_seq: u64, -} - -// For consistency with the Photon API. -#[derive(Clone, Debug, PartialEq)] -pub struct NewAddressProofWithContext { - pub merkle_tree: [u8; 32], - pub root: [u8; 32], - pub root_seq: u64, - pub low_address_index: u64, - pub low_address_value: [u8; 32], - pub low_address_next_index: u64, - pub low_address_next_value: [u8; 32], - pub low_address_proof: [[u8; 32]; NET_HEIGHT], - pub new_low_element: Option>, - pub new_element: Option>, - pub new_element_next_value: Option, -} - -#[derive(Error, Debug)] -pub enum IndexerError { - #[error("RPC Error: {0}")] - RpcError(#[from] solana_client::client_error::ClientError), - #[error("failed to deserialize account data")] - DeserializeError(#[from] solana_sdk::program_error::ProgramError), - #[error("failed to copy merkle tree")] - CopyMerkleTreeError(#[from] std::io::Error), - #[error(transparent)] - AccountCompressionError(#[from] AccountCompressionError), - #[error(transparent)] - HashSetError(#[from] HashSetError), - #[error(transparent)] - PhotonApiError(PhotonApiErrorWrapper), - #[error("error: {0:?}")] - Custom(String), - #[error("unknown error")] - Unknown, -} - -#[derive(Error, Debug)] -pub enum PhotonApiErrorWrapper { - #[error(transparent)] - GetCompressedAccountProofPostError(#[from] PhotonApiError), -} - -impl From> for IndexerError { - fn from(err: PhotonApiError) -> Self { - IndexerError::PhotonApiError(PhotonApiErrorWrapper::GetCompressedAccountProofPostError( - err, - )) - } -} +// use std::fmt::Debug; +// +// use account_compression::initialize_address_merkle_tree::{ +// Error as AccountCompressionError, Pubkey, +// }; +// use async_trait::async_trait; +// use light_client::rpc::RpcConnection; +// use light_compressed_token::TokenData; +// use light_hash_set::HashSetError; +// use light_hasher::Poseidon; +// use light_indexed_merkle_tree::{ +// array::{IndexedArray, IndexedElement}, +// reference::IndexedMerkleTree, +// }; +// use light_merkle_tree_reference::MerkleTree; +// use light_system_program::{ +// invoke::processor::CompressedProof, +// sdk::{compressed_account::CompressedAccountWithMerkleContext, event::PublicTransactionEvent}, +// }; +// use num_bigint::BigUint; +// use photon_api::apis::{default_api::GetCompressedAccountProofPostError, Error as PhotonApiError}; +// use 
solana_sdk::signature::Keypair; +// use thiserror::Error; +// +// #[derive(Debug, Clone)] +// pub struct TokenDataWithContext { +// pub token_data: TokenData, +// pub compressed_account: CompressedAccountWithMerkleContext, +// } +// +// #[derive(Debug, Default)] +// pub struct BatchedTreeProofRpcResult { +// pub proof: Option, +// // If none -> proof by index, else included in zkp +// pub root_indices: Vec>, +// pub address_root_indices: Vec, +// } +// +// #[derive(Debug, Default)] +// pub struct ProofRpcResult { +// pub proof: CompressedProof, +// pub root_indices: Vec>, +// pub address_root_indices: Vec, +// } +// +// #[derive(Debug, Clone, Copy, Ord, PartialOrd, Eq, PartialEq)] +// pub struct StateMerkleTreeAccounts { +// pub merkle_tree: Pubkey, +// pub nullifier_queue: Pubkey, +// pub cpi_context: Pubkey, +// } +// +// #[derive(Debug, Clone, Copy)] +// pub struct AddressMerkleTreeAccounts { +// pub merkle_tree: Pubkey, +// pub queue: Pubkey, +// } +// +// #[derive(Debug, Clone)] +// pub struct StateMerkleTreeBundle { +// pub rollover_fee: i64, +// pub merkle_tree: Box>, +// pub accounts: StateMerkleTreeAccounts, +// pub version: u64, +// pub output_queue_elements: Vec<[u8; 32]>, +// /// leaf index, leaf, tx hash +// pub input_leaf_indices: Vec<(u32, [u8; 32], [u8; 32])>, +// } +// +// #[derive(Debug, Clone)] +// pub struct AddressMerkleTreeBundle { +// pub rollover_fee: i64, +// pub merkle_tree: Box>, +// pub indexed_array: Box>, +// pub accounts: AddressMerkleTreeAccounts, +// pub queue_elements: Vec<[u8; 32]>, +// } +// +// pub struct ProofOfLeaf { +// pub leaf: [u8; 32], +// pub proof: Vec<[u8; 32]>, +// } +// +// #[async_trait] +// pub trait Indexer: Sync + Send + Debug + 'static { +// /// Returns queue elements from the queue with the given pubkey. For input +// /// queues account compression program does not store queue elements in the +// /// account data but only emits these in the public transaction event. The +// /// indexer needs the queue elements to create batch update proofs. 
+// +// // i +// async fn get_queue_elements( +// &self, +// pubkey: [u8; 32], +// batch: u64, +// start_offset: u64, +// end_offset: u64, +// ) -> Result, IndexerError>; +// +// // e +// fn get_proof_by_index(&mut self, _merkle_tree_pubkey: Pubkey, _index: u64) -> ProofOfLeaf { +// unimplemented!("get_proof_by_index not implemented") +// } +// +// // e +// fn get_proofs_by_indices( +// &mut self, +// _merkle_tree_pubkey: Pubkey, +// _indices: &[u64], +// ) -> Vec { +// unimplemented!("get_proof_by_index not implemented") +// } +// +// // e +// fn get_leaf_indices_tx_hashes( +// &mut self, +// _merkle_tree_pubkey: Pubkey, +// _zkp_batch_size: usize, +// ) -> Vec<(u32, [u8; 32], [u8; 32])> { +// unimplemented!(); +// } +// +// // i +// async fn get_subtrees( +// &self, +// merkle_tree_pubkey: [u8; 32], +// ) -> Result, IndexerError>; +// +// // i +// async fn get_multiple_compressed_account_proofs( +// &self, +// hashes: Vec, +// ) -> Result, IndexerError>; +// +// async fn get_rpc_compressed_accounts_by_owner( +// &self, +// owner: &Pubkey, +// ) -> Result, IndexerError>; +// +// // i +// async fn get_multiple_new_address_proofs( +// &self, +// merkle_tree_pubkey: [u8; 32], +// addresses: Vec<[u8; 32]>, +// ) -> Result>, IndexerError>; +// +// // i +// async fn get_multiple_new_address_proofs_full( +// &self, +// merkle_tree_pubkey: [u8; 32], +// addresses: Vec<[u8; 32]>, +// ) -> Result>, IndexerError>; +// +// // e +// fn account_nullified(&mut self, _merkle_tree_pubkey: Pubkey, _account_hash: &str) {} +// +// // e +// fn address_tree_updated( +// &mut self, +// _merkle_tree_pubkey: Pubkey, +// _context: &NewAddressProofWithContext<16>, +// ) { +// } +// +// // e +// fn get_state_merkle_tree_accounts(&self, _pubkeys: &[Pubkey]) -> Vec { +// unimplemented!() +// } +// +// // e +// fn add_event_and_compressed_accounts( +// &mut self, +// _slot: u64, +// _event: &PublicTransactionEvent, +// ) -> ( +// Vec, +// Vec, +// ) { +// unimplemented!() +// } +// +// // e +// fn get_state_merkle_trees(&self) -> &Vec { +// unimplemented!() +// } +// +// // e +// fn get_state_merkle_trees_mut(&mut self) -> &mut Vec { +// unimplemented!() +// } +// +// // e +// fn get_address_merkle_trees(&self) -> &Vec { +// unimplemented!() +// } +// +// // e +// fn get_address_merkle_trees_mut(&mut self) -> &mut Vec { +// unimplemented!() +// } +// +// // e +// fn get_token_compressed_accounts(&self) -> &Vec { +// unimplemented!() +// } +// +// // e +// fn get_payer(&self) -> &Keypair { +// unimplemented!() +// } +// +// // e +// fn get_group_pda(&self) -> &Pubkey { +// unimplemented!() +// } +// +// // i + e +// async fn create_proof_for_compressed_accounts( +// &mut self, +// _compressed_accounts: Option>, +// _state_merkle_tree_pubkeys: Option>, +// _new_addresses: Option<&[[u8; 32]]>, +// _address_merkle_tree_pubkeys: Option>, +// _rpc: &mut R, +// ) -> ProofRpcResult { +// unimplemented!() +// } +// +// // e +// async fn create_proof_for_compressed_accounts2( +// &mut self, +// _compressed_accounts: Option>, +// _state_merkle_tree_pubkeys: Option>, +// _new_addresses: Option<&[[u8; 32]]>, +// _address_merkle_tree_pubkeys: Option>, +// _rpc: &mut R, +// ) -> BatchedTreeProofRpcResult { +// unimplemented!() +// } +// +// // e +// fn add_address_merkle_tree_accounts( +// &mut self, +// _merkle_tree_keypair: &Keypair, +// _queue_keypair: &Keypair, +// _owning_program_id: Option, +// ) -> AddressMerkleTreeAccounts { +// unimplemented!() +// } +// +// // i +// fn get_compressed_accounts_by_owner( +// &self, +// 
_owner: &Pubkey, +// ) -> Vec { +// unimplemented!() +// } +// +// // e +// fn get_compressed_token_accounts_by_owner(&self, _owner: &Pubkey) -> Vec { +// unimplemented!() +// } +// +// // e +// fn add_state_bundle(&mut self, _state_bundle: StateMerkleTreeBundle) { +// unimplemented!() +// } +// +// // e +// async fn update_test_indexer_after_append( +// &mut self, +// _rpc: &mut R, +// _merkle_tree_pubkey: Pubkey, +// _output_queue_pubkey: Pubkey, +// _num_inserted_zkps: u64, +// ) { +// unimplemented!() +// } +// +// // e +// async fn update_test_indexer_after_nullification( +// &mut self, +// _rpc: &mut R, +// _merkle_tree_pubkey: Pubkey, +// _batch_index: usize, +// ) { +// unimplemented!() +// } +// +// // e +// async fn finalize_batched_address_tree_update( +// &mut self, +// _rpc: &mut R, +// _merkle_tree_pubkey: Pubkey, +// ) { +// unimplemented!() +// } +// } +// +// #[derive(Debug, Clone)] +// pub struct MerkleProof { +// pub hash: String, +// pub leaf_index: u64, +// pub merkle_tree: String, +// pub proof: Vec<[u8; 32]>, +// pub root_seq: u64, +// } +// +// // For consistency with the Photon API. +// #[derive(Clone, Debug, PartialEq)] +// pub struct NewAddressProofWithContext { +// pub merkle_tree: [u8; 32], +// pub root: [u8; 32], +// pub root_seq: u64, +// pub low_address_index: u64, +// pub low_address_value: [u8; 32], +// pub low_address_next_index: u64, +// pub low_address_next_value: [u8; 32], +// pub low_address_proof: [[u8; 32]; NET_HEIGHT], +// pub new_low_element: Option>, +// pub new_element: Option>, +// pub new_element_next_value: Option, +// } +// +// #[derive(Error, Debug)] +// pub enum IndexerError { +// #[error("RPC Error: {0}")] +// RpcError(#[from] solana_client::client_error::ClientError), +// #[error("failed to deserialize account data")] +// DeserializeError(#[from] solana_sdk::program_error::ProgramError), +// #[error("failed to copy merkle tree")] +// CopyMerkleTreeError(#[from] std::io::Error), +// #[error(transparent)] +// AccountCompressionError(#[from] AccountCompressionError), +// #[error(transparent)] +// HashSetError(#[from] HashSetError), +// #[error(transparent)] +// PhotonApiError(PhotonApiErrorWrapper), +// #[error("error: {0:?}")] +// Custom(String), +// #[error("unknown error")] +// Unknown, +// } +// +// #[derive(Error, Debug)] +// pub enum PhotonApiErrorWrapper { +// #[error(transparent)] +// GetCompressedAccountProofPostError(#[from] PhotonApiError), +// } +// +// impl From> for IndexerError { +// fn from(err: PhotonApiError) -> Self { +// IndexerError::PhotonApiError(PhotonApiErrorWrapper::GetCompressedAccountProofPostError( +// err, +// )) +// } +// } diff --git a/forester-utils/src/instructions.rs b/forester-utils/src/instructions.rs index c96d49f49..039391888 100644 --- a/forester-utils/src/instructions.rs +++ b/forester-utils/src/instructions.rs @@ -26,8 +26,7 @@ use log::{error, info}; use reqwest::Client; use solana_sdk::pubkey::Pubkey; use thiserror::Error; - -use crate::indexer::Indexer; +use light_client::indexer::Indexer; #[derive(Error, Debug)] pub enum ForesterUtilsError { @@ -41,11 +40,15 @@ pub enum ForesterUtilsError { IndexerError(String), } -pub async fn create_batch_update_address_tree_instruction_data>( +// TODO: replace TestIndexerExtensions with IndexerType +pub async fn create_batch_update_address_tree_instruction_data( rpc: &mut R, indexer: &mut I, merkle_tree_pubkey: Pubkey, -) -> Result<(InstructionDataBatchNullifyInputs, usize), ForesterUtilsError> { +) -> Result<(InstructionDataBatchNullifyInputs, usize), 
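
NOTE (illustrative, not part of the patch): the commented-out trait above tags
each method with "// i" or "// e", which appears to track whether the method
stays on the shared light_client::indexer::Indexer trait ("i") or moves to the
test-only TestIndexerExtensions trait added under sdk-libs/program-test ("e").
A minimal sketch of that split; signatures are abbreviated and the error type
is simplified, so this is an assumption-laden outline rather than the crate's
real API:

    #[async_trait::async_trait]
    pub trait Indexer<R>: Send + Sync {
        // "i": a query a real indexer (e.g. Photon) can serve.
        async fn get_queue_elements(
            &self,
            pubkey: [u8; 32],
            batch: u64,
            start_offset: u64,
            end_offset: u64,
        ) -> Result<Vec<[u8; 32]>, String>;
    }

    #[async_trait::async_trait]
    pub trait TestIndexerExtensions<R>: Indexer<R> {
        // "e": bookkeeping only the in-memory test indexer can perform.
        fn account_nullified(&mut self, merkle_tree_pubkey: [u8; 32], account_hash: &str);
    }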
diff --git a/forester-utils/src/instructions.rs b/forester-utils/src/instructions.rs
index c96d49f49..039391888 100644
--- a/forester-utils/src/instructions.rs
+++ b/forester-utils/src/instructions.rs
@@ -26,8 +26,7 @@ use log::{error, info};
 use reqwest::Client;
 use solana_sdk::pubkey::Pubkey;
 use thiserror::Error;
-
-use crate::indexer::Indexer;
+use light_client::indexer::Indexer;
 
 #[derive(Error, Debug)]
 pub enum ForesterUtilsError {
@@ -41,11 +40,15 @@ pub enum ForesterUtilsError {
     IndexerError(String),
 }
 
-pub async fn create_batch_update_address_tree_instruction_data<R: RpcConnection, I: Indexer<R>>(
+// TODO: replace TestIndexerExtensions with IndexerType
+pub async fn create_batch_update_address_tree_instruction_data<R, I>(
     rpc: &mut R,
     indexer: &mut I,
     merkle_tree_pubkey: Pubkey,
-) -> Result<(InstructionDataBatchNullifyInputs, usize), ForesterUtilsError> {
+) -> Result<(InstructionDataBatchNullifyInputs, usize), ForesterUtilsError> where
+    R: RpcConnection,
+    I: Indexer<R> //+ TestIndexerExtensions<R>,
+{
     let mut merkle_tree_account = rpc.get_account(merkle_tree_pubkey).await
         .map_err(|e| {
             error!(
@@ -143,7 +146,6 @@ pub async fn create_batch_update_address_tree_instruction_data<R, I>(
diff --git a/forester-utils/src/registry.rs b/forester-utils/src/registry.rs
index 813b1ec..5e3a1b2 100644
--- a/forester-utils/src/registry.rs
+++ b/forester-utils/src/registry.rs
diff --git a/forester/Cargo.toml b/forester/Cargo.toml
index 1f1b2c3..4d5e6f7 100644
--- a/forester/Cargo.toml
+++ b/forester/Cargo.toml
diff --git a/forester/src/batch_processor/address.rs b/forester/src/batch_processor/address.rs
--- a/forester/src/batch_processor/address.rs
+++ b/forester/src/batch_processor/address.rs
-pub(crate) async fn process_batch<R: RpcConnection, I: Indexer<R>>(
+pub(crate) async fn process_batch<R: RpcConnection, I: Indexer<R> + IndexerType<R>>(
     context: &BatchContext<R, I>,
 ) -> Result<usize> {
     info!("Processing address batch operation");
@@ -49,10 +50,8 @@ pub(crate) async fn process_batch<R: RpcConnection, I: Indexer<R> + IndexerType<R>>(
     )
     .await?;
 
-    let mut indexer = context.indexer.lock().await;
-    indexer
-        .finalize_batched_address_tree_update(&mut *rpc, context.merkle_tree)
-        .await;
+    finalize_batch_address_tree_update(&mut *rpc, context.indexer.clone(), context.merkle_tree)
+        .await.expect("Failed to finalize batch address tree update");
 
     info!(
         "Address batch processing completed successfully. Batch size: {}",
diff --git a/forester/src/batch_processor/common.rs b/forester/src/batch_processor/common.rs
index 94de5f9f0..083998b4a 100644
--- a/forester/src/batch_processor/common.rs
+++ b/forester/src/batch_processor/common.rs
@@ -1,6 +1,6 @@
 use std::sync::Arc;
 
-use forester_utils::{forester_epoch::TreeType, indexer::Indexer};
+use forester_utils::{forester_epoch::TreeType};
 use light_batched_merkle_tree::{
     batch::{Batch, BatchState},
     merkle_tree::BatchedMerkleTreeAccount,
@@ -11,7 +11,8 @@ use solana_program::pubkey::Pubkey;
 use solana_sdk::signature::Keypair;
 use tokio::sync::Mutex;
 use tracing::info;
-
+use light_client::indexer::Indexer;
+use crate::indexer_type::IndexerType;
 use super::{address, error::Result, state, BatchProcessError};
 
 #[derive(Debug)]
@@ -33,12 +34,12 @@ pub enum BatchReadyState {
 }
 
 #[derive(Debug)]
-pub struct BatchProcessor<R: RpcConnection, I: Indexer<R>> {
+pub struct BatchProcessor<R: RpcConnection, I: Indexer<R> + IndexerType<R>> {
     context: BatchContext<R, I>,
     tree_type: TreeType,
 }
 
-impl<R: RpcConnection, I: Indexer<R>> BatchProcessor<R, I> {
+impl<R: RpcConnection, I: Indexer<R> + IndexerType<R>> BatchProcessor<R, I> {
     pub fn new(context: BatchContext<R, I>, tree_type: TreeType) -> Self {
         Self { context, tree_type }
     }
diff --git a/forester/src/batch_processor/mod.rs b/forester/src/batch_processor/mod.rs
index f4013bef7..75546e93f 100644
--- a/forester/src/batch_processor/mod.rs
+++ b/forester/src/batch_processor/mod.rs
@@ -5,7 +5,7 @@ mod state;
 
 use common::BatchProcessor;
 use error::Result;
-use forester_utils::{forester_epoch::TreeType, indexer::Indexer};
+use forester_utils::{forester_epoch::TreeType};
 use light_client::rpc::RpcConnection;
 use tracing::{info, instrument};
 
@@ -17,7 +17,7 @@
         tree_type = ?tree_type
     )
 )]
-pub async fn process_batched_operations<R: RpcConnection, I: Indexer<R>>(
+pub async fn process_batched_operations<R: RpcConnection, I: Indexer<R> + IndexerType<R>>(
     context: BatchContext<R, I>,
     tree_type: TreeType,
 ) -> Result<usize> {
@@ -28,3 +28,5 @@
 
 pub use common::BatchContext;
 pub use error::BatchProcessError;
+use light_client::indexer::Indexer;
+use crate::indexer_type::IndexerType;
diff --git a/forester/src/batch_processor/state.rs b/forester/src/batch_processor/state.rs
index a3c38d381..57a65724b 100644
--- a/forester/src/batch_processor/state.rs
+++ b/forester/src/batch_processor/state.rs
@@ -1,6 +1,5 @@
 use borsh::BorshSerialize;
 use forester_utils::{
-    indexer::Indexer,
     instructions::{create_append_batch_ix_data, create_nullify_batch_ix_data},
 };
 use light_batched_merkle_tree::event::{BatchAppendEvent, BatchNullifyEvent};
@@ -9,11 +8,12 @@ use light_registry::account_compression_cpi::sdk::{
     create_batch_append_instruction, create_batch_nullify_instruction,
 };
 use solana_sdk::signer::Signer;
-
+use light_client::indexer::Indexer;
 use super::common::BatchContext;
 use crate::batch_processor::error::{BatchProcessError, Result};
+use crate::indexer_type::{update_test_indexer_after_append, update_test_indexer_after_nullification, IndexerType};
 
-pub(crate) async fn perform_append<R: RpcConnection, I: Indexer<R>>(
+pub(crate) async fn perform_append<R: RpcConnection, I: Indexer<R> + IndexerType<R>>(
     context: &BatchContext<R, I>,
     rpc: &mut R,
     num_inserted_zkps: u64,
@@ -46,20 +46,19 @@
     )
     .await?;
 
-    let mut indexer = context.indexer.lock().await;
-    indexer
-        .update_test_indexer_after_append(
-            rpc,
-            context.merkle_tree,
-            context.output_queue,
-            num_inserted_zkps,
-        )
-        .await;
+    update_test_indexer_after_append(
+        rpc,
+        context.indexer.clone(),
+        context.merkle_tree,
+        context.output_queue,
+        num_inserted_zkps,
+    )
+    .await.expect("Failed to update test indexer after append");
 
     Ok(())
 }
 
-pub(crate) async fn perform_nullify<R: RpcConnection, I: Indexer<R>>(
+pub(crate) async fn perform_nullify<R: RpcConnection, I: Indexer<R> + IndexerType<R>>(
     context: &BatchContext<R, I>,
     rpc: &mut R,
 ) -> Result<()> {
@@ -88,12 +87,14 @@
     )
     .await?;
 
-    context
-        .indexer
-        .lock()
-        .await
-        .update_test_indexer_after_nullification(rpc, context.merkle_tree, batch_index)
-        .await;
+
+    update_test_indexer_after_nullification(
+        rpc,
+        context.indexer.clone(),
+        context.merkle_tree,
+        batch_index,
+    )
+    .await.expect("Failed to update test indexer after nullification");
 
     Ok(())
 }
diff --git a/forester/src/epoch_manager.rs b/forester/src/epoch_manager.rs
index 566622c09..2772e1355 100644
--- a/forester/src/epoch_manager.rs
+++ b/forester/src/epoch_manager.rs
@@ -11,7 +11,6 @@ use anyhow::Context;
 use dashmap::DashMap;
 use forester_utils::{
     forester_epoch::{get_epoch_phases, Epoch, TreeAccounts, TreeForesterSchedule, TreeType},
-    indexer::{Indexer, MerkleProof, NewAddressProofWithContext},
 };
 use futures::future::join_all;
 use light_client::{
@@ -32,7 +31,7 @@ use tokio::{
     time::{sleep, Instant},
 };
 use tracing::{debug, error, info, info_span, instrument, warn};
-
+use light_client::indexer::{Indexer, MerkleProof, NewAddressProofWithContext};
 use crate::{
     batch_processor::{process_batched_operations, BatchContext},
     errors::{
@@ -43,7 +42,7 @@ use crate::{
     pagerduty::send_pagerduty_alert,
     queue_helpers::QueueItemData,
     rollover::{
-        is_tree_ready_for_rollover, rollover_address_merkle_tree, rollover_state_merkle_tree,
+        is_tree_ready_for_rollover,
     },
     send_transaction::{
         send_batched_transactions, BuildTransactionBatchConfig, EpochManagerTransactions,
@@ -54,6 +53,7 @@ use crate::{
     tree_finder::TreeFinder,
     ForesterConfig, ForesterEpochInfo, Result,
 };
+use crate::indexer_type::{rollover_address_merkle_tree, rollover_state_merkle_tree, IndexerType};
 
 #[derive(Copy, Clone, Debug)]
 pub struct WorkReport {
@@ -114,7 +114,7 @@ impl<R: RpcConnection, I: Indexer<R>> Clone for EpochManager<R, I> {
     }
 }
 
-impl<R: RpcConnection, I: Indexer<R>> EpochManager<R, I> {
+impl<R: RpcConnection, I: Indexer<R> + IndexerType<R>> EpochManager<R, I> {
     #[allow(clippy::too_many_arguments)]
     pub async fn new(
         config: Arc<ForesterConfig>,
@@ -1152,7 +1152,7 @@
     skip(config, protocol_config, rpc_pool, indexer, shutdown, work_report_sender, slot_tracker),
     fields(forester = %config.payer_keypair.pubkey())
 )]
-pub async fn run_service<R: RpcConnection, I: Indexer<R>>(
+pub async fn run_service<R: RpcConnection, I: Indexer<R> + IndexerType<R>>(
     config: Arc<ForesterConfig>,
     protocol_config: Arc<ProtocolConfig>,
    rpc_pool: Arc<RpcPool<R>>,
diff --git a/forester/src/indexer_type.rs b/forester/src/indexer_type.rs
new file mode 100644
index 000000000..2aa5572f4
--- /dev/null
+++ b/forester/src/indexer_type.rs
@@ -0,0 +1,293 @@
+use std::any::Any;
+use std::sync::Arc;
+use async_trait::async_trait;
+use solana_program::pubkey::Pubkey;
+use solana_sdk::signature::Keypair;
+use solana_sdk::signer::Signer;
+use tokio::sync::Mutex;
+use tracing::info;
+use forester_utils::forester_epoch::TreeAccounts;
+use light_client::indexer::{Indexer, StateMerkleTreeAccounts, StateMerkleTreeBundle};
+use light_client::rpc::RpcConnection;
+use light_hasher::Poseidon;
+use light_merkle_tree_reference::MerkleTree;
+use light_program_test::indexer::{TestIndexer, TestIndexerExtensions};
+use light_sdk::{STATE_MERKLE_TREE_CANOPY_DEPTH, STATE_MERKLE_TREE_HEIGHT};
+use crate::errors::ForesterError;
+use crate::ForesterConfig;
+use crate::photon_indexer::PhotonIndexer;
+use crate::rollover::{perform_address_merkle_tree_rollover, perform_state_merkle_tree_rollover_forester};
+
+mod sealed {
+    use light_client::rpc::merkle_tree::MerkleTreeExt;
+    use super::*;
+    pub trait Sealed {}
+    impl<R: RpcConnection + MerkleTreeExt> Sealed for TestIndexer<R> {}
+    impl<R: RpcConnection> Sealed for PhotonIndexer<R> {}
+}
+
+#[async_trait]
+pub trait IndexerType<R: RpcConnection>: sealed::Sealed {
+    fn handle_state_bundle(
+        indexer: &mut impl Indexer<R>,
+        new_merkle_tree: Pubkey,
+        new_queue: Pubkey,
+        new_cpi_context: Pubkey,
+    ) where
+        Self: Sized;
+
+    fn handle_address_bundle(
+        indexer: &mut impl Indexer<R>,
+        new_merkle_tree: &Keypair,
+        new_queue: &Keypair,
+    ) where
+        Self: Sized;
+
+    async fn finalize_batch_address_tree_update(
+        rpc: &mut R,
+        indexer: &mut impl Indexer<R>,
+        new_merkle_tree_pubkey: Pubkey,
+    ) where
+        Self: Sized;
+
+    async fn update_test_indexer_after_nullification(
+        rpc: &mut R,
+        indexer: &mut impl Indexer<R>,
+        merkle_tree_pubkey: Pubkey,
+        batch_index: usize,
+    ) where
+        Self: Sized;
+
+    async fn update_test_indexer_after_append(
+        rpc: &mut R,
+        indexer: &mut impl Indexer<R>,
+        merkle_tree_pubkey: Pubkey,
+        output_queue: Pubkey,
+        num_inserted_zkps: u64,
+    ) where
+        Self: Sized;
+}
+
+#[async_trait]
+impl<R: RpcConnection + MerkleTreeExt> IndexerType<R> for TestIndexer<R> {
+    fn handle_state_bundle(
+        indexer: &mut impl Indexer<R>,
+        new_merkle_tree: Pubkey,
+        new_queue: Pubkey,
+        new_cpi_context: Pubkey,
+    ) {
+        if let Some(test_indexer) = (indexer as &mut dyn Any).downcast_mut::<TestIndexer<R>>() {
+            let state_bundle = StateMerkleTreeBundle {
+                rollover_fee: 0,
+                accounts: StateMerkleTreeAccounts {
+                    merkle_tree: new_merkle_tree,
+                    nullifier_queue: new_queue,
+                    cpi_context: new_cpi_context,
+                },
+                version: 1,
+                output_queue_elements: vec![],
+                merkle_tree: Box::new(MerkleTree::<Poseidon>::new(
+                    STATE_MERKLE_TREE_HEIGHT,
+                    STATE_MERKLE_TREE_CANOPY_DEPTH,
+                )),
+                input_leaf_indices: vec![],
+            };
+            test_indexer.add_state_bundle(state_bundle);
+        }
+    }
+
+    fn handle_address_bundle(
+        indexer: &mut impl Indexer<R>,
+        new_merkle_tree: &Keypair,
+        new_queue: &Keypair,
+    ) {
+        if let Some(test_indexer) = (indexer as &mut dyn Any).downcast_mut::<TestIndexer<R>>() {
+            test_indexer.add_address_merkle_tree_accounts(new_merkle_tree, new_queue, None);
+        }
+    }
+
+    async fn finalize_batch_address_tree_update(
+        rpc: &mut R,
+        indexer: &mut impl Indexer<R>,
+        new_merkle_tree_pubkey: Pubkey,
+    ) {
+        if let Some(test_indexer) = (indexer as &mut dyn Any).downcast_mut::<TestIndexer<R>>() {
+            test_indexer.finalize_batched_address_tree_update(rpc, new_merkle_tree_pubkey).await;
+        }
+    }
+
+    async fn update_test_indexer_after_nullification(rpc: &mut R, indexer: &mut impl Indexer<R>, merkle_tree_pubkey: Pubkey, batch_index: usize)
+    where
+        Self: Sized
+    {
+        if let Some(test_indexer) = (indexer as &mut dyn Any).downcast_mut::<TestIndexer<R>>() {
+            test_indexer.update_test_indexer_after_nullification(rpc, merkle_tree_pubkey, batch_index).await;
+        }
+    }
+
+    async fn update_test_indexer_after_append(rpc: &mut R, indexer: &mut impl Indexer<R>, merkle_tree_pubkey: Pubkey, output_queue: Pubkey, num_inserted_zkps: u64)
+    where
+        Self: Sized
+    {
+        if let Some(test_indexer) = (indexer as &mut dyn Any).downcast_mut::<TestIndexer<R>>() {
+            test_indexer.update_test_indexer_after_append(rpc, merkle_tree_pubkey, output_queue, num_inserted_zkps).await;
+        }
+    }
+}
+
+// Implementation for PhotonIndexer - no-op
+#[async_trait]
+impl<R: RpcConnection> IndexerType<R> for PhotonIndexer<R> {
+    fn handle_state_bundle(
+        _indexer: &mut impl Indexer<R>,
+        _new_merkle_tree: Pubkey,
+        _new_queue: Pubkey,
+        _new_cpi_context: Pubkey,
+    ) {
+        // No-op for production indexer
+    }
+
+    fn handle_address_bundle(
+        _indexer: &mut impl Indexer<R>,
+        _new_merkle_tree: &Keypair,
+        _new_queue: &Keypair,
+    ) {
+        // No-op for production indexer
+    }
+
+    async fn finalize_batch_address_tree_update(
+        _rpc: &mut R,
+        _indexer: &mut impl Indexer<R>,
+        _new_merkle_tree_pubkey: Pubkey,
+    ) {
+        // No-op for production indexer
+    }
+
+    async fn update_test_indexer_after_nullification(
+        _rpc: &mut R,
+        _indexer: &mut impl Indexer<R>,
+        _merkle_tree_pubkey: Pubkey,
+        _batch_index: usize
+    ) {
+        // No-op for production indexer
+    }
+
+    async fn update_test_indexer_after_append(
+        _rpc: &mut R,
+        _indexer: &mut impl Indexer<R>,
+        _merkle_tree_pubkey: Pubkey,
+        _output_queue: Pubkey,
+        _num_inserted_zkps: u64
+    ) {
+        // No-op for production indexer
+    }
+}
+
+pub async fn rollover_state_merkle_tree<R: RpcConnection, I: Indexer<R> + IndexerType<R>>(
+    config: Arc<ForesterConfig>,
+    rpc: &mut R,
+    indexer: Arc<Mutex<I>>,
+    tree_accounts: &TreeAccounts,
+    epoch: u64,
+) -> Result<(), ForesterError> {
+    let new_nullifier_queue_keypair = Keypair::new();
+    let new_merkle_tree_keypair = Keypair::new();
+    let new_cpi_signature_keypair = Keypair::new();
+
+    let rollover_signature = perform_state_merkle_tree_rollover_forester(
+        &config.payer_keypair,
+        &config.derivation_pubkey,
+        rpc,
+        &new_nullifier_queue_keypair,
+        &new_merkle_tree_keypair,
+        &new_cpi_signature_keypair,
+        &tree_accounts.merkle_tree,
+        &tree_accounts.queue,
+        &Pubkey::default(),
+        epoch)
+        .await?;
+
+    info!("State rollover signature: {:?}", rollover_signature);
+
+    I::handle_state_bundle(
+        &mut *indexer.lock().await,
+        new_merkle_tree_keypair.pubkey(),
+        new_nullifier_queue_keypair.pubkey(),
+        new_cpi_signature_keypair.pubkey(),
+    );
+
+    Ok(())
+}
+
+pub async fn rollover_address_merkle_tree<R: RpcConnection, I: Indexer<R> + IndexerType<R>>(
+    config: Arc<ForesterConfig>,
+    rpc: &mut R,
+    indexer: Arc<Mutex<I>>,
+    tree_accounts: &TreeAccounts,
+    epoch: u64,
+) -> Result<(), ForesterError> {
+    let new_nullifier_queue_keypair = Keypair::new();
+    let new_merkle_tree_keypair = Keypair::new();
+
+    let rollover_signature = perform_address_merkle_tree_rollover(&config.payer_keypair, &config.derivation_pubkey, rpc, &new_nullifier_queue_keypair, &new_merkle_tree_keypair, &tree_accounts.merkle_tree, &tree_accounts.queue, epoch)
+        .await?;
+
+    info!("Address rollover signature: {:?}", rollover_signature);
+
+    I::handle_address_bundle(
+        &mut *indexer.lock().await,
+        &new_merkle_tree_keypair,
+        &new_nullifier_queue_keypair,
+    );
+
+    Ok(())
+}
+
+pub async fn finalize_batch_address_tree_update<R: RpcConnection, I: Indexer<R> + IndexerType<R>>(
+    rpc: &mut R,
+    indexer: Arc<Mutex<I>>,
+    new_merkle_tree_pubkey: Pubkey,
+) -> Result<(), ForesterError> {
+    I::finalize_batch_address_tree_update(
+        &mut *rpc,
+        &mut *indexer.lock().await,
+        new_merkle_tree_pubkey,
+    ).await;
+
+    Ok(())
+}
+
+pub async fn update_test_indexer_after_nullification<R: RpcConnection, I: Indexer<R> + IndexerType<R>>(
+    rpc: &mut R,
+    indexer: Arc<Mutex<I>>,
+    merkle_tree_pubkey: Pubkey,
+    batch_index: usize,
+) -> Result<(), ForesterError> {
+    I::update_test_indexer_after_nullification(
+        &mut *rpc,
+        &mut *indexer.lock().await,
+        merkle_tree_pubkey,
+        batch_index,
+    ).await;
+
+    Ok(())
+}
+
+
+pub async fn update_test_indexer_after_append<R: RpcConnection, I: Indexer<R> + IndexerType<R>>(
+    rpc: &mut R,
+    indexer: Arc<Mutex<I>>,
+    merkle_tree_pubkey: Pubkey,
+    output_queue: Pubkey,
+    num_inserted_zkps: u64,
+) -> Result<(), ForesterError> {
+    I::update_test_indexer_after_append(
+        &mut *rpc,
+        &mut *indexer.lock().await,
+        merkle_tree_pubkey,
+        output_queue,
+        num_inserted_zkps
+    ).await;
+
+    Ok(())
+}
\ No newline at end of file
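
NOTE (illustrative, not part of the patch): indexer_type.rs above dispatches on
the concrete indexer type by downcasting through `&mut dyn Any`, and seals the
trait so only TestIndexer and PhotonIndexer can implement it. A self-contained
sketch of the same pattern with hypothetical names (`IndexerKind`,
`on_rollover`, `handle_rollover` are not part of the patch):

    use std::any::Any;

    mod sealed {
        pub trait Sealed {}
    }

    trait IndexerKind: sealed::Sealed {
        fn on_rollover(&mut self);
    }

    struct TestIndexer { rollovers: u32 }
    struct PhotonIndexer;

    impl sealed::Sealed for TestIndexer {}
    impl sealed::Sealed for PhotonIndexer {}

    impl IndexerKind for TestIndexer {
        // The test indexer mirrors on-chain state, so it must observe rollovers.
        fn on_rollover(&mut self) { self.rollovers += 1; }
    }

    impl IndexerKind for PhotonIndexer {
        // The production indexer learns about rollovers from the chain itself.
        fn on_rollover(&mut self) {}
    }

    // Equivalent of the patch's `(indexer as &mut dyn Any).downcast_mut::<TestIndexer<R>>()`:
    fn handle_rollover(indexer: &mut dyn Any) {
        if let Some(test_indexer) = indexer.downcast_mut::<TestIndexer>() {
            test_indexer.on_rollover();
        }
    }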
diff --git a/forester/src/lib.rs b/forester/src/lib.rs
index d99a5790f..e891acf4e 100644
--- a/forester/src/lib.rs
+++ b/forester/src/lib.rs
@@ -18,6 +18,7 @@ pub mod telemetry;
 pub mod tree_data_sync;
 pub mod tree_finder;
 pub mod utils;
+mod indexer_type;
 
 use std::{sync::Arc, time::Duration};
 
@@ -25,7 +26,6 @@ use account_compression::utils::constants::{ADDRESS_QUEUE_VALUES, STATE_NULLIFIE
 pub use config::{ForesterConfig, ForesterEpochInfo};
 use forester_utils::{
     forester_epoch::{TreeAccounts, TreeType},
-    indexer::Indexer,
 };
 use light_client::{
     rpc::{RpcConnection, SolanaRpcConnection},
@@ -34,7 +34,7 @@ use light_client::{
 use solana_sdk::commitment_config::CommitmentConfig;
 use tokio::sync::{mpsc, oneshot, Mutex};
 use tracing::debug;
-
+use light_client::indexer::Indexer;
 use crate::{
     epoch_manager::{run_service, WorkReport},
     metrics::QUEUE_LENGTH,
@@ -42,6 +42,8 @@ use crate::{
     slot_tracker::SlotTracker,
     utils::get_protocol_config,
 };
+use crate::indexer_type::IndexerType;
+
 
 pub async fn run_queue_info(
     config: Arc<ForesterConfig>,
@@ -80,7 +82,7 @@ pub async fn run_queue_info(
     }
 }
 
-pub async fn run_pipeline<R: RpcConnection, I: Indexer<R>>(
+pub async fn run_pipeline<R: RpcConnection, I: Indexer<R> + IndexerType<R>>(
     config: Arc<ForesterConfig>,
     indexer: Arc<Mutex<I>>,
     shutdown: oneshot::Receiver<()>,
diff --git a/forester/src/photon_indexer.rs b/forester/src/photon_indexer.rs
index cff068796..cad5e3b16 100644
--- a/forester/src/photon_indexer.rs
+++ b/forester/src/photon_indexer.rs
@@ -2,7 +2,6 @@ use std::fmt::Debug;
 
 use account_compression::initialize_address_merkle_tree::Pubkey;
 use async_trait::async_trait;
-use forester_utils::indexer::{Indexer, IndexerError, MerkleProof, NewAddressProofWithContext};
 use light_client::rpc::RpcConnection;
 use photon_api::{
     apis::configuration::{ApiKey, Configuration},
@@ -10,7 +9,8 @@ use photon_api::{
 };
 use solana_sdk::bs58;
 use tracing::debug;
-
+use light_client::indexer::{AddressMerkleTreeBundle, Indexer, IndexerError, MerkleProof, NewAddressProofWithContext, ProofOfLeaf};
+use light_sdk::proof::ProofRpcResult;
 use crate::utils::decode_hash;
 
 pub struct PhotonIndexer<R: RpcConnection> {
@@ -44,6 +44,7 @@ impl<R: RpcConnection> Debug for PhotonIndexer<R> {
 
 #[async_trait]
 impl<R: RpcConnection> Indexer<R> for PhotonIndexer<R> {
+
     async fn get_queue_elements(
         &self,
         _pubkey: [u8; 32],
@@ -53,7 +54,8 @@ impl<R: RpcConnection> Indexer<R> for PhotonIndexer<R> {
     ) -> Result<Vec<[u8; 32]>, IndexerError> {
         unimplemented!()
     }
-    async fn get_subtrees(
+
+    fn get_subtrees(
         &self,
         _merkle_tree_pubkey: [u8; 32],
     ) -> Result<Vec<[u8; 32]>, IndexerError> {
@@ -118,7 +120,7 @@ impl<R: RpcConnection> Indexer<R> for PhotonIndexer<R> {
         }
     }
 
-    async fn get_rpc_compressed_accounts_by_owner(
+    async fn get_compressed_accounts_by_owner(
         &self,
         owner: &Pubkey,
     ) -> Result<Vec<String>, IndexerError> {
@@ -216,4 +218,33 @@ impl<R: RpcConnection> Indexer<R> for PhotonIndexer<R> {
 
         Ok(proofs)
     }
+
+    async fn create_proof_for_compressed_accounts(
+        &mut self,
+        _compressed_accounts: Option<Vec<[u8; 32]>>,
+        _state_merkle_tree_pubkeys: Option<Vec<Pubkey>>,
+        _new_addresses: Option<&[[u8; 32]]>,
+        _address_merkle_tree_pubkeys: Option<Vec<Pubkey>>,
+        _rpc: &mut R,
+    ) -> ProofRpcResult {
+        todo!()
+    }
+
+    fn get_proofs_by_indices(
+        &mut self,
+        _merkle_tree_pubkey: Pubkey,
+        _indices: &[u64]) -> Vec<ProofOfLeaf> {
+        todo!()
+    }
+
+    fn get_leaf_indices_tx_hashes(
+        &mut self,
+        _merkle_tree_pubkey: Pubkey,
+        _zkp_batch_size: usize) -> Vec<(u32, [u8; 32], [u8; 32])> {
+        todo!()
+    }
+
+    fn get_address_merkle_trees(&self) -> &Vec<AddressMerkleTreeBundle> {
+        todo!()
+    }
 }
diff --git a/forester/src/rollover/mod.rs b/forester/src/rollover/mod.rs
index f963a4df8..3fa5b107a 100644
--- a/forester/src/rollover/mod.rs
+++ b/forester/src/rollover/mod.rs
@@ -2,7 +2,7 @@ mod operations;
 mod state;
 
 pub use operations::{
-    get_tree_fullness, is_tree_ready_for_rollover, rollover_address_merkle_tree,
-    rollover_state_merkle_tree,
+    get_tree_fullness, is_tree_ready_for_rollover,
+    perform_address_merkle_tree_rollover, perform_state_merkle_tree_rollover_forester,
 };
 pub use state::RolloverState;
diff --git a/forester/src/rollover/operations.rs b/forester/src/rollover/operations.rs
index f3bfd5e70..d63cd478c 100644
--- a/forester/src/rollover/operations.rs
+++ b/forester/src/rollover/operations.rs
@@ -1,22 +1,17 @@
-use std::sync::Arc;
-
 use account_compression::{
-    utils::constants::{STATE_MERKLE_TREE_CANOPY_DEPTH, STATE_MERKLE_TREE_HEIGHT},
     AddressMerkleTreeAccount, AddressMerkleTreeConfig, AddressQueueConfig, NullifierQueueConfig,
     QueueAccount, StateMerkleTreeAccount, StateMerkleTreeConfig,
 };
 use forester_utils::{
     address_merkle_tree_config::{get_address_bundle_config, get_state_bundle_config},
     create_account_instruction,
-    forester_epoch::{TreeAccounts, TreeType},
+    forester_epoch::TreeType,
     get_concurrent_merkle_tree, get_indexed_merkle_tree,
-    indexer::{AddressMerkleTreeAccounts, Indexer, StateMerkleTreeAccounts, StateMerkleTreeBundle},
     registry::RentExemption,
 };
 use light_batched_merkle_tree::merkle_tree::BatchedMerkleTreeAccount;
 use light_client::rpc::{RpcConnection, RpcError};
 use light_hasher::Poseidon;
-use light_merkle_tree_reference::MerkleTree;
 use light_registry::{
     account_compression_cpi::sdk::{
         create_rollover_address_merkle_tree_instruction,
@@ -28,10 +23,9 @@ use solana_sdk::{
     instruction::Instruction, pubkey::Pubkey, signature::Keypair, signer::Signer,
     transaction::Transaction,
 };
-use tokio::sync::Mutex;
 use tracing::info;
-
-use crate::{errors::ForesterError, ForesterConfig};
+use light_client::indexer::{AddressMerkleTreeAccounts, StateMerkleTreeAccounts};
+use crate::{errors::ForesterError};
 
 enum TreeAccount {
     State(StateMerkleTreeAccount),
@@ -285,56 +279,6 @@ pub async fn is_tree_ready_for_rollover(
     }
 }
 
-pub async fn rollover_state_merkle_tree<R: RpcConnection, I: Indexer<R>>(
-    config: Arc<ForesterConfig>,
-    rpc: &mut R,
-    indexer: Arc<Mutex<I>>,
-    tree_accounts: &TreeAccounts,
-    epoch: u64,
-) -> Result<(), ForesterError> {
-    info!(
-        "Rolling over state merkle tree {:?}",
-        tree_accounts.merkle_tree
-    );
-    let new_nullifier_queue_keypair = Keypair::new();
-    let new_merkle_tree_keypair = Keypair::new();
-    let new_cpi_signature_keypair = Keypair::new();
-
-    let rollover_signature = perform_state_merkle_tree_rollover_forester(
-        &config.payer_keypair,
-        &config.derivation_pubkey,
-        rpc,
-        &new_nullifier_queue_keypair,
-        &new_merkle_tree_keypair,
-        &new_cpi_signature_keypair,
-        &tree_accounts.merkle_tree,
-        &tree_accounts.queue,
-        &Pubkey::default(),
-        epoch,
-    )
-    .await?;
-    info!("State rollover signature: {:?}", rollover_signature);
-
-    let state_bundle = StateMerkleTreeBundle {
-        // TODO: fetch correct fee when this property is used
-        rollover_fee: 0,
-        accounts: StateMerkleTreeAccounts {
-            merkle_tree: new_merkle_tree_keypair.pubkey(),
-            nullifier_queue: new_nullifier_queue_keypair.pubkey(),
-            cpi_context: new_cpi_signature_keypair.pubkey(),
-        },
-        merkle_tree: Box::new(MerkleTree::<Poseidon>::new(
-            STATE_MERKLE_TREE_HEIGHT as usize,
-            STATE_MERKLE_TREE_CANOPY_DEPTH as usize,
-        )),
-        version: 1,
-        input_leaf_indices: vec![],
-        output_queue_elements: vec![],
-    };
-    indexer.lock().await.add_state_bundle(state_bundle);
-    Ok(())
-}
-
 #[allow(clippy::too_many_arguments)]
 pub async fn perform_state_merkle_tree_rollover_forester(
     payer: &Keypair,
@@ -376,35 +320,6 @@ pub async fn perform_state_merkle_tree_rollover_forester(
     context.process_transaction(transaction).await
 }
 
-pub async fn rollover_address_merkle_tree<R: RpcConnection, I: Indexer<R>>(
-    config: Arc<ForesterConfig>,
-    rpc: &mut R,
-    indexer: Arc<Mutex<I>>,
-    tree_data: &TreeAccounts,
-    epoch: u64,
-) -> Result<(), ForesterError> {
-    let new_nullifier_queue_keypair = Keypair::new();
-    let new_merkle_tree_keypair = Keypair::new();
-    let rollover_signature = perform_address_merkle_tree_rollover(
-        &config.payer_keypair,
-        &config.derivation_pubkey,
-        rpc,
-        &new_nullifier_queue_keypair,
-        &new_merkle_tree_keypair,
-        &tree_data.merkle_tree,
-        &tree_data.queue,
-        epoch,
-    )
-    .await?;
-    info!("Address rollover signature: {:?}", rollover_signature);
-
-    indexer.lock().await.add_address_merkle_tree_accounts(
-        &new_merkle_tree_keypair,
-        &new_nullifier_queue_keypair,
-        None,
-    );
-    Ok(())
-}
 
 #[allow(clippy::too_many_arguments)]
 pub async fn perform_address_merkle_tree_rollover(
diff --git a/forester/src/send_transaction.rs b/forester/src/send_transaction.rs
index 054ea2d55..1c4b9653e 100644
--- a/forester/src/send_transaction.rs
+++ b/forester/src/send_transaction.rs
@@ -7,7 +7,6 @@ use account_compression::utils::constants::{
 use async_trait::async_trait;
 use forester_utils::{
     forester_epoch::{TreeAccounts, TreeType},
-    indexer::Indexer,
 };
 use futures::future::join_all;
 use light_client::{
@@ -32,7 +31,7 @@ use tokio::{
     time::{sleep, Instant},
 };
 use tracing::{debug, warn};
-
+use light_client::indexer::Indexer;
 use crate::{
     config::QueueConfig,
     epoch_manager::{MerkleProofType, WorkItem},
diff --git a/program-tests/utils/Cargo.toml b/program-tests/utils/Cargo.toml
index 6fdf366af..27c871923 100644
--- a/program-tests/utils/Cargo.toml
+++ b/program-tests/utils/Cargo.toml
@@ -40,6 +40,7 @@ light-verifier = { workspace = true }
 light-utils = { workspace = true }
 light-program-test = { workspace = true }
 forester-utils = { workspace = true }
+light-sdk = { workspace = true }
 memoffset = "0.9.1"
 rand = "0.8"
 photon-api = { workspace = true }
diff --git a/program-tests/utils/src/conversions.rs b/program-tests/utils/src/conversions.rs
new file mode 100644
index 000000000..eaa0777a3
--- /dev/null
+++ b/program-tests/utils/src/conversions.rs
@@ -0,0 +1,239 @@
+use light_sdk::{self as sdk, proof::CompressedProof};
+use light_system_program::invoke::processor::CompressedProof as ProgramCompressedProof;
+use light_system_program::invoke::OutputCompressedAccountWithPackedContext as ProgramOutputCompressedAccountWithPackedContext;
+use light_system_program::sdk::compressed_account::{
+    CompressedAccount as ProgramCompressedAccount,
+    CompressedAccountData as ProgramCompressedAccountData,
+    CompressedAccountWithMerkleContext as ProgramCompressedAccountWithMerkleContext,
+    MerkleContext as ProgramMerkleContext, QueueIndex as ProgramQueueIndex,
+};
+
+use light_compressed_token::{
+    token_data::AccountState as ProgramAccountState, TokenData as ProgramTokenData,
+};
+use light_system_program::sdk::event::MerkleTreeSequenceNumber as ProgramMerkleTreeSequenceNumber;
+use light_system_program::sdk::event::PublicTransactionEvent as ProgramPublicTransactionEvent;
+
+pub fn sdk_to_program_queue_index(
+    sdk_queue_index: sdk::merkle_context::QueueIndex,
+) -> ProgramQueueIndex {
+    ProgramQueueIndex {
+        queue_id: sdk_queue_index.queue_id,
+        index: sdk_queue_index.index,
+    }
+}
+
+pub fn program_to_sdk_queue_index(
+    program_queue_index: ProgramQueueIndex,
+) -> sdk::merkle_context::QueueIndex {
+    sdk::merkle_context::QueueIndex {
+        queue_id: program_queue_index.queue_id,
+        index: program_queue_index.index,
+    }
+}
+
+pub fn sdk_to_program_merkle_context(
+    sdk_merkle_context: sdk::merkle_context::MerkleContext,
+) -> ProgramMerkleContext {
+    ProgramMerkleContext {
+        merkle_tree_pubkey: sdk_merkle_context.merkle_tree_pubkey,
+        nullifier_queue_pubkey: sdk_merkle_context.nullifier_queue_pubkey,
+        leaf_index: sdk_merkle_context.leaf_index,
+        queue_index: sdk_merkle_context
+            .queue_index
+            .map(sdk_to_program_queue_index),
+    }
+}
+
+pub fn program_to_sdk_merkle_context(
+    program_merkle_context: ProgramMerkleContext,
+) -> sdk::merkle_context::MerkleContext {
+    sdk::merkle_context::MerkleContext {
+        merkle_tree_pubkey: program_merkle_context.merkle_tree_pubkey,
+        nullifier_queue_pubkey: program_merkle_context.nullifier_queue_pubkey,
+        leaf_index: program_merkle_context.leaf_index,
+        queue_index: program_merkle_context
+            .queue_index
+            .map(program_to_sdk_queue_index),
+    }
+}
+
+pub fn sdk_to_program_compressed_account_data(
+    sdk_data: sdk::compressed_account::CompressedAccountData,
+) -> ProgramCompressedAccountData {
+    ProgramCompressedAccountData {
+        discriminator: sdk_data.discriminator,
+        data: sdk_data.data,
+        data_hash: sdk_data.data_hash,
+    }
+}
+
+pub fn program_to_sdk_compressed_account_data(
+    program_data: ProgramCompressedAccountData,
+) -> sdk::compressed_account::CompressedAccountData {
+    sdk::compressed_account::CompressedAccountData {
+        discriminator: program_data.discriminator,
+        data: program_data.data,
+        data_hash: program_data.data_hash,
+    }
+}
+
+pub fn sdk_to_program_compressed_account(
+    sdk_account: sdk::compressed_account::CompressedAccount,
+) -> ProgramCompressedAccount {
+    ProgramCompressedAccount {
+        owner: sdk_account.owner,
+        lamports: sdk_account.lamports,
+        address: sdk_account.address,
+        data: sdk_account.data.map(sdk_to_program_compressed_account_data),
+    }
+}
+
+pub fn program_to_sdk_compressed_account(
+    program_account: ProgramCompressedAccount,
+) -> sdk::compressed_account::CompressedAccount {
+    sdk::compressed_account::CompressedAccount {
+        owner: program_account.owner,
+        lamports: program_account.lamports,
+        address: program_account.address,
+        data: program_account
+            .data
+            .map(program_to_sdk_compressed_account_data),
+    }
+}
+
+pub fn sdk_to_program_compressed_account_with_merkle_context(
+    sdk_account: sdk::compressed_account::CompressedAccountWithMerkleContext,
+) -> ProgramCompressedAccountWithMerkleContext {
+    ProgramCompressedAccountWithMerkleContext {
+        compressed_account: sdk_to_program_compressed_account(sdk_account.compressed_account),
+        merkle_context: sdk_to_program_merkle_context(sdk_account.merkle_context),
+    }
+}
+
+pub fn program_to_sdk_compressed_account_with_merkle_context(
+    program_account: ProgramCompressedAccountWithMerkleContext,
+) -> sdk::compressed_account::CompressedAccountWithMerkleContext {
+    sdk::compressed_account::CompressedAccountWithMerkleContext {
+        compressed_account: program_to_sdk_compressed_account(program_account.compressed_account),
+        merkle_context: program_to_sdk_merkle_context(program_account.merkle_context),
+    }
+}
+
+pub fn sdk_to_program_account_state(sdk_state: sdk::token::AccountState) -> ProgramAccountState {
+    match sdk_state {
+        sdk::token::AccountState::Initialized => ProgramAccountState::Initialized,
+        sdk::token::AccountState::Frozen => ProgramAccountState::Frozen,
+    }
+}
+
+pub fn program_to_sdk_account_state(
+    program_state: ProgramAccountState,
+) -> sdk::token::AccountState {
+    match program_state {
+        ProgramAccountState::Initialized => sdk::token::AccountState::Initialized,
+        ProgramAccountState::Frozen => sdk::token::AccountState::Frozen,
+    }
+}
+
+pub fn sdk_to_program_token_data(sdk_token: sdk::token::TokenData) -> ProgramTokenData {
+    ProgramTokenData {
+        mint: sdk_token.mint,
+        owner: sdk_token.owner,
+        amount: sdk_token.amount,
+        delegate: sdk_token.delegate,
+        state: sdk_to_program_account_state(sdk_token.state),
+        tlv: sdk_token.tlv,
+    }
+}
+
+pub fn program_to_sdk_token_data(program_token: ProgramTokenData) -> sdk::token::TokenData {
+    sdk::token::TokenData {
+        mint: program_token.mint,
+        owner: program_token.owner,
+        amount: program_token.amount,
+        delegate: program_token.delegate,
+        state: program_to_sdk_account_state(program_token.state),
+        tlv: program_token.tlv,
+    }
+}
+
+pub fn program_to_sdk_compressed_proof(program_proof: ProgramCompressedProof) -> CompressedProof {
+    CompressedProof {
+        a: program_proof.a,
+        b: program_proof.b,
+        c: program_proof.c,
+    }
+}
+
+pub fn sdk_to_program_compressed_proof(sdk_proof: CompressedProof) -> ProgramCompressedProof {
+    ProgramCompressedProof {
+        a: sdk_proof.a,
+        b: sdk_proof.b,
+        c: sdk_proof.c,
+    }
+}
+
+pub fn sdk_to_program_public_transaction_event(
+    event: sdk::event::PublicTransactionEvent,
+) -> ProgramPublicTransactionEvent {
+    ProgramPublicTransactionEvent {
+        input_compressed_account_hashes: event.input_compressed_account_hashes,
+        output_compressed_account_hashes: event.output_compressed_account_hashes,
+        output_compressed_accounts: event
+            .output_compressed_accounts
+            .into_iter()
+            .map(|account| ProgramOutputCompressedAccountWithPackedContext {
+                compressed_account: sdk_to_program_compressed_account(account.compressed_account),
+                merkle_tree_index: account.merkle_tree_index,
+            })
+            .collect(),
+        output_leaf_indices: event.output_leaf_indices,
+        sequence_numbers: event
+            .sequence_numbers
+            .into_iter()
+            .map(|sequence_number| ProgramMerkleTreeSequenceNumber {
+                pubkey: sequence_number.pubkey,
+                seq: sequence_number.seq,
+            })
+            .collect(),
+        relay_fee: event.relay_fee,
+        is_compress: event.is_compress,
+        compress_or_decompress_lamports: event.compress_or_decompress_lamports,
+        pubkey_array: event.pubkey_array,
+        message: event.message,
+    }
+}
+
+pub fn program_to_sdk_public_transaction_event(
+    event: ProgramPublicTransactionEvent,
+) -> sdk::event::PublicTransactionEvent {
+    sdk::event::PublicTransactionEvent {
+        input_compressed_account_hashes: event.input_compressed_account_hashes,
+        output_compressed_account_hashes: event.output_compressed_account_hashes,
+        output_compressed_accounts: event
+            .output_compressed_accounts
+            .into_iter()
+            .map(
+                |account| sdk::compressed_account::OutputCompressedAccountWithPackedContext {
+                    compressed_account: program_to_sdk_compressed_account(
+                        account.compressed_account,
+                    ),
+                    merkle_tree_index: account.merkle_tree_index,
+                },
+            )
+            .collect(),
+        output_leaf_indices: event.output_leaf_indices,
+        sequence_numbers: event
+            .sequence_numbers
+            .into_iter()
+            .map(|sequence_number| sdk::event::MerkleTreeSequenceNumber {
+                pubkey: sequence_number.pubkey,
+                seq: sequence_number.seq,
+            })
+            .collect(),
+        relay_fee: event.relay_fee,
+        is_compress: event.is_compress,
+        compress_or_decompress_lamports: event.compress_or_decompress_lamports,
+        pubkey_array: event.pubkey_array,
+        message: event.message,
+    }
+}
\ No newline at end of file
+        pubkey_array: event.pubkey_array,
+        message: event.message,
+    }
+}
\ No newline at end of file
diff --git a/program-tests/utils/src/e2e_test_env.rs b/program-tests/utils/src/e2e_test_env.rs
index 5c5a43ee2..0b21d30f9 100644
--- a/program-tests/utils/src/e2e_test_env.rs
+++ b/program-tests/utils/src/e2e_test_env.rs
@@ -72,10 +72,6 @@ use forester_utils::{
     address_merkle_tree_config::{address_tree_ready_for_rollover, state_tree_ready_for_rollover},
     airdrop_lamports,
     forester_epoch::{Epoch, Forester, TreeAccounts, TreeType},
-    indexer::{
-        AddressMerkleTreeAccounts, AddressMerkleTreeBundle, Indexer, StateMerkleTreeAccounts,
-        StateMerkleTreeBundle, TokenDataWithContext,
-    },
     registry::register_test_forester,
     AccountZeroCopy,
 };
@@ -125,7 +121,9 @@ use solana_sdk::{
     signer::{SeedDerivable, Signer},
 };
 use spl_token::solana_program::native_token::LAMPORTS_PER_SOL;
-
+use light_client::indexer::{AddressMerkleTreeAccounts, AddressMerkleTreeBundle, Indexer, StateMerkleTreeAccounts, StateMerkleTreeBundle, TokenDataWithMerkleContext};
+use light_client::rpc::merkle_tree::MerkleTreeExt;
+use light_program_test::indexer::{TestIndexer, TestIndexerExtensions};
 use crate::{
     address_tree_rollover::{
         assert_rolled_over_address_merkle_tree_and_queue,
@@ -136,7 +134,6 @@ use crate::{
         assert_finalized_epoch_registration, assert_report_work, fetch_epoch_and_forester_pdas,
     },
     create_address_merkle_tree_and_queue_account_with_assert,
-    indexer::TestIndexer,
     spl::{
         approve_test, burn_test, compress_test, compressed_transfer_test, create_mint_helper,
         create_token_account, decompress_test, freeze_test, mint_tokens_helper, revoke_test,
@@ -212,7 +209,7 @@ impl Stats {
         println!("Finalized registrations {}", self.finalized_registrations);
     }
 }
-pub async fn init_program_test_env<R: RpcConnection>(
+pub async fn init_program_test_env<R: RpcConnection + MerkleTreeExt>(
     rpc: R,
    env_accounts: &EnvAccounts,
     skip_prover: bool,
@@ -288,7 +285,7 @@ pub struct TestForester {
     is_registered: Option<u64>,
 }
 
-pub struct E2ETestEnv<R: RpcConnection, I: Indexer<R>> {
+pub struct E2ETestEnv<R: RpcConnection + MerkleTreeExt, I: Indexer<R> + TestIndexerExtensions<R>> {
     pub payer: Keypair,
     pub governance_keypair: Keypair,
     pub indexer: I,
@@ -311,7 +308,7 @@ pub struct E2ETestEnv<R: RpcConnection, I: Indexer<R>> {
     pub registration_epoch: u64,
 }
 
-impl<R: RpcConnection, I: Indexer<R>> E2ETestEnv<R, I>
+impl<R: RpcConnection + MerkleTreeExt, I: Indexer<R> + TestIndexerExtensions<R>> E2ETestEnv<R, I>
 where
     R: RpcConnection,
     I: Indexer<R>,
@@ -2314,7 +2311,7 @@
     pub async fn select_random_compressed_token_accounts(
         &mut self,
         user: &Pubkey,
-    ) -> (Pubkey, Vec<TokenDataWithContext>) {
+    ) -> (Pubkey, Vec<TokenDataWithMerkleContext>) {
         self.select_random_compressed_token_accounts_delegated(user, false, None, false)
             .await
     }
@@ -2322,7 +2319,7 @@
     pub async fn select_random_compressed_token_accounts_frozen(
         &mut self,
         user: &Pubkey,
-    ) -> (Pubkey, Vec<TokenDataWithContext>) {
+    ) -> (Pubkey, Vec<TokenDataWithMerkleContext>) {
         self.select_random_compressed_token_accounts_delegated(user, false, None, true)
             .await
     }
@@ -2333,7 +2330,7 @@
         delegated: bool,
         delegate: Option<Pubkey>,
         frozen: bool,
-    ) -> (Pubkey, Vec<TokenDataWithContext>) {
+    ) -> (Pubkey, Vec<TokenDataWithMerkleContext>) {
         let user_token_accounts = &mut self.indexer.get_compressed_token_accounts_by_owner(user);
         // clean up dust so that we don't run into issues that account balances are too low
         user_token_accounts.retain(|t| t.token_data.amount > 1000);
@@ -2423,14 +2420,14 @@
                 token_account.token_data.mint == mint && tree_version == version
             })
             .map(|token_account| (*token_account).clone())
-            .collect::<Vec<TokenDataWithContext>>();
+            .collect::<Vec<TokenDataWithMerkleContext>>();
         }
         if delegated {
             token_accounts_with_mint = token_accounts_with_mint
                 .iter()
                 .filter(|token_account| token_account.token_data.delegate.is_some())
                 .map(|token_account| (*token_account).clone())
-                .collect::<Vec<TokenDataWithContext>>();
+                .collect::<Vec<TokenDataWithMerkleContext>>();
             if
token_accounts_with_mint.is_empty() { return (mint, Vec::new()); } @@ -2440,14 +2437,14 @@ where .iter() .filter(|token_account| token_account.token_data.delegate.unwrap() == delegate) .map(|token_account| (*token_account).clone()) - .collect::>(); + .collect::>(); } if frozen { token_accounts_with_mint = token_accounts_with_mint .iter() .filter(|token_account| token_account.token_data.state == AccountState::Frozen) .map(|token_account| (*token_account).clone()) - .collect::>(); + .collect::>(); if token_accounts_with_mint.is_empty() { return (mint, Vec::new()); } @@ -2456,7 +2453,7 @@ where .iter() .filter(|token_account| token_account.token_data.state == AccountState::Initialized) .map(|token_account| (*token_account).clone()) - .collect::>(); + .collect::>(); } let range_end = if token_accounts_with_mint.len() == 1 { 1 diff --git a/program-tests/utils/src/indexer/mod.rs b/program-tests/utils/src/indexer/mod.rs index 4cb99d00e..6a14bdebe 100644 --- a/program-tests/utils/src/indexer/mod.rs +++ b/program-tests/utils/src/indexer/mod.rs @@ -1,2 +1,2 @@ -pub mod test_indexer; -pub use test_indexer::TestIndexer; +// pub mod test_indexer; +// pub use test_indexer::TestIndexer; diff --git a/program-tests/utils/src/indexer/test_indexer.rs b/program-tests/utils/src/indexer/test_indexer.rs index f057264ae..d0ed4242e 100644 --- a/program-tests/utils/src/indexer/test_indexer.rs +++ b/program-tests/utils/src/indexer/test_indexer.rs @@ -1,1779 +1,1779 @@ -use std::{ - future::Future, - marker::PhantomData, - sync::{Arc, Mutex}, - time::Duration, -}; - -use account_compression::{ - utils::constants::{STATE_MERKLE_TREE_CANOPY_DEPTH, STATE_MERKLE_TREE_HEIGHT}, - AddressMerkleTreeAccount, AddressMerkleTreeConfig, AddressQueueConfig, NullifierQueueConfig, - StateMerkleTreeAccount, StateMerkleTreeConfig, -}; -use anchor_lang::AnchorDeserialize; -use async_trait::async_trait; -use forester_utils::{ - get_concurrent_merkle_tree, get_indexed_merkle_tree, - indexer::{ - AddressMerkleTreeAccounts, AddressMerkleTreeBundle, BatchedTreeProofRpcResult, Indexer, - IndexerError, MerkleProof, NewAddressProofWithContext, ProofOfLeaf, ProofRpcResult, - StateMerkleTreeAccounts, StateMerkleTreeBundle, TokenDataWithContext, - }, - AccountZeroCopy, -}; -use light_batched_merkle_tree::{ - batch::BatchState, - constants::{DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, DEFAULT_BATCH_STATE_TREE_HEIGHT}, - initialize_address_tree::InitAddressTreeAccountsInstructionData, - initialize_state_tree::InitStateTreeAccountsInstructionData, - merkle_tree::BatchedMerkleTreeAccount, - queue::{BatchedQueueAccount, BatchedQueueMetadata}, -}; -use light_client::{ - rpc::{RpcConnection, RpcError}, - transaction_params::FeeConfig, -}; -use light_compressed_token::{ - constants::TOKEN_COMPRESSED_ACCOUNT_DISCRIMINATOR, get_token_pool_pda, - mint_sdk::create_create_token_pool_instruction, TokenData, -}; -use light_hasher::{Hasher, Poseidon}; -use light_indexed_merkle_tree::{array::IndexedArray, reference::IndexedMerkleTree}; -use light_macros::pubkey; -use light_merkle_tree_reference::MerkleTree; -use light_program_test::{ - test_batch_forester::{create_batch_address_merkle_tree, create_batched_state_merkle_tree}, - test_env::{ - create_address_merkle_tree_and_queue_account, create_state_merkle_tree_and_queue_account, - EnvAccounts, BATCHED_OUTPUT_QUEUE_TEST_KEYPAIR, - }, -}; -use light_prover_client::{ - gnark::{ - batch_append_with_proofs_json_formatter::BatchAppendWithProofsInputsJson, - combined_json_formatter::CombinedJsonStruct, - 
combined_json_formatter_legacy::CombinedJsonStruct as CombinedJsonStructLegacy, - constants::{PROVE_PATH, SERVER_ADDRESS}, - helpers::{ - big_int_to_string, spawn_prover, string_to_big_int, ProofType, ProverConfig, ProverMode, - }, - inclusion_json_formatter::BatchInclusionJsonStruct, - inclusion_json_formatter_legacy::BatchInclusionJsonStruct as BatchInclusionJsonStructLegacy, - non_inclusion_json_formatter::BatchNonInclusionJsonStruct, - non_inclusion_json_formatter_legacy::BatchNonInclusionJsonStruct as BatchNonInclusionJsonStructLegacy, - proof_helpers::{compress_proof, deserialize_gnark_proof_json, proof_from_json_struct}, - }, - helpers::bigint_to_u8_32, - inclusion::merkle_inclusion_proof_inputs::{InclusionMerkleProofInputs, InclusionProofInputs}, - inclusion_legacy::merkle_inclusion_proof_inputs::InclusionProofInputs as InclusionProofInputsLegacy, - non_inclusion::merkle_non_inclusion_proof_inputs::{ - get_non_inclusion_proof_inputs, NonInclusionProofInputs, - }, - non_inclusion_legacy::merkle_non_inclusion_proof_inputs::NonInclusionProofInputs as NonInclusionProofInputsLegacy, -}; -use light_system_program::{ - invoke::processor::CompressedProof, - sdk::{ - compressed_account::{CompressedAccountWithMerkleContext, MerkleContext, QueueIndex}, - event::PublicTransactionEvent, - }, -}; -use light_utils::{ - bigint::bigint_to_be_bytes_array, - hashchain::{create_hash_chain_from_slice, create_tx_hash}, -}; -use log::{debug, info, warn}; -use num_bigint::{BigInt, BigUint}; -use num_traits::ops::bytes::FromBytes; -use reqwest::Client; -use solana_sdk::{ - bs58, instruction::Instruction, program_pack::Pack, pubkey::Pubkey, signature::Keypair, - signer::Signer, -}; -use spl_token::instruction::initialize_mint; - -use crate::{ - create_address_merkle_tree_and_queue_account_with_assert, e2e_test_env::KeypairActionConfig, - spl::create_initialize_mint_instructions, -}; - -#[derive(Debug)] -pub struct TestIndexer { - pub state_merkle_trees: Vec, - pub address_merkle_trees: Vec, - pub payer: Keypair, - pub group_pda: Pubkey, - pub compressed_accounts: Vec, - pub nullified_compressed_accounts: Vec, - pub token_compressed_accounts: Vec, - pub token_nullified_compressed_accounts: Vec, - pub events: Vec, - pub prover_config: Option, - phantom: PhantomData, -} - -#[async_trait] -impl Indexer for TestIndexer { - async fn get_queue_elements( - &self, - pubkey: [u8; 32], - _batch: u64, - start_offset: u64, - end_offset: u64, - ) -> Result, IndexerError> { - let pubkey = Pubkey::new_from_array(pubkey); - let address_tree_bundle = self - .address_merkle_trees - .iter() - .find(|x| x.accounts.merkle_tree == pubkey); - if let Some(address_tree_bundle) = address_tree_bundle { - return Ok(address_tree_bundle.queue_elements - [start_offset as usize..end_offset as usize] - .to_vec()); - } - let state_tree_bundle = self - .state_merkle_trees - .iter() - .find(|x| x.accounts.merkle_tree == pubkey); - if let Some(state_tree_bundle) = state_tree_bundle { - return Ok(state_tree_bundle.output_queue_elements - [start_offset as usize..end_offset as usize] - .to_vec()); - } - Err(IndexerError::Custom("Merkle tree not found".to_string())) - } - - fn get_proof_by_index(&mut self, merkle_tree_pubkey: Pubkey, index: u64) -> ProofOfLeaf { - let mut bundle = self - .state_merkle_trees - .iter_mut() - .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) - .unwrap(); - - while bundle.merkle_tree.leaves().len() <= index as usize { - bundle.merkle_tree.append(&[0u8; 32]).unwrap(); - } - - let leaf = match 
bundle.merkle_tree.get_leaf(index as usize) { - Ok(leaf) => leaf, - Err(_) => { - bundle.merkle_tree.append(&[0u8; 32]).unwrap(); - bundle.merkle_tree.get_leaf(index as usize).unwrap() - } - }; - - let proof = bundle - .merkle_tree - .get_proof_of_leaf(index as usize, true) - .unwrap() - .to_vec(); - - ProofOfLeaf { leaf, proof } - } - - fn get_proofs_by_indices( - &mut self, - merkle_tree_pubkey: Pubkey, - indices: &[u64], - ) -> Vec { - indices - .iter() - .map(|&index| self.get_proof_by_index(merkle_tree_pubkey, index)) - .collect() - } - - /// leaf index, leaf, tx hash - fn get_leaf_indices_tx_hashes( - &mut self, - merkle_tree_pubkey: Pubkey, - zkp_batch_size: usize, - ) -> Vec<(u32, [u8; 32], [u8; 32])> { - let mut state_merkle_tree_bundle = self - .state_merkle_trees - .iter_mut() - .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) - .unwrap(); - - state_merkle_tree_bundle.input_leaf_indices[..zkp_batch_size].to_vec() - } - - async fn get_subtrees( - &self, - merkle_tree_pubkey: [u8; 32], - ) -> Result, IndexerError> { - let merkle_tree_pubkey = Pubkey::new_from_array(merkle_tree_pubkey); - let address_tree_bundle = self - .address_merkle_trees - .iter() - .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey); - if let Some(address_tree_bundle) = address_tree_bundle { - Ok(address_tree_bundle.merkle_tree.merkle_tree.get_subtrees()) - } else { - let state_tree_bundle = self - .state_merkle_trees - .iter() - .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey); - if let Some(state_tree_bundle) = state_tree_bundle { - Ok(state_tree_bundle.merkle_tree.get_subtrees()) - } else { - Err(IndexerError::Custom("Merkle tree not found".to_string())) - } - } - } - - async fn get_multiple_compressed_account_proofs( - &self, - hashes: Vec, - ) -> Result, IndexerError> { - info!("Getting proofs for {:?}", hashes); - let mut proofs: Vec = Vec::new(); - hashes.iter().for_each(|hash| { - let hash_array: [u8; 32] = bs58::decode(hash) - .into_vec() - .unwrap() - .as_slice() - .try_into() - .unwrap(); - - self.state_merkle_trees.iter().for_each(|tree| { - if let Some(leaf_index) = tree.merkle_tree.get_leaf_index(&hash_array) { - let proof = tree - .merkle_tree - .get_proof_of_leaf(leaf_index, false) - .unwrap(); - proofs.push(MerkleProof { - hash: hash.clone(), - leaf_index: leaf_index as u64, - merkle_tree: tree.accounts.merkle_tree.to_string(), - proof: proof.to_vec(), - root_seq: tree.merkle_tree.sequence_number as u64, - }); - } - }) - }); - Ok(proofs) - } - - async fn get_rpc_compressed_accounts_by_owner( - &self, - owner: &Pubkey, - ) -> Result, IndexerError> { - let result = self.get_compressed_accounts_by_owner(owner); - let mut hashes: Vec = Vec::new(); - for account in result.iter() { - let hash = account.hash()?; - let bs58_hash = bs58::encode(hash).into_string(); - hashes.push(bs58_hash); - } - Ok(hashes) - } - - async fn get_multiple_new_address_proofs( - &self, - merkle_tree_pubkey: [u8; 32], - addresses: Vec<[u8; 32]>, - ) -> Result>, IndexerError> { - self._get_multiple_new_address_proofs(merkle_tree_pubkey, addresses, false) - .await - } - - async fn get_multiple_new_address_proofs_full( - &self, - merkle_tree_pubkey: [u8; 32], - addresses: Vec<[u8; 32]>, - ) -> Result>, IndexerError> { - self._get_multiple_new_address_proofs(merkle_tree_pubkey, addresses, true) - .await - } - - fn account_nullified(&mut self, merkle_tree_pubkey: Pubkey, account_hash: &str) { - let decoded_hash: [u8; 32] = bs58::decode(account_hash) - .into_vec() - .unwrap() - .as_slice() - .try_into() - 
.unwrap(); - - if let Some(state_tree_bundle) = self - .state_merkle_trees - .iter_mut() - .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) - { - if let Some(leaf_index) = state_tree_bundle.merkle_tree.get_leaf_index(&decoded_hash) { - state_tree_bundle - .merkle_tree - .update(&[0u8; 32], leaf_index) - .unwrap(); - } - } - } - - fn address_tree_updated( - &mut self, - merkle_tree_pubkey: Pubkey, - context: &NewAddressProofWithContext<16>, - ) { - info!("Updating address tree..."); - let mut address_tree_bundle: &mut AddressMerkleTreeBundle = self - .address_merkle_trees - .iter_mut() - .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) - .unwrap(); - - let new_low_element = context.new_low_element.clone().unwrap(); - let new_element = context.new_element.clone().unwrap(); - let new_element_next_value = context.new_element_next_value.clone().unwrap(); - address_tree_bundle - .merkle_tree - .update(&new_low_element, &new_element, &new_element_next_value) - .unwrap(); - address_tree_bundle - .indexed_array - .append_with_low_element_index(new_low_element.index, &new_element.value) - .unwrap(); - info!("Address tree updated"); - } - - fn get_state_merkle_tree_accounts(&self, pubkeys: &[Pubkey]) -> Vec { - pubkeys - .iter() - .map(|x| { - self.state_merkle_trees - .iter() - .find(|y| y.accounts.merkle_tree == *x || y.accounts.nullifier_queue == *x) - .unwrap() - .accounts - }) - .collect::>() - } - - fn add_event_and_compressed_accounts( - &mut self, - slot: u64, - event: &PublicTransactionEvent, - ) -> ( - Vec, - Vec, - ) { - let mut compressed_accounts = Vec::new(); - let mut token_compressed_accounts = Vec::new(); - let event_inputs_len = event.input_compressed_account_hashes.len(); - let event_outputs_len = event.output_compressed_account_hashes.len(); - for i in 0..std::cmp::max(event_inputs_len, event_outputs_len) { - self.process_v1_compressed_account( - slot, - event, - i, - &mut token_compressed_accounts, - &mut compressed_accounts, - ); - } - - self.events.push(event.clone()); - (compressed_accounts, token_compressed_accounts) - } - - fn get_state_merkle_trees(&self) -> &Vec { - &self.state_merkle_trees - } - - fn get_state_merkle_trees_mut(&mut self) -> &mut Vec { - &mut self.state_merkle_trees - } - - fn get_address_merkle_trees(&self) -> &Vec { - &self.address_merkle_trees - } - - fn get_address_merkle_trees_mut(&mut self) -> &mut Vec { - &mut self.address_merkle_trees - } - - fn get_token_compressed_accounts(&self) -> &Vec { - &self.token_compressed_accounts - } - - fn get_payer(&self) -> &Keypair { - &self.payer - } - - fn get_group_pda(&self) -> &Pubkey { - &self.group_pda - } - - async fn create_proof_for_compressed_accounts( - &mut self, - compressed_accounts: Option>, - state_merkle_tree_pubkeys: Option>, - new_addresses: Option<&[[u8; 32]]>, - address_merkle_tree_pubkeys: Option>, - rpc: &mut R, - ) -> ProofRpcResult { - if compressed_accounts.is_some() - && ![1usize, 2usize, 3usize, 4usize, 8usize] - .contains(&compressed_accounts.as_ref().unwrap().len()) - { - panic!( - "compressed_accounts must be of length 1, 2, 3, 4 or 8 != {}", - compressed_accounts.unwrap().len() - ) - } - if new_addresses.is_some() && ![1usize, 2usize].contains(&new_addresses.unwrap().len()) { - panic!("new_addresses must be of length 1, 2") - } - let client = Client::new(); - let (root_indices, address_root_indices, json_payload) = - match (compressed_accounts, new_addresses) { - (Some(accounts), None) => { - let (payload, payload_legacy, indices) = self - .process_inclusion_proofs( - 
&state_merkle_tree_pubkeys.unwrap(), - &accounts, - rpc, - ) - .await; - if let Some(payload) = payload { - (indices, Vec::new(), payload.to_string()) - } else { - (indices, Vec::new(), payload_legacy.unwrap().to_string()) - } - } - (None, Some(addresses)) => { - let (payload, payload_legacy, indices) = self - .process_non_inclusion_proofs( - address_merkle_tree_pubkeys.unwrap().as_slice(), - addresses, - rpc, - ) - .await; - let payload_string = if let Some(payload) = payload { - payload.to_string() - } else { - payload_legacy.unwrap().to_string() - }; - (Vec::::new(), indices, payload_string) - } - (Some(accounts), Some(addresses)) => { - let (inclusion_payload, inclusion_payload_legacy, inclusion_indices) = self - .process_inclusion_proofs( - &state_merkle_tree_pubkeys.unwrap(), - &accounts, - rpc, - ) - .await; - - let ( - non_inclusion_payload, - non_inclusion_payload_legacy, - non_inclusion_indices, - ) = self - .process_non_inclusion_proofs( - address_merkle_tree_pubkeys.unwrap().as_slice(), - addresses, - rpc, - ) - .await; - let json_payload = if let Some(non_inclusion_payload) = non_inclusion_payload { - let public_input_hash = BigInt::from_bytes_be( - num_bigint::Sign::Plus, - &create_hash_chain_from_slice(&[ - bigint_to_u8_32( - &string_to_big_int( - &inclusion_payload.as_ref().unwrap().public_input_hash, - ) - .unwrap(), - ) - .unwrap(), - bigint_to_u8_32( - &string_to_big_int(&non_inclusion_payload.public_input_hash) - .unwrap(), - ) - .unwrap(), - ]) - .unwrap(), - ); - println!( - "inclusion public input hash offchain {:?}", - bigint_to_u8_32( - &string_to_big_int( - &inclusion_payload.as_ref().unwrap().public_input_hash, - ) - .unwrap(), - ) - .unwrap() - ); - println!( - "non inclusion public input hash offchain {:?}", - bigint_to_u8_32( - &string_to_big_int(&non_inclusion_payload.public_input_hash) - .unwrap() - ) - .unwrap() - ); - - println!( - "public input hash offchain {:?}", - public_input_hash.to_bytes_be() - ); - - CombinedJsonStruct { - circuit_type: ProofType::Combined.to_string(), - state_tree_height: DEFAULT_BATCH_STATE_TREE_HEIGHT, - address_tree_height: DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, - public_input_hash: big_int_to_string(&public_input_hash), - inclusion: inclusion_payload.unwrap().inputs, - non_inclusion: non_inclusion_payload.inputs, - } - .to_string() - } else if let Some(non_inclusion_payload) = non_inclusion_payload_legacy { - CombinedJsonStructLegacy { - circuit_type: ProofType::Combined.to_string(), - state_tree_height: 26, - address_tree_height: 26, - inclusion: inclusion_payload_legacy.unwrap().inputs, - non_inclusion: non_inclusion_payload.inputs, - } - .to_string() - } else { - panic!("Unsupported tree height") - }; - (inclusion_indices, non_inclusion_indices, json_payload) - } - _ => { - panic!("At least one of compressed_accounts or new_addresses must be provided") - } - }; - - println!("json_payload {:?}", json_payload); - let mut retries = 3; - while retries > 0 { - let response_result = client - .post(format!("{}{}", SERVER_ADDRESS, PROVE_PATH)) - .header("Content-Type", "text/plain; charset=utf-8") - .body(json_payload.clone()) - .send() - .await - .expect("Failed to execute request."); - println!("response_result {:?}", response_result); - if response_result.status().is_success() { - let body = response_result.text().await.unwrap(); - println!("body {:?}", body); - println!("root_indices {:?}", root_indices); - println!("address_root_indices {:?}", address_root_indices); - let proof_json = deserialize_gnark_proof_json(&body).unwrap(); 
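-                // Decode the prover's JSON proof into affine points, then
-                // compress them into the fixed-size byte form that the
-                // on-chain verifier expects.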
- let (proof_a, proof_b, proof_c) = proof_from_json_struct(proof_json); - let (proof_a, proof_b, proof_c) = compress_proof(&proof_a, &proof_b, &proof_c); - let root_indices = root_indices.iter().map(|x| Some(*x)).collect(); - return ProofRpcResult { - root_indices, - address_root_indices: address_root_indices.clone(), - proof: CompressedProof { - a: proof_a, - b: proof_b, - c: proof_c, - }, - }; - } else { - warn!("Error: {}", response_result.text().await.unwrap()); - tokio::time::sleep(Duration::from_secs(1)).await; - retries -= 1; - } - } - panic!("Failed to get proof from server"); - } - - async fn create_proof_for_compressed_accounts2( - &mut self, - compressed_accounts: Option>, - state_merkle_tree_pubkeys: Option>, - new_addresses: Option<&[[u8; 32]]>, - address_merkle_tree_pubkeys: Option>, - rpc: &mut R, - ) -> BatchedTreeProofRpcResult { - let mut indices_to_remove = Vec::new(); - - // for all accounts in batched trees, check whether values are in tree or queue - let (compressed_accounts, state_merkle_tree_pubkeys) = - if let Some((compressed_accounts, state_merkle_tree_pubkeys)) = - compressed_accounts.zip(state_merkle_tree_pubkeys) - { - for (i, (compressed_account, state_merkle_tree_pubkey)) in compressed_accounts - .iter() - .zip(state_merkle_tree_pubkeys.iter()) - .enumerate() - { - let accounts = self.state_merkle_trees.iter().find(|x| { - x.accounts.merkle_tree == *state_merkle_tree_pubkey && x.version == 2 - }); - if let Some(accounts) = accounts { - let output_queue_pubkey = accounts.accounts.nullifier_queue; - let mut queue = - AccountZeroCopy::::new(rpc, output_queue_pubkey) - .await; - let queue_zero_copy = BatchedQueueAccount::output_queue_from_bytes_mut( - queue.account.data.as_mut_slice(), - ) - .unwrap(); - for value_array in queue_zero_copy.value_vecs.iter() { - let index = value_array.iter().position(|x| *x == *compressed_account); - if index.is_some() { - indices_to_remove.push(i); - } - } - } - } - let compress_accounts = compressed_accounts - .iter() - .enumerate() - .filter(|(i, _)| !indices_to_remove.contains(i)) - .map(|(_, x)| *x) - .collect::>(); - let state_merkle_tree_pubkeys = state_merkle_tree_pubkeys - .iter() - .enumerate() - .filter(|(i, _)| !indices_to_remove.contains(i)) - .map(|(_, x)| *x) - .collect::>(); - if compress_accounts.is_empty() { - (None, None) - } else { - (Some(compress_accounts), Some(state_merkle_tree_pubkeys)) - } - } else { - (None, None) - }; - let rpc_result = if (compressed_accounts.is_some() - && !compressed_accounts.as_ref().unwrap().is_empty()) - || address_merkle_tree_pubkeys.is_some() - { - Some( - self.create_proof_for_compressed_accounts( - compressed_accounts, - state_merkle_tree_pubkeys, - new_addresses, - address_merkle_tree_pubkeys, - rpc, - ) - .await, - ) - } else { - None - }; - let address_root_indices = if let Some(rpc_result) = rpc_result.as_ref() { - rpc_result.address_root_indices.clone() - } else { - Vec::new() - }; - let root_indices = { - let mut root_indices = if let Some(rpc_result) = rpc_result.as_ref() { - rpc_result.root_indices.clone() - } else { - Vec::new() - }; - for index in indices_to_remove { - root_indices.insert(index, None); - } - root_indices - }; - BatchedTreeProofRpcResult { - proof: rpc_result.map(|x| x.proof), - root_indices, - address_root_indices, - } - } - - fn add_address_merkle_tree_accounts( - &mut self, - merkle_tree_keypair: &Keypair, - queue_keypair: &Keypair, - _owning_program_id: Option, - ) -> AddressMerkleTreeAccounts { - info!("Adding address merkle tree accounts..."); 
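-        // Register the new tree in the local reference implementation so the
-        // test indexer can serve proofs for it without on-chain reads.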
- let address_merkle_tree_accounts = AddressMerkleTreeAccounts { - merkle_tree: merkle_tree_keypair.pubkey(), - queue: queue_keypair.pubkey(), - }; - self.address_merkle_trees - .push(Self::add_address_merkle_tree_bundle( - address_merkle_tree_accounts, - )); - info!( - "Address merkle tree accounts added. Total: {}", - self.address_merkle_trees.len() - ); - address_merkle_tree_accounts - } - - /// returns compressed_accounts with the owner pubkey - /// does not return token accounts. - fn get_compressed_accounts_by_owner( - &self, - owner: &Pubkey, - ) -> Vec { - self.compressed_accounts - .iter() - .filter(|x| x.compressed_account.owner == *owner) - .cloned() - .collect() - } - - fn get_compressed_token_accounts_by_owner(&self, owner: &Pubkey) -> Vec { - self.token_compressed_accounts - .iter() - .filter(|x| x.token_data.owner == *owner) - .cloned() - .collect() - } - - fn add_state_bundle(&mut self, state_bundle: StateMerkleTreeBundle) { - self.get_state_merkle_trees_mut().push(state_bundle); - } - - async fn update_test_indexer_after_append( - &mut self, - rpc: &mut R, - merkle_tree_pubkey: Pubkey, - output_queue_pubkey: Pubkey, - num_inserted_zkps: u64, - ) { - let mut state_merkle_tree_bundle = self - .state_merkle_trees - .iter_mut() - .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) - .unwrap(); - - let (merkle_tree_next_index, root) = { - let mut merkle_tree_account = - rpc.get_account(merkle_tree_pubkey).await.unwrap().unwrap(); - let merkle_tree = BatchedMerkleTreeAccount::state_tree_from_bytes_mut( - merkle_tree_account.data.as_mut_slice(), - ) - .unwrap(); - ( - merkle_tree.get_metadata().next_index as usize, - *merkle_tree.root_history.last().unwrap(), - ) - }; - - let (max_num_zkp_updates, zkp_batch_size) = { - let mut output_queue_account = - rpc.get_account(output_queue_pubkey).await.unwrap().unwrap(); - let output_queue = BatchedQueueAccount::output_queue_from_bytes_mut( - output_queue_account.data.as_mut_slice(), - ) - .unwrap(); - - let output_queue_account = output_queue.get_metadata(); - let max_num_zkp_updates = output_queue_account.batch_metadata.get_num_zkp_batches(); - let zkp_batch_size = output_queue_account.batch_metadata.zkp_batch_size; - (max_num_zkp_updates, zkp_batch_size) - }; - - let leaves = state_merkle_tree_bundle.output_queue_elements.to_vec(); - - let start = (num_inserted_zkps as usize) * zkp_batch_size as usize; - let end = start + zkp_batch_size as usize; - let batch_update_leaves = leaves[start..end].to_vec(); - - for (i, _) in batch_update_leaves.iter().enumerate() { - // if leaves[i] == [0u8; 32] { - let index = merkle_tree_next_index + i - zkp_batch_size as usize; - // This is dangerous it should call self.get_leaf_by_index() but it - // can t for mutable borrow - // TODO: call a get_leaf_by_index equivalent, we could move the method to the reference merkle tree - let leaf = state_merkle_tree_bundle - .merkle_tree - .get_leaf(index) - .unwrap(); - if leaf == [0u8; 32] { - state_merkle_tree_bundle - .merkle_tree - .update(&batch_update_leaves[i], index) - .unwrap(); - } - } - assert_eq!( - root, - state_merkle_tree_bundle.merkle_tree.root(), - "update indexer after append root invalid" - ); - - let num_inserted_zkps = num_inserted_zkps + 1; - // check can we get rid of this and use the data from the merkle tree - if num_inserted_zkps == max_num_zkp_updates { - for _ in 0..zkp_batch_size * max_num_zkp_updates { - state_merkle_tree_bundle.output_queue_elements.remove(0); - } - } - } - - async fn update_test_indexer_after_nullification( - 
&mut self, - rpc: &mut R, - merkle_tree_pubkey: Pubkey, - batch_index: usize, - ) { - let state_merkle_tree_bundle = self - .state_merkle_trees - .iter_mut() - .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) - .unwrap(); - - let mut merkle_tree_account = rpc.get_account(merkle_tree_pubkey).await.unwrap().unwrap(); - let merkle_tree = BatchedMerkleTreeAccount::state_tree_from_bytes_mut( - merkle_tree_account.data.as_mut_slice(), - ) - .unwrap(); - - let batch = &merkle_tree.batches[batch_index]; - if batch.get_state() == BatchState::Inserted || batch.get_state() == BatchState::Full { - let batch_size = batch.zkp_batch_size; - let leaf_indices_tx_hashes = - state_merkle_tree_bundle.input_leaf_indices[..batch_size as usize].to_vec(); - for (index, leaf, tx_hash) in leaf_indices_tx_hashes.iter() { - let index = *index as usize; - let leaf = *leaf; - let index_bytes = index.to_be_bytes(); - - let nullifier = Poseidon::hashv(&[&leaf, &index_bytes, tx_hash]).unwrap(); - - state_merkle_tree_bundle.input_leaf_indices.remove(0); - state_merkle_tree_bundle - .merkle_tree - .update(&nullifier, index) - .unwrap(); - } - } - } - - async fn finalize_batched_address_tree_update( - &mut self, - rpc: &mut R, - merkle_tree_pubkey: Pubkey, - ) { - let mut account = rpc.get_account(merkle_tree_pubkey).await.unwrap().unwrap(); - let onchain_account = - BatchedMerkleTreeAccount::address_tree_from_bytes_mut(account.data.as_mut_slice()) - .unwrap(); - let address_tree = self - .address_merkle_trees - .iter_mut() - .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) - .unwrap(); - let address_tree_index = address_tree.merkle_tree.merkle_tree.rightmost_index; - let onchain_next_index = onchain_account.get_metadata().next_index; - let diff_onchain_indexer = onchain_next_index - address_tree_index as u64; - let addresses = address_tree.queue_elements[0..diff_onchain_indexer as usize].to_vec(); - - for _ in 0..diff_onchain_indexer { - address_tree.queue_elements.remove(0); - } - for new_element_value in &addresses { - address_tree - .merkle_tree - .append( - &BigUint::from_bytes_be(new_element_value), - &mut address_tree.indexed_array, - ) - .unwrap(); - } - - let onchain_root = onchain_account.root_history.last().unwrap(); - let new_root = address_tree.merkle_tree.root(); - assert_eq!(*onchain_root, new_root); - println!("finalized batched address tree update"); - } -} - -impl TestIndexer { - async fn _get_multiple_new_address_proofs( - &self, - merkle_tree_pubkey: [u8; 32], - addresses: Vec<[u8; 32]>, - full: bool, - ) -> Result>, IndexerError> { - let mut proofs: Vec> = Vec::new(); - - for address in addresses.iter() { - info!("Getting new address proof for {:?}", address); - let pubkey = Pubkey::from(merkle_tree_pubkey); - let address_tree_bundle = self - .address_merkle_trees - .iter() - .find(|x| x.accounts.merkle_tree == pubkey) - .unwrap(); - - let address_biguint = BigUint::from_bytes_be(address.as_slice()); - let (old_low_address, _old_low_address_next_value) = address_tree_bundle - .indexed_array - .find_low_element_for_nonexistent(&address_biguint) - .unwrap(); - let address_bundle = address_tree_bundle - .indexed_array - .new_element_with_low_element_index(old_low_address.index, &address_biguint) - .unwrap(); - - let (old_low_address, old_low_address_next_value) = address_tree_bundle - .indexed_array - .find_low_element_for_nonexistent(&address_biguint) - .unwrap(); - - // Get the Merkle proof for updating low element. 
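-            // (In an indexed Merkle tree, non-inclusion of `address` is shown
-            // by opening the low element: the largest existing value below
-            // `address`, whose stored next-value must lie above it.)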
- let low_address_proof = address_tree_bundle - .merkle_tree - .get_proof_of_leaf(old_low_address.index, full) - .unwrap(); - - let low_address_index: u64 = old_low_address.index as u64; - let low_address_value: [u8; 32] = - bigint_to_be_bytes_array(&old_low_address.value).unwrap(); - let low_address_next_index: u64 = old_low_address.next_index as u64; - let low_address_next_value: [u8; 32] = - bigint_to_be_bytes_array(&old_low_address_next_value).unwrap(); - let low_address_proof: [[u8; 32]; NET_HEIGHT] = low_address_proof.to_array().unwrap(); - let proof = NewAddressProofWithContext:: { - merkle_tree: merkle_tree_pubkey, - low_address_index, - low_address_value, - low_address_next_index, - low_address_next_value, - low_address_proof, - root: address_tree_bundle.merkle_tree.root(), - root_seq: address_tree_bundle.merkle_tree.merkle_tree.sequence_number as u64, - new_low_element: Some(address_bundle.new_low_element), - new_element: Some(address_bundle.new_element), - new_element_next_value: Some(address_bundle.new_element_next_value), - }; - proofs.push(proof); - } - Ok(proofs) - } - - fn count_matching_hashes(&self, query_hashes: &[String]) -> usize { - self.nullified_compressed_accounts - .iter() - .map(|account| self.compute_hash(account)) - .filter(|bs58_hash| query_hashes.contains(bs58_hash)) - .count() - } - - fn compute_hash(&self, account: &CompressedAccountWithMerkleContext) -> String { - // replace AccountType with actual type - let hash = account - .compressed_account - .hash::( - &account.merkle_context.merkle_tree_pubkey, - &account.merkle_context.leaf_index, - ) - .unwrap(); - bs58::encode(hash).into_string() - } - - pub async fn init_from_env( - payer: &Keypair, - env: &EnvAccounts, - prover_config: Option, - ) -> Self { - Self::new( - vec![ - StateMerkleTreeAccounts { - merkle_tree: env.merkle_tree_pubkey, - nullifier_queue: env.nullifier_queue_pubkey, - cpi_context: env.cpi_context_account_pubkey, - }, - StateMerkleTreeAccounts { - merkle_tree: env.batched_state_merkle_tree, - nullifier_queue: env.batched_output_queue, - cpi_context: env.batched_cpi_context, - }, - ], - vec![ - AddressMerkleTreeAccounts { - merkle_tree: env.address_merkle_tree_pubkey, - queue: env.address_merkle_tree_queue_pubkey, - }, - AddressMerkleTreeAccounts { - merkle_tree: env.batch_address_merkle_tree, - queue: env.batch_address_merkle_tree, - }, - ], - payer.insecure_clone(), - env.group_pda, - prover_config, - ) - .await - } - - pub async fn new( - state_merkle_tree_accounts: Vec, - address_merkle_tree_accounts: Vec, - payer: Keypair, - group_pda: Pubkey, - prover_config: Option, - ) -> Self { - if let Some(ref prover_config) = prover_config { - // TODO: remove restart input and check whether prover is already - // running with correct config - spawn_prover(true, prover_config.clone()).await; - } - let mut state_merkle_trees = Vec::new(); - for state_merkle_tree_account in state_merkle_tree_accounts.iter() { - let test_batched_output_queue = - Keypair::from_bytes(&BATCHED_OUTPUT_QUEUE_TEST_KEYPAIR).unwrap(); - let (version, merkle_tree) = if state_merkle_tree_account.nullifier_queue - == test_batched_output_queue.pubkey() - { - let merkle_tree = Box::new(MerkleTree::::new( - DEFAULT_BATCH_STATE_TREE_HEIGHT as usize, - 0, - )); - (2, merkle_tree) - } else { - let merkle_tree = Box::new(MerkleTree::::new( - STATE_MERKLE_TREE_HEIGHT as usize, - STATE_MERKLE_TREE_CANOPY_DEPTH as usize, - )); - (1, merkle_tree) - }; - - state_merkle_trees.push(StateMerkleTreeBundle { - accounts: 
*state_merkle_tree_account, - merkle_tree, - rollover_fee: FeeConfig::default().state_merkle_tree_rollover as i64, - version, - output_queue_elements: vec![], - input_leaf_indices: vec![], - }); - } - - let mut address_merkle_trees = Vec::new(); - for address_merkle_tree_account in address_merkle_tree_accounts { - address_merkle_trees.push(Self::add_address_merkle_tree_bundle( - address_merkle_tree_account, - )); - } - - Self { - state_merkle_trees, - address_merkle_trees, - payer, - compressed_accounts: vec![], - nullified_compressed_accounts: vec![], - events: vec![], - token_compressed_accounts: vec![], - token_nullified_compressed_accounts: vec![], - prover_config, - phantom: Default::default(), - group_pda, - } - } - - pub fn add_address_merkle_tree_bundle( - address_merkle_tree_accounts: AddressMerkleTreeAccounts, - // TODO: add config here - ) -> AddressMerkleTreeBundle { - let (height, canopy) = - if address_merkle_tree_accounts.merkle_tree == address_merkle_tree_accounts.queue { - (40, 0) - } else { - (26, STATE_MERKLE_TREE_CANOPY_DEPTH as usize) - }; - let mut merkle_tree = - Box::new(IndexedMerkleTree::::new(height, canopy).unwrap()); - merkle_tree.init().unwrap(); - let mut indexed_array = Box::>::default(); - indexed_array.init().unwrap(); - AddressMerkleTreeBundle { - merkle_tree, - indexed_array, - accounts: address_merkle_tree_accounts, - rollover_fee: FeeConfig::default().address_queue_rollover as i64, - queue_elements: vec![], - } - } - - async fn add_address_merkle_tree_v1( - &mut self, - rpc: &mut R, - merkle_tree_keypair: &Keypair, - queue_keypair: &Keypair, - owning_program_id: Option, - ) -> AddressMerkleTreeAccounts { - create_address_merkle_tree_and_queue_account_with_assert( - &self.payer, - true, - rpc, - merkle_tree_keypair, - queue_keypair, - owning_program_id, - None, - &AddressMerkleTreeConfig::default(), - &AddressQueueConfig::default(), - 0, - ) - .await - .unwrap(); - self.add_address_merkle_tree_accounts(merkle_tree_keypair, queue_keypair, owning_program_id) - } - - async fn add_address_merkle_tree_v2( - &mut self, - rpc: &mut R, - merkle_tree_keypair: &Keypair, - queue_keypair: &Keypair, - owning_program_id: Option, - ) -> AddressMerkleTreeAccounts { - info!( - "Adding address merkle tree accounts v2 {:?}", - merkle_tree_keypair.pubkey() - ); - - let params = InitAddressTreeAccountsInstructionData::test_default(); - - info!( - "Creating batched address merkle tree {:?}", - merkle_tree_keypair.pubkey() - ); - create_batch_address_merkle_tree(rpc, &self.payer, merkle_tree_keypair, params) - .await - .unwrap(); - info!( - "Batched address merkle tree created {:?}", - merkle_tree_keypair.pubkey() - ); - - self.add_address_merkle_tree_accounts(merkle_tree_keypair, queue_keypair, owning_program_id) - } - - pub async fn add_address_merkle_tree( - &mut self, - rpc: &mut R, - merkle_tree_keypair: &Keypair, - queue_keypair: &Keypair, - owning_program_id: Option, - version: u64, - ) -> AddressMerkleTreeAccounts { - if version == 1 { - self.add_address_merkle_tree_v1( - rpc, - merkle_tree_keypair, - queue_keypair, - owning_program_id, - ) - .await - } else if version == 2 { - self.add_address_merkle_tree_v2( - rpc, - merkle_tree_keypair, - queue_keypair, - owning_program_id, - ) - .await - } else { - panic!( - "add_address_merkle_tree: Version not supported, {}. 
Versions: 1, 2", - version - ) - } - } - - #[allow(clippy::too_many_arguments)] - pub async fn add_state_merkle_tree( - &mut self, - rpc: &mut R, - merkle_tree_keypair: &Keypair, - queue_keypair: &Keypair, - cpi_context_keypair: &Keypair, - owning_program_id: Option, - forester: Option, - version: u64, - ) { - let (rollover_fee, merkle_tree) = match version { - 1 => { - create_state_merkle_tree_and_queue_account( - &self.payer, - true, - rpc, - merkle_tree_keypair, - queue_keypair, - Some(cpi_context_keypair), - owning_program_id, - forester, - self.state_merkle_trees.len() as u64, - &StateMerkleTreeConfig::default(), - &NullifierQueueConfig::default(), - ) - .await - .unwrap(); - let merkle_tree = Box::new(MerkleTree::::new( - STATE_MERKLE_TREE_HEIGHT as usize, - STATE_MERKLE_TREE_CANOPY_DEPTH as usize, - )); - (FeeConfig::default().state_merkle_tree_rollover as i64,merkle_tree) - } - 2 => { - let params = InitStateTreeAccountsInstructionData::test_default(); - - create_batched_state_merkle_tree( - &self.payer, - true, - rpc, - merkle_tree_keypair, - queue_keypair, - cpi_context_keypair, - params, - ).await; - let merkle_tree = Box::new(MerkleTree::::new( - DEFAULT_BATCH_STATE_TREE_HEIGHT as usize, - 0 - )); - (FeeConfig::test_batched().state_merkle_tree_rollover as i64,merkle_tree) - } - _ => panic!( - "add_state_merkle_tree: Version not supported, {}. Versions: 1 concurrent, 2 batched", - version - ), - }; - let state_merkle_tree_account = StateMerkleTreeAccounts { - merkle_tree: merkle_tree_keypair.pubkey(), - nullifier_queue: queue_keypair.pubkey(), - cpi_context: cpi_context_keypair.pubkey(), - }; - - self.state_merkle_trees.push(StateMerkleTreeBundle { - merkle_tree, - accounts: state_merkle_tree_account, - rollover_fee, - version, - output_queue_elements: vec![], - input_leaf_indices: vec![], - }); - } - - async fn process_inclusion_proofs( - &self, - merkle_tree_pubkeys: &[Pubkey], - accounts: &[[u8; 32]], - rpc: &mut R, - ) -> ( - Option, - Option, - Vec, - ) { - let mut inclusion_proofs = Vec::new(); - let mut root_indices = Vec::new(); - let mut height = 0; - - // Collect all proofs first before any await points - let proof_data: Vec<_> = accounts - .iter() - .zip(merkle_tree_pubkeys.iter()) - .map(|(account, &pubkey)| { - let bundle = &self - .state_merkle_trees - .iter() - .find(|x| x.accounts.merkle_tree == pubkey) - .unwrap(); - let merkle_tree = &bundle.merkle_tree; - let leaf_index = merkle_tree.get_leaf_index(account).unwrap(); - let proof = merkle_tree.get_proof_of_leaf(leaf_index, true).unwrap(); - - // Convert proof to owned data that implements Send - let proof: Vec = proof.iter().map(|x| BigInt::from_be_bytes(x)).collect(); - - if height == 0 { - height = merkle_tree.height; - } else { - assert_eq!(height, merkle_tree.height); - } - - ( - bundle.version, - pubkey, - leaf_index, - proof, - merkle_tree.root(), - ) - }) - .collect(); - - // Now handle the async operations with the collected data - for (i, (version, pubkey, leaf_index, proof, merkle_root)) in - proof_data.into_iter().enumerate() - { - inclusion_proofs.push(InclusionMerkleProofInputs { - root: BigInt::from_be_bytes(merkle_root.as_slice()), - leaf: BigInt::from_be_bytes(&accounts[i]), - path_index: BigInt::from_be_bytes(leaf_index.to_be_bytes().as_slice()), - path_elements: proof, - }); - - let (root_index, root) = if version == 1 { - let fetched_merkle_tree = unsafe { - get_concurrent_merkle_tree::( - rpc, pubkey, - ) - .await - }; - ( - fetched_merkle_tree.root_index() as u32, - 
fetched_merkle_tree.root(), - ) - } else { - let mut merkle_tree_account = rpc.get_account(pubkey).await.unwrap().unwrap(); - let merkle_tree = BatchedMerkleTreeAccount::state_tree_from_bytes_mut( - merkle_tree_account.data.as_mut_slice(), - ) - .unwrap(); - ( - merkle_tree.get_root_index(), - merkle_tree.get_root().unwrap(), - ) - }; - - assert_eq!(merkle_root, root, "Merkle tree root mismatch"); - root_indices.push(root_index as u16); - } - - let (batch_inclusion_proof_inputs, legacy) = if height - == DEFAULT_BATCH_STATE_TREE_HEIGHT as usize - { - let inclusion_proof_inputs = - InclusionProofInputs::new(inclusion_proofs.as_slice()).unwrap(); - ( - Some(BatchInclusionJsonStruct::from_inclusion_proof_inputs( - &inclusion_proof_inputs, - )), - None, - ) - } else if height == STATE_MERKLE_TREE_HEIGHT as usize { - let inclusion_proof_inputs = InclusionProofInputsLegacy(inclusion_proofs.as_slice()); - ( - None, - Some(BatchInclusionJsonStructLegacy::from_inclusion_proof_inputs( - &inclusion_proof_inputs, - )), - ) - } else { - panic!("Unsupported tree height") - }; - - (batch_inclusion_proof_inputs, legacy, root_indices) - } - - pub async fn process_non_inclusion_proofs( - &self, - address_merkle_tree_pubkeys: &[Pubkey], - addresses: &[[u8; 32]], - rpc: &mut R, - ) -> ( - Option, - Option, - Vec, - ) { - let mut non_inclusion_proofs = Vec::new(); - let mut address_root_indices = Vec::new(); - let mut tree_heights = Vec::new(); - for tree in self.address_merkle_trees.iter() { - println!("height {:?}", tree.merkle_tree.merkle_tree.height); - println!("accounts {:?}", tree.accounts); - } - println!("process_non_inclusion_proofs: addresses {:?}", addresses); - for (i, address) in addresses.iter().enumerate() { - let address_tree = &self - .address_merkle_trees - .iter() - .find(|x| x.accounts.merkle_tree == address_merkle_tree_pubkeys[i]) - .unwrap(); - tree_heights.push(address_tree.merkle_tree.merkle_tree.height); - - let proof_inputs = get_non_inclusion_proof_inputs( - address, - &address_tree.merkle_tree, - &address_tree.indexed_array, - ); - non_inclusion_proofs.push(proof_inputs); - - // We don't have address queues in v2 (batch) address Merkle trees - // hence both accounts in this struct are the same. - let is_v2 = address_tree.accounts.merkle_tree == address_tree.accounts.queue; - println!("is v2 {:?}", is_v2); - println!( - "address_merkle_tree_pubkeys[i] {:?}", - address_merkle_tree_pubkeys[i] - ); - println!("address_tree.accounts {:?}", address_tree.accounts); - if is_v2 { - let account = rpc - .get_account(address_merkle_tree_pubkeys[i]) - .await - .unwrap(); - if let Some(mut account) = account { - let account = BatchedMerkleTreeAccount::address_tree_from_bytes_mut( - account.data.as_mut_slice(), - ) - .unwrap(); - address_root_indices.push(account.get_root_index() as u16); - } else { - panic!( - "TestIndexer.process_non_inclusion_proofs(): Address tree account not found." 
- ); - } - } else { - let fetched_address_merkle_tree = unsafe { - get_indexed_merkle_tree::( - rpc, - address_merkle_tree_pubkeys[i], - ) - .await - }; - address_root_indices.push(fetched_address_merkle_tree.root_index() as u16); - } - } - // if tree heights are not the same, panic - if tree_heights.iter().any(|&x| x != tree_heights[0]) { - panic!( - "All address merkle trees must have the same height {:?}", - tree_heights - ); - } - let (batch_non_inclusion_proof_inputs, batch_non_inclusion_proof_inputs_legacy) = - if tree_heights[0] == 26 { - let non_inclusion_proof_inputs = - NonInclusionProofInputsLegacy::new(non_inclusion_proofs.as_slice()); - ( - None, - Some( - BatchNonInclusionJsonStructLegacy::from_non_inclusion_proof_inputs( - &non_inclusion_proof_inputs, - ), - ), - ) - } else if tree_heights[0] == 40 { - let non_inclusion_proof_inputs = - NonInclusionProofInputs::new(non_inclusion_proofs.as_slice()).unwrap(); - ( - Some( - BatchNonInclusionJsonStruct::from_non_inclusion_proof_inputs( - &non_inclusion_proof_inputs, - ), - ), - None, - ) - } else { - panic!("Unsupported tree height") - }; - ( - batch_non_inclusion_proof_inputs, - batch_non_inclusion_proof_inputs_legacy, - address_root_indices, - ) - } - - /// deserializes an event - /// adds the output_compressed_accounts to the compressed_accounts - /// removes the input_compressed_accounts from the compressed_accounts - /// adds the input_compressed_accounts to the nullified_compressed_accounts - pub fn add_lamport_compressed_accounts(&mut self, slot: u64, event_bytes: Vec) { - let event_bytes = event_bytes.clone(); - let event = PublicTransactionEvent::deserialize(&mut event_bytes.as_slice()).unwrap(); - self.add_event_and_compressed_accounts(slot, &event); - } - - /// deserializes an event - /// adds the output_compressed_accounts to the compressed_accounts - /// removes the input_compressed_accounts from the compressed_accounts - /// adds the input_compressed_accounts to the nullified_compressed_accounts - /// deserialiazes token data from the output_compressed_accounts - /// adds the token_compressed_accounts to the token_compressed_accounts - pub fn add_compressed_accounts_with_token_data( - &mut self, - slot: u64, - event: &PublicTransactionEvent, - ) { - self.add_event_and_compressed_accounts(slot, event); - } - - /// returns the compressed sol balance of the owner pubkey - pub fn get_compressed_balance(&self, owner: &Pubkey) -> u64 { - self.compressed_accounts - .iter() - .filter(|x| x.compressed_account.owner == *owner) - .map(|x| x.compressed_account.lamports) - .sum() - } - - /// returns the compressed token balance of the owner pubkey for a token by mint - pub fn get_compressed_token_balance(&self, owner: &Pubkey, mint: &Pubkey) -> u64 { - self.token_compressed_accounts - .iter() - .filter(|x| { - x.compressed_account.compressed_account.owner == *owner - && x.token_data.mint == *mint - }) - .map(|x| x.token_data.amount) - .sum() - } - - fn process_v1_compressed_account( - &mut self, - slot: u64, - event: &PublicTransactionEvent, - i: usize, - token_compressed_accounts: &mut Vec, - compressed_accounts: &mut Vec, - ) { - let mut input_addresses = vec![]; - if event.input_compressed_account_hashes.len() > i { - let tx_hash: [u8; 32] = create_tx_hash( - &event.input_compressed_account_hashes, - &event.output_compressed_account_hashes, - slot, - ) - .unwrap(); - println!("tx_hash {:?}", tx_hash); - println!("slot {:?}", slot); - let hash = event.input_compressed_account_hashes[i]; - let index = 
self.compressed_accounts.iter().position(|x| { - x.compressed_account - .hash::( - &x.merkle_context.merkle_tree_pubkey, - &x.merkle_context.leaf_index, - ) - .unwrap() - == hash - }); - let (leaf_index, merkle_tree_pubkey) = if let Some(index) = index { - self.nullified_compressed_accounts - .push(self.compressed_accounts[index].clone()); - let leaf_index = self.compressed_accounts[index].merkle_context.leaf_index; - let merkle_tree_pubkey = self.compressed_accounts[index] - .merkle_context - .merkle_tree_pubkey; - if let Some(address) = self.compressed_accounts[index].compressed_account.address { - input_addresses.push(address); - } - self.compressed_accounts.remove(index); - (leaf_index, merkle_tree_pubkey) - } else { - let index = self - .token_compressed_accounts - .iter() - .position(|x| { - x.compressed_account - .compressed_account - .hash::( - &x.compressed_account.merkle_context.merkle_tree_pubkey, - &x.compressed_account.merkle_context.leaf_index, - ) - .unwrap() - == hash - }) - .expect("input compressed account not found"); - self.token_nullified_compressed_accounts - .push(self.token_compressed_accounts[index].clone()); - let leaf_index = self.token_compressed_accounts[index] - .compressed_account - .merkle_context - .leaf_index; - let merkle_tree_pubkey = self.token_compressed_accounts[index] - .compressed_account - .merkle_context - .merkle_tree_pubkey; - self.token_compressed_accounts.remove(index); - (leaf_index, merkle_tree_pubkey) - }; - let bundle = &mut self - .get_state_merkle_trees_mut() - .iter_mut() - .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) - .unwrap(); - // Store leaf indices of input accounts for batched trees - if bundle.version == 2 { - let leaf_hash = event.input_compressed_account_hashes[i]; - bundle - .input_leaf_indices - .push((leaf_index, leaf_hash, tx_hash)); - } - } - let mut new_addresses = vec![]; - if event.output_compressed_accounts.len() > i { - let compressed_account = &event.output_compressed_accounts[i]; - println!("output compressed account {:?}", compressed_account); - if let Some(address) = compressed_account.compressed_account.address { - if !input_addresses.iter().any(|x| x == &address) { - new_addresses.push(address); - } - } - - let merkle_tree = self.state_merkle_trees.iter().find(|x| { - x.accounts.merkle_tree - == event.pubkey_array - [event.output_compressed_accounts[i].merkle_tree_index as usize] - }); - // Check for output queue - let merkle_tree = if let Some(merkle_tree) = merkle_tree { - merkle_tree - } else { - self.state_merkle_trees - .iter() - .find(|x| { - x.accounts.nullifier_queue - == event.pubkey_array - [event.output_compressed_accounts[i].merkle_tree_index as usize] - }) - .unwrap() - }; - println!("found merkle tree {:?}", merkle_tree.accounts.merkle_tree); - let nullifier_queue_pubkey = merkle_tree.accounts.nullifier_queue; - let merkle_tree_pubkey = merkle_tree.accounts.merkle_tree; - // if data is some, try to deserialize token data, if it fails, add to compressed_accounts - // if data is none add to compressed_accounts - // new accounts are inserted in front so that the newest accounts are found first - match compressed_account.compressed_account.data.as_ref() { - Some(data) => { - if compressed_account.compressed_account.owner == light_compressed_token::ID - && data.discriminator == TOKEN_COMPRESSED_ACCOUNT_DISCRIMINATOR - { - if let Ok(token_data) = TokenData::deserialize(&mut data.data.as_slice()) { - let token_account = TokenDataWithContext { - token_data, - compressed_account: 
CompressedAccountWithMerkleContext { - compressed_account: compressed_account - .compressed_account - .clone(), - merkle_context: MerkleContext { - leaf_index: event.output_leaf_indices[i], - merkle_tree_pubkey, - nullifier_queue_pubkey, - queue_index: None, - }, - }, - }; - token_compressed_accounts.push(token_account.clone()); - self.token_compressed_accounts.insert(0, token_account); - } - } else { - let compressed_account = CompressedAccountWithMerkleContext { - compressed_account: compressed_account.compressed_account.clone(), - merkle_context: MerkleContext { - leaf_index: event.output_leaf_indices[i], - merkle_tree_pubkey, - nullifier_queue_pubkey, - queue_index: None, - }, - }; - compressed_accounts.push(compressed_account.clone()); - self.compressed_accounts.insert(0, compressed_account); - } - } - None => { - let compressed_account = CompressedAccountWithMerkleContext { - compressed_account: compressed_account.compressed_account.clone(), - merkle_context: MerkleContext { - leaf_index: event.output_leaf_indices[i], - merkle_tree_pubkey, - nullifier_queue_pubkey, - queue_index: None, - }, - }; - compressed_accounts.push(compressed_account.clone()); - self.compressed_accounts.insert(0, compressed_account); - } - }; - let seq = event - .sequence_numbers - .iter() - .find(|x| x.pubkey == merkle_tree_pubkey); - let seq = if let Some(seq) = seq { - seq - } else { - event - .sequence_numbers - .iter() - .find(|x| x.pubkey == nullifier_queue_pubkey) - .unwrap() - }; - let is_batched = seq.seq == u64::MAX; - - println!("Output is batched {:?}", is_batched); - if !is_batched { - let merkle_tree = &mut self - .state_merkle_trees - .iter_mut() - .find(|x| { - x.accounts.merkle_tree - == event.pubkey_array - [event.output_compressed_accounts[i].merkle_tree_index as usize] - }) - .unwrap(); - merkle_tree - .merkle_tree - .append( - &compressed_account - .compressed_account - .hash::( - &event.pubkey_array[event.output_compressed_accounts[i] - .merkle_tree_index - as usize], - &event.output_leaf_indices[i], - ) - .unwrap(), - ) - .expect("insert failed"); - } else { - let merkle_tree = &mut self - .state_merkle_trees - .iter_mut() - .find(|x| { - x.accounts.nullifier_queue - == event.pubkey_array - [event.output_compressed_accounts[i].merkle_tree_index as usize] - }) - .unwrap(); - - merkle_tree - .output_queue_elements - .push(event.output_compressed_account_hashes[i]); - } - } - println!("new addresses {:?}", new_addresses); - println!("event.pubkey_array {:?}", event.pubkey_array); - println!( - "address merkle trees {:?}", - self.address_merkle_trees - .iter() - .map(|x| x.accounts.merkle_tree) - .collect::>() - ); - // checks whether there are addresses in outputs which don't exist in inputs. - // if so check pubkey_array for the first address Merkle tree and append to the bundles queue elements. 
- // Note: - // - creating addresses in multiple address Merkle trees in one tx is not supported - // TODO: reimplement this is not a good solution - // - take addresses and address Merkle tree pubkeys from cpi to account compression program - if !new_addresses.is_empty() { - for pubkey in event.pubkey_array.iter() { - if let Some((_, address_merkle_tree)) = self - .address_merkle_trees - .iter_mut() - .enumerate() - .find(|(i, x)| x.accounts.merkle_tree == *pubkey) - { - address_merkle_tree - .queue_elements - .append(&mut new_addresses); - } - } - } - } - - pub(crate) fn get_address_merkle_tree( - &self, - merkle_tree_pubkey: Pubkey, - ) -> Option<&AddressMerkleTreeBundle> { - self.address_merkle_trees - .iter() - .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) - } -} +// use std::{ +// future::Future, +// marker::PhantomData, +// sync::{Arc, Mutex}, +// time::Duration, +// }; +// +// use account_compression::{ +// utils::constants::{STATE_MERKLE_TREE_CANOPY_DEPTH, STATE_MERKLE_TREE_HEIGHT}, +// AddressMerkleTreeAccount, AddressMerkleTreeConfig, AddressQueueConfig, NullifierQueueConfig, +// StateMerkleTreeAccount, StateMerkleTreeConfig, +// }; +// use anchor_lang::AnchorDeserialize; +// use async_trait::async_trait; +// use forester_utils::{ +// get_concurrent_merkle_tree, get_indexed_merkle_tree, +// indexer::{ +// AddressMerkleTreeAccounts, AddressMerkleTreeBundle, BatchedTreeProofRpcResult, Indexer, +// IndexerError, MerkleProof, NewAddressProofWithContext, ProofOfLeaf, ProofRpcResult, +// StateMerkleTreeAccounts, StateMerkleTreeBundle, TokenDataWithContext, +// }, +// AccountZeroCopy, +// }; +// use light_batched_merkle_tree::{ +// batch::BatchState, +// constants::{DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, DEFAULT_BATCH_STATE_TREE_HEIGHT}, +// initialize_address_tree::InitAddressTreeAccountsInstructionData, +// initialize_state_tree::InitStateTreeAccountsInstructionData, +// merkle_tree::BatchedMerkleTreeAccount, +// queue::{BatchedQueueAccount, BatchedQueueMetadata}, +// }; +// use light_client::{ +// rpc::{RpcConnection, RpcError}, +// transaction_params::FeeConfig, +// }; +// use light_compressed_token::{ +// constants::TOKEN_COMPRESSED_ACCOUNT_DISCRIMINATOR, get_token_pool_pda, +// mint_sdk::create_create_token_pool_instruction, TokenData, +// }; +// use light_hasher::{Hasher, Poseidon}; +// use light_indexed_merkle_tree::{array::IndexedArray, reference::IndexedMerkleTree}; +// use light_macros::pubkey; +// use light_merkle_tree_reference::MerkleTree; +// use light_program_test::{ +// test_batch_forester::{create_batch_address_merkle_tree, create_batched_state_merkle_tree}, +// test_env::{ +// create_address_merkle_tree_and_queue_account, create_state_merkle_tree_and_queue_account, +// EnvAccounts, BATCHED_OUTPUT_QUEUE_TEST_KEYPAIR, +// }, +// }; +// use light_prover_client::{ +// gnark::{ +// batch_append_with_proofs_json_formatter::BatchAppendWithProofsInputsJson, +// combined_json_formatter::CombinedJsonStruct, +// combined_json_formatter_legacy::CombinedJsonStruct as CombinedJsonStructLegacy, +// constants::{PROVE_PATH, SERVER_ADDRESS}, +// helpers::{ +// big_int_to_string, spawn_prover, string_to_big_int, ProofType, ProverConfig, ProverMode, +// }, +// inclusion_json_formatter::BatchInclusionJsonStruct, +// inclusion_json_formatter_legacy::BatchInclusionJsonStruct as BatchInclusionJsonStructLegacy, +// non_inclusion_json_formatter::BatchNonInclusionJsonStruct, +// non_inclusion_json_formatter_legacy::BatchNonInclusionJsonStruct as BatchNonInclusionJsonStructLegacy, +// 
proof_helpers::{compress_proof, deserialize_gnark_proof_json, proof_from_json_struct}, +// }, +// helpers::bigint_to_u8_32, +// inclusion::merkle_inclusion_proof_inputs::{InclusionMerkleProofInputs, InclusionProofInputs}, +// inclusion_legacy::merkle_inclusion_proof_inputs::InclusionProofInputs as InclusionProofInputsLegacy, +// non_inclusion::merkle_non_inclusion_proof_inputs::{ +// get_non_inclusion_proof_inputs, NonInclusionProofInputs, +// }, +// non_inclusion_legacy::merkle_non_inclusion_proof_inputs::NonInclusionProofInputs as NonInclusionProofInputsLegacy, +// }; +// use light_system_program::{ +// invoke::processor::CompressedProof, +// sdk::{ +// compressed_account::{CompressedAccountWithMerkleContext, MerkleContext, QueueIndex}, +// event::PublicTransactionEvent, +// }, +// }; +// use light_utils::{ +// bigint::bigint_to_be_bytes_array, +// hashchain::{create_hash_chain_from_slice, create_tx_hash}, +// }; +// use log::{debug, info, warn}; +// use num_bigint::{BigInt, BigUint}; +// use num_traits::ops::bytes::FromBytes; +// use reqwest::Client; +// use solana_sdk::{ +// bs58, instruction::Instruction, program_pack::Pack, pubkey::Pubkey, signature::Keypair, +// signer::Signer, +// }; +// use spl_token::instruction::initialize_mint; +// +// use crate::{ +// create_address_merkle_tree_and_queue_account_with_assert, e2e_test_env::KeypairActionConfig, +// spl::create_initialize_mint_instructions, +// }; +// +// #[derive(Debug)] +// pub struct TestIndexer { +// pub state_merkle_trees: Vec, +// pub address_merkle_trees: Vec, +// pub payer: Keypair, +// pub group_pda: Pubkey, +// pub compressed_accounts: Vec, +// pub nullified_compressed_accounts: Vec, +// pub token_compressed_accounts: Vec, +// pub token_nullified_compressed_accounts: Vec, +// pub events: Vec, +// pub prover_config: Option, +// phantom: PhantomData, +// } +// +// #[async_trait] +// impl Indexer for TestIndexer { +// async fn get_queue_elements( +// &self, +// pubkey: [u8; 32], +// _batch: u64, +// start_offset: u64, +// end_offset: u64, +// ) -> Result, IndexerError> { +// let pubkey = Pubkey::new_from_array(pubkey); +// let address_tree_bundle = self +// .address_merkle_trees +// .iter() +// .find(|x| x.accounts.merkle_tree == pubkey); +// if let Some(address_tree_bundle) = address_tree_bundle { +// return Ok(address_tree_bundle.queue_elements +// [start_offset as usize..end_offset as usize] +// .to_vec()); +// } +// let state_tree_bundle = self +// .state_merkle_trees +// .iter() +// .find(|x| x.accounts.merkle_tree == pubkey); +// if let Some(state_tree_bundle) = state_tree_bundle { +// return Ok(state_tree_bundle.output_queue_elements +// [start_offset as usize..end_offset as usize] +// .to_vec()); +// } +// Err(IndexerError::Custom("Merkle tree not found".to_string())) +// } +// +// fn get_proof_by_index(&mut self, merkle_tree_pubkey: Pubkey, index: u64) -> ProofOfLeaf { +// let mut bundle = self +// .state_merkle_trees +// .iter_mut() +// .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) +// .unwrap(); +// +// while bundle.merkle_tree.leaves().len() <= index as usize { +// bundle.merkle_tree.append(&[0u8; 32]).unwrap(); +// } +// +// let leaf = match bundle.merkle_tree.get_leaf(index as usize) { +// Ok(leaf) => leaf, +// Err(_) => { +// bundle.merkle_tree.append(&[0u8; 32]).unwrap(); +// bundle.merkle_tree.get_leaf(index as usize).unwrap() +// } +// }; +// +// let proof = bundle +// .merkle_tree +// .get_proof_of_leaf(index as usize, true) +// .unwrap() +// .to_vec(); +// +// ProofOfLeaf { leaf, proof } +// } 
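+//
+//     // Usage sketch for get_proof_by_index above (hypothetical pubkey and
+//     // index; the method pads the tree with zero leaves until `index`
+//     // exists, then proves against the padded tree):
+//     //
+//     //     let ProofOfLeaf { leaf, proof } =
+//     //         indexer.get_proof_by_index(merkle_tree_pubkey, 42);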
+// +// fn get_proofs_by_indices( +// &mut self, +// merkle_tree_pubkey: Pubkey, +// indices: &[u64], +// ) -> Vec { +// indices +// .iter() +// .map(|&index| self.get_proof_by_index(merkle_tree_pubkey, index)) +// .collect() +// } +// +// /// leaf index, leaf, tx hash +// fn get_leaf_indices_tx_hashes( +// &mut self, +// merkle_tree_pubkey: Pubkey, +// zkp_batch_size: usize, +// ) -> Vec<(u32, [u8; 32], [u8; 32])> { +// let mut state_merkle_tree_bundle = self +// .state_merkle_trees +// .iter_mut() +// .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) +// .unwrap(); +// +// state_merkle_tree_bundle.input_leaf_indices[..zkp_batch_size].to_vec() +// } +// +// async fn get_subtrees( +// &self, +// merkle_tree_pubkey: [u8; 32], +// ) -> Result, IndexerError> { +// let merkle_tree_pubkey = Pubkey::new_from_array(merkle_tree_pubkey); +// let address_tree_bundle = self +// .address_merkle_trees +// .iter() +// .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey); +// if let Some(address_tree_bundle) = address_tree_bundle { +// Ok(address_tree_bundle.merkle_tree.merkle_tree.get_subtrees()) +// } else { +// let state_tree_bundle = self +// .state_merkle_trees +// .iter() +// .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey); +// if let Some(state_tree_bundle) = state_tree_bundle { +// Ok(state_tree_bundle.merkle_tree.get_subtrees()) +// } else { +// Err(IndexerError::Custom("Merkle tree not found".to_string())) +// } +// } +// } +// +// async fn get_multiple_compressed_account_proofs( +// &self, +// hashes: Vec, +// ) -> Result, IndexerError> { +// info!("Getting proofs for {:?}", hashes); +// let mut proofs: Vec = Vec::new(); +// hashes.iter().for_each(|hash| { +// let hash_array: [u8; 32] = bs58::decode(hash) +// .into_vec() +// .unwrap() +// .as_slice() +// .try_into() +// .unwrap(); +// +// self.state_merkle_trees.iter().for_each(|tree| { +// if let Some(leaf_index) = tree.merkle_tree.get_leaf_index(&hash_array) { +// let proof = tree +// .merkle_tree +// .get_proof_of_leaf(leaf_index, false) +// .unwrap(); +// proofs.push(MerkleProof { +// hash: hash.clone(), +// leaf_index: leaf_index as u64, +// merkle_tree: tree.accounts.merkle_tree.to_string(), +// proof: proof.to_vec(), +// root_seq: tree.merkle_tree.sequence_number as u64, +// }); +// } +// }) +// }); +// Ok(proofs) +// } +// +// async fn get_rpc_compressed_accounts_by_owner( +// &self, +// owner: &Pubkey, +// ) -> Result, IndexerError> { +// let result = self.get_compressed_accounts_by_owner(owner); +// let mut hashes: Vec = Vec::new(); +// for account in result.iter() { +// let hash = account.hash()?; +// let bs58_hash = bs58::encode(hash).into_string(); +// hashes.push(bs58_hash); +// } +// Ok(hashes) +// } +// +// async fn get_multiple_new_address_proofs( +// &self, +// merkle_tree_pubkey: [u8; 32], +// addresses: Vec<[u8; 32]>, +// ) -> Result>, IndexerError> { +// self._get_multiple_new_address_proofs(merkle_tree_pubkey, addresses, false) +// .await +// } +// +// async fn get_multiple_new_address_proofs_full( +// &self, +// merkle_tree_pubkey: [u8; 32], +// addresses: Vec<[u8; 32]>, +// ) -> Result>, IndexerError> { +// self._get_multiple_new_address_proofs(merkle_tree_pubkey, addresses, true) +// .await +// } +// +// fn account_nullified(&mut self, merkle_tree_pubkey: Pubkey, account_hash: &str) { +// let decoded_hash: [u8; 32] = bs58::decode(account_hash) +// .into_vec() +// .unwrap() +// .as_slice() +// .try_into() +// .unwrap(); +// +// if let Some(state_tree_bundle) = self +// .state_merkle_trees +// 
.iter_mut() +// .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) +// { +// if let Some(leaf_index) = state_tree_bundle.merkle_tree.get_leaf_index(&decoded_hash) { +// state_tree_bundle +// .merkle_tree +// .update(&[0u8; 32], leaf_index) +// .unwrap(); +// } +// } +// } +// +// fn address_tree_updated( +// &mut self, +// merkle_tree_pubkey: Pubkey, +// context: &NewAddressProofWithContext<16>, +// ) { +// info!("Updating address tree..."); +// let mut address_tree_bundle: &mut AddressMerkleTreeBundle = self +// .address_merkle_trees +// .iter_mut() +// .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) +// .unwrap(); +// +// let new_low_element = context.new_low_element.clone().unwrap(); +// let new_element = context.new_element.clone().unwrap(); +// let new_element_next_value = context.new_element_next_value.clone().unwrap(); +// address_tree_bundle +// .merkle_tree +// .update(&new_low_element, &new_element, &new_element_next_value) +// .unwrap(); +// address_tree_bundle +// .indexed_array +// .append_with_low_element_index(new_low_element.index, &new_element.value) +// .unwrap(); +// info!("Address tree updated"); +// } +// +// fn get_state_merkle_tree_accounts(&self, pubkeys: &[Pubkey]) -> Vec { +// pubkeys +// .iter() +// .map(|x| { +// self.state_merkle_trees +// .iter() +// .find(|y| y.accounts.merkle_tree == *x || y.accounts.nullifier_queue == *x) +// .unwrap() +// .accounts +// }) +// .collect::>() +// } +// +// fn add_event_and_compressed_accounts( +// &mut self, +// slot: u64, +// event: &PublicTransactionEvent, +// ) -> ( +// Vec, +// Vec, +// ) { +// let mut compressed_accounts = Vec::new(); +// let mut token_compressed_accounts = Vec::new(); +// let event_inputs_len = event.input_compressed_account_hashes.len(); +// let event_outputs_len = event.output_compressed_account_hashes.len(); +// for i in 0..std::cmp::max(event_inputs_len, event_outputs_len) { +// self.process_v1_compressed_account( +// slot, +// event, +// i, +// &mut token_compressed_accounts, +// &mut compressed_accounts, +// ); +// } +// +// self.events.push(event.clone()); +// (compressed_accounts, token_compressed_accounts) +// } +// +// fn get_state_merkle_trees(&self) -> &Vec { +// &self.state_merkle_trees +// } +// +// fn get_state_merkle_trees_mut(&mut self) -> &mut Vec { +// &mut self.state_merkle_trees +// } +// +// fn get_address_merkle_trees(&self) -> &Vec { +// &self.address_merkle_trees +// } +// +// fn get_address_merkle_trees_mut(&mut self) -> &mut Vec { +// &mut self.address_merkle_trees +// } +// +// fn get_token_compressed_accounts(&self) -> &Vec { +// &self.token_compressed_accounts +// } +// +// fn get_payer(&self) -> &Keypair { +// &self.payer +// } +// +// fn get_group_pda(&self) -> &Pubkey { +// &self.group_pda +// } +// +// async fn create_proof_for_compressed_accounts( +// &mut self, +// compressed_accounts: Option>, +// state_merkle_tree_pubkeys: Option>, +// new_addresses: Option<&[[u8; 32]]>, +// address_merkle_tree_pubkeys: Option>, +// rpc: &mut R, +// ) -> ProofRpcResult { +// if compressed_accounts.is_some() +// && ![1usize, 2usize, 3usize, 4usize, 8usize] +// .contains(&compressed_accounts.as_ref().unwrap().len()) +// { +// panic!( +// "compressed_accounts must be of length 1, 2, 3, 4 or 8 != {}", +// compressed_accounts.unwrap().len() +// ) +// } +// if new_addresses.is_some() && ![1usize, 2usize].contains(&new_addresses.unwrap().len()) { +// panic!("new_addresses must be of length 1, 2") +// } +// let client = Client::new(); +// let (root_indices, 
address_root_indices, json_payload) = +// match (compressed_accounts, new_addresses) { +// (Some(accounts), None) => { +// let (payload, payload_legacy, indices) = self +// .process_inclusion_proofs( +// &state_merkle_tree_pubkeys.unwrap(), +// &accounts, +// rpc, +// ) +// .await; +// if let Some(payload) = payload { +// (indices, Vec::new(), payload.to_string()) +// } else { +// (indices, Vec::new(), payload_legacy.unwrap().to_string()) +// } +// } +// (None, Some(addresses)) => { +// let (payload, payload_legacy, indices) = self +// .process_non_inclusion_proofs( +// address_merkle_tree_pubkeys.unwrap().as_slice(), +// addresses, +// rpc, +// ) +// .await; +// let payload_string = if let Some(payload) = payload { +// payload.to_string() +// } else { +// payload_legacy.unwrap().to_string() +// }; +// (Vec::::new(), indices, payload_string) +// } +// (Some(accounts), Some(addresses)) => { +// let (inclusion_payload, inclusion_payload_legacy, inclusion_indices) = self +// .process_inclusion_proofs( +// &state_merkle_tree_pubkeys.unwrap(), +// &accounts, +// rpc, +// ) +// .await; +// +// let ( +// non_inclusion_payload, +// non_inclusion_payload_legacy, +// non_inclusion_indices, +// ) = self +// .process_non_inclusion_proofs( +// address_merkle_tree_pubkeys.unwrap().as_slice(), +// addresses, +// rpc, +// ) +// .await; +// let json_payload = if let Some(non_inclusion_payload) = non_inclusion_payload { +// let public_input_hash = BigInt::from_bytes_be( +// num_bigint::Sign::Plus, +// &create_hash_chain_from_slice(&[ +// bigint_to_u8_32( +// &string_to_big_int( +// &inclusion_payload.as_ref().unwrap().public_input_hash, +// ) +// .unwrap(), +// ) +// .unwrap(), +// bigint_to_u8_32( +// &string_to_big_int(&non_inclusion_payload.public_input_hash) +// .unwrap(), +// ) +// .unwrap(), +// ]) +// .unwrap(), +// ); +// println!( +// "inclusion public input hash offchain {:?}", +// bigint_to_u8_32( +// &string_to_big_int( +// &inclusion_payload.as_ref().unwrap().public_input_hash, +// ) +// .unwrap(), +// ) +// .unwrap() +// ); +// println!( +// "non inclusion public input hash offchain {:?}", +// bigint_to_u8_32( +// &string_to_big_int(&non_inclusion_payload.public_input_hash) +// .unwrap() +// ) +// .unwrap() +// ); +// +// println!( +// "public input hash offchain {:?}", +// public_input_hash.to_bytes_be() +// ); +// +// CombinedJsonStruct { +// circuit_type: ProofType::Combined.to_string(), +// state_tree_height: DEFAULT_BATCH_STATE_TREE_HEIGHT, +// address_tree_height: DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, +// public_input_hash: big_int_to_string(&public_input_hash), +// inclusion: inclusion_payload.unwrap().inputs, +// non_inclusion: non_inclusion_payload.inputs, +// } +// .to_string() +// } else if let Some(non_inclusion_payload) = non_inclusion_payload_legacy { +// CombinedJsonStructLegacy { +// circuit_type: ProofType::Combined.to_string(), +// state_tree_height: 26, +// address_tree_height: 26, +// inclusion: inclusion_payload_legacy.unwrap().inputs, +// non_inclusion: non_inclusion_payload.inputs, +// } +// .to_string() +// } else { +// panic!("Unsupported tree height") +// }; +// (inclusion_indices, non_inclusion_indices, json_payload) +// } +// _ => { +// panic!("At least one of compressed_accounts or new_addresses must be provided") +// } +// }; +// +// println!("json_payload {:?}", json_payload); +// let mut retries = 3; +// while retries > 0 { +// let response_result = client +// .post(format!("{}{}", SERVER_ADDRESS, PROVE_PATH)) +// .header("Content-Type", "text/plain; 
charset=utf-8") +// .body(json_payload.clone()) +// .send() +// .await +// .expect("Failed to execute request."); +// println!("response_result {:?}", response_result); +// if response_result.status().is_success() { +// let body = response_result.text().await.unwrap(); +// println!("body {:?}", body); +// println!("root_indices {:?}", root_indices); +// println!("address_root_indices {:?}", address_root_indices); +// let proof_json = deserialize_gnark_proof_json(&body).unwrap(); +// let (proof_a, proof_b, proof_c) = proof_from_json_struct(proof_json); +// let (proof_a, proof_b, proof_c) = compress_proof(&proof_a, &proof_b, &proof_c); +// let root_indices = root_indices.iter().map(|x| Some(*x)).collect(); +// return ProofRpcResult { +// root_indices, +// address_root_indices: address_root_indices.clone(), +// proof: CompressedProof { +// a: proof_a, +// b: proof_b, +// c: proof_c, +// }, +// }; +// } else { +// warn!("Error: {}", response_result.text().await.unwrap()); +// tokio::time::sleep(Duration::from_secs(1)).await; +// retries -= 1; +// } +// } +// panic!("Failed to get proof from server"); +// } +// +// async fn create_proof_for_compressed_accounts2( +// &mut self, +// compressed_accounts: Option>, +// state_merkle_tree_pubkeys: Option>, +// new_addresses: Option<&[[u8; 32]]>, +// address_merkle_tree_pubkeys: Option>, +// rpc: &mut R, +// ) -> BatchedTreeProofRpcResult { +// let mut indices_to_remove = Vec::new(); +// +// // for all accounts in batched trees, check whether values are in tree or queue +// let (compressed_accounts, state_merkle_tree_pubkeys) = +// if let Some((compressed_accounts, state_merkle_tree_pubkeys)) = +// compressed_accounts.zip(state_merkle_tree_pubkeys) +// { +// for (i, (compressed_account, state_merkle_tree_pubkey)) in compressed_accounts +// .iter() +// .zip(state_merkle_tree_pubkeys.iter()) +// .enumerate() +// { +// let accounts = self.state_merkle_trees.iter().find(|x| { +// x.accounts.merkle_tree == *state_merkle_tree_pubkey && x.version == 2 +// }); +// if let Some(accounts) = accounts { +// let output_queue_pubkey = accounts.accounts.nullifier_queue; +// let mut queue = +// AccountZeroCopy::::new(rpc, output_queue_pubkey) +// .await; +// let queue_zero_copy = BatchedQueueAccount::output_queue_from_bytes_mut( +// queue.account.data.as_mut_slice(), +// ) +// .unwrap(); +// for value_array in queue_zero_copy.value_vecs.iter() { +// let index = value_array.iter().position(|x| *x == *compressed_account); +// if index.is_some() { +// indices_to_remove.push(i); +// } +// } +// } +// } +// let compress_accounts = compressed_accounts +// .iter() +// .enumerate() +// .filter(|(i, _)| !indices_to_remove.contains(i)) +// .map(|(_, x)| *x) +// .collect::>(); +// let state_merkle_tree_pubkeys = state_merkle_tree_pubkeys +// .iter() +// .enumerate() +// .filter(|(i, _)| !indices_to_remove.contains(i)) +// .map(|(_, x)| *x) +// .collect::>(); +// if compress_accounts.is_empty() { +// (None, None) +// } else { +// (Some(compress_accounts), Some(state_merkle_tree_pubkeys)) +// } +// } else { +// (None, None) +// }; +// let rpc_result = if (compressed_accounts.is_some() +// && !compressed_accounts.as_ref().unwrap().is_empty()) +// || address_merkle_tree_pubkeys.is_some() +// { +// Some( +// self.create_proof_for_compressed_accounts( +// compressed_accounts, +// state_merkle_tree_pubkeys, +// new_addresses, +// address_merkle_tree_pubkeys, +// rpc, +// ) +// .await, +// ) +// } else { +// None +// }; +// let address_root_indices = if let Some(rpc_result) = 
rpc_result.as_ref() { +// rpc_result.address_root_indices.clone() +// } else { +// Vec::new() +// }; +// let root_indices = { +// let mut root_indices = if let Some(rpc_result) = rpc_result.as_ref() { +// rpc_result.root_indices.clone() +// } else { +// Vec::new() +// }; +// for index in indices_to_remove { +// root_indices.insert(index, None); +// } +// root_indices +// }; +// BatchedTreeProofRpcResult { +// proof: rpc_result.map(|x| x.proof), +// root_indices, +// address_root_indices, +// } +// } +// +// fn add_address_merkle_tree_accounts( +// &mut self, +// merkle_tree_keypair: &Keypair, +// queue_keypair: &Keypair, +// _owning_program_id: Option, +// ) -> AddressMerkleTreeAccounts { +// info!("Adding address merkle tree accounts..."); +// let address_merkle_tree_accounts = AddressMerkleTreeAccounts { +// merkle_tree: merkle_tree_keypair.pubkey(), +// queue: queue_keypair.pubkey(), +// }; +// self.address_merkle_trees +// .push(Self::add_address_merkle_tree_bundle( +// address_merkle_tree_accounts, +// )); +// info!( +// "Address merkle tree accounts added. Total: {}", +// self.address_merkle_trees.len() +// ); +// address_merkle_tree_accounts +// } +// +// /// returns compressed_accounts with the owner pubkey +// /// does not return token accounts. +// fn get_compressed_accounts_by_owner( +// &self, +// owner: &Pubkey, +// ) -> Vec { +// self.compressed_accounts +// .iter() +// .filter(|x| x.compressed_account.owner == *owner) +// .cloned() +// .collect() +// } +// +// fn get_compressed_token_accounts_by_owner(&self, owner: &Pubkey) -> Vec { +// self.token_compressed_accounts +// .iter() +// .filter(|x| x.token_data.owner == *owner) +// .cloned() +// .collect() +// } +// +// fn add_state_bundle(&mut self, state_bundle: StateMerkleTreeBundle) { +// self.get_state_merkle_trees_mut().push(state_bundle); +// } +// +// async fn update_test_indexer_after_append( +// &mut self, +// rpc: &mut R, +// merkle_tree_pubkey: Pubkey, +// output_queue_pubkey: Pubkey, +// num_inserted_zkps: u64, +// ) { +// let mut state_merkle_tree_bundle = self +// .state_merkle_trees +// .iter_mut() +// .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) +// .unwrap(); +// +// let (merkle_tree_next_index, root) = { +// let mut merkle_tree_account = +// rpc.get_account(merkle_tree_pubkey).await.unwrap().unwrap(); +// let merkle_tree = BatchedMerkleTreeAccount::state_tree_from_bytes_mut( +// merkle_tree_account.data.as_mut_slice(), +// ) +// .unwrap(); +// ( +// merkle_tree.get_metadata().next_index as usize, +// *merkle_tree.root_history.last().unwrap(), +// ) +// }; +// +// let (max_num_zkp_updates, zkp_batch_size) = { +// let mut output_queue_account = +// rpc.get_account(output_queue_pubkey).await.unwrap().unwrap(); +// let output_queue = BatchedQueueAccount::output_queue_from_bytes_mut( +// output_queue_account.data.as_mut_slice(), +// ) +// .unwrap(); +// +// let output_queue_account = output_queue.get_metadata(); +// let max_num_zkp_updates = output_queue_account.batch_metadata.get_num_zkp_batches(); +// let zkp_batch_size = output_queue_account.batch_metadata.zkp_batch_size; +// (max_num_zkp_updates, zkp_batch_size) +// }; +// +// let leaves = state_merkle_tree_bundle.output_queue_elements.to_vec(); +// +// let start = (num_inserted_zkps as usize) * zkp_batch_size as usize; +// let end = start + zkp_batch_size as usize; +// let batch_update_leaves = leaves[start..end].to_vec(); +// +// for (i, _) in batch_update_leaves.iter().enumerate() { +// // if leaves[i] == [0u8; 32] { +// let index = 
merkle_tree_next_index + i - zkp_batch_size as usize;
+//             // This is dangerous; it should call self.get_leaf_by_index(), but it
+//             // can't because of the mutable borrow.
+//             // TODO: call a get_leaf_by_index equivalent; we could move the method to the reference merkle tree
+//             let leaf = state_merkle_tree_bundle
+//                 .merkle_tree
+//                 .get_leaf(index)
+//                 .unwrap();
+//             if leaf == [0u8; 32] {
+//                 state_merkle_tree_bundle
+//                     .merkle_tree
+//                     .update(&batch_update_leaves[i], index)
+//                     .unwrap();
+//             }
+//         }
+//         assert_eq!(
+//             root,
+//             state_merkle_tree_bundle.merkle_tree.root(),
+//             "update indexer after append root invalid"
+//         );
+//
+//         let num_inserted_zkps = num_inserted_zkps + 1;
+//         // check: can we get rid of this and use the data from the merkle tree?
+//         if num_inserted_zkps == max_num_zkp_updates {
+//             for _ in 0..zkp_batch_size * max_num_zkp_updates {
+//                 state_merkle_tree_bundle.output_queue_elements.remove(0);
+//             }
+//         }
+//     }
+//
+//     async fn update_test_indexer_after_nullification(
+//         &mut self,
+//         rpc: &mut R,
+//         merkle_tree_pubkey: Pubkey,
+//         batch_index: usize,
+//     ) {
+//         let state_merkle_tree_bundle = self
+//             .state_merkle_trees
+//             .iter_mut()
+//             .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey)
+//             .unwrap();
+//
+//         let mut merkle_tree_account = rpc.get_account(merkle_tree_pubkey).await.unwrap().unwrap();
+//         let merkle_tree = BatchedMerkleTreeAccount::state_tree_from_bytes_mut(
+//             merkle_tree_account.data.as_mut_slice(),
+//         )
+//         .unwrap();
+//
+//         let batch = &merkle_tree.batches[batch_index];
+//         if batch.get_state() == BatchState::Inserted || batch.get_state() == BatchState::Full {
+//             let batch_size = batch.zkp_batch_size;
+//             let leaf_indices_tx_hashes =
+//                 state_merkle_tree_bundle.input_leaf_indices[..batch_size as usize].to_vec();
+//             for (index, leaf, tx_hash) in leaf_indices_tx_hashes.iter() {
+//                 let index = *index as usize;
+//                 let leaf = *leaf;
+//                 let index_bytes = index.to_be_bytes();
+//
+//                 let nullifier = Poseidon::hashv(&[&leaf, &index_bytes, tx_hash]).unwrap();
+//
+//                 state_merkle_tree_bundle.input_leaf_indices.remove(0);
+//                 state_merkle_tree_bundle
+//                     .merkle_tree
+//                     .update(&nullifier, index)
+//                     .unwrap();
+//             }
+//         }
+//     }
+//
+//     async fn finalize_batched_address_tree_update(
+//         &mut self,
+//         rpc: &mut R,
+//         merkle_tree_pubkey: Pubkey,
+//     ) {
+//         let mut account = rpc.get_account(merkle_tree_pubkey).await.unwrap().unwrap();
+//         let onchain_account =
+//             BatchedMerkleTreeAccount::address_tree_from_bytes_mut(account.data.as_mut_slice())
+//                 .unwrap();
+//         let address_tree = self
+//             .address_merkle_trees
+//             .iter_mut()
+//             .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey)
+//             .unwrap();
+//         let address_tree_index = address_tree.merkle_tree.merkle_tree.rightmost_index;
+//         let onchain_next_index = onchain_account.get_metadata().next_index;
+//         let diff_onchain_indexer = onchain_next_index - address_tree_index as u64;
+//         let addresses = address_tree.queue_elements[0..diff_onchain_indexer as usize].to_vec();
+//
+//         for _ in 0..diff_onchain_indexer {
+//             address_tree.queue_elements.remove(0);
+//         }
+//         for new_element_value in &addresses {
+//             address_tree
+//                 .merkle_tree
+//                 .append(
+//                     &BigUint::from_bytes_be(new_element_value),
+//                     &mut address_tree.indexed_array,
+//                 )
+//                 .unwrap();
+//         }
+//
+//         let onchain_root = onchain_account.root_history.last().unwrap();
+//         let new_root = address_tree.merkle_tree.root();
+//         assert_eq!(*onchain_root, new_root);
+//         println!("finalized batched address tree update");
+//     }
+// }
+//
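+// // Construction sketch for the inherent impl below (assumes an
+// // `EnvAccounts` test fixture, a funded `payer`, and a concrete
+// // `RpcConnection` type `R`; a prover is only spawned when a
+// // `ProverConfig` is passed):
+// //
+// //     let mut indexer = TestIndexer::<R>::init_from_env(&payer, &env, None).await;
+//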
+// impl TestIndexer { +// async fn _get_multiple_new_address_proofs( +// &self, +// merkle_tree_pubkey: [u8; 32], +// addresses: Vec<[u8; 32]>, +// full: bool, +// ) -> Result>, IndexerError> { +// let mut proofs: Vec> = Vec::new(); +// +// for address in addresses.iter() { +// info!("Getting new address proof for {:?}", address); +// let pubkey = Pubkey::from(merkle_tree_pubkey); +// let address_tree_bundle = self +// .address_merkle_trees +// .iter() +// .find(|x| x.accounts.merkle_tree == pubkey) +// .unwrap(); +// +// let address_biguint = BigUint::from_bytes_be(address.as_slice()); +// let (old_low_address, _old_low_address_next_value) = address_tree_bundle +// .indexed_array +// .find_low_element_for_nonexistent(&address_biguint) +// .unwrap(); +// let address_bundle = address_tree_bundle +// .indexed_array +// .new_element_with_low_element_index(old_low_address.index, &address_biguint) +// .unwrap(); +// +// let (old_low_address, old_low_address_next_value) = address_tree_bundle +// .indexed_array +// .find_low_element_for_nonexistent(&address_biguint) +// .unwrap(); +// +// // Get the Merkle proof for updating low element. +// let low_address_proof = address_tree_bundle +// .merkle_tree +// .get_proof_of_leaf(old_low_address.index, full) +// .unwrap(); +// +// let low_address_index: u64 = old_low_address.index as u64; +// let low_address_value: [u8; 32] = +// bigint_to_be_bytes_array(&old_low_address.value).unwrap(); +// let low_address_next_index: u64 = old_low_address.next_index as u64; +// let low_address_next_value: [u8; 32] = +// bigint_to_be_bytes_array(&old_low_address_next_value).unwrap(); +// let low_address_proof: [[u8; 32]; NET_HEIGHT] = low_address_proof.to_array().unwrap(); +// let proof = NewAddressProofWithContext:: { +// merkle_tree: merkle_tree_pubkey, +// low_address_index, +// low_address_value, +// low_address_next_index, +// low_address_next_value, +// low_address_proof, +// root: address_tree_bundle.merkle_tree.root(), +// root_seq: address_tree_bundle.merkle_tree.merkle_tree.sequence_number as u64, +// new_low_element: Some(address_bundle.new_low_element), +// new_element: Some(address_bundle.new_element), +// new_element_next_value: Some(address_bundle.new_element_next_value), +// }; +// proofs.push(proof); +// } +// Ok(proofs) +// } +// +// fn count_matching_hashes(&self, query_hashes: &[String]) -> usize { +// self.nullified_compressed_accounts +// .iter() +// .map(|account| self.compute_hash(account)) +// .filter(|bs58_hash| query_hashes.contains(bs58_hash)) +// .count() +// } +// +// fn compute_hash(&self, account: &CompressedAccountWithMerkleContext) -> String { +// // replace AccountType with actual type +// let hash = account +// .compressed_account +// .hash::( +// &account.merkle_context.merkle_tree_pubkey, +// &account.merkle_context.leaf_index, +// ) +// .unwrap(); +// bs58::encode(hash).into_string() +// } +// +// pub async fn init_from_env( +// payer: &Keypair, +// env: &EnvAccounts, +// prover_config: Option, +// ) -> Self { +// Self::new( +// vec![ +// StateMerkleTreeAccounts { +// merkle_tree: env.merkle_tree_pubkey, +// nullifier_queue: env.nullifier_queue_pubkey, +// cpi_context: env.cpi_context_account_pubkey, +// }, +// StateMerkleTreeAccounts { +// merkle_tree: env.batched_state_merkle_tree, +// nullifier_queue: env.batched_output_queue, +// cpi_context: env.batched_cpi_context, +// }, +// ], +// vec![ +// AddressMerkleTreeAccounts { +// merkle_tree: env.address_merkle_tree_pubkey, +// queue: env.address_merkle_tree_queue_pubkey, +// }, 
+// AddressMerkleTreeAccounts { +// merkle_tree: env.batch_address_merkle_tree, +// queue: env.batch_address_merkle_tree, +// }, +// ], +// payer.insecure_clone(), +// env.group_pda, +// prover_config, +// ) +// .await +// } +// +// pub async fn new( +// state_merkle_tree_accounts: Vec, +// address_merkle_tree_accounts: Vec, +// payer: Keypair, +// group_pda: Pubkey, +// prover_config: Option, +// ) -> Self { +// if let Some(ref prover_config) = prover_config { +// // TODO: remove restart input and check whether prover is already +// // running with correct config +// spawn_prover(true, prover_config.clone()).await; +// } +// let mut state_merkle_trees = Vec::new(); +// for state_merkle_tree_account in state_merkle_tree_accounts.iter() { +// let test_batched_output_queue = +// Keypair::from_bytes(&BATCHED_OUTPUT_QUEUE_TEST_KEYPAIR).unwrap(); +// let (version, merkle_tree) = if state_merkle_tree_account.nullifier_queue +// == test_batched_output_queue.pubkey() +// { +// let merkle_tree = Box::new(MerkleTree::::new( +// DEFAULT_BATCH_STATE_TREE_HEIGHT as usize, +// 0, +// )); +// (2, merkle_tree) +// } else { +// let merkle_tree = Box::new(MerkleTree::::new( +// STATE_MERKLE_TREE_HEIGHT as usize, +// STATE_MERKLE_TREE_CANOPY_DEPTH as usize, +// )); +// (1, merkle_tree) +// }; +// +// state_merkle_trees.push(StateMerkleTreeBundle { +// accounts: *state_merkle_tree_account, +// merkle_tree, +// rollover_fee: FeeConfig::default().state_merkle_tree_rollover as i64, +// version, +// output_queue_elements: vec![], +// input_leaf_indices: vec![], +// }); +// } +// +// let mut address_merkle_trees = Vec::new(); +// for address_merkle_tree_account in address_merkle_tree_accounts { +// address_merkle_trees.push(Self::add_address_merkle_tree_bundle( +// address_merkle_tree_account, +// )); +// } +// +// Self { +// state_merkle_trees, +// address_merkle_trees, +// payer, +// compressed_accounts: vec![], +// nullified_compressed_accounts: vec![], +// events: vec![], +// token_compressed_accounts: vec![], +// token_nullified_compressed_accounts: vec![], +// prover_config, +// phantom: Default::default(), +// group_pda, +// } +// } +// +// pub fn add_address_merkle_tree_bundle( +// address_merkle_tree_accounts: AddressMerkleTreeAccounts, +// // TODO: add config here +// ) -> AddressMerkleTreeBundle { +// let (height, canopy) = +// if address_merkle_tree_accounts.merkle_tree == address_merkle_tree_accounts.queue { +// (40, 0) +// } else { +// (26, STATE_MERKLE_TREE_CANOPY_DEPTH as usize) +// }; +// let mut merkle_tree = +// Box::new(IndexedMerkleTree::::new(height, canopy).unwrap()); +// merkle_tree.init().unwrap(); +// let mut indexed_array = Box::>::default(); +// indexed_array.init().unwrap(); +// AddressMerkleTreeBundle { +// merkle_tree, +// indexed_array, +// accounts: address_merkle_tree_accounts, +// rollover_fee: FeeConfig::default().address_queue_rollover as i64, +// queue_elements: vec![], +// } +// } +// +// async fn add_address_merkle_tree_v1( +// &mut self, +// rpc: &mut R, +// merkle_tree_keypair: &Keypair, +// queue_keypair: &Keypair, +// owning_program_id: Option, +// ) -> AddressMerkleTreeAccounts { +// create_address_merkle_tree_and_queue_account_with_assert( +// &self.payer, +// true, +// rpc, +// merkle_tree_keypair, +// queue_keypair, +// owning_program_id, +// None, +// &AddressMerkleTreeConfig::default(), +// &AddressQueueConfig::default(), +// 0, +// ) +// .await +// .unwrap(); +// self.add_address_merkle_tree_accounts(merkle_tree_keypair, queue_keypair, owning_program_id) +// } +// 
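+//     // v1 creates a concurrent address tree with a separate queue account;
+//     // the v2 variant below creates a batched address tree in which tree
+//     // and queue share one account, which is why `add_address_merkle_tree_bundle`
+//     // treats bundles with `merkle_tree == queue` as height-40 batched trees.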
+// async fn add_address_merkle_tree_v2( +// &mut self, +// rpc: &mut R, +// merkle_tree_keypair: &Keypair, +// queue_keypair: &Keypair, +// owning_program_id: Option, +// ) -> AddressMerkleTreeAccounts { +// info!( +// "Adding address merkle tree accounts v2 {:?}", +// merkle_tree_keypair.pubkey() +// ); +// +// let params = InitAddressTreeAccountsInstructionData::test_default(); +// +// info!( +// "Creating batched address merkle tree {:?}", +// merkle_tree_keypair.pubkey() +// ); +// create_batch_address_merkle_tree(rpc, &self.payer, merkle_tree_keypair, params) +// .await +// .unwrap(); +// info!( +// "Batched address merkle tree created {:?}", +// merkle_tree_keypair.pubkey() +// ); +// +// self.add_address_merkle_tree_accounts(merkle_tree_keypair, queue_keypair, owning_program_id) +// } +// +// pub async fn add_address_merkle_tree( +// &mut self, +// rpc: &mut R, +// merkle_tree_keypair: &Keypair, +// queue_keypair: &Keypair, +// owning_program_id: Option, +// version: u64, +// ) -> AddressMerkleTreeAccounts { +// if version == 1 { +// self.add_address_merkle_tree_v1( +// rpc, +// merkle_tree_keypair, +// queue_keypair, +// owning_program_id, +// ) +// .await +// } else if version == 2 { +// self.add_address_merkle_tree_v2( +// rpc, +// merkle_tree_keypair, +// queue_keypair, +// owning_program_id, +// ) +// .await +// } else { +// panic!( +// "add_address_merkle_tree: Version not supported, {}. Versions: 1, 2", +// version +// ) +// } +// } +// +// #[allow(clippy::too_many_arguments)] +// pub async fn add_state_merkle_tree( +// &mut self, +// rpc: &mut R, +// merkle_tree_keypair: &Keypair, +// queue_keypair: &Keypair, +// cpi_context_keypair: &Keypair, +// owning_program_id: Option, +// forester: Option, +// version: u64, +// ) { +// let (rollover_fee, merkle_tree) = match version { +// 1 => { +// create_state_merkle_tree_and_queue_account( +// &self.payer, +// true, +// rpc, +// merkle_tree_keypair, +// queue_keypair, +// Some(cpi_context_keypair), +// owning_program_id, +// forester, +// self.state_merkle_trees.len() as u64, +// &StateMerkleTreeConfig::default(), +// &NullifierQueueConfig::default(), +// ) +// .await +// .unwrap(); +// let merkle_tree = Box::new(MerkleTree::::new( +// STATE_MERKLE_TREE_HEIGHT as usize, +// STATE_MERKLE_TREE_CANOPY_DEPTH as usize, +// )); +// (FeeConfig::default().state_merkle_tree_rollover as i64,merkle_tree) +// } +// 2 => { +// let params = InitStateTreeAccountsInstructionData::test_default(); +// +// create_batched_state_merkle_tree( +// &self.payer, +// true, +// rpc, +// merkle_tree_keypair, +// queue_keypair, +// cpi_context_keypair, +// params, +// ).await; +// let merkle_tree = Box::new(MerkleTree::::new( +// DEFAULT_BATCH_STATE_TREE_HEIGHT as usize, +// 0 +// )); +// (FeeConfig::test_batched().state_merkle_tree_rollover as i64,merkle_tree) +// } +// _ => panic!( +// "add_state_merkle_tree: Version not supported, {}. 
Versions: 1 concurrent, 2 batched", +// version +// ), +// }; +// let state_merkle_tree_account = StateMerkleTreeAccounts { +// merkle_tree: merkle_tree_keypair.pubkey(), +// nullifier_queue: queue_keypair.pubkey(), +// cpi_context: cpi_context_keypair.pubkey(), +// }; +// +// self.state_merkle_trees.push(StateMerkleTreeBundle { +// merkle_tree, +// accounts: state_merkle_tree_account, +// rollover_fee, +// version, +// output_queue_elements: vec![], +// input_leaf_indices: vec![], +// }); +// } +// +// async fn process_inclusion_proofs( +// &self, +// merkle_tree_pubkeys: &[Pubkey], +// accounts: &[[u8; 32]], +// rpc: &mut R, +// ) -> ( +// Option, +// Option, +// Vec, +// ) { +// let mut inclusion_proofs = Vec::new(); +// let mut root_indices = Vec::new(); +// let mut height = 0; +// +// // Collect all proofs first before any await points +// let proof_data: Vec<_> = accounts +// .iter() +// .zip(merkle_tree_pubkeys.iter()) +// .map(|(account, &pubkey)| { +// let bundle = &self +// .state_merkle_trees +// .iter() +// .find(|x| x.accounts.merkle_tree == pubkey) +// .unwrap(); +// let merkle_tree = &bundle.merkle_tree; +// let leaf_index = merkle_tree.get_leaf_index(account).unwrap(); +// let proof = merkle_tree.get_proof_of_leaf(leaf_index, true).unwrap(); +// +// // Convert proof to owned data that implements Send +// let proof: Vec = proof.iter().map(|x| BigInt::from_be_bytes(x)).collect(); +// +// if height == 0 { +// height = merkle_tree.height; +// } else { +// assert_eq!(height, merkle_tree.height); +// } +// +// ( +// bundle.version, +// pubkey, +// leaf_index, +// proof, +// merkle_tree.root(), +// ) +// }) +// .collect(); +// +// // Now handle the async operations with the collected data +// for (i, (version, pubkey, leaf_index, proof, merkle_root)) in +// proof_data.into_iter().enumerate() +// { +// inclusion_proofs.push(InclusionMerkleProofInputs { +// root: BigInt::from_be_bytes(merkle_root.as_slice()), +// leaf: BigInt::from_be_bytes(&accounts[i]), +// path_index: BigInt::from_be_bytes(leaf_index.to_be_bytes().as_slice()), +// path_elements: proof, +// }); +// +// let (root_index, root) = if version == 1 { +// let fetched_merkle_tree = unsafe { +// get_concurrent_merkle_tree::( +// rpc, pubkey, +// ) +// .await +// }; +// ( +// fetched_merkle_tree.root_index() as u32, +// fetched_merkle_tree.root(), +// ) +// } else { +// let mut merkle_tree_account = rpc.get_account(pubkey).await.unwrap().unwrap(); +// let merkle_tree = BatchedMerkleTreeAccount::state_tree_from_bytes_mut( +// merkle_tree_account.data.as_mut_slice(), +// ) +// .unwrap(); +// ( +// merkle_tree.get_root_index(), +// merkle_tree.get_root().unwrap(), +// ) +// }; +// +// assert_eq!(merkle_root, root, "Merkle tree root mismatch"); +// root_indices.push(root_index as u16); +// } +// +// let (batch_inclusion_proof_inputs, legacy) = if height +// == DEFAULT_BATCH_STATE_TREE_HEIGHT as usize +// { +// let inclusion_proof_inputs = +// InclusionProofInputs::new(inclusion_proofs.as_slice()).unwrap(); +// ( +// Some(BatchInclusionJsonStruct::from_inclusion_proof_inputs( +// &inclusion_proof_inputs, +// )), +// None, +// ) +// } else if height == STATE_MERKLE_TREE_HEIGHT as usize { +// let inclusion_proof_inputs = InclusionProofInputsLegacy(inclusion_proofs.as_slice()); +// ( +// None, +// Some(BatchInclusionJsonStructLegacy::from_inclusion_proof_inputs( +// &inclusion_proof_inputs, +// )), +// ) +// } else { +// panic!("Unsupported tree height") +// }; +// +// (batch_inclusion_proof_inputs, legacy, root_indices) +// } +// 
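+//     // Note on process_inclusion_proofs above: proof data is collected
+//     // synchronously first so that no non-Send merkle tree references are
+//     // held across an `.await`; the on-chain root index is then fetched per
+//     // tree (concurrent tree for v1, batched account for v2), and the JSON
+//     // payload format is chosen by tree height (legacy 26 vs.
+//     // DEFAULT_BATCH_STATE_TREE_HEIGHT).
+//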
+// pub async fn process_non_inclusion_proofs( +// &self, +// address_merkle_tree_pubkeys: &[Pubkey], +// addresses: &[[u8; 32]], +// rpc: &mut R, +// ) -> ( +// Option, +// Option, +// Vec, +// ) { +// let mut non_inclusion_proofs = Vec::new(); +// let mut address_root_indices = Vec::new(); +// let mut tree_heights = Vec::new(); +// for tree in self.address_merkle_trees.iter() { +// println!("height {:?}", tree.merkle_tree.merkle_tree.height); +// println!("accounts {:?}", tree.accounts); +// } +// println!("process_non_inclusion_proofs: addresses {:?}", addresses); +// for (i, address) in addresses.iter().enumerate() { +// let address_tree = &self +// .address_merkle_trees +// .iter() +// .find(|x| x.accounts.merkle_tree == address_merkle_tree_pubkeys[i]) +// .unwrap(); +// tree_heights.push(address_tree.merkle_tree.merkle_tree.height); +// +// let proof_inputs = get_non_inclusion_proof_inputs( +// address, +// &address_tree.merkle_tree, +// &address_tree.indexed_array, +// ); +// non_inclusion_proofs.push(proof_inputs); +// +// // We don't have address queues in v2 (batch) address Merkle trees +// // hence both accounts in this struct are the same. +// let is_v2 = address_tree.accounts.merkle_tree == address_tree.accounts.queue; +// println!("is v2 {:?}", is_v2); +// println!( +// "address_merkle_tree_pubkeys[i] {:?}", +// address_merkle_tree_pubkeys[i] +// ); +// println!("address_tree.accounts {:?}", address_tree.accounts); +// if is_v2 { +// let account = rpc +// .get_account(address_merkle_tree_pubkeys[i]) +// .await +// .unwrap(); +// if let Some(mut account) = account { +// let account = BatchedMerkleTreeAccount::address_tree_from_bytes_mut( +// account.data.as_mut_slice(), +// ) +// .unwrap(); +// address_root_indices.push(account.get_root_index() as u16); +// } else { +// panic!( +// "TestIndexer.process_non_inclusion_proofs(): Address tree account not found." 
+//                     );
+//                 }
+//             } else {
+//                 let fetched_address_merkle_tree = unsafe {
+//                     get_indexed_merkle_tree::(
+//                         rpc,
+//                         address_merkle_tree_pubkeys[i],
+//                     )
+//                     .await
+//                 };
+//                 address_root_indices.push(fetched_address_merkle_tree.root_index() as u16);
+//             }
+//         }
+//         // if tree heights are not the same, panic
+//         if tree_heights.iter().any(|&x| x != tree_heights[0]) {
+//             panic!(
+//                 "All address merkle trees must have the same height {:?}",
+//                 tree_heights
+//             );
+//         }
+//         let (batch_non_inclusion_proof_inputs, batch_non_inclusion_proof_inputs_legacy) =
+//             if tree_heights[0] == 26 {
+//                 let non_inclusion_proof_inputs =
+//                     NonInclusionProofInputsLegacy::new(non_inclusion_proofs.as_slice());
+//                 (
+//                     None,
+//                     Some(
+//                         BatchNonInclusionJsonStructLegacy::from_non_inclusion_proof_inputs(
+//                             &non_inclusion_proof_inputs,
+//                         ),
+//                     ),
+//                 )
+//             } else if tree_heights[0] == 40 {
+//                 let non_inclusion_proof_inputs =
+//                     NonInclusionProofInputs::new(non_inclusion_proofs.as_slice()).unwrap();
+//                 (
+//                     Some(
+//                         BatchNonInclusionJsonStruct::from_non_inclusion_proof_inputs(
+//                             &non_inclusion_proof_inputs,
+//                         ),
+//                     ),
+//                     None,
+//                 )
+//             } else {
+//                 panic!("Unsupported tree height")
+//             };
+//         (
+//             batch_non_inclusion_proof_inputs,
+//             batch_non_inclusion_proof_inputs_legacy,
+//             address_root_indices,
+//         )
+//     }
+//
+//     /// deserializes an event
+//     /// adds the output_compressed_accounts to the compressed_accounts
+//     /// removes the input_compressed_accounts from the compressed_accounts
+//     /// adds the input_compressed_accounts to the nullified_compressed_accounts
+//     pub fn add_lamport_compressed_accounts(&mut self, slot: u64, event_bytes: Vec<u8>) {
+//         let event_bytes = event_bytes.clone();
+//         let event = PublicTransactionEvent::deserialize(&mut event_bytes.as_slice()).unwrap();
+//         self.add_event_and_compressed_accounts(slot, &event);
+//     }
+//
+//     /// deserializes an event
+//     /// adds the output_compressed_accounts to the compressed_accounts
+//     /// removes the input_compressed_accounts from the compressed_accounts
+//     /// adds the input_compressed_accounts to the nullified_compressed_accounts
+//     /// deserializes token data from the output_compressed_accounts
+//     /// adds the parsed token accounts to the token_compressed_accounts
+//     pub fn add_compressed_accounts_with_token_data(
+//         &mut self,
+//         slot: u64,
+//         event: &PublicTransactionEvent,
+//     ) {
+//         self.add_event_and_compressed_accounts(slot, event);
+//     }
+//
+//     /// returns the compressed sol balance of the owner pubkey
+//     pub fn get_compressed_balance(&self, owner: &Pubkey) -> u64 {
+//         self.compressed_accounts
+//             .iter()
+//             .filter(|x| x.compressed_account.owner == *owner)
+//             .map(|x| x.compressed_account.lamports)
+//             .sum()
+//     }
+//
+//     /// returns the compressed token balance of the owner pubkey for a token by mint
+//     pub fn get_compressed_token_balance(&self, owner: &Pubkey, mint: &Pubkey) -> u64 {
+//         self.token_compressed_accounts
+//             .iter()
+//             .filter(|x| {
+//                 x.compressed_account.compressed_account.owner == *owner
+//                     && x.token_data.mint == *mint
+//             })
+//             .map(|x| x.token_data.amount)
+//             .sum()
+//     }
+//
+//     fn process_v1_compressed_account(
+//         &mut self,
+//         slot: u64,
+//         event: &PublicTransactionEvent,
+//         i: usize,
+//         token_compressed_accounts: &mut Vec<TokenDataWithContext>,
+//         compressed_accounts: &mut Vec<CompressedAccountWithMerkleContext>,
+//     ) {
+//         let mut input_addresses = vec![];
+//         if event.input_compressed_account_hashes.len() > i {
+//             let tx_hash: [u8; 32] = create_tx_hash(
+//                 &event.input_compressed_account_hashes,
+//                 
&event.output_compressed_account_hashes, +// slot, +// ) +// .unwrap(); +// println!("tx_hash {:?}", tx_hash); +// println!("slot {:?}", slot); +// let hash = event.input_compressed_account_hashes[i]; +// let index = self.compressed_accounts.iter().position(|x| { +// x.compressed_account +// .hash::( +// &x.merkle_context.merkle_tree_pubkey, +// &x.merkle_context.leaf_index, +// ) +// .unwrap() +// == hash +// }); +// let (leaf_index, merkle_tree_pubkey) = if let Some(index) = index { +// self.nullified_compressed_accounts +// .push(self.compressed_accounts[index].clone()); +// let leaf_index = self.compressed_accounts[index].merkle_context.leaf_index; +// let merkle_tree_pubkey = self.compressed_accounts[index] +// .merkle_context +// .merkle_tree_pubkey; +// if let Some(address) = self.compressed_accounts[index].compressed_account.address { +// input_addresses.push(address); +// } +// self.compressed_accounts.remove(index); +// (leaf_index, merkle_tree_pubkey) +// } else { +// let index = self +// .token_compressed_accounts +// .iter() +// .position(|x| { +// x.compressed_account +// .compressed_account +// .hash::( +// &x.compressed_account.merkle_context.merkle_tree_pubkey, +// &x.compressed_account.merkle_context.leaf_index, +// ) +// .unwrap() +// == hash +// }) +// .expect("input compressed account not found"); +// self.token_nullified_compressed_accounts +// .push(self.token_compressed_accounts[index].clone()); +// let leaf_index = self.token_compressed_accounts[index] +// .compressed_account +// .merkle_context +// .leaf_index; +// let merkle_tree_pubkey = self.token_compressed_accounts[index] +// .compressed_account +// .merkle_context +// .merkle_tree_pubkey; +// self.token_compressed_accounts.remove(index); +// (leaf_index, merkle_tree_pubkey) +// }; +// let bundle = &mut self +// .get_state_merkle_trees_mut() +// .iter_mut() +// .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) +// .unwrap(); +// // Store leaf indices of input accounts for batched trees +// if bundle.version == 2 { +// let leaf_hash = event.input_compressed_account_hashes[i]; +// bundle +// .input_leaf_indices +// .push((leaf_index, leaf_hash, tx_hash)); +// } +// } +// let mut new_addresses = vec![]; +// if event.output_compressed_accounts.len() > i { +// let compressed_account = &event.output_compressed_accounts[i]; +// println!("output compressed account {:?}", compressed_account); +// if let Some(address) = compressed_account.compressed_account.address { +// if !input_addresses.iter().any(|x| x == &address) { +// new_addresses.push(address); +// } +// } +// +// let merkle_tree = self.state_merkle_trees.iter().find(|x| { +// x.accounts.merkle_tree +// == event.pubkey_array +// [event.output_compressed_accounts[i].merkle_tree_index as usize] +// }); +// // Check for output queue +// let merkle_tree = if let Some(merkle_tree) = merkle_tree { +// merkle_tree +// } else { +// self.state_merkle_trees +// .iter() +// .find(|x| { +// x.accounts.nullifier_queue +// == event.pubkey_array +// [event.output_compressed_accounts[i].merkle_tree_index as usize] +// }) +// .unwrap() +// }; +// println!("found merkle tree {:?}", merkle_tree.accounts.merkle_tree); +// let nullifier_queue_pubkey = merkle_tree.accounts.nullifier_queue; +// let merkle_tree_pubkey = merkle_tree.accounts.merkle_tree; +// // if data is some, try to deserialize token data, if it fails, add to compressed_accounts +// // if data is none add to compressed_accounts +// // new accounts are inserted in front so that the newest accounts are found 
first +// match compressed_account.compressed_account.data.as_ref() { +// Some(data) => { +// if compressed_account.compressed_account.owner == light_compressed_token::ID +// && data.discriminator == TOKEN_COMPRESSED_ACCOUNT_DISCRIMINATOR +// { +// if let Ok(token_data) = TokenData::deserialize(&mut data.data.as_slice()) { +// let token_account = TokenDataWithContext { +// token_data, +// compressed_account: CompressedAccountWithMerkleContext { +// compressed_account: compressed_account +// .compressed_account +// .clone(), +// merkle_context: MerkleContext { +// leaf_index: event.output_leaf_indices[i], +// merkle_tree_pubkey, +// nullifier_queue_pubkey, +// queue_index: None, +// }, +// }, +// }; +// token_compressed_accounts.push(token_account.clone()); +// self.token_compressed_accounts.insert(0, token_account); +// } +// } else { +// let compressed_account = CompressedAccountWithMerkleContext { +// compressed_account: compressed_account.compressed_account.clone(), +// merkle_context: MerkleContext { +// leaf_index: event.output_leaf_indices[i], +// merkle_tree_pubkey, +// nullifier_queue_pubkey, +// queue_index: None, +// }, +// }; +// compressed_accounts.push(compressed_account.clone()); +// self.compressed_accounts.insert(0, compressed_account); +// } +// } +// None => { +// let compressed_account = CompressedAccountWithMerkleContext { +// compressed_account: compressed_account.compressed_account.clone(), +// merkle_context: MerkleContext { +// leaf_index: event.output_leaf_indices[i], +// merkle_tree_pubkey, +// nullifier_queue_pubkey, +// queue_index: None, +// }, +// }; +// compressed_accounts.push(compressed_account.clone()); +// self.compressed_accounts.insert(0, compressed_account); +// } +// }; +// let seq = event +// .sequence_numbers +// .iter() +// .find(|x| x.pubkey == merkle_tree_pubkey); +// let seq = if let Some(seq) = seq { +// seq +// } else { +// event +// .sequence_numbers +// .iter() +// .find(|x| x.pubkey == nullifier_queue_pubkey) +// .unwrap() +// }; +// let is_batched = seq.seq == u64::MAX; +// +// println!("Output is batched {:?}", is_batched); +// if !is_batched { +// let merkle_tree = &mut self +// .state_merkle_trees +// .iter_mut() +// .find(|x| { +// x.accounts.merkle_tree +// == event.pubkey_array +// [event.output_compressed_accounts[i].merkle_tree_index as usize] +// }) +// .unwrap(); +// merkle_tree +// .merkle_tree +// .append( +// &compressed_account +// .compressed_account +// .hash::( +// &event.pubkey_array[event.output_compressed_accounts[i] +// .merkle_tree_index +// as usize], +// &event.output_leaf_indices[i], +// ) +// .unwrap(), +// ) +// .expect("insert failed"); +// } else { +// let merkle_tree = &mut self +// .state_merkle_trees +// .iter_mut() +// .find(|x| { +// x.accounts.nullifier_queue +// == event.pubkey_array +// [event.output_compressed_accounts[i].merkle_tree_index as usize] +// }) +// .unwrap(); +// +// merkle_tree +// .output_queue_elements +// .push(event.output_compressed_account_hashes[i]); +// } +// } +// println!("new addresses {:?}", new_addresses); +// println!("event.pubkey_array {:?}", event.pubkey_array); +// println!( +// "address merkle trees {:?}", +// self.address_merkle_trees +// .iter() +// .map(|x| x.accounts.merkle_tree) +// .collect::>() +// ); +// // checks whether there are addresses in outputs which don't exist in inputs. +// // if so check pubkey_array for the first address Merkle tree and append to the bundles queue elements. 
+//         // Note:
+//         // - creating addresses in multiple address Merkle trees in one tx is not supported
+//         // TODO: reimplement; this is not a good solution
+//         // - take addresses and address Merkle tree pubkeys from cpi to account compression program
+//         if !new_addresses.is_empty() {
+//             for pubkey in event.pubkey_array.iter() {
+//                 if let Some((_, address_merkle_tree)) = self
+//                     .address_merkle_trees
+//                     .iter_mut()
+//                     .enumerate()
+//                     .find(|(i, x)| x.accounts.merkle_tree == *pubkey)
+//                 {
+//                     address_merkle_tree
+//                         .queue_elements
+//                         .append(&mut new_addresses);
+//                 }
+//             }
+//         }
+//     }
+//
+//     pub(crate) fn get_address_merkle_tree(
+//         &self,
+//         merkle_tree_pubkey: Pubkey,
+//     ) -> Option<&AddressMerkleTreeBundle> {
+//         self.address_merkle_trees
+//             .iter()
+//             .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey)
+//     }
+// }
diff --git a/program-tests/utils/src/lib.rs b/program-tests/utils/src/lib.rs
index e02de430e..46ac8e2fe 100644
--- a/program-tests/utils/src/lib.rs
+++ b/program-tests/utils/src/lib.rs
@@ -29,12 +29,13 @@ pub mod state_tree_rollover;
 pub mod system_program;
 #[allow(unused)]
 pub mod test_forester;
+mod conversions;
+
 pub use create_address_test_program::ID as CREATE_ADDRESS_TEST_PROGRAM_ID;
 pub use forester_utils::{
     airdrop_lamports, create_account_instruction,
     forester_epoch::{Epoch, TreeAccounts, TreeType},
     get_concurrent_merkle_tree, get_hash_set, get_indexed_merkle_tree,
-    indexer::{AddressMerkleTreeAccounts, AddressMerkleTreeBundle, Indexer, TokenDataWithContext},
     registry::{
         create_rollover_address_merkle_tree_instructions,
         create_rollover_state_merkle_tree_instructions, register_test_forester,
diff --git a/program-tests/utils/src/spl.rs b/program-tests/utils/src/spl.rs
index 9911f47b3..05ff9bafe 100644
--- a/program-tests/utils/src/spl.rs
+++ b/program-tests/utils/src/spl.rs
@@ -1,7 +1,6 @@
 use anchor_spl::token::{Mint, TokenAccount};
 use forester_utils::{
     create_account_instruction,
-    indexer::{Indexer, TokenDataWithContext},
 };
 use light_client::{
     rpc::{errors::RpcError, RpcConnection},
@@ -38,13 +37,16 @@ use solana_sdk::{
     signature::{Keypair, Signature, Signer},
 };
 use spl_token::instruction::initialize_mint;
-
+use light_client::indexer::Indexer;
+use light_program_test::indexer::TestIndexerExtensions;
+use light_sdk::token::TokenDataWithMerkleContext;
 use crate::{
     assert_compressed_tx::get_merkle_tree_snapshots,
     assert_token_tx::{assert_create_mint, assert_mint_to, assert_transfer},
 };
+use crate::conversions::{program_to_sdk_public_transaction_event, sdk_to_program_compressed_account, sdk_to_program_compressed_account_with_merkle_context, sdk_to_program_compressed_proof, sdk_to_program_merkle_context, sdk_to_program_token_data};
 
-pub async fn mint_tokens_helper<R: RpcConnection, I: Indexer<R>>(
+pub async fn mint_tokens_helper<R: RpcConnection, I: Indexer<R> + TestIndexerExtensions<R>>(
     rpc: &mut R,
     test_indexer: &mut I,
     merkle_tree_pubkey: &Pubkey,
@@ -510,7 +512,7 @@ pub async fn compressed_transfer_test<R: RpcConnection, I: Indexer<R>>(
 }
 
 #[allow(clippy::too_many_arguments)]
-pub async fn compressed_transfer_22_test<R: RpcConnection, I: Indexer<R>>(
+pub async fn compressed_transfer_22_test<R: RpcConnection, I: Indexer<R> + TestIndexerExtensions<R>>(
     payer: &Keypair,
     rpc: &mut R,
     test_indexer: &mut I,
@@ -519,7 +521,7 @@
     recipients: &[Pubkey],
     amounts: &[u64],
     mut lamports: Option>>,
-    input_compressed_accounts: &[TokenDataWithContext],
+    input_compressed_accounts: &[TokenDataWithMerkleContext],
     output_merkle_tree_pubkeys: &[Pubkey],
     delegate_change_account_index: Option,
     delegate_is_signer: bool,
@@ -622,12 +624,15 @@ pub async fn compressed_transfer_22_test<R: RpcConnection, I: Indexer<R>>(
&input_merkle_tree_context, &output_compressed_accounts, &rpc_result.root_indices, - &rpc_result.proof, - &input_compressed_account_token_data, // input_token_data + &Some(sdk_to_program_compressed_proof(rpc_result.proof)), + &input_compressed_account_token_data + .into_iter() + .map(sdk_to_program_token_data) + .collect::>(), // input_token_data &input_compressed_accounts .iter() .map(|x| &x.compressed_account.compressed_account) - .cloned() + .map(|x| sdk_to_program_compressed_account(x.clone())) .collect::>(), *mint, delegate_pubkey, // owner_if_delegate_change_account_index @@ -692,7 +697,7 @@ pub async fn compressed_transfer_22_test>( .unwrap(); let slot = rpc.get_slot().await.unwrap(); let (created_change_output_account, created_token_output_accounts) = - test_indexer.add_event_and_compressed_accounts(slot, &event); + test_indexer.add_event_and_compressed_accounts(slot, &program_to_sdk_public_transaction_event(event)); let delegates = if let Some(index) = delegate_change_account_index { let mut delegates = vec![None; created_token_output_accounts.len()]; delegates[index as usize] = Some(payer.pubkey()); @@ -711,7 +716,11 @@ pub async fn compressed_transfer_22_test>( rpc, test_indexer, &output_compressed_accounts, - created_output_accounts.as_slice(), + created_output_accounts + .into_iter() + .map(sdk_to_program_compressed_account_with_merkle_context) + .collect::>() + .as_slice(), lamports, &input_compressed_account_hashes, &snapshots, @@ -723,11 +732,11 @@ pub async fn compressed_transfer_22_test>( } #[allow(clippy::too_many_arguments)] -pub async fn decompress_test>( +pub async fn decompress_test + TestIndexerExtensions>( payer: &Keypair, rpc: &mut R, test_indexer: &mut I, - input_compressed_accounts: Vec, + input_compressed_accounts: Vec, amount: u64, output_merkle_tree_pubkey: &Pubkey, recipient_token_account: &Pubkey, @@ -775,18 +784,20 @@ pub async fn decompress_test>( &input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context) + .map(sdk_to_program_merkle_context) .collect::>(), // input_compressed_account_merkle_tree_pubkeys &[change_out_compressed_account], // output_compressed_accounts &proof_rpc_result.root_indices, // root_indices - &proof_rpc_result.proof, + &Some(sdk_to_program_compressed_proof(proof_rpc_result.proof)), input_compressed_accounts .iter() - .map(|x| x.token_data.clone()) + .map(|x| sdk_to_program_token_data(x.token_data.clone())) .collect::>() .as_slice(), // input_token_data &input_compressed_accounts .iter() .map(|x| &x.compressed_account.compressed_account) + .map(|x| sdk_to_program_compressed_account(x.clone())) .cloned() .collect::>(), mint, // mint @@ -858,7 +869,7 @@ pub async fn decompress_test>( .unwrap() .unwrap(); let slot = rpc.get_slot().await.unwrap(); - let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(slot, &event); + let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(slot, &program_to_sdk_public_transaction_event(event.clone())); assert_transfer( rpc, test_indexer, @@ -866,6 +877,7 @@ pub async fn decompress_test>( created_output_accounts .iter() .map(|x| x.compressed_account.clone()) + .map(sdk_to_program_compressed_account_with_merkle_context) .collect::>() .as_slice(), None, @@ -925,7 +937,7 @@ pub async fn decompress_test>( } #[allow(clippy::too_many_arguments)] -pub async fn perform_compress_spl_token_account>( +pub async fn perform_compress_spl_token_account + TestIndexerExtensions>( rpc: &mut R, test_indexer: &mut I, payer: &Keypair, @@ -965,7 
+977,7 @@ pub async fn perform_compress_spl_token_account> .unwrap(); // TODO: replace with get_transaction_slot() this only works with Program test let slot = rpc.get_slot().await.unwrap(); - test_indexer.add_event_and_compressed_accounts(slot, &event); + test_indexer.add_event_and_compressed_accounts(slot, &program_to_sdk_public_transaction_event(event.clone())); let created_compressed_token_account = test_indexer.get_compressed_token_accounts_by_owner(&token_owner.pubkey())[0].clone(); let expected_token_data = TokenData { diff --git a/program-tests/utils/src/system_program.rs b/program-tests/utils/src/system_program.rs index a0b8b0d3c..2ee1c636d 100644 --- a/program-tests/utils/src/system_program.rs +++ b/program-tests/utils/src/system_program.rs @@ -1,4 +1,3 @@ -use forester_utils::indexer::Indexer; use light_client::{ rpc::{errors::RpcError, RpcConnection}, transaction_params::TransactionParams, @@ -19,13 +18,14 @@ use solana_sdk::{ pubkey::Pubkey, signature::{Keypair, Signature, Signer}, }; - +use light_client::indexer::Indexer; +use light_program_test::indexer::TestIndexerExtensions; use crate::assert_compressed_tx::{ assert_compressed_transaction, get_merkle_tree_snapshots, AssertCompressedTransactionInputs, }; #[allow(clippy::too_many_arguments)] -pub async fn create_addresses_test>( +pub async fn create_addresses_test + TestIndexerExtensions>( rpc: &mut R, test_indexer: &mut I, address_merkle_tree_pubkeys: &[Pubkey], @@ -292,7 +292,7 @@ pub struct CompressedTransactionTestInputs<'a, R: RpcConnection, I: Indexer> } #[allow(clippy::too_many_arguments)] -pub async fn compressed_transaction_test>( +pub async fn compressed_transaction_test + TestIndexerExtensions>( inputs: CompressedTransactionTestInputs<'_, R, I>, ) -> Result { let mut compressed_account_hashes = Vec::new(); diff --git a/program-tests/utils/src/test_forester.rs b/program-tests/utils/src/test_forester.rs index 4ff9e9246..f32704456 100644 --- a/program-tests/utils/src/test_forester.rs +++ b/program-tests/utils/src/test_forester.rs @@ -7,7 +7,6 @@ use account_compression::{ use anchor_lang::{system_program, InstructionData, ToAccountMetas}; use forester_utils::{ get_concurrent_merkle_tree, get_hash_set, get_indexed_merkle_tree, - indexer::{AddressMerkleTreeBundle, StateMerkleTreeBundle}, }; use light_client::rpc::{errors::RpcError, RpcConnection}; use light_concurrent_merkle_tree::event::MerkleTreeEvent; @@ -31,6 +30,7 @@ use solana_sdk::{ transaction::Transaction, }; use thiserror::Error; +use light_client::indexer::{AddressMerkleTreeBundle, StateMerkleTreeBundle}; // doesn't keep its own Merkle tree but gets it from the indexer // can also get all the state and Address Merkle trees from the indexer // the lightweight version is just a function diff --git a/sdk-libs/client/src/indexer/mod.rs b/sdk-libs/client/src/indexer/mod.rs index 709c9770c..c471c13df 100644 --- a/sdk-libs/client/src/indexer/mod.rs +++ b/sdk-libs/client/src/indexer/mod.rs @@ -1,5 +1,5 @@ -use std::{fmt::Debug, future::Future}; - +use std::fmt::Debug; +use async_trait::async_trait; use light_concurrent_merkle_tree::light_hasher::Poseidon; use light_indexed_merkle_tree::{ array::{IndexedArray, IndexedElement}, @@ -7,13 +7,13 @@ use light_indexed_merkle_tree::{ }; use light_merkle_tree_reference::MerkleTree; use light_sdk::{ - compressed_account::CompressedAccountWithMerkleContext, event::PublicTransactionEvent, - proof::ProofRpcResult, token::TokenDataWithMerkleContext, + compressed_account::CompressedAccountWithMerkleContext, + proof::ProofRpcResult 
};
use num_bigint::BigUint;
use solana_sdk::pubkey::Pubkey;
use thiserror::Error;
-
+use light_sdk::token::TokenData;
use crate::rpc::RpcConnection;

#[derive(Error, Debug)]
@@ -30,28 +30,93 @@ pub enum IndexerError {
 Unknown,
}

+pub struct ProofOfLeaf {
+    pub leaf: [u8; 32],
+    pub proof: Vec<[u8; 32]>,
+}
+
+#[async_trait]
pub trait Indexer<R: RpcConnection>: Sync + Send + Debug + 'static {
-    fn add_event_and_compressed_accounts(
-        &mut self,
-        event: &PublicTransactionEvent,
-    ) -> (
-        Vec<CompressedAccountWithMerkleContext>,
-        Vec<TokenDataWithMerkleContext>,
-    );
-    fn create_proof_for_compressed_accounts(
+    /// Returns queue elements from the queue with the given pubkey. For input
+    /// queues the account compression program does not store queue elements in
+    /// the account data but only emits them in the public transaction event.
+    /// The indexer needs the queue elements to create batch update proofs.
+    async fn get_queue_elements(
+        &self,
+        pubkey: [u8; 32],
+        batch: u64,
+        start_offset: u64,
+        end_offset: u64,
+    ) -> Result<Vec<[u8; 32]>, IndexerError>;
+
+    fn get_subtrees(
+        &self,
+        merkle_tree_pubkey: [u8; 32],
+    ) -> Result<Vec<[u8; 32]>, IndexerError>;
+
+    // fn add_event_and_compressed_accounts(
+    //     &mut self,
+    //     slot: u64,
+    //     event: &PublicTransactionEvent,
+    // ) -> (
+    //     Vec<CompressedAccountWithMerkleContext>,
+    //     Vec<TokenDataWithMerkleContext>,
+    // );
+
+    async fn create_proof_for_compressed_accounts(
     &mut self,
-    compressed_accounts: Option<&[[u8; 32]]>,
-    state_merkle_tree_pubkeys: Option<&[Pubkey]>,
+    compressed_accounts: Option<Vec<[u8; 32]>>,
+    state_merkle_tree_pubkeys: Option<Vec<Pubkey>>,
     new_addresses: Option<&[[u8; 32]]>,
     address_merkle_tree_pubkeys: Option<Vec<Pubkey>>,
     rpc: &mut R,
-    ) -> impl Future<Output = ProofRpcResult>;
+    ) -> ProofRpcResult;
+
+    async fn get_multiple_compressed_account_proofs(
+        &self,
+        hashes: Vec<String>,
+    ) -> Result<Vec<MerkleProof>, IndexerError>;

-    fn get_compressed_accounts_by_owner(
+    async fn get_compressed_accounts_by_owner(
     &self,
     owner: &Pubkey,
-    ) -> Vec<CompressedAccountWithMerkleContext>;
+    ) -> Result<Vec<String>, IndexerError>;
+
+    async fn get_multiple_new_address_proofs(
+        &self,
+        merkle_tree_pubkey: [u8; 32],
+        addresses: Vec<[u8; 32]>,
+    ) -> Result<Vec<NewAddressProofWithContext<16>>, IndexerError>;
+
+    async fn get_multiple_new_address_proofs_full(
+        &self,
+        merkle_tree_pubkey: [u8; 32],
+        addresses: Vec<[u8; 32]>,
+    ) -> Result<Vec<NewAddressProofWithContext<40>>, IndexerError>;
+
+    // TODO: remove?
+    fn get_proofs_by_indices(
+        &mut self,
+        merkle_tree_pubkey: Pubkey,
+        indices: &[u64],
+    ) -> Vec<ProofOfLeaf>;
+
+    // TODO: remove?
+    fn get_leaf_indices_tx_hashes(
+        &mut self,
+        merkle_tree_pubkey: Pubkey,
+        zkp_batch_size: usize,
+    ) -> Vec<(u32, [u8; 32], [u8; 32])>;
+
+    // TODO: remove?
+    fn get_address_merkle_trees(&self) -> &Vec<AddressMerkleTreeBundle>;
+}
+
+#[derive(Debug, Clone)]
+pub struct TokenDataWithMerkleContext {
+    pub token_data: TokenData,
+    pub compressed_account: CompressedAccountWithMerkleContext,
}

#[derive(Debug, Clone)]
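How a consumer of the now-async trait might assemble one ZKP batch of output-queue leaves for a batch append proof. A minimal sketch: the `fetch_zkp_batch` helper name is an assumption, as is the caller already knowing `num_inserted_zkps` and `zkp_batch_size` from the on-chain queue metadata; none of this is part of the patch.

use light_client::{
    indexer::{Indexer, IndexerError},
    rpc::RpcConnection,
};

/// Reads one ZKP batch worth of output-queue elements through the trait.
/// For input queues these elements exist only in the indexer (they are only
/// emitted in the public transaction event), which is why `get_queue_elements`
/// lives on `Indexer` rather than being an account read.
async fn fetch_zkp_batch<R: RpcConnection, I: Indexer<R>>(
    indexer: &I,
    output_queue: [u8; 32],
    batch: u64,
    num_inserted_zkps: u64,
    zkp_batch_size: u64,
) -> Result<Vec<[u8; 32]>, IndexerError> {
    // Skip the batches that were already inserted on-chain.
    let start_offset = num_inserted_zkps * zkp_batch_size;
    let end_offset = start_offset + zkp_batch_size;
    indexer
        .get_queue_elements(output_queue, batch, start_offset, end_offset)
        .await
}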
@@ -64,8 +129,8 @@ pub struct MerkleProof {
}

// For consistency with the Photon API.
-#[derive(Clone, Default, Debug, PartialEq)]
-pub struct NewAddressProofWithContext {
+#[derive(Clone, Debug, PartialEq)]
+pub struct NewAddressProofWithContext<const NET_HEIGHT: usize> {
 pub merkle_tree: [u8; 32],
 pub root: [u8; 32],
 pub root_seq: u64,
@@ -73,7 +138,7 @@ pub struct NewAddressProofWithContext {
 pub low_address_value: [u8; 32],
 pub low_address_next_index: u64,
 pub low_address_next_value: [u8; 32],
-    pub low_address_proof: [[u8; 32]; 16],
+    pub low_address_proof: [[u8; 32]; NET_HEIGHT],
 pub new_low_element: Option<IndexedElement<usize>>,
 pub new_element: Option<IndexedElement<usize>>,
 pub new_element_next_value: Option<BigUint>,
@@ -94,15 +159,129 @@ pub struct AddressMerkleTreeAccounts {

#[derive(Debug, Clone)]
pub struct StateMerkleTreeBundle {
-    pub rollover_fee: u64,
+    pub rollover_fee: i64,
 pub merkle_tree: Box<MerkleTree<Poseidon>>,
 pub accounts: StateMerkleTreeAccounts,
+    pub version: u64,
+    pub output_queue_elements: Vec<[u8; 32]>,
+    /// leaf index, leaf, tx hash
+    pub input_leaf_indices: Vec<(u32, [u8; 32], [u8; 32])>,
}

#[derive(Debug, Clone)]
pub struct AddressMerkleTreeBundle {
-    pub rollover_fee: u64,
+    pub rollover_fee: i64,
 pub merkle_tree: Box<IndexedMerkleTree<Poseidon, usize>>,
 pub indexed_array: Box<IndexedArray<Poseidon, usize>>,
 pub accounts: AddressMerkleTreeAccounts,
+    pub queue_elements: Vec<[u8; 32]>,
}
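With the const generic, the proof length is carried in the type. A small illustration (the type aliases and `proof_len` are hypothetical, not part of the patch; the net heights 16 = 26 − 10 and 40 = 40 − 0 match the v1 and batched address tree height/canopy pairs configured later in this series):

use light_client::indexer::NewAddressProofWithContext;

// NET_HEIGHT = tree height minus canopy depth = number of proof nodes.
type V1AddressProof = NewAddressProofWithContext<16>; // height 26, canopy 10
type BatchedAddressProof = NewAddressProofWithContext<40>; // height 40, canopy 0

fn proof_len<const NET_HEIGHT: usize>(proof: &NewAddressProofWithContext<NET_HEIGHT>) -> usize {
    // The compiler now rejects passing a V1AddressProof where a
    // BatchedAddressProof is expected; previously the length was hard-coded to 16.
    proof.low_address_proof.len()
}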
diff --git a/sdk-libs/client/src/photon_rpc/photon_client.rs b/sdk-libs/client/src/photon_rpc/photon_client.rs
index 023b453c4..65620a3e0 100644
--- a/sdk-libs/client/src/photon_rpc/photon_client.rs
+++ b/sdk-libs/client/src/photon_rpc/photon_client.rs
@@ -114,7 +114,7 @@ impl PhotonClient {
 &self,
 merkle_tree_pubkey: Pubkey,
 addresses: Vec<Address>,
-    ) -> Result<Vec<NewAddressProofWithContext>, PhotonClientError> {
+    ) -> Result<Vec<NewAddressProofWithContext<16>>, PhotonClientError> {
 let params: Vec<photon_api::models::AddressWithTree> = addresses
 .iter()
 .map(|x| photon_api::models::AddressWithTree {
@@ -141,7 +141,7 @@ impl PhotonClient {
 }

 let photon_proofs = result.unwrap().result.unwrap().value;
-    let mut proofs: Vec<NewAddressProofWithContext> = Vec::new();
+    let mut proofs: Vec<NewAddressProofWithContext<16>> = Vec::new();
 for photon_proof in photon_proofs {
 let tree_pubkey = Hash::from_base58(&photon_proof.merkle_tree).unwrap();
 let low_address_value = Hash::from_base58(&photon_proof.lower_range_address).unwrap();
diff --git a/sdk-libs/program-test/Cargo.toml b/sdk-libs/program-test/Cargo.toml
index 597e77027..a9ec2ab93 100644
--- a/sdk-libs/program-test/Cargo.toml
+++ b/sdk-libs/program-test/Cargo.toml
@@ -13,6 +13,7 @@ light-prover-client = { workspace = true }
 light-sdk = { workspace = true }
 light-indexed-merkle-tree = { workspace = true }
 light-merkle-tree-reference = { workspace = true }
+light-merkle-tree-metadata = { workspace = true }
 light-hasher = { workspace = true }
 light-registry = { workspace = true }
 light-system-program = { workspace = true }
diff --git a/sdk-libs/program-test/src/indexer/extensions.rs b/sdk-libs/program-test/src/indexer/extensions.rs
new file mode 100644
index 000000000..bbe947ae3
--- /dev/null
+++ b/sdk-libs/program-test/src/indexer/extensions.rs
@@ -0,0 +1,108 @@
+use async_trait::async_trait;
+use account_compression::initialize_address_merkle_tree::Pubkey;
+use light_client::indexer::{
+    AddressMerkleTreeAccounts, AddressMerkleTreeBundle, Indexer, NewAddressProofWithContext,
+    ProofOfLeaf, StateMerkleTreeAccounts, StateMerkleTreeBundle,
+};
+use light_client::rpc::RpcConnection;
+use light_sdk::compressed_account::CompressedAccountWithMerkleContext;
+use light_sdk::event::PublicTransactionEvent;
+use light_sdk::proof::BatchedTreeProofRpcResult;
+use light_sdk::token::TokenDataWithMerkleContext;
+use solana_sdk::signature::Keypair;
+
+#[async_trait]
+pub trait TestIndexerExtensions<R: RpcConnection>: Indexer<R> {
+    fn account_nullified(&mut self, merkle_tree_pubkey: Pubkey, account_hash: &str);
+
+    fn address_tree_updated(
+        &mut self,
+        merkle_tree_pubkey: Pubkey,
+        context: &NewAddressProofWithContext<16>,
+    );
+
+    fn get_state_merkle_tree_accounts(&self, pubkeys: &[Pubkey]) -> Vec<StateMerkleTreeAccounts>;
+
+    fn get_state_merkle_trees(&self) -> &Vec<StateMerkleTreeBundle>;
+
+    fn get_state_merkle_trees_mut(&mut self) -> &mut Vec<StateMerkleTreeBundle>;
+
+    // fn get_address_merkle_trees(&self) -> &Vec<AddressMerkleTreeBundle>;
+
+    fn get_address_merkle_trees_mut(&mut self) -> &mut Vec<AddressMerkleTreeBundle>;
+
+    fn get_token_compressed_accounts(&self) -> &Vec<TokenDataWithMerkleContext>;
+
+    fn get_payer(&self) -> &Keypair;
+
+    fn get_group_pda(&self) -> &Pubkey;
+
+    async fn create_proof_for_compressed_accounts2(
+        &mut self,
+        compressed_accounts: Option<Vec<[u8; 32]>>,
+        state_merkle_tree_pubkeys: Option<Vec<Pubkey>>,
+        new_addresses: Option<&[[u8; 32]]>,
+        address_merkle_tree_pubkeys: Option<Vec<Pubkey>>,
+        rpc: &mut R,
+    ) -> BatchedTreeProofRpcResult;
+
+    fn add_address_merkle_tree_accounts(
+        &mut self,
+        merkle_tree_keypair: &Keypair,
+        queue_keypair: &Keypair,
+        owning_program_id: Option<Pubkey>,
+    ) -> AddressMerkleTreeAccounts;
+
+    fn get_compressed_accounts_with_merkle_context_by_owner(
+        &self,
+        owner: &Pubkey,
+    ) -> Vec<CompressedAccountWithMerkleContext>;
+
+    fn get_compressed_token_accounts_by_owner(
+        &self,
+        owner: &Pubkey,
+    ) -> Vec<TokenDataWithMerkleContext>;
+
+    fn add_state_bundle(&mut self, state_bundle: StateMerkleTreeBundle);
+
+    fn add_event_and_compressed_accounts(
+        &mut self,
+        slot: u64,
+        event: &PublicTransactionEvent,
+    ) -> (
+        Vec<CompressedAccountWithMerkleContext>,
+        Vec<TokenDataWithMerkleContext>,
+    );
+
+    fn get_proof_by_index(&mut self, merkle_tree_pubkey: Pubkey, index: u64) -> ProofOfLeaf;
+
+    // fn get_proofs_by_indices(
+    //     &mut
self, + // merkle_tree_pubkey: Pubkey, + // indices: &[u64], + // ) -> Vec; + // + // fn get_leaf_indices_tx_hashes( + // &mut self, + // merkle_tree_pubkey: Pubkey, + // zkp_batch_size: usize, + // ) -> Vec<(u32, [u8; 32], [u8; 32])>; + + async fn update_test_indexer_after_append( + &mut self, + rpc: &mut R, + merkle_tree_pubkey: Pubkey, + output_queue_pubkey: Pubkey, + num_inserted_zkps: u64, + ); + + async fn update_test_indexer_after_nullification( + &mut self, + rpc: &mut R, + merkle_tree_pubkey: Pubkey, + batch_index: usize, + ); + + async fn finalize_batched_address_tree_update( + &mut self, + rpc: &mut R, + merkle_tree_pubkey: Pubkey, + ); +} \ No newline at end of file diff --git a/sdk-libs/program-test/src/indexer/mod.rs b/sdk-libs/program-test/src/indexer/mod.rs new file mode 100644 index 000000000..d3b01fff0 --- /dev/null +++ b/sdk-libs/program-test/src/indexer/mod.rs @@ -0,0 +1,6 @@ +mod extensions; +mod test_indexer; +mod utils; + +pub use extensions::TestIndexerExtensions; +pub use test_indexer::TestIndexer; diff --git a/sdk-libs/program-test/src/indexer/test_indexer.rs b/sdk-libs/program-test/src/indexer/test_indexer.rs new file mode 100644 index 000000000..dcb4d84d3 --- /dev/null +++ b/sdk-libs/program-test/src/indexer/test_indexer.rs @@ -0,0 +1,1887 @@ +use crate::indexer::TestIndexerExtensions; +use crate::test_env::{create_state_merkle_tree_and_queue_account, EnvAccounts, BATCHED_OUTPUT_QUEUE_TEST_KEYPAIR}; +use account_compression::{AddressMerkleTreeAccount, AddressMerkleTreeConfig, AddressQueueConfig, NullifierQueueConfig, StateMerkleTreeAccount, StateMerkleTreeConfig}; +use async_trait::async_trait; +use forester_utils::{get_concurrent_merkle_tree, get_indexed_merkle_tree, AccountZeroCopy}; +use light_batched_merkle_tree::{ + constants::{DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, DEFAULT_BATCH_STATE_TREE_HEIGHT}, + merkle_tree::{BatchedMerkleTreeAccount}, +}; +use light_client::indexer::{IndexerError, MerkleProof, NewAddressProofWithContext, ProofOfLeaf}; +use light_client::{ + indexer::{ + AddressMerkleTreeAccounts, AddressMerkleTreeBundle, Indexer, StateMerkleTreeAccounts, + StateMerkleTreeBundle, + }, + rpc::{merkle_tree::MerkleTreeExt, RpcConnection}, + transaction_params::FeeConfig, +}; +use light_hasher::{Hasher, Poseidon}; +use light_indexed_merkle_tree::{array::IndexedArray, reference::IndexedMerkleTree}; +use light_merkle_tree_reference::MerkleTree; +use light_prover_client::inclusion_legacy::merkle_inclusion_proof_inputs::InclusionProofInputs as InclusionProofInputsLegacy; +use light_prover_client::{ + gnark::helpers::{big_int_to_string, spawn_prover, string_to_big_int, ProofType, ProverConfig}, + helpers::bigint_to_u8_32, +}; +use light_prover_client::{ + gnark::inclusion_json_formatter_legacy::BatchInclusionJsonStruct as BatchInclusionJsonStructLegacy, + inclusion::merkle_inclusion_proof_inputs::InclusionProofInputs, +}; +use light_prover_client::{ + gnark::{ + combined_json_formatter::CombinedJsonStruct, + combined_json_formatter_legacy::CombinedJsonStruct as CombinedJsonStructLegacy, + constants::{PROVE_PATH, SERVER_ADDRESS}, + inclusion_json_formatter::BatchInclusionJsonStruct, + non_inclusion_json_formatter::BatchNonInclusionJsonStruct, + non_inclusion_json_formatter_legacy::BatchNonInclusionJsonStruct as BatchNonInclusionJsonStructLegacy, + proof_helpers::{compress_proof, deserialize_gnark_proof_json, proof_from_json_struct}, + }, + inclusion::merkle_inclusion_proof_inputs::InclusionMerkleProofInputs, + non_inclusion::merkle_non_inclusion_proof_inputs::{ + 
get_non_inclusion_proof_inputs, NonInclusionProofInputs, + }, + non_inclusion_legacy::merkle_non_inclusion_proof_inputs::NonInclusionProofInputs as NonInclusionProofInputsLegacy, +}; +use light_sdk::{ + compressed_account::CompressedAccountWithMerkleContext, + event::PublicTransactionEvent, + merkle_context::MerkleContext, + proof::{CompressedProof, ProofRpcResult}, + token::{TokenData, TokenDataWithMerkleContext}, + STATE_MERKLE_TREE_CANOPY_DEPTH, +}; +use light_utils::bigint::bigint_to_be_bytes_array; +use light_utils::hashchain::{create_hash_chain_from_slice, create_tx_hash}; +use log::{info, warn}; +use num_bigint::{BigInt, BigUint}; +use num_traits::FromBytes; +use reqwest::Client; +use solana_sdk::bs58; +use solana_sdk::pubkey::Pubkey; +use solana_sdk::signature::{Keypair, Signer}; +use std::{marker::PhantomData, time::Duration}; +use borsh::BorshDeserialize; +use light_batched_merkle_tree::batch::BatchState; +use light_batched_merkle_tree::initialize_address_tree::InitAddressTreeAccountsInstructionData; +use light_batched_merkle_tree::initialize_state_tree::InitStateTreeAccountsInstructionData; +use light_batched_merkle_tree::queue::{BatchedQueueAccount, BatchedQueueMetadata}; +use light_sdk::proof::BatchedTreeProofRpcResult; +use crate::indexer::utils::create_address_merkle_tree_and_queue_account_with_assert; +use crate::test_batch_forester::{create_batch_address_merkle_tree, create_batched_state_merkle_tree}; + +#[derive(Debug)] +pub struct TestIndexer +where + R: RpcConnection + MerkleTreeExt, +{ + pub state_merkle_trees: Vec, + pub address_merkle_trees: Vec, + pub payer: Keypair, + pub group_pda: Pubkey, + pub compressed_accounts: Vec, + pub nullified_compressed_accounts: Vec, + pub token_compressed_accounts: Vec, + pub token_nullified_compressed_accounts: Vec, + pub events: Vec, + pub prover_config: Option, + phantom: PhantomData, +} + +#[async_trait] +impl Indexer for TestIndexer +where + R: RpcConnection + MerkleTreeExt, +{ + async fn get_queue_elements( + &self, + pubkey: [u8; 32], + _batch: u64, + start_offset: u64, + end_offset: u64, + ) -> Result, IndexerError> { + let pubkey = Pubkey::new_from_array(pubkey); + let address_tree_bundle = self + .address_merkle_trees + .iter() + .find(|x| x.accounts.merkle_tree == pubkey); + if let Some(address_tree_bundle) = address_tree_bundle { + return Ok(address_tree_bundle.queue_elements + [start_offset as usize..end_offset as usize] + .to_vec()); + } + let state_tree_bundle = self + .state_merkle_trees + .iter() + .find(|x| x.accounts.merkle_tree == pubkey); + if let Some(state_tree_bundle) = state_tree_bundle { + return Ok(state_tree_bundle.output_queue_elements + [start_offset as usize..end_offset as usize] + .to_vec()); + } + Err(IndexerError::Custom("Merkle tree not found".to_string())) + } + + fn get_subtrees( + &self, + merkle_tree_pubkey: [u8; 32], + ) -> Result, IndexerError> { + let merkle_tree_pubkey = Pubkey::new_from_array(merkle_tree_pubkey); + let address_tree_bundle = self + .address_merkle_trees + .iter() + .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey); + if let Some(address_tree_bundle) = address_tree_bundle { + Ok(address_tree_bundle.merkle_tree.merkle_tree.get_subtrees()) + } else { + let state_tree_bundle = self + .state_merkle_trees + .iter() + .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey); + if let Some(state_tree_bundle) = state_tree_bundle { + Ok(state_tree_bundle.merkle_tree.get_subtrees()) + } else { + Err(IndexerError::Custom("Merkle tree not found".to_string())) + } + } + } + + // fn 
add_event_and_compressed_accounts( + // &mut self, + // event: &PublicTransactionEvent, + // ) -> ( + // Vec, + // Vec, + // ) { + // for hash in event.input_compressed_account_hashes.iter() { + // let index = self.compressed_accounts.iter().position(|x| { + // x.compressed_account + // .hash::( + // &x.merkle_context.merkle_tree_pubkey, + // &x.merkle_context.leaf_index, + // ) + // .unwrap() + // == *hash + // }); + // if let Some(index) = index { + // self.nullified_compressed_accounts + // .push(self.compressed_accounts[index].clone()); + // self.compressed_accounts.remove(index); + // continue; + // }; + // if index.is_none() { + // let index = self + // .token_compressed_accounts + // .iter() + // .position(|x| { + // x.compressed_account + // .compressed_account + // .hash::( + // &x.compressed_account.merkle_context.merkle_tree_pubkey, + // &x.compressed_account.merkle_context.leaf_index, + // ) + // .unwrap() + // == *hash + // }) + // .expect("input compressed account not found"); + // self.token_nullified_compressed_accounts + // .push(self.token_compressed_accounts[index].clone()); + // self.token_compressed_accounts.remove(index); + // } + // } + // + // let mut compressed_accounts = Vec::new(); + // let mut token_compressed_accounts = Vec::new(); + // for (i, compressed_account) in event.output_compressed_accounts.iter().enumerate() { + // let nullifier_queue_pubkey = self + // .state_merkle_trees + // .iter() + // .find(|x| { + // x.accounts.merkle_tree + // == event.pubkey_array + // [event.output_compressed_accounts[i].merkle_tree_index as usize] + // }) + // .unwrap() + // .accounts + // .nullifier_queue; + // // if data is some, try to deserialize token data, if it fails, add to compressed_accounts + // // if data is none add to compressed_accounts + // // new accounts are inserted in front so that the newest accounts are found first + // match compressed_account.compressed_account.data.as_ref() { + // Some(data) => { + // if compressed_account.compressed_account.owner == PROGRAM_ID_LIGHT_SYSTEM + // && data.discriminator == TOKEN_COMPRESSED_ACCOUNT_DISCRIMINATOR + // { + // if let Ok(token_data) = TokenData::deserialize(&mut data.data.as_slice()) { + // let token_account = TokenDataWithMerkleContext { + // token_data, + // compressed_account: CompressedAccountWithMerkleContext { + // compressed_account: compressed_account + // .compressed_account + // .clone(), + // merkle_context: MerkleContext { + // leaf_index: event.output_leaf_indices[i], + // merkle_tree_pubkey: event.pubkey_array[event + // .output_compressed_accounts[i] + // .merkle_tree_index + // as usize], + // nullifier_queue_pubkey, + // queue_index: None, + // }, + // }, + // }; + // token_compressed_accounts.push(token_account.clone()); + // self.token_compressed_accounts.insert(0, token_account); + // } + // } else { + // let compressed_account = CompressedAccountWithMerkleContext { + // compressed_account: compressed_account.compressed_account.clone(), + // merkle_context: MerkleContext { + // leaf_index: event.output_leaf_indices[i], + // merkle_tree_pubkey: event.pubkey_array[event + // .output_compressed_accounts[i] + // .merkle_tree_index + // as usize], + // nullifier_queue_pubkey, + // queue_index: None, + // }, + // }; + // compressed_accounts.push(compressed_account.clone()); + // self.compressed_accounts.insert(0, compressed_account); + // } + // } + // None => { + // let compressed_account = CompressedAccountWithMerkleContext { + // compressed_account: 
compressed_account.compressed_account.clone(), + // merkle_context: MerkleContext { + // leaf_index: event.output_leaf_indices[i], + // merkle_tree_pubkey: event.pubkey_array + // [event.output_compressed_accounts[i].merkle_tree_index as usize], + // nullifier_queue_pubkey, + // queue_index: None, + // }, + // }; + // compressed_accounts.push(compressed_account.clone()); + // self.compressed_accounts.insert(0, compressed_account); + // } + // }; + // let merkle_tree = &mut self + // .state_merkle_trees + // .iter_mut() + // .find(|x| { + // x.accounts.merkle_tree + // == event.pubkey_array + // [event.output_compressed_accounts[i].merkle_tree_index as usize] + // }) + // .unwrap() + // .merkle_tree; + // merkle_tree + // .append( + // &compressed_account + // .compressed_account + // .hash::( + // &event.pubkey_array + // [event.output_compressed_accounts[i].merkle_tree_index as usize], + // &event.output_leaf_indices[i], + // ) + // .unwrap(), + // ) + // .expect("insert failed"); + // } + // + // self.events.push(event.clone()); + // (compressed_accounts, token_compressed_accounts) + // } + + + async fn create_proof_for_compressed_accounts( + &mut self, + compressed_accounts: Option>, + state_merkle_tree_pubkeys: Option>, + new_addresses: Option<&[[u8; 32]]>, + address_merkle_tree_pubkeys: Option>, + rpc: &mut R, + ) -> ProofRpcResult { + if compressed_accounts.is_some() + && ![1usize, 2usize, 3usize, 4usize, 8usize] + .contains(&compressed_accounts.as_ref().unwrap().len()) + { + panic!( + "compressed_accounts must be of length 1, 2, 3, 4 or 8 != {}", + compressed_accounts.unwrap().len() + ) + } + if new_addresses.is_some() && ![1usize, 2usize].contains(&new_addresses.unwrap().len()) { + panic!("new_addresses must be of length 1, 2") + } + let client = Client::new(); + let (root_indices, address_root_indices, json_payload) = + match (compressed_accounts, new_addresses) { + (Some(accounts), None) => { + let (payload, payload_legacy, indices) = self + .process_inclusion_proofs( + &state_merkle_tree_pubkeys.unwrap(), + &accounts, + rpc, + ) + .await; + if let Some(payload) = payload { + (indices, Vec::new(), payload.to_string()) + } else { + (indices, Vec::new(), payload_legacy.unwrap().to_string()) + } + } + (None, Some(addresses)) => { + let (payload, payload_legacy, indices) = self + .process_non_inclusion_proofs( + address_merkle_tree_pubkeys.unwrap().as_slice(), + addresses, + rpc, + ) + .await; + let payload_string = if let Some(payload) = payload { + payload.to_string() + } else { + payload_legacy.unwrap().to_string() + }; + (Vec::::new(), indices, payload_string) + } + (Some(accounts), Some(addresses)) => { + let (inclusion_payload, inclusion_payload_legacy, inclusion_indices) = self + .process_inclusion_proofs( + &state_merkle_tree_pubkeys.unwrap(), + &accounts, + rpc, + ) + .await; + + let ( + non_inclusion_payload, + non_inclusion_payload_legacy, + non_inclusion_indices, + ) = self + .process_non_inclusion_proofs( + address_merkle_tree_pubkeys.unwrap().as_slice(), + addresses, + rpc, + ) + .await; + let json_payload = if let Some(non_inclusion_payload) = non_inclusion_payload { + let public_input_hash = BigInt::from_bytes_be( + num_bigint::Sign::Plus, + &create_hash_chain_from_slice(&[ + bigint_to_u8_32( + &string_to_big_int( + &inclusion_payload.as_ref().unwrap().public_input_hash, + ) + .unwrap(), + ) + .unwrap(), + bigint_to_u8_32( + &string_to_big_int(&non_inclusion_payload.public_input_hash) + .unwrap(), + ) + .unwrap(), + ]) + .unwrap(), + ); + println!( + "inclusion public 
input hash offchain {:?}", + bigint_to_u8_32( + &string_to_big_int( + &inclusion_payload.as_ref().unwrap().public_input_hash, + ) + .unwrap(), + ) + .unwrap() + ); + println!( + "non inclusion public input hash offchain {:?}", + bigint_to_u8_32( + &string_to_big_int(&non_inclusion_payload.public_input_hash) + .unwrap() + ) + .unwrap() + ); + + println!( + "public input hash offchain {:?}", + public_input_hash.to_bytes_be() + ); + + CombinedJsonStruct { + circuit_type: ProofType::Combined.to_string(), + state_tree_height: DEFAULT_BATCH_STATE_TREE_HEIGHT, + address_tree_height: DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, + public_input_hash: big_int_to_string(&public_input_hash), + inclusion: inclusion_payload.unwrap().inputs, + non_inclusion: non_inclusion_payload.inputs, + } + .to_string() + } else if let Some(non_inclusion_payload) = non_inclusion_payload_legacy { + CombinedJsonStructLegacy { + circuit_type: ProofType::Combined.to_string(), + state_tree_height: 26, + address_tree_height: 26, + inclusion: inclusion_payload_legacy.unwrap().inputs, + non_inclusion: non_inclusion_payload.inputs, + } + .to_string() + } else { + panic!("Unsupported tree height") + }; + (inclusion_indices, non_inclusion_indices, json_payload) + } + _ => { + panic!("At least one of compressed_accounts or new_addresses must be provided") + } + }; + + println!("json_payload {:?}", json_payload); + let mut retries = 3; + while retries > 0 { + let response_result = client + .post(format!("{}{}", SERVER_ADDRESS, PROVE_PATH)) + .header("Content-Type", "text/plain; charset=utf-8") + .body(json_payload.clone()) + .send() + .await + .expect("Failed to execute request."); + println!("response_result {:?}", response_result); + if response_result.status().is_success() { + let body = response_result.text().await.unwrap(); + println!("body {:?}", body); + println!("root_indices {:?}", root_indices); + println!("address_root_indices {:?}", address_root_indices); + let proof_json = deserialize_gnark_proof_json(&body).unwrap(); + let (proof_a, proof_b, proof_c) = proof_from_json_struct(proof_json); + let (proof_a, proof_b, proof_c) = compress_proof(&proof_a, &proof_b, &proof_c); + let root_indices = root_indices.iter().map(|x| Some(*x)).collect(); + return ProofRpcResult { + root_indices, + address_root_indices: address_root_indices.clone(), + proof: CompressedProof { + a: proof_a, + b: proof_b, + c: proof_c, + }, + }; + } else { + warn!("Error: {}", response_result.text().await.unwrap()); + tokio::time::sleep(Duration::from_secs(1)).await; + retries -= 1; + } + } + panic!("Failed to get proof from server"); + } + + async fn get_multiple_compressed_account_proofs( + &self, + hashes: Vec, + ) -> Result, IndexerError> { + info!("Getting proofs for {:?}", hashes); + let mut proofs: Vec = Vec::new(); + hashes.iter().for_each(|hash| { + let hash_array: [u8; 32] = bs58::decode(hash) + .into_vec() + .unwrap() + .as_slice() + .try_into() + .unwrap(); + + self.state_merkle_trees.iter().for_each(|tree| { + if let Some(leaf_index) = tree.merkle_tree.get_leaf_index(&hash_array) { + let proof = tree + .merkle_tree + .get_proof_of_leaf(leaf_index, false) + .unwrap(); + proofs.push(MerkleProof { + hash: hash.clone(), + leaf_index: leaf_index as u64, + merkle_tree: tree.accounts.merkle_tree.to_string(), + proof: proof.to_vec(), + root_seq: tree.merkle_tree.sequence_number as u64, + }); + } + }) + }); + Ok(proofs) + } + + /// Returns compressed accounts owned by the given `owner`. 
+ // fn get_compressed_accounts_by_owner( + // &self, + // owner: &Pubkey, + // ) -> Vec { + // self.compressed_accounts + // .iter() + // .filter(|x| x.compressed_account.owner == *owner) + // .cloned() + // .collect() + // } + async fn get_compressed_accounts_by_owner( + &self, + owner: &Pubkey, + ) -> Result, IndexerError> { + let result = self.get_compressed_accounts_with_merkle_context_by_owner(owner); + let mut hashes: Vec = Vec::new(); + for account in result.iter() { + let hash = account.hash().unwrap(); + let bs58_hash = bs58::encode(hash).into_string(); + hashes.push(bs58_hash); + } + Ok(hashes) + } + + async fn get_multiple_new_address_proofs( + &self, + merkle_tree_pubkey: [u8; 32], + addresses: Vec<[u8; 32]>, + ) -> Result>, IndexerError> { + self._get_multiple_new_address_proofs(merkle_tree_pubkey, addresses, false) + .await + } + + async fn get_multiple_new_address_proofs_full( + &self, + merkle_tree_pubkey: [u8; 32], + addresses: Vec<[u8; 32]>, + ) -> Result>, IndexerError> { + self._get_multiple_new_address_proofs(merkle_tree_pubkey, addresses, true) + .await + } + + + fn get_proofs_by_indices(&mut self, merkle_tree_pubkey: Pubkey, indices: &[u64]) -> Vec { + indices + .iter() + .map(|&index| self.get_proof_by_index(merkle_tree_pubkey, index)) + .collect() + } + + fn get_leaf_indices_tx_hashes(&mut self, merkle_tree_pubkey: Pubkey, zkp_batch_size: usize) -> Vec<(u32, [u8; 32], [u8; 32])> { + let state_merkle_tree_bundle = self + .state_merkle_trees + .iter_mut() + .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) + .unwrap(); + + state_merkle_tree_bundle.input_leaf_indices[..zkp_batch_size].to_vec() + } + + fn get_address_merkle_trees(&self) -> &Vec { + &self.address_merkle_trees + } + +} + +#[async_trait] +impl TestIndexerExtensions for TestIndexer +where + R: RpcConnection + MerkleTreeExt, +{ + fn account_nullified(&mut self, merkle_tree_pubkey: Pubkey, account_hash: &str) { + let decoded_hash: [u8; 32] = bs58::decode(account_hash) + .into_vec() + .unwrap() + .as_slice() + .try_into() + .unwrap(); + + if let Some(state_tree_bundle) = self + .state_merkle_trees + .iter_mut() + .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) + { + if let Some(leaf_index) = state_tree_bundle.merkle_tree.get_leaf_index(&decoded_hash) { + state_tree_bundle + .merkle_tree + .update(&[0u8; 32], leaf_index) + .unwrap(); + } + } + } + + fn address_tree_updated( + &mut self, + merkle_tree_pubkey: Pubkey, + context: &NewAddressProofWithContext<16>, + ) { + info!("Updating address tree..."); + let address_tree_bundle: &mut AddressMerkleTreeBundle = self + .address_merkle_trees + .iter_mut() + .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) + .unwrap(); + + let new_low_element = context.new_low_element.clone().unwrap(); + let new_element = context.new_element.clone().unwrap(); + let new_element_next_value = context.new_element_next_value.clone().unwrap(); + address_tree_bundle + .merkle_tree + .update(&new_low_element, &new_element, &new_element_next_value) + .unwrap(); + address_tree_bundle + .indexed_array + .append_with_low_element_index(new_low_element.index, &new_element.value) + .unwrap(); + info!("Address tree updated"); + } + + fn get_state_merkle_tree_accounts(&self, pubkeys: &[Pubkey]) -> Vec { + pubkeys + .iter() + .map(|x| { + self.state_merkle_trees + .iter() + .find(|y| y.accounts.merkle_tree == *x || y.accounts.nullifier_queue == *x) + .unwrap() + .accounts + }) + .collect::>() + } + + fn get_state_merkle_trees(&self) -> &Vec { + &self.state_merkle_trees + } + + 
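    // A hedged usage sketch for these accessors; `ProgramTestRpcConnection` and
    // the surrounding test setup (`payer`, `env`) are assumptions, not part of
    // this file. Tests reach the bundles through the trait instead of the old
    // public fields:
    //
    //     let mut indexer =
    //         TestIndexer::<ProgramTestRpcConnection>::init_from_env(&payer, &env, None).await;
    //     let batched_trees: Vec<Pubkey> = indexer
    //         .get_state_merkle_trees()
    //         .iter()
    //         .filter(|bundle| bundle.version == 2) // batched (v2) trees live next to v1 trees
    //         .map(|bundle| bundle.accounts.merkle_tree)
    //         .collect();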
fn get_state_merkle_trees_mut(&mut self) -> &mut Vec { + &mut self.state_merkle_trees + } + + fn get_address_merkle_trees_mut(&mut self) -> &mut Vec { + &mut self.address_merkle_trees + } + + fn get_token_compressed_accounts(&self) -> &Vec { + &self.token_compressed_accounts + } + + fn get_payer(&self) -> &Keypair { + &self.payer + } + + fn get_group_pda(&self) -> &Pubkey { + &self.group_pda + } + + async fn create_proof_for_compressed_accounts2( + &mut self, + compressed_accounts: Option>, + state_merkle_tree_pubkeys: Option>, + new_addresses: Option<&[[u8; 32]]>, + address_merkle_tree_pubkeys: Option>, + rpc: &mut R + ) -> BatchedTreeProofRpcResult { + let mut indices_to_remove = Vec::new(); + + // for all accounts in batched trees, check whether values are in tree or queue + let (compressed_accounts, state_merkle_tree_pubkeys) = + if let Some((compressed_accounts, state_merkle_tree_pubkeys)) = + compressed_accounts.zip(state_merkle_tree_pubkeys) + { + for (i, (compressed_account, state_merkle_tree_pubkey)) in compressed_accounts + .iter() + .zip(state_merkle_tree_pubkeys.iter()) + .enumerate() + { + let accounts = self.state_merkle_trees.iter().find(|x| { + x.accounts.merkle_tree == *state_merkle_tree_pubkey && x.version == 2 + }); + if let Some(accounts) = accounts { + let output_queue_pubkey = accounts.accounts.nullifier_queue; + let mut queue = + AccountZeroCopy::::new(rpc, output_queue_pubkey) + .await; + let queue_zero_copy = BatchedQueueAccount::output_queue_from_bytes_mut( + queue.account.data.as_mut_slice(), + ) + .unwrap(); + for value_array in queue_zero_copy.value_vecs.iter() { + let index = value_array.iter().position(|x| *x == *compressed_account); + if index.is_some() { + indices_to_remove.push(i); + } + } + } + } + let compress_accounts = compressed_accounts + .iter() + .enumerate() + .filter(|(i, _)| !indices_to_remove.contains(i)) + .map(|(_, x)| *x) + .collect::>(); + let state_merkle_tree_pubkeys = state_merkle_tree_pubkeys + .iter() + .enumerate() + .filter(|(i, _)| !indices_to_remove.contains(i)) + .map(|(_, x)| *x) + .collect::>(); + if compress_accounts.is_empty() { + (None, None) + } else { + (Some(compress_accounts), Some(state_merkle_tree_pubkeys)) + } + } else { + (None, None) + }; + let rpc_result = if (compressed_accounts.is_some() + && !compressed_accounts.as_ref().unwrap().is_empty()) + || address_merkle_tree_pubkeys.is_some() + { + Some( + self.create_proof_for_compressed_accounts( + compressed_accounts, + state_merkle_tree_pubkeys, + new_addresses, + address_merkle_tree_pubkeys, + rpc, + ) + .await, + ) + } else { + None + }; + let address_root_indices = if let Some(rpc_result) = rpc_result.as_ref() { + rpc_result.address_root_indices.clone() + } else { + Vec::new() + }; + let root_indices = { + let mut root_indices = if let Some(rpc_result) = rpc_result.as_ref() { + rpc_result.root_indices.clone() + } else { + Vec::new() + }; + for index in indices_to_remove { + root_indices.insert(index, None); + } + root_indices + }; + BatchedTreeProofRpcResult { + proof: rpc_result.map(|x| x.proof), + root_indices, + address_root_indices, + } + } + + fn add_address_merkle_tree_accounts( + &mut self, + merkle_tree_keypair: &Keypair, + queue_keypair: &Keypair, + _owning_program_id: Option, + ) -> AddressMerkleTreeAccounts { + info!("Adding address merkle tree accounts..."); + let address_merkle_tree_accounts = AddressMerkleTreeAccounts { + merkle_tree: merkle_tree_keypair.pubkey(), + queue: queue_keypair.pubkey(), + }; + self.address_merkle_trees + 
.push(Self::add_address_merkle_tree_bundle( + address_merkle_tree_accounts, + )); + info!( + "Address merkle tree accounts added. Total: {}", + self.address_merkle_trees.len() + ); + address_merkle_tree_accounts + } + + fn get_compressed_accounts_with_merkle_context_by_owner( + &self, + owner: &Pubkey, + ) -> Vec { + self.compressed_accounts + .iter() + .filter(|x| x.compressed_account.owner == *owner) + .cloned() + .collect() + } + + fn get_compressed_token_accounts_by_owner( + &self, + owner: &Pubkey, + ) -> Vec { + self.token_compressed_accounts + .iter() + .filter(|x| x.token_data.owner == *owner) + .cloned() + .collect() + } + + fn add_state_bundle(&mut self, state_bundle: StateMerkleTreeBundle) { + self.get_state_merkle_trees_mut().push(state_bundle); + } + + fn add_event_and_compressed_accounts( + &mut self, + slot: u64, + event: &PublicTransactionEvent, + ) -> ( + Vec, + Vec, + ) { + let mut compressed_accounts = Vec::new(); + let mut token_compressed_accounts = Vec::new(); + let event_inputs_len = event.input_compressed_account_hashes.len(); + let event_outputs_len = event.output_compressed_account_hashes.len(); + for i in 0..std::cmp::max(event_inputs_len, event_outputs_len) { + self.process_v1_compressed_account( + slot, + event, + i, + &mut token_compressed_accounts, + &mut compressed_accounts, + ); + } + + self.events.push(event.clone()); + (compressed_accounts, token_compressed_accounts) + } + + fn get_proof_by_index(&mut self, merkle_tree_pubkey: Pubkey, index: u64) -> ProofOfLeaf { + let mut bundle = self + .state_merkle_trees + .iter_mut() + .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) + .unwrap(); + + while bundle.merkle_tree.leaves().len() <= index as usize { + bundle.merkle_tree.append(&[0u8; 32]).unwrap(); + } + + let leaf = match bundle.merkle_tree.get_leaf(index as usize) { + Ok(leaf) => leaf, + Err(_) => { + bundle.merkle_tree.append(&[0u8; 32]).unwrap(); + bundle.merkle_tree.get_leaf(index as usize).unwrap() + } + }; + + let proof = bundle + .merkle_tree + .get_proof_of_leaf(index as usize, true) + .unwrap() + .to_vec(); + + ProofOfLeaf { leaf, proof } + } + + + async fn update_test_indexer_after_append(&mut self, rpc: &mut R, merkle_tree_pubkey: Pubkey, output_queue_pubkey: Pubkey, num_inserted_zkps: u64) { + let state_merkle_tree_bundle = self + .state_merkle_trees + .iter_mut() + .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) + .unwrap(); + + let (merkle_tree_next_index, root) = { + let mut merkle_tree_account = + rpc.get_account(merkle_tree_pubkey).await.unwrap().unwrap(); + let merkle_tree = BatchedMerkleTreeAccount::state_tree_from_bytes_mut( + merkle_tree_account.data.as_mut_slice(), + ) + .unwrap(); + ( + merkle_tree.get_metadata().next_index as usize, + *merkle_tree.root_history.last().unwrap(), + ) + }; + + let (max_num_zkp_updates, zkp_batch_size) = { + let mut output_queue_account = + rpc.get_account(output_queue_pubkey).await.unwrap().unwrap(); + let output_queue = BatchedQueueAccount::output_queue_from_bytes_mut( + output_queue_account.data.as_mut_slice(), + ) + .unwrap(); + + let output_queue_account = output_queue.get_metadata(); + let max_num_zkp_updates = output_queue_account.batch_metadata.get_num_zkp_batches(); + let zkp_batch_size = output_queue_account.batch_metadata.zkp_batch_size; + (max_num_zkp_updates, zkp_batch_size) + }; + + let leaves = state_merkle_tree_bundle.output_queue_elements.to_vec(); + + let start = (num_inserted_zkps as usize) * zkp_batch_size as usize; + let end = start + zkp_batch_size as usize; + let 
batch_update_leaves = leaves[start..end].to_vec(); + + for (i, _) in batch_update_leaves.iter().enumerate() { + // if leaves[i] == [0u8; 32] { + let index = merkle_tree_next_index + i - zkp_batch_size as usize; + // This is dangerous it should call self.get_leaf_by_index() but it + // can t for mutable borrow + // TODO: call a get_leaf_by_index equivalent, we could move the method to the reference merkle tree + let leaf = state_merkle_tree_bundle + .merkle_tree + .get_leaf(index) + .unwrap(); + if leaf == [0u8; 32] { + state_merkle_tree_bundle + .merkle_tree + .update(&batch_update_leaves[i], index) + .unwrap(); + } + } + assert_eq!( + root, + state_merkle_tree_bundle.merkle_tree.root(), + "update indexer after append root invalid" + ); + + let num_inserted_zkps = num_inserted_zkps + 1; + // check can we get rid of this and use the data from the merkle tree + if num_inserted_zkps == max_num_zkp_updates { + for _ in 0..zkp_batch_size * max_num_zkp_updates { + state_merkle_tree_bundle.output_queue_elements.remove(0); + } + } + } + + async fn update_test_indexer_after_nullification(&mut self, rpc: &mut R, merkle_tree_pubkey: Pubkey, batch_index: usize) { + let state_merkle_tree_bundle = self + .state_merkle_trees + .iter_mut() + .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) + .unwrap(); + + let mut merkle_tree_account = rpc.get_account(merkle_tree_pubkey).await.unwrap().unwrap(); + let merkle_tree = BatchedMerkleTreeAccount::state_tree_from_bytes_mut( + merkle_tree_account.data.as_mut_slice(), + ) + .unwrap(); + + let batch = &merkle_tree.batches[batch_index]; + if batch.get_state() == BatchState::Inserted || batch.get_state() == BatchState::Full { + let batch_size = batch.zkp_batch_size; + let leaf_indices_tx_hashes = + state_merkle_tree_bundle.input_leaf_indices[..batch_size as usize].to_vec(); + for (index, leaf, tx_hash) in leaf_indices_tx_hashes.iter() { + let index = *index as usize; + let leaf = *leaf; + let index_bytes = index.to_be_bytes(); + + let nullifier = Poseidon::hashv(&[&leaf, &index_bytes, tx_hash]).unwrap(); + + state_merkle_tree_bundle.input_leaf_indices.remove(0); + state_merkle_tree_bundle + .merkle_tree + .update(&nullifier, index) + .unwrap(); + } + } + } + + async fn finalize_batched_address_tree_update(&mut self, rpc: &mut R, merkle_tree_pubkey: Pubkey) { + let mut account = rpc.get_account(merkle_tree_pubkey).await.unwrap().unwrap(); + let onchain_account = + BatchedMerkleTreeAccount::address_tree_from_bytes_mut(account.data.as_mut_slice()) + .unwrap(); + let address_tree = self + .address_merkle_trees + .iter_mut() + .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) + .unwrap(); + let address_tree_index = address_tree.merkle_tree.merkle_tree.rightmost_index; + let onchain_next_index = onchain_account.get_metadata().next_index; + let diff_onchain_indexer = onchain_next_index - address_tree_index as u64; + let addresses = address_tree.queue_elements[0..diff_onchain_indexer as usize].to_vec(); + + for _ in 0..diff_onchain_indexer { + address_tree.queue_elements.remove(0); + } + for new_element_value in &addresses { + address_tree + .merkle_tree + .append( + &BigUint::from_bytes_be(new_element_value), + &mut address_tree.indexed_array, + ) + .unwrap(); + } + + let onchain_root = onchain_account.root_history.last().unwrap(); + let new_root = address_tree.merkle_tree.root(); + assert_eq!(*onchain_root, new_root); + println!("finalized batched address tree update"); + } +} + +impl TestIndexer +where + R: RpcConnection + MerkleTreeExt, +{ + + pub async fn 
init_from_env( + payer: &Keypair, + env: &EnvAccounts, + prover_config: Option, + ) -> Self { + Self::new( + vec![ + StateMerkleTreeAccounts { + merkle_tree: env.merkle_tree_pubkey, + nullifier_queue: env.nullifier_queue_pubkey, + cpi_context: env.cpi_context_account_pubkey, + }, + StateMerkleTreeAccounts { + merkle_tree: env.batched_state_merkle_tree, + nullifier_queue: env.batched_output_queue, + cpi_context: env.batched_cpi_context, + }, + ], + vec![ + AddressMerkleTreeAccounts { + merkle_tree: env.address_merkle_tree_pubkey, + queue: env.address_merkle_tree_queue_pubkey, + }, + AddressMerkleTreeAccounts { + merkle_tree: env.batch_address_merkle_tree, + queue: env.batch_address_merkle_tree, + }, + ], + payer.insecure_clone(), + env.group_pda, + prover_config, + ) + .await + } + + pub async fn new( + state_merkle_tree_accounts: Vec, + address_merkle_tree_accounts: Vec, + payer: Keypair, + group_pda: Pubkey, + prover_config: Option, + ) -> Self { + if let Some(ref prover_config) = prover_config { + // TODO: remove restart input and check whether prover is already + // running with correct config + spawn_prover(true, prover_config.clone()).await; + } + let mut state_merkle_trees = Vec::new(); + for state_merkle_tree_account in state_merkle_tree_accounts.iter() { + let test_batched_output_queue = + Keypair::from_bytes(&BATCHED_OUTPUT_QUEUE_TEST_KEYPAIR).unwrap(); + let (version, merkle_tree) = if state_merkle_tree_account.nullifier_queue + == test_batched_output_queue.pubkey() + { + let merkle_tree = Box::new(MerkleTree::::new( + DEFAULT_BATCH_STATE_TREE_HEIGHT as usize, + 0, + )); + (2, merkle_tree) + } else { + let merkle_tree = Box::new(MerkleTree::::new( + account_compression::utils::constants::STATE_MERKLE_TREE_HEIGHT as usize, + account_compression::utils::constants::STATE_MERKLE_TREE_CANOPY_DEPTH as usize, + )); + (1, merkle_tree) + }; + + state_merkle_trees.push(StateMerkleTreeBundle { + accounts: *state_merkle_tree_account, + merkle_tree, + rollover_fee: FeeConfig::default().state_merkle_tree_rollover as i64, + version, + output_queue_elements: vec![], + input_leaf_indices: vec![], + }); + } + + let mut address_merkle_trees = Vec::new(); + for address_merkle_tree_account in address_merkle_tree_accounts { + address_merkle_trees.push(Self::add_address_merkle_tree_bundle( + address_merkle_tree_account, + )); + } + + Self { + state_merkle_trees, + address_merkle_trees, + payer, + compressed_accounts: vec![], + nullified_compressed_accounts: vec![], + events: vec![], + token_compressed_accounts: vec![], + token_nullified_compressed_accounts: vec![], + prover_config, + phantom: Default::default(), + group_pda, + } + } + + + pub fn add_address_merkle_tree_bundle( + address_merkle_tree_accounts: AddressMerkleTreeAccounts, + // TODO: add config here + ) -> AddressMerkleTreeBundle { + let (height, canopy) = + if address_merkle_tree_accounts.merkle_tree == address_merkle_tree_accounts.queue { + (40, 0) + } else { + (26, STATE_MERKLE_TREE_CANOPY_DEPTH) + }; + let mut merkle_tree = + Box::new(IndexedMerkleTree::::new(height, canopy).unwrap()); + merkle_tree.init().unwrap(); + let mut indexed_array = Box::>::default(); + indexed_array.init().unwrap(); + AddressMerkleTreeBundle { + merkle_tree, + indexed_array, + accounts: address_merkle_tree_accounts, + rollover_fee: FeeConfig::default().address_queue_rollover as i64, + queue_elements: vec![], + } + } + + async fn add_address_merkle_tree_v1( + &mut self, + rpc: &mut R, + merkle_tree_keypair: &Keypair, + queue_keypair: &Keypair, + 
owning_program_id: Option, + ) -> AddressMerkleTreeAccounts { + create_address_merkle_tree_and_queue_account_with_assert( + &self.payer, + true, + rpc, + merkle_tree_keypair, + queue_keypair, + owning_program_id, + None, + &AddressMerkleTreeConfig::default(), + &AddressQueueConfig::default(), + 0, + ) + .await + .unwrap(); + self.add_address_merkle_tree_accounts(merkle_tree_keypair, queue_keypair, owning_program_id) + } + + async fn add_address_merkle_tree_v2( + &mut self, + rpc: &mut R, + merkle_tree_keypair: &Keypair, + queue_keypair: &Keypair, + owning_program_id: Option, + ) -> AddressMerkleTreeAccounts { + info!( + "Adding address merkle tree accounts v2 {:?}", + merkle_tree_keypair.pubkey() + ); + + let params = InitAddressTreeAccountsInstructionData::test_default(); + + info!( + "Creating batched address merkle tree {:?}", + merkle_tree_keypair.pubkey() + ); + create_batch_address_merkle_tree(rpc, &self.payer, merkle_tree_keypair, params) + .await + .unwrap(); + info!( + "Batched address merkle tree created {:?}", + merkle_tree_keypair.pubkey() + ); + + self.add_address_merkle_tree_accounts(merkle_tree_keypair, queue_keypair, owning_program_id) + } + + pub async fn add_address_merkle_tree( + &mut self, + rpc: &mut R, + merkle_tree_keypair: &Keypair, + queue_keypair: &Keypair, + owning_program_id: Option, + version: u64, + ) -> AddressMerkleTreeAccounts { + if version == 1 { + self.add_address_merkle_tree_v1( + rpc, + merkle_tree_keypair, + queue_keypair, + owning_program_id, + ) + .await + } else if version == 2 { + self.add_address_merkle_tree_v2( + rpc, + merkle_tree_keypair, + queue_keypair, + owning_program_id, + ) + .await + } else { + panic!( + "add_address_merkle_tree: Version not supported, {}. Versions: 1, 2", + version + ) + } + } + + + #[allow(clippy::too_many_arguments)] + pub async fn add_state_merkle_tree( + &mut self, + rpc: &mut R, + merkle_tree_keypair: &Keypair, + queue_keypair: &Keypair, + cpi_context_keypair: &Keypair, + owning_program_id: Option, + forester: Option, + version: u64, + ) { + let (rollover_fee, merkle_tree) = match version { + 1 => { + create_state_merkle_tree_and_queue_account( + &self.payer, + true, + rpc, + merkle_tree_keypair, + queue_keypair, + Some(cpi_context_keypair), + owning_program_id, + forester, + self.state_merkle_trees.len() as u64, + &StateMerkleTreeConfig::default(), + &NullifierQueueConfig::default(), + ) + .await + .unwrap(); + let merkle_tree = Box::new(MerkleTree::::new( + account_compression::utils::constants::STATE_MERKLE_TREE_HEIGHT as usize, + account_compression::utils::constants::STATE_MERKLE_TREE_CANOPY_DEPTH as usize, + )); + (FeeConfig::default().state_merkle_tree_rollover as i64,merkle_tree) + } + 2 => { + let params = InitStateTreeAccountsInstructionData::test_default(); + + create_batched_state_merkle_tree( + &self.payer, + true, + rpc, + merkle_tree_keypair, + queue_keypair, + cpi_context_keypair, + params, + ).await.unwrap(); + let merkle_tree = Box::new(MerkleTree::::new( + DEFAULT_BATCH_STATE_TREE_HEIGHT as usize, + 0 + )); + (FeeConfig::test_batched().state_merkle_tree_rollover as i64,merkle_tree) + } + _ => panic!( + "add_state_merkle_tree: Version not supported, {}. 
Versions: 1 concurrent, 2 batched", + version + ), + }; + let state_merkle_tree_account = StateMerkleTreeAccounts { + merkle_tree: merkle_tree_keypair.pubkey(), + nullifier_queue: queue_keypair.pubkey(), + cpi_context: cpi_context_keypair.pubkey(), + }; + + self.state_merkle_trees.push(StateMerkleTreeBundle { + merkle_tree, + accounts: state_merkle_tree_account, + rollover_fee, + version, + output_queue_elements: vec![], + input_leaf_indices: vec![], + }); + } + + async fn process_inclusion_proofs( + &self, + merkle_tree_pubkeys: &[Pubkey], + accounts: &[[u8; 32]], + rpc: &mut R, + ) -> ( + Option, + Option, + Vec, + ) { + let mut inclusion_proofs = Vec::new(); + let mut root_indices = Vec::new(); + let mut height = 0; + + // Collect all proofs first before any await points + let proof_data: Vec<_> = accounts + .iter() + .zip(merkle_tree_pubkeys.iter()) + .map(|(account, &pubkey)| { + let bundle = &self + .state_merkle_trees + .iter() + .find(|x| x.accounts.merkle_tree == pubkey) + .unwrap(); + let merkle_tree = &bundle.merkle_tree; + let leaf_index = merkle_tree.get_leaf_index(account).unwrap(); + let proof = merkle_tree.get_proof_of_leaf(leaf_index, true).unwrap(); + + // Convert proof to owned data that implements Send + let proof: Vec = proof.iter().map(|x| BigInt::from_be_bytes(x)).collect(); + + if height == 0 { + height = merkle_tree.height; + } else { + assert_eq!(height, merkle_tree.height); + } + + ( + bundle.version, + pubkey, + leaf_index, + proof, + merkle_tree.root(), + ) + }) + .collect(); + + // Now handle the async operations with the collected data + for (i, (version, pubkey, leaf_index, proof, merkle_root)) in + proof_data.into_iter().enumerate() + { + inclusion_proofs.push(InclusionMerkleProofInputs { + root: BigInt::from_be_bytes(merkle_root.as_slice()), + leaf: BigInt::from_be_bytes(&accounts[i]), + path_index: BigInt::from_be_bytes(leaf_index.to_be_bytes().as_slice()), + path_elements: proof, + }); + + let (root_index, root) = if version == 1 { + let fetched_merkle_tree = + get_concurrent_merkle_tree::( + rpc, pubkey, + ) + .await + ; + ( + fetched_merkle_tree.root_index() as u32, + fetched_merkle_tree.root(), + ) + } else { + let mut merkle_tree_account = rpc.get_account(pubkey).await.unwrap().unwrap(); + let merkle_tree = BatchedMerkleTreeAccount::state_tree_from_bytes_mut( + merkle_tree_account.data.as_mut_slice(), + ) + .unwrap(); + ( + merkle_tree.get_root_index(), + merkle_tree.get_root().unwrap(), + ) + }; + + assert_eq!(merkle_root, root, "Merkle tree root mismatch"); + root_indices.push(root_index as u16); + } + + let (batch_inclusion_proof_inputs, legacy) = if height + == DEFAULT_BATCH_STATE_TREE_HEIGHT as usize + { + let inclusion_proof_inputs = + InclusionProofInputs::new(inclusion_proofs.as_slice()).unwrap(); + ( + Some(BatchInclusionJsonStruct::from_inclusion_proof_inputs( + &inclusion_proof_inputs, + )), + None, + ) + } else if height == account_compression::utils::constants::STATE_MERKLE_TREE_HEIGHT as usize { + let inclusion_proof_inputs = InclusionProofInputsLegacy(inclusion_proofs.as_slice()); + ( + None, + Some(BatchInclusionJsonStructLegacy::from_inclusion_proof_inputs( + &inclusion_proof_inputs, + )), + ) + } else { + panic!("Unsupported tree height") + }; + + (batch_inclusion_proof_inputs, legacy, root_indices) + } + + async fn process_non_inclusion_proofs( + &self, + address_merkle_tree_pubkeys: &[Pubkey], + addresses: &[[u8; 32]], + rpc: &mut R, + ) -> ( + Option, + Option, + Vec, + ) { + let mut non_inclusion_proofs = Vec::new(); + let mut 
address_root_indices = Vec::new();
+        let mut tree_heights = Vec::new();
+        for tree in self.address_merkle_trees.iter() {
+            println!("height {:?}", tree.merkle_tree.merkle_tree.height);
+            println!("accounts {:?}", tree.accounts);
+        }
+        println!("process_non_inclusion_proofs: addresses {:?}", addresses);
+        for (i, address) in addresses.iter().enumerate() {
+            let address_tree = &self
+                .address_merkle_trees
+                .iter()
+                .find(|x| x.accounts.merkle_tree == address_merkle_tree_pubkeys[i])
+                .unwrap();
+            tree_heights.push(address_tree.merkle_tree.merkle_tree.height);
+
+            let proof_inputs = get_non_inclusion_proof_inputs(
+                address,
+                &address_tree.merkle_tree,
+                &address_tree.indexed_array,
+            );
+            non_inclusion_proofs.push(proof_inputs);
+
+            // We don't have address queues in v2 (batch) address Merkle trees,
+            // hence both accounts in this struct are the same.
+            let is_v2 = address_tree.accounts.merkle_tree == address_tree.accounts.queue;
+            println!("is v2 {:?}", is_v2);
+            println!(
+                "address_merkle_tree_pubkeys[i] {:?}",
+                address_merkle_tree_pubkeys[i]
+            );
+            println!("address_tree.accounts {:?}", address_tree.accounts);
+            if is_v2 {
+                let account = rpc
+                    .get_account(address_merkle_tree_pubkeys[i])
+                    .await
+                    .unwrap();
+                if let Some(mut account) = account {
+                    let account = BatchedMerkleTreeAccount::address_tree_from_bytes_mut(
+                        account.data.as_mut_slice(),
+                    )
+                    .unwrap();
+                    address_root_indices.push(account.get_root_index() as u16);
+                } else {
+                    panic!(
+                        "TestIndexer.process_non_inclusion_proofs(): Address tree account not found."
+                    );
+                }
+            } else {
+                let fetched_address_merkle_tree = get_indexed_merkle_tree::<
+                    account_compression::AddressMerkleTreeAccount,
+                    R,
+                    Poseidon,
+                    usize,
+                    26,
+                    16,
+                >(rpc, address_merkle_tree_pubkeys[i])
+                .await;
+                address_root_indices.push(fetched_address_merkle_tree.root_index() as u16);
+            }
+        }
+        // if tree heights are not the same, panic
+        if tree_heights.iter().any(|&x| x != tree_heights[0]) {
+            panic!(
+                "All address merkle trees must have the same height {:?}",
+                tree_heights
+            );
+        }
+        let (batch_non_inclusion_proof_inputs, batch_non_inclusion_proof_inputs_legacy) =
+            if tree_heights[0] == 26 {
+                let non_inclusion_proof_inputs =
+                    NonInclusionProofInputsLegacy::new(non_inclusion_proofs.as_slice());
+                (
+                    None,
+                    Some(
+                        BatchNonInclusionJsonStructLegacy::from_non_inclusion_proof_inputs(
+                            &non_inclusion_proof_inputs,
+                        ),
+                    ),
+                )
+            } else if tree_heights[0] == 40 {
+                let non_inclusion_proof_inputs =
+                    NonInclusionProofInputs::new(non_inclusion_proofs.as_slice()).unwrap();
+                (
+                    Some(
+                        BatchNonInclusionJsonStruct::from_non_inclusion_proof_inputs(
+                            &non_inclusion_proof_inputs,
+                        ),
+                    ),
+                    None,
+                )
+            } else {
+                panic!("Unsupported tree height")
+            };
+        (
+            batch_non_inclusion_proof_inputs,
+            batch_non_inclusion_proof_inputs_legacy,
+            address_root_indices,
+        )
+    }
+
+    /// deserializes an event
+    /// adds the output_compressed_accounts to the compressed_accounts
+    /// removes the input_compressed_accounts from the compressed_accounts
+    /// adds the input_compressed_accounts to the nullified_compressed_accounts
+    pub fn add_lamport_compressed_accounts(&mut self, slot: u64, event_bytes: Vec<u8>) {
+        let event_bytes = event_bytes.clone();
+        let event = PublicTransactionEvent::deserialize(&mut event_bytes.as_slice()).unwrap();
+        // TODO: map event type
+        self.add_event_and_compressed_accounts(slot, &event);
+    }
+
+    /// deserializes an event
+    /// adds the output_compressed_accounts to the compressed_accounts
+    /// removes the input_compressed_accounts from the compressed_accounts
+    /// adds the input_compressed_accounts to the nullified_compressed_accounts
+    /// deserializes token data from the output_compressed_accounts
+    /// adds the token_compressed_accounts to the token_compressed_accounts
+    pub fn add_compressed_accounts_with_token_data(
+        &mut self,
+        slot: u64,
+        event: &PublicTransactionEvent,
+    ) {
+        self.add_event_and_compressed_accounts(slot, event);
+    }
+
+    /// returns the compressed sol balance of the owner pubkey
+    pub fn get_compressed_balance(&self, owner: &Pubkey) -> u64 {
+        self.compressed_accounts
+            .iter()
+            .filter(|x| x.compressed_account.owner == *owner)
+            .map(|x| x.compressed_account.lamports)
+            .sum()
+    }
+
+    /// returns the compressed token balance of the owner pubkey for a token by mint
+    pub fn get_compressed_token_balance(&self, owner: &Pubkey, mint: &Pubkey) -> u64 {
+        self.token_compressed_accounts
+            .iter()
+            .filter(|x| {
+                x.compressed_account.compressed_account.owner == *owner
+                    && x.token_data.mint == *mint
+            })
+            .map(|x| x.token_data.amount)
+            .sum()
+    }
+
+    fn process_v1_compressed_account(
+        &mut self,
+        slot: u64,
+        event: &PublicTransactionEvent,
+        i: usize,
+        token_compressed_accounts: &mut Vec<TokenDataWithMerkleContext>,
+        compressed_accounts: &mut Vec<CompressedAccountWithMerkleContext>,
+    ) {
+        let mut input_addresses = vec![];
+        if event.input_compressed_account_hashes.len() > i {
+            let tx_hash: [u8; 32] = create_tx_hash(
+                &event.input_compressed_account_hashes,
+                &event.output_compressed_account_hashes,
+                slot,
+            )
+            .unwrap();
+            println!("tx_hash {:?}", tx_hash);
+            println!("slot {:?}", slot);
+            let hash = event.input_compressed_account_hashes[i];
+            let index = self.compressed_accounts.iter().position(|x| {
+                x.compressed_account
+                    .hash::<Poseidon>(
+                        &x.merkle_context.merkle_tree_pubkey,
+                        &x.merkle_context.leaf_index,
+                    )
+                    .unwrap()
+                    == hash
+            });
+            let (leaf_index, merkle_tree_pubkey) = if let Some(index) = index {
+                self.nullified_compressed_accounts
+                    .push(self.compressed_accounts[index].clone());
+                let leaf_index = self.compressed_accounts[index].merkle_context.leaf_index;
+                let merkle_tree_pubkey = self.compressed_accounts[index]
+                    .merkle_context
+                    .merkle_tree_pubkey;
+                if let Some(address) = self.compressed_accounts[index].compressed_account.address {
+                    input_addresses.push(address);
+                }
+                self.compressed_accounts.remove(index);
+                (leaf_index, merkle_tree_pubkey)
+            } else {
+                let index = self
+                    .token_compressed_accounts
+                    .iter()
+                    .position(|x| {
+                        x.compressed_account
+                            .compressed_account
+                            .hash::<Poseidon>(
+                                &x.compressed_account.merkle_context.merkle_tree_pubkey,
+                                &x.compressed_account.merkle_context.leaf_index,
+                            )
+                            .unwrap()
+                            == hash
+                    })
+                    .expect("input compressed account not found");
+                self.token_nullified_compressed_accounts
+                    .push(self.token_compressed_accounts[index].clone());
+                let leaf_index = self.token_compressed_accounts[index]
+                    .compressed_account
+                    .merkle_context
+                    .leaf_index;
+                let merkle_tree_pubkey = self.token_compressed_accounts[index]
+                    .compressed_account
+                    .merkle_context
+                    .merkle_tree_pubkey;
+                self.token_compressed_accounts.remove(index);
+                (leaf_index, merkle_tree_pubkey)
+            };
+            let bundle = &mut self
+                .get_state_merkle_trees_mut()
+                .iter_mut()
+                .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey)
+                .unwrap();
+            // Store leaf indices of input accounts for batched trees
+            if bundle.version == 2 {
+                let leaf_hash = event.input_compressed_account_hashes[i];
+                bundle
+                    .input_leaf_indices
+                    .push((leaf_index, leaf_hash, tx_hash));
+            }
+        }
+        let mut new_addresses = vec![];
+        if event.output_compressed_accounts.len() > i {
+            let compressed_account = &event.output_compressed_accounts[i];
+            
println!("output compressed account {:?}", compressed_account); + if let Some(address) = compressed_account.compressed_account.address { + if !input_addresses.iter().any(|x| x == &address) { + new_addresses.push(address); + } + } + + let merkle_tree = self.state_merkle_trees.iter().find(|x| { + x.accounts.merkle_tree + == event.pubkey_array + [event.output_compressed_accounts[i].merkle_tree_index as usize] + }); + // Check for output queue + let merkle_tree = if let Some(merkle_tree) = merkle_tree { + merkle_tree + } else { + self.state_merkle_trees + .iter() + .find(|x| { + x.accounts.nullifier_queue + == event.pubkey_array + [event.output_compressed_accounts[i].merkle_tree_index as usize] + }) + .unwrap() + }; + println!("found merkle tree {:?}", merkle_tree.accounts.merkle_tree); + let nullifier_queue_pubkey = merkle_tree.accounts.nullifier_queue; + let merkle_tree_pubkey = merkle_tree.accounts.merkle_tree; + // if data is some, try to deserialize token data, if it fails, add to compressed_accounts + // if data is none add to compressed_accounts + // new accounts are inserted in front so that the newest accounts are found first + match compressed_account.compressed_account.data.as_ref() { + Some(data) => { + if compressed_account.compressed_account.owner == light_compressed_token::ID + && data.discriminator == light_compressed_token::constants::TOKEN_COMPRESSED_ACCOUNT_DISCRIMINATOR + { + if let Ok(token_data) = TokenData::deserialize(&mut data.data.as_slice()) { + let token_account = TokenDataWithMerkleContext { + token_data, + compressed_account: CompressedAccountWithMerkleContext { + compressed_account: compressed_account + .compressed_account + .clone(), + merkle_context: MerkleContext { + leaf_index: event.output_leaf_indices[i], + merkle_tree_pubkey, + nullifier_queue_pubkey, + queue_index: None, + }, + }, + }; + token_compressed_accounts.push(token_account.clone()); + self.token_compressed_accounts.insert(0, token_account); + } + } else { + let compressed_account = CompressedAccountWithMerkleContext { + compressed_account: compressed_account.compressed_account.clone(), + merkle_context: MerkleContext { + leaf_index: event.output_leaf_indices[i], + merkle_tree_pubkey, + nullifier_queue_pubkey, + queue_index: None, + }, + }; + compressed_accounts.push(compressed_account.clone()); + self.compressed_accounts.insert(0, compressed_account); + } + } + None => { + let compressed_account = CompressedAccountWithMerkleContext { + compressed_account: compressed_account.compressed_account.clone(), + merkle_context: MerkleContext { + leaf_index: event.output_leaf_indices[i], + merkle_tree_pubkey, + nullifier_queue_pubkey, + queue_index: None, + }, + }; + compressed_accounts.push(compressed_account.clone()); + self.compressed_accounts.insert(0, compressed_account); + } + }; + let seq = event + .sequence_numbers + .iter() + .find(|x| x.pubkey == merkle_tree_pubkey); + let seq = if let Some(seq) = seq { + seq + } else { + event + .sequence_numbers + .iter() + .find(|x| x.pubkey == nullifier_queue_pubkey) + .unwrap() + }; + let is_batched = seq.seq == u64::MAX; + + println!("Output is batched {:?}", is_batched); + if !is_batched { + let merkle_tree = &mut self + .state_merkle_trees + .iter_mut() + .find(|x| { + x.accounts.merkle_tree + == event.pubkey_array + [event.output_compressed_accounts[i].merkle_tree_index as usize] + }) + .unwrap(); + merkle_tree + .merkle_tree + .append( + &compressed_account + .compressed_account + .hash::( + &event.pubkey_array[event.output_compressed_accounts[i] + 
.merkle_tree_index + as usize], + &event.output_leaf_indices[i], + ) + .unwrap(), + ) + .expect("insert failed"); + } else { + let merkle_tree = &mut self + .state_merkle_trees + .iter_mut() + .find(|x| { + x.accounts.nullifier_queue + == event.pubkey_array + [event.output_compressed_accounts[i].merkle_tree_index as usize] + }) + .unwrap(); + + merkle_tree + .output_queue_elements + .push(event.output_compressed_account_hashes[i]); + } + } + println!("new addresses {:?}", new_addresses); + println!("event.pubkey_array {:?}", event.pubkey_array); + println!( + "address merkle trees {:?}", + self.address_merkle_trees + .iter() + .map(|x| x.accounts.merkle_tree) + .collect::>() + ); + // checks whether there are addresses in outputs which don't exist in inputs. + // if so check pubkey_array for the first address Merkle tree and append to the bundles queue elements. + // Note: + // - creating addresses in multiple address Merkle trees in one tx is not supported + // TODO: reimplement this is not a good solution + // - take addresses and address Merkle tree pubkeys from cpi to account compression program + if !new_addresses.is_empty() { + for pubkey in event.pubkey_array.iter() { + if let Some((_, address_merkle_tree)) = self + .address_merkle_trees + .iter_mut() + .enumerate() + .find(|(_, x)| x.accounts.merkle_tree == *pubkey) + { + address_merkle_tree + .queue_elements + .append(&mut new_addresses); + } + } + } + } + + async fn _get_multiple_new_address_proofs( + &self, + merkle_tree_pubkey: [u8; 32], + addresses: Vec<[u8; 32]>, + full: bool, + ) -> Result>, IndexerError> { + let mut proofs: Vec> = Vec::new(); + + for address in addresses.iter() { + info!("Getting new address proof for {:?}", address); + let pubkey = Pubkey::from(merkle_tree_pubkey); + let address_tree_bundle = self + .address_merkle_trees + .iter() + .find(|x| x.accounts.merkle_tree == pubkey) + .unwrap(); + + let address_biguint = BigUint::from_bytes_be(address.as_slice()); + let (old_low_address, _old_low_address_next_value) = address_tree_bundle + .indexed_array + .find_low_element_for_nonexistent(&address_biguint) + .unwrap(); + let address_bundle = address_tree_bundle + .indexed_array + .new_element_with_low_element_index(old_low_address.index, &address_biguint) + .unwrap(); + + let (old_low_address, old_low_address_next_value) = address_tree_bundle + .indexed_array + .find_low_element_for_nonexistent(&address_biguint) + .unwrap(); + + // Get the Merkle proof for updating low element. 
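+            // Non-inclusion rests on the indexed-array invariant
+            //   low_element.value < address < low_element.next_value,
+            // so a Merkle proof for the low element's leaf, together with its
+            // value and next_value (copied below), shows the address is absent.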
+            let low_address_proof = address_tree_bundle
+                .merkle_tree
+                .get_proof_of_leaf(old_low_address.index, full)
+                .unwrap();
+
+            let low_address_index: u64 = old_low_address.index as u64;
+            let low_address_value: [u8; 32] =
+                bigint_to_be_bytes_array(&old_low_address.value).unwrap();
+            let low_address_next_index: u64 = old_low_address.next_index as u64;
+            let low_address_next_value: [u8; 32] =
+                bigint_to_be_bytes_array(&old_low_address_next_value).unwrap();
+            let low_address_proof: [[u8; 32]; NET_HEIGHT] = low_address_proof.to_array().unwrap();
+            let proof = NewAddressProofWithContext::<NET_HEIGHT> {
+                merkle_tree: merkle_tree_pubkey,
+                low_address_index,
+                low_address_value,
+                low_address_next_index,
+                low_address_next_value,
+                low_address_proof,
+                root: address_tree_bundle.merkle_tree.root(),
+                root_seq: address_tree_bundle.merkle_tree.merkle_tree.sequence_number as u64,
+                new_low_element: Some(address_bundle.new_low_element),
+                new_element: Some(address_bundle.new_element),
+                new_element_next_value: Some(address_bundle.new_element_next_value),
+            };
+            proofs.push(proof);
+        }
+        Ok(proofs)
+    }
+
+    // pub(crate) fn get_address_merkle_tree(
+    //     &self,
+    //     merkle_tree_pubkey: Pubkey,
+    // ) -> Option<&AddressMerkleTreeBundle> {
+    //     self.address_merkle_trees
+    //         .iter()
+    //         .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey)
+    // }
+}
diff --git a/sdk-libs/program-test/src/indexer/utils.rs b/sdk-libs/program-test/src/indexer/utils.rs
new file mode 100644
index 000000000..68a64090f
--- /dev/null
+++ b/sdk-libs/program-test/src/indexer/utils.rs
@@ -0,0 +1,416 @@
+use std::cmp;
+use anchor_lang::solana_program::instruction::InstructionError;
+use solana_sdk::signature::{Keypair, Signature, Signer};
+use solana_sdk::transaction;
+use account_compression::{AddressMerkleTreeConfig, AddressQueueConfig, QueueAccount, RegisteredProgram};
+use account_compression::initialize_address_merkle_tree::Pubkey;
+use forester_utils::{get_hash_set, get_indexed_merkle_tree, AccountZeroCopy};
+use light_client::rpc::{RpcConnection, RpcError};
+use light_hasher::Poseidon;
+use light_merkle_tree_metadata::access::AccessMetadata;
+use light_merkle_tree_metadata::queue::{QueueMetadata, QueueType};
+use light_merkle_tree_metadata::rollover::RolloverMetadata;
+use light_registry::account_compression_cpi::sdk::get_registered_program_pda;
+use light_utils::fee::compute_rollover_fee;
+use crate::test_env::create_address_merkle_tree_and_queue_account;
+
+#[allow(clippy::too_many_arguments)]
+#[inline(never)]
+pub async fn create_address_merkle_tree_and_queue_account_with_assert<R: RpcConnection>(
+    payer: &Keypair,
+    registry: bool,
+    context: &mut R,
+    address_merkle_tree_keypair: &Keypair,
+    address_queue_keypair: &Keypair,
+    program_owner: Option<Pubkey>,
+    forester: Option<Pubkey>,
+    merkle_tree_config: &AddressMerkleTreeConfig,
+    queue_config: &AddressQueueConfig,
+    index: u64,
+) -> Result<Signature, RpcError> {
+    let result = create_address_merkle_tree_and_queue_account(
+        payer,
+        registry,
+        context,
+        address_merkle_tree_keypair,
+        address_queue_keypair,
+        program_owner,
+        forester,
+        merkle_tree_config,
+        queue_config,
+        index,
+    )
+    .await;
+
+    // To initialize the indexed tree we do 4 operations:
+    // 1. insert 0, append 0 and update 0
+    // 2. insert 1, append BN254_FIELD_SIZE - 1 and update 0
+    // we appended two values, thus the expected next index is 2.
+    // The rightmost leaf is the hash of the indexed array element with value
+    // FIELD_SIZE - 1, index 1, next_index: 0.
+    let expected_change_log_length = cmp::min(4, merkle_tree_config.changelog_size as usize);
+    let expected_roots_length = cmp::min(4, merkle_tree_config.roots_size as usize);
+    let expected_next_index = 2;
+    let expected_indexed_change_log_length =
+        cmp::min(4, merkle_tree_config.address_changelog_size as usize);
+
+    let mut reference_tree =
+        light_indexed_merkle_tree::reference::IndexedMerkleTree::<Poseidon, usize>::new(
+            account_compression::utils::constants::ADDRESS_MERKLE_TREE_HEIGHT as usize,
+            account_compression::utils::constants::ADDRESS_MERKLE_TREE_CANOPY_DEPTH as usize,
+        )
+        .unwrap();
+    reference_tree.init().unwrap();
+
+    let expected_right_most_leaf = reference_tree
+        .merkle_tree
+        .get_leaf(reference_tree.merkle_tree.rightmost_index - 1)
+        .unwrap();
+
+    let _expected_right_most_leaf = [
+        30, 164, 22, 238, 180, 2, 24, 181, 64, 193, 207, 184, 219, 233, 31, 109, 84, 232, 162,
+        158, 220, 48, 163, 158, 50, 107, 64, 87, 167, 217, 99, 245,
+    ];
+    assert_eq!(expected_right_most_leaf, _expected_right_most_leaf);
+    let owner = if registry {
+        let registered_program = get_registered_program_pda(&light_registry::ID);
+        let registered_program_account = context
+            .get_anchor_account::<RegisteredProgram>(&registered_program)
+            .await
+            .unwrap()
+            .unwrap();
+        registered_program_account.group_authority_pda
+    } else {
+        payer.pubkey()
+    };
+
+    assert_address_merkle_tree_initialized(
+        context,
+        &address_merkle_tree_keypair.pubkey(),
+        &address_queue_keypair.pubkey(),
+        merkle_tree_config,
+        index,
+        program_owner,
+        forester,
+        expected_change_log_length,
+        expected_roots_length,
+        expected_next_index,
+        &expected_right_most_leaf,
+        &owner,
+        expected_indexed_change_log_length,
+    )
+    .await;
+
+    assert_address_queue_initialized(
+        context,
+        &address_queue_keypair.pubkey(),
+        queue_config,
+        &address_merkle_tree_keypair.pubkey(),
+        merkle_tree_config,
+        QueueType::AddressQueue,
+        index,
+        program_owner,
+        forester,
+        &owner,
+    )
+    .await;
+
+    result
+}
+
+/// Asserts that the given `BanksTransactionResultWithMetadata` is an error with a custom error
+/// code or a program error.
+/// Unfortunately, `BanksTransactionResultWithMetadata` does not reliably expose the custom error
+/// code, so we allow a program error as well.
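+///
+/// Hypothetical usage sketch (assumes `RpcConnection::create_and_send_transaction`;
+/// `ix`, `payer`, and `expected_error_code` are placeholders, not items defined here):
+/// ```ignore
+/// let result = rpc
+///     .create_and_send_transaction(&[ix], &payer.pubkey(), &[&payer])
+///     .await;
+/// assert_custom_error_or_program_error(result, expected_error_code).unwrap();
+/// ```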
+
+// TODO: unify with assert_rpc_error
+pub fn assert_custom_error_or_program_error(
+    result: Result<Signature, RpcError>,
+    error_code: u32,
+) -> Result<(), RpcError> {
+    let accepted_errors = [
+        (0, InstructionError::ProgramFailedToComplete),
+        (0, InstructionError::Custom(error_code)),
+    ];
+
+    let is_accepted = accepted_errors.iter().any(|(index, error)| {
+        matches!(result, Err(RpcError::TransactionError(transaction::TransactionError::InstructionError(i, ref e))) if i == (*index as u8) && e == error)
+    });
+
+    if !is_accepted {
+        println!("result {:?}", result);
+        println!("error_code {:?}", error_code);
+        return Err(RpcError::AssertRpcError(format!(
+            "Expected error code {} or program error, got {:?}",
+            error_code, result
+        )));
+    }
+
+    Ok(())
+}
+
+#[allow(clippy::too_many_arguments)]
+pub async fn assert_address_merkle_tree_initialized<R: RpcConnection>(
+    rpc: &mut R,
+    merkle_tree_pubkey: &Pubkey,
+    queue_pubkey: &Pubkey,
+    merkle_tree_config: &account_compression::AddressMerkleTreeConfig,
+    index: u64,
+    program_owner: Option<Pubkey>,
+    forester: Option<Pubkey>,
+    expected_changelog_length: usize,
+    expected_roots_length: usize,
+    expected_next_index: usize,
+    expected_rightmost_leaf: &[u8; 32],
+    owner_pubkey: &Pubkey,
+    expected_indexed_changelog_length: usize,
+) {
+    let merkle_tree = AccountZeroCopy::<account_compression::AddressMerkleTreeAccount>::new(
+        rpc,
+        *merkle_tree_pubkey,
+    )
+    .await;
+    let merkle_tree_account = merkle_tree.deserialized();
+
+    assert_eq!(
+        merkle_tree_account
+            .metadata
+            .rollover_metadata
+            .rollover_threshold,
+        merkle_tree_config.rollover_threshold.unwrap_or(u64::MAX)
+    );
+    assert_eq!(
+        merkle_tree_account.metadata.rollover_metadata.network_fee,
+        merkle_tree_config.network_fee.unwrap_or_default()
+    );
+
+    // The address Merkle tree is never directly called by the user.
+    // All rollover fees are collected by the address queue.
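+    // (Sketch of the expectation: the tree account itself charges nothing, hence
+    // the 0 below, while the queue's fee, asserted in `assert_address_queue`
+    // further down in this file, amounts to
+    //   compute_rollover_fee(threshold, height, queue_balance)
+    //       + compute_rollover_fee(threshold, height, tree_balance),
+    // so that by the time the rollover threshold is reached, the collected fees
+    // cover the rent of both accounts.)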
+ let expected_rollover_fee = 0; + assert_eq!( + merkle_tree_account.metadata.rollover_metadata.rollover_fee, + expected_rollover_fee + ); + + assert_eq!(merkle_tree_account.metadata.rollover_metadata.index, index); + assert_eq!( + merkle_tree_account + .metadata + .rollover_metadata + .rolledover_slot, + u64::MAX + ); + + assert_eq!( + merkle_tree_account + .metadata + .rollover_metadata + .close_threshold, + merkle_tree_config.close_threshold.unwrap_or(u64::MAX) + ); + + assert_eq!( + merkle_tree_account.metadata.next_merkle_tree, + Pubkey::default() + ); + let expected_access_meta_data = AccessMetadata { + owner: *owner_pubkey, + program_owner: program_owner.unwrap_or_default(), + forester: forester.unwrap_or_default(), + }; + assert_eq!( + merkle_tree_account.metadata.access_metadata, + expected_access_meta_data + ); + assert_eq!(merkle_tree_account.metadata.associated_queue, *queue_pubkey); + + let merkle_tree = get_indexed_merkle_tree::< + account_compression::AddressMerkleTreeAccount, + R, + Poseidon, + usize, + 26, + 16, + >(rpc, *merkle_tree_pubkey) + .await; + + assert_eq!(merkle_tree.height, merkle_tree_config.height as usize); + assert_eq!( + merkle_tree.merkle_tree.changelog.capacity(), + merkle_tree_config.changelog_size as usize + ); + assert_eq!( + merkle_tree.merkle_tree.changelog.len(), + expected_changelog_length + ); + assert_eq!( + merkle_tree.merkle_tree.changelog_index(), + expected_changelog_length.saturating_sub(1) + ); + assert_eq!( + merkle_tree.roots.capacity(), + merkle_tree_config.roots_size as usize + ); + assert_eq!(merkle_tree.roots.len(), expected_roots_length); + assert_eq!( + merkle_tree.root_index(), + expected_roots_length.saturating_sub(1) + ); + assert_eq!( + merkle_tree.canopy_depth, + merkle_tree_config.canopy_depth as usize + ); + assert_eq!(merkle_tree.next_index(), expected_next_index); + assert_eq!( + merkle_tree.sequence_number() % merkle_tree_config.roots_size as usize, + expected_roots_length.saturating_sub(1) + ); + assert_eq!(&merkle_tree.rightmost_leaf(), expected_rightmost_leaf); + // TODO: complete asserts + assert_eq!( + merkle_tree.indexed_changelog_index(), + expected_indexed_changelog_length.saturating_sub(1) + ); +} + + +#[allow(clippy::too_many_arguments)] +pub async fn assert_address_queue_initialized( + rpc: &mut R, + queue_pubkey: &Pubkey, + queue_config: &account_compression::AddressQueueConfig, + associated_merkle_tree_pubkey: &Pubkey, + associated_tree_config: &account_compression::AddressMerkleTreeConfig, + expected_queue_type: QueueType, + expected_index: u64, + expected_program_owner: Option, + expected_forester: Option, + payer_pubkey: &Pubkey, +) { + assert_address_queue( + rpc, + queue_pubkey, + queue_config, + associated_merkle_tree_pubkey, + associated_tree_config, + expected_queue_type, + expected_index, + expected_program_owner, + expected_forester, + None, + None, + payer_pubkey, + ) + .await; +} + + +#[allow(clippy::too_many_arguments)] +pub async fn assert_address_queue( + rpc: &mut R, + queue_pubkey: &Pubkey, + queue_config: &account_compression::AddressQueueConfig, + associated_merkle_tree_pubkey: &Pubkey, + associated_tree_config: &account_compression::AddressMerkleTreeConfig, + expected_queue_type: QueueType, + expected_index: u64, + expected_program_owner: Option, + expected_forester: Option, + expected_rolledover_slot: Option, + expected_next_queue: Option, + payer_pubkey: &Pubkey, +) { + let balance_merkle_tree = rpc + .get_account(*associated_merkle_tree_pubkey) + .await + .unwrap() + .unwrap() + 
.lamports; + let balance_queue = rpc + .get_account(*queue_pubkey) + .await + .unwrap() + .unwrap() + .lamports; + // The address queue is the only account that collects the rollover fees. + let expected_rollover_fee = match associated_tree_config.rollover_threshold { + Some(threshold) => { + compute_rollover_fee(threshold, associated_tree_config.height, balance_queue).unwrap() + + compute_rollover_fee( + threshold, + associated_tree_config.height, + balance_merkle_tree, + ) + .unwrap() + } + None => 0, + }; + assert_queue( + rpc, + queue_pubkey, + queue_config, + associated_merkle_tree_pubkey, + associated_tree_config, + expected_rollover_fee, + expected_queue_type, + expected_index, + expected_program_owner, + expected_forester, + expected_rolledover_slot, + expected_next_queue, + payer_pubkey, + ) + .await; +} +#[allow(clippy::too_many_arguments)] +pub async fn assert_queue( + rpc: &mut R, + queue_pubkey: &Pubkey, + queue_config: &account_compression::AddressQueueConfig, + associated_merkle_tree_pubkey: &Pubkey, + associated_tree_config: &account_compression::AddressMerkleTreeConfig, + expected_rollover_fee: u64, + expected_queue_type: QueueType, + expected_index: u64, + expected_program_owner: Option, + expected_forester: Option, + expected_rolledover_slot: Option, + expected_next_queue: Option, + payer_pubkey: &Pubkey, +) { + let queue = AccountZeroCopy::::new(rpc, *queue_pubkey).await; + let queue_account = queue.deserialized(); + + let expected_rollover_meta_data = RolloverMetadata { + index: expected_index, + rolledover_slot: expected_rolledover_slot.unwrap_or(u64::MAX), + rollover_threshold: associated_tree_config + .rollover_threshold + .unwrap_or(u64::MAX), + network_fee: queue_config.network_fee.unwrap_or_default(), + rollover_fee: expected_rollover_fee, + close_threshold: associated_tree_config.close_threshold.unwrap_or(u64::MAX), + additional_bytes: 0, + }; + let expected_access_meta_data = AccessMetadata { + owner: *payer_pubkey, + program_owner: expected_program_owner.unwrap_or_default(), + forester: expected_forester.unwrap_or_default(), + }; + let expected_queue_meta_data = QueueMetadata { + access_metadata: expected_access_meta_data, + rollover_metadata: expected_rollover_meta_data, + associated_merkle_tree: *associated_merkle_tree_pubkey, + next_queue: expected_next_queue.unwrap_or_default(), + queue_type: expected_queue_type as u64, + }; + assert_eq!(queue_account.metadata, expected_queue_meta_data); + + let queue = unsafe { get_hash_set::(rpc, *queue_pubkey).await }; + assert_eq!(queue.get_capacity(), queue_config.capacity as usize); + assert_eq!( + queue.sequence_threshold, + queue_config.sequence_threshold as usize + ); +} + diff --git a/sdk-libs/program-test/src/lib.rs b/sdk-libs/program-test/src/lib.rs index d02fe4b89..98029f4b8 100644 --- a/sdk-libs/program-test/src/lib.rs +++ b/sdk-libs/program-test/src/lib.rs @@ -3,3 +3,4 @@ pub mod test_batch_forester; pub mod test_env; pub mod test_indexer; pub mod test_rpc; +pub mod indexer; diff --git a/sdk-libs/program-test/src/test_batch_forester.rs b/sdk-libs/program-test/src/test_batch_forester.rs index 562401684..7d70e92e1 100644 --- a/sdk-libs/program-test/src/test_batch_forester.rs +++ b/sdk-libs/program-test/src/test_batch_forester.rs @@ -2,7 +2,6 @@ use anchor_lang::AnchorDeserialize; use borsh::BorshSerialize; use forester_utils::{ create_account_instruction, - indexer::{Indexer, StateMerkleTreeBundle}, AccountZeroCopy, }; use light_batched_merkle_tree::{ @@ -393,6 +392,7 @@ pub async fn 
get_batched_nullify_ix_data( } use anchor_lang::{InstructionData, ToAccountMetas}; +use light_client::indexer::{Indexer, StateMerkleTreeBundle}; pub async fn create_batched_state_merkle_tree( payer: &Keypair, @@ -876,7 +876,6 @@ pub async fn create_batch_update_address_tree_instruction_data_with_proof< addresses, indexer .get_subtrees(merkle_tree_pubkey.to_bytes()) - .await .unwrap() .try_into() .unwrap(), diff --git a/sdk-libs/program-test/src/test_indexer.rs b/sdk-libs/program-test/src/test_indexer.rs index 06e9d673f..0932d1343 100644 --- a/sdk-libs/program-test/src/test_indexer.rs +++ b/sdk-libs/program-test/src/test_indexer.rs @@ -1,664 +1,664 @@ -use std::{marker::PhantomData, time::Duration}; - -use account_compression::StateMerkleTreeAccount; -use anchor_lang::Discriminator; -use borsh::BorshDeserialize; -use forester_utils::get_concurrent_merkle_tree; -use light_batched_merkle_tree::{ - constants::{DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, DEFAULT_BATCH_STATE_TREE_HEIGHT}, - merkle_tree::{BatchedMerkleTreeAccount, BatchedMerkleTreeMetadata}, -}; -use light_client::{ - indexer::{ - AddressMerkleTreeAccounts, AddressMerkleTreeBundle, Indexer, StateMerkleTreeAccounts, - StateMerkleTreeBundle, - }, - rpc::{merkle_tree::MerkleTreeExt, RpcConnection}, - transaction_params::FeeConfig, -}; -use light_hasher::{Discriminator as LightDiscriminator, Poseidon}; -use light_indexed_merkle_tree::{array::IndexedArray, reference::IndexedMerkleTree}; -use light_merkle_tree_reference::MerkleTree; -use light_prover_client::{ - gnark::{ - combined_json_formatter::CombinedJsonStruct, - combined_json_formatter_legacy::CombinedJsonStruct as CombinedJsonStructLegacy, - constants::{PROVE_PATH, SERVER_ADDRESS}, - helpers::{ - big_int_to_string, health_check, spawn_prover, string_to_big_int, ProofType, - ProverConfig, - }, - inclusion_json_formatter::BatchInclusionJsonStruct, - inclusion_json_formatter_legacy::BatchInclusionJsonStruct as BatchInclusionJsonStructLegacy, - non_inclusion_json_formatter::BatchNonInclusionJsonStruct, - non_inclusion_json_formatter_legacy::BatchNonInclusionJsonStruct as BatchNonInclusionJsonStructLegacy, - proof_helpers::{compress_proof, deserialize_gnark_proof_json, proof_from_json_struct}, - }, - helpers::bigint_to_u8_32, - inclusion::merkle_inclusion_proof_inputs::{InclusionMerkleProofInputs, InclusionProofInputs}, - inclusion_legacy::merkle_inclusion_proof_inputs::InclusionProofInputs as InclusionProofInputsLegacy, - non_inclusion::merkle_non_inclusion_proof_inputs::{ - get_non_inclusion_proof_inputs, NonInclusionProofInputs, - }, - non_inclusion_legacy::merkle_non_inclusion_proof_inputs::NonInclusionProofInputs as NonInclusionProofInputsLegacy, -}; -use light_sdk::{ - compressed_account::CompressedAccountWithMerkleContext, - event::PublicTransactionEvent, - merkle_context::MerkleContext, - proof::{CompressedProof, ProofRpcResult}, - token::{TokenData, TokenDataWithMerkleContext}, - ADDRESS_MERKLE_TREE_CANOPY_DEPTH, ADDRESS_MERKLE_TREE_HEIGHT, PROGRAM_ID_LIGHT_SYSTEM, - STATE_MERKLE_TREE_CANOPY_DEPTH, STATE_MERKLE_TREE_HEIGHT, - TOKEN_COMPRESSED_ACCOUNT_DISCRIMINATOR, -}; -use light_utils::hashchain::create_hash_chain_from_slice; -use log::warn; -use num_bigint::BigInt; -use num_traits::FromBytes; -use reqwest::Client; -use solana_sdk::pubkey::Pubkey; - -#[derive(Debug)] -pub struct TestIndexer -where - R: RpcConnection + MerkleTreeExt, -{ - pub state_merkle_trees: Vec, - pub address_merkle_trees: Vec, - pub compressed_accounts: Vec, - pub nullified_compressed_accounts: Vec, - pub 
token_compressed_accounts: Vec, - pub token_nullified_compressed_accounts: Vec, - pub events: Vec, - _rpc: PhantomData, -} - -impl Indexer for TestIndexer -where - R: RpcConnection + MerkleTreeExt, -{ - fn add_event_and_compressed_accounts( - &mut self, - event: &PublicTransactionEvent, - ) -> ( - Vec, - Vec, - ) { - for hash in event.input_compressed_account_hashes.iter() { - let index = self.compressed_accounts.iter().position(|x| { - x.compressed_account - .hash::( - &x.merkle_context.merkle_tree_pubkey, - &x.merkle_context.leaf_index, - ) - .unwrap() - == *hash - }); - if let Some(index) = index { - self.nullified_compressed_accounts - .push(self.compressed_accounts[index].clone()); - self.compressed_accounts.remove(index); - continue; - }; - if index.is_none() { - let index = self - .token_compressed_accounts - .iter() - .position(|x| { - x.compressed_account - .compressed_account - .hash::( - &x.compressed_account.merkle_context.merkle_tree_pubkey, - &x.compressed_account.merkle_context.leaf_index, - ) - .unwrap() - == *hash - }) - .expect("input compressed account not found"); - self.token_nullified_compressed_accounts - .push(self.token_compressed_accounts[index].clone()); - self.token_compressed_accounts.remove(index); - } - } - - let mut compressed_accounts = Vec::new(); - let mut token_compressed_accounts = Vec::new(); - for (i, compressed_account) in event.output_compressed_accounts.iter().enumerate() { - let nullifier_queue_pubkey = self - .state_merkle_trees - .iter() - .find(|x| { - x.accounts.merkle_tree - == event.pubkey_array - [event.output_compressed_accounts[i].merkle_tree_index as usize] - }) - .unwrap() - .accounts - .nullifier_queue; - // if data is some, try to deserialize token data, if it fails, add to compressed_accounts - // if data is none add to compressed_accounts - // new accounts are inserted in front so that the newest accounts are found first - match compressed_account.compressed_account.data.as_ref() { - Some(data) => { - if compressed_account.compressed_account.owner == PROGRAM_ID_LIGHT_SYSTEM - && data.discriminator == TOKEN_COMPRESSED_ACCOUNT_DISCRIMINATOR - { - if let Ok(token_data) = TokenData::deserialize(&mut data.data.as_slice()) { - let token_account = TokenDataWithMerkleContext { - token_data, - compressed_account: CompressedAccountWithMerkleContext { - compressed_account: compressed_account - .compressed_account - .clone(), - merkle_context: MerkleContext { - leaf_index: event.output_leaf_indices[i], - merkle_tree_pubkey: event.pubkey_array[event - .output_compressed_accounts[i] - .merkle_tree_index - as usize], - nullifier_queue_pubkey, - queue_index: None, - }, - }, - }; - token_compressed_accounts.push(token_account.clone()); - self.token_compressed_accounts.insert(0, token_account); - } - } else { - let compressed_account = CompressedAccountWithMerkleContext { - compressed_account: compressed_account.compressed_account.clone(), - merkle_context: MerkleContext { - leaf_index: event.output_leaf_indices[i], - merkle_tree_pubkey: event.pubkey_array[event - .output_compressed_accounts[i] - .merkle_tree_index - as usize], - nullifier_queue_pubkey, - queue_index: None, - }, - }; - compressed_accounts.push(compressed_account.clone()); - self.compressed_accounts.insert(0, compressed_account); - } - } - None => { - let compressed_account = CompressedAccountWithMerkleContext { - compressed_account: compressed_account.compressed_account.clone(), - merkle_context: MerkleContext { - leaf_index: event.output_leaf_indices[i], - merkle_tree_pubkey: 
event.pubkey_array - [event.output_compressed_accounts[i].merkle_tree_index as usize], - nullifier_queue_pubkey, - queue_index: None, - }, - }; - compressed_accounts.push(compressed_account.clone()); - self.compressed_accounts.insert(0, compressed_account); - } - }; - let merkle_tree = &mut self - .state_merkle_trees - .iter_mut() - .find(|x| { - x.accounts.merkle_tree - == event.pubkey_array - [event.output_compressed_accounts[i].merkle_tree_index as usize] - }) - .unwrap() - .merkle_tree; - merkle_tree - .append( - &compressed_account - .compressed_account - .hash::( - &event.pubkey_array - [event.output_compressed_accounts[i].merkle_tree_index as usize], - &event.output_leaf_indices[i], - ) - .unwrap(), - ) - .expect("insert failed"); - } - - self.events.push(event.clone()); - (compressed_accounts, token_compressed_accounts) - } - - async fn create_proof_for_compressed_accounts( - &mut self, - compressed_accounts: Option<&[[u8; 32]]>, - state_merkle_tree_pubkeys: Option<&[solana_sdk::pubkey::Pubkey]>, - new_addresses: Option<&[[u8; 32]]>, - address_merkle_tree_pubkeys: Option>, - rpc: &mut R, - ) -> ProofRpcResult { - if compressed_accounts.is_some() - && ![1usize, 2usize, 3usize, 4usize, 8usize] - .contains(&compressed_accounts.unwrap().len()) - { - panic!( - "compressed_accounts must be of length 1, 2, 3, 4 or 8 != {}", - compressed_accounts.unwrap().len() - ) - } - if new_addresses.is_some() && ![1usize, 2usize].contains(&new_addresses.unwrap().len()) { - panic!("new_addresses must be of length 1, 2") - } - let client = Client::new(); - let (root_indices, address_root_indices, json_payload) = - match (compressed_accounts, new_addresses) { - (Some(accounts), None) => { - let (payload, payload_legacy, indices) = self - .process_inclusion_proofs(state_merkle_tree_pubkeys.unwrap(), accounts, rpc) - .await; - - if let Some(payload) = payload { - (indices, Vec::new(), payload.to_string()) - } else { - (indices, Vec::new(), payload_legacy.unwrap().to_string()) - } - } - (None, Some(addresses)) => { - let (payload, payload_legacy, indices) = self - .process_non_inclusion_proofs( - address_merkle_tree_pubkeys.unwrap().as_slice(), - addresses, - rpc, - ) - .await; - let payload_string = if let Some(payload) = payload { - payload.to_string() - } else { - payload_legacy.unwrap().to_string() - }; - (Vec::::new(), indices, payload_string) - } - (Some(accounts), Some(addresses)) => { - let (inclusion_payload, inclusion_payload_legacy, inclusion_indices) = self - .process_inclusion_proofs(state_merkle_tree_pubkeys.unwrap(), accounts, rpc) - .await; - - let ( - non_inclusion_payload, - non_inclusion_payload_legacy, - non_inclusion_indices, - ) = self - .process_non_inclusion_proofs( - address_merkle_tree_pubkeys.unwrap().as_slice(), - addresses, - rpc, - ) - .await; - let json_payload = if let Some(non_inclusion_payload) = non_inclusion_payload { - let public_input_hash = BigInt::from_bytes_be( - num_bigint::Sign::Plus, - &create_hash_chain_from_slice(&[ - bigint_to_u8_32( - &string_to_big_int( - &inclusion_payload.as_ref().unwrap().public_input_hash, - ) - .unwrap(), - ) - .unwrap(), - bigint_to_u8_32( - &string_to_big_int(&non_inclusion_payload.public_input_hash) - .unwrap(), - ) - .unwrap(), - ]) - .unwrap(), - ); - - CombinedJsonStruct { - circuit_type: ProofType::Combined.to_string(), - state_tree_height: DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, - address_tree_height: DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, - public_input_hash: big_int_to_string(&public_input_hash), - inclusion: 
inclusion_payload.unwrap().inputs, - non_inclusion: non_inclusion_payload.inputs, - } - .to_string() - } else if let Some(non_inclusion_payload) = non_inclusion_payload_legacy { - CombinedJsonStructLegacy { - circuit_type: ProofType::Combined.to_string(), - state_tree_height: 26, - address_tree_height: 26, - inclusion: inclusion_payload_legacy.unwrap().inputs, - non_inclusion: non_inclusion_payload.inputs, - } - .to_string() - } else { - panic!("Unsupported tree height") - }; - (inclusion_indices, non_inclusion_indices, json_payload) - } - _ => { - panic!("At least one of compressed_accounts or new_addresses must be provided") - } - }; - - let mut retries = 3; - while retries > 0 { - let response_result = client - .post(format!("{}{}", SERVER_ADDRESS, PROVE_PATH)) - .header("Content-Type", "text/plain; charset=utf-8") - .body(json_payload.clone()) - .send() - .await - .expect("Failed to execute request."); - if response_result.status().is_success() { - let body = response_result.text().await.unwrap(); - let proof_json = deserialize_gnark_proof_json(&body).unwrap(); - let (proof_a, proof_b, proof_c) = proof_from_json_struct(proof_json); - let (proof_a, proof_b, proof_c) = compress_proof(&proof_a, &proof_b, &proof_c); - let root_indices = root_indices.iter().map(|x| Some(*x)).collect(); - return ProofRpcResult { - root_indices, - address_root_indices, - proof: CompressedProof { - a: proof_a, - b: proof_b, - c: proof_c, - }, - }; - } else { - warn!("Error: {}", response_result.text().await.unwrap()); - tokio::time::sleep(Duration::from_secs(1)).await; - retries -= 1; - } - } - panic!("Failed to get proof from server"); - } - - /// Returns compressed accounts owned by the given `owner`. - fn get_compressed_accounts_by_owner( - &self, - owner: &Pubkey, - ) -> Vec { - self.compressed_accounts - .iter() - .filter(|x| x.compressed_account.owner == *owner) - .cloned() - .collect() - } -} - -impl TestIndexer -where - R: RpcConnection + MerkleTreeExt, -{ - pub async fn new( - state_merkle_tree_accounts: &[StateMerkleTreeAccounts], - address_merkle_tree_accounts: &[AddressMerkleTreeAccounts], - inclusion: bool, - non_inclusion: bool, - ) -> Self { - let state_merkle_trees = state_merkle_tree_accounts - .iter() - .map(|accounts| { - let merkle_tree = Box::new(MerkleTree::::new( - STATE_MERKLE_TREE_HEIGHT, - STATE_MERKLE_TREE_CANOPY_DEPTH, - )); - StateMerkleTreeBundle { - accounts: *accounts, - merkle_tree, - rollover_fee: FeeConfig::default().state_merkle_tree_rollover, - } - }) - .collect::>(); - - let address_merkle_trees = address_merkle_tree_accounts - .iter() - .map(|accounts| Self::add_address_merkle_tree_bundle(accounts)) - .collect::>(); - - let mut prover_config = ProverConfig { - circuits: vec![], - run_mode: None, - }; - - if inclusion { - prover_config.circuits.push(ProofType::Inclusion); - } - if non_inclusion { - prover_config.circuits.push(ProofType::NonInclusion); - } - - spawn_prover(true, prover_config).await; - - health_check(20, 1).await; - - Self { - state_merkle_trees, - address_merkle_trees, - compressed_accounts: Vec::new(), - nullified_compressed_accounts: Vec::new(), - token_compressed_accounts: Vec::new(), - token_nullified_compressed_accounts: Vec::new(), - events: Vec::new(), - _rpc: PhantomData, - } - } - - pub fn add_address_merkle_tree_bundle( - accounts: &AddressMerkleTreeAccounts, - // TODO: add config here - ) -> AddressMerkleTreeBundle { - let mut merkle_tree = Box::new( - IndexedMerkleTree::::new( - ADDRESS_MERKLE_TREE_HEIGHT, - ADDRESS_MERKLE_TREE_CANOPY_DEPTH, - 
) - .unwrap(), - ); - merkle_tree.init().unwrap(); - let mut indexed_array = Box::>::default(); - indexed_array.init().unwrap(); - AddressMerkleTreeBundle { - merkle_tree, - indexed_array, - accounts: *accounts, - rollover_fee: FeeConfig::default().address_queue_rollover, - } - } - - async fn process_inclusion_proofs( - &self, - merkle_tree_pubkeys: &[Pubkey], - accounts: &[[u8; 32]], - rpc: &mut R, - ) -> ( - Option, - Option, - Vec, - ) { - let mut inclusion_proofs = Vec::new(); - let mut root_indices = Vec::new(); - let mut height = 0; - - for (i, account) in accounts.iter().enumerate() { - let bundle = &self - .state_merkle_trees - .iter() - .find(|x| x.accounts.merkle_tree == merkle_tree_pubkeys[i]) - .unwrap(); - let merkle_tree = &bundle.merkle_tree; - let leaf_index = merkle_tree.get_leaf_index(account).unwrap(); - println!("merkle_tree height {:?}", merkle_tree.height); - let proof = merkle_tree.get_proof_of_leaf(leaf_index, true).unwrap(); - println!("proof length {:?}", proof.len()); - let merkle_tree_account = rpc - .get_account(merkle_tree_pubkeys[i]) - .await - .unwrap() - .unwrap(); - - let discriminator = merkle_tree_account.data[0..8].try_into().unwrap(); - let version = match discriminator { - StateMerkleTreeAccount::DISCRIMINATOR => 1, - BatchedMerkleTreeMetadata::DISCRIMINATOR => 2, - _ => panic!("Unsupported discriminator"), - }; - println!("bundle.version {:?}", version); - if height == 0 { - height = merkle_tree.height; - } else { - assert_eq!(height, merkle_tree.height); - } - inclusion_proofs.push(InclusionMerkleProofInputs { - root: BigInt::from_be_bytes(merkle_tree.root().as_slice()), - leaf: BigInt::from_be_bytes(account), - path_index: BigInt::from_be_bytes(leaf_index.to_be_bytes().as_slice()), - path_elements: proof.iter().map(|x| BigInt::from_be_bytes(x)).collect(), - }); - let (root_index, root) = if version == 1 { - let fetched_merkle_tree = - get_concurrent_merkle_tree::( - rpc, - merkle_tree_pubkeys[i], - ) - .await; - // for i in 0..fetched_merkle_tree.roots.len() { - // inf!("roots {:?} {:?}", i, fetched_merkle_tree.roots[i]); - // } - // info!( - // "sequence number {:?}", - // fetched_merkle_tree.sequence_number() - // ); - // info!("root index {:?}", fetched_merkle_tree.root_index()); - // info!("local sequence number {:?}", merkle_tree.sequence_number); - ( - fetched_merkle_tree.root_index() as u32, - fetched_merkle_tree.root(), - ) - } else { - let mut merkle_tree_account = rpc - .get_account(merkle_tree_pubkeys[i]) - .await - .unwrap() - .unwrap(); - let merkle_tree = BatchedMerkleTreeAccount::state_tree_from_bytes_mut( - merkle_tree_account.data.as_mut_slice(), - ) - .unwrap(); - ( - merkle_tree.get_root_index(), - merkle_tree.get_root().unwrap(), - ) - }; - assert_eq!(merkle_tree.root(), root, "Merkle tree root mismatch"); - - root_indices.push(root_index as u16); - } - - let (batch_inclusion_proof_inputs, legacy) = if height - == DEFAULT_BATCH_STATE_TREE_HEIGHT as usize - { - let inclusion_proof_inputs = - InclusionProofInputs::new(inclusion_proofs.as_slice()).unwrap(); - ( - Some(BatchInclusionJsonStruct::from_inclusion_proof_inputs( - &inclusion_proof_inputs, - )), - None, - ) - } else if height == STATE_MERKLE_TREE_HEIGHT { - let inclusion_proof_inputs = InclusionProofInputsLegacy(inclusion_proofs.as_slice()); - ( - None, - Some(BatchInclusionJsonStructLegacy::from_inclusion_proof_inputs( - &inclusion_proof_inputs, - )), - ) - } else { - panic!("Unsupported tree height") - }; - - (batch_inclusion_proof_inputs, legacy, root_indices) - } - - 
async fn process_non_inclusion_proofs( - &self, - address_merkle_tree_pubkeys: &[Pubkey], - addresses: &[[u8; 32]], - rpc: &mut R, - ) -> ( - Option, - Option, - Vec, - ) { - let mut non_inclusion_proofs = Vec::new(); - let mut address_root_indices = Vec::new(); - let mut tree_heights = Vec::new(); - for (i, address) in addresses.iter().enumerate() { - let address_tree = &self - .address_merkle_trees - .iter() - .find(|x| x.accounts.merkle_tree == address_merkle_tree_pubkeys[i]) - .unwrap(); - tree_heights.push(address_tree.merkle_tree.merkle_tree.height); - let proof_inputs = get_non_inclusion_proof_inputs( - address, - &address_tree.merkle_tree, - &address_tree.indexed_array, - ); - non_inclusion_proofs.push(proof_inputs); - let onchain_address_merkle_tree = rpc - .get_address_merkle_tree(address_merkle_tree_pubkeys[i]) - .await - .unwrap(); - address_root_indices.push(onchain_address_merkle_tree.root_index() as u16); - } - // if tree heights are not the same, panic - if tree_heights.iter().any(|&x| x != tree_heights[0]) { - panic!( - "All address merkle trees must have the same height {:?}", - tree_heights - ); - } - - let (batch_non_inclusion_proof_inputs, batch_non_inclusion_proof_inputs_legacy) = - if tree_heights[0] == 26 { - let non_inclusion_proof_inputs = - NonInclusionProofInputsLegacy::new(non_inclusion_proofs.as_slice()); - ( - None, - Some( - BatchNonInclusionJsonStructLegacy::from_non_inclusion_proof_inputs( - &non_inclusion_proof_inputs, - ), - ), - ) - } else if tree_heights[0] == 40 { - let non_inclusion_proof_inputs = - NonInclusionProofInputs::new(non_inclusion_proofs.as_slice()).unwrap(); - ( - Some( - BatchNonInclusionJsonStruct::from_non_inclusion_proof_inputs( - &non_inclusion_proof_inputs, - ), - ), - None, - ) - } else { - panic!("Unsupported tree height") - }; - ( - batch_non_inclusion_proof_inputs, - batch_non_inclusion_proof_inputs_legacy, - address_root_indices, - ) - } - - /// deserializes an event - /// adds the output_compressed_accounts to the compressed_accounts - /// removes the input_compressed_accounts from the compressed_accounts - /// adds the input_compressed_accounts to the nullified_compressed_accounts - /// deserialiazes token data from the output_compressed_accounts - /// adds the token_compressed_accounts to the token_compressed_accounts - pub fn add_compressed_accounts_with_token_data(&mut self, event: &PublicTransactionEvent) { - self.add_event_and_compressed_accounts(event); - } -} +// use std::{marker::PhantomData, time::Duration}; +// +// use account_compression::StateMerkleTreeAccount; +// use anchor_lang::Discriminator; +// use borsh::BorshDeserialize; +// use forester_utils::get_concurrent_merkle_tree; +// use light_batched_merkle_tree::{ +// constants::{DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, DEFAULT_BATCH_STATE_TREE_HEIGHT}, +// merkle_tree::{BatchedMerkleTreeAccount, BatchedMerkleTreeMetadata}, +// }; +// use light_client::{ +// indexer::{ +// AddressMerkleTreeAccounts, AddressMerkleTreeBundle, Indexer, StateMerkleTreeAccounts, +// StateMerkleTreeBundle, +// }, +// rpc::{merkle_tree::MerkleTreeExt, RpcConnection}, +// transaction_params::FeeConfig, +// }; +// use light_hasher::{Discriminator as LightDiscriminator, Poseidon}; +// use light_indexed_merkle_tree::{array::IndexedArray, reference::IndexedMerkleTree}; +// use light_merkle_tree_reference::MerkleTree; +// use light_prover_client::{ +// gnark::{ +// combined_json_formatter::CombinedJsonStruct, +// combined_json_formatter_legacy::CombinedJsonStruct as CombinedJsonStructLegacy, +// 
constants::{PROVE_PATH, SERVER_ADDRESS}, +// helpers::{ +// big_int_to_string, health_check, spawn_prover, string_to_big_int, ProofType, +// ProverConfig, +// }, +// inclusion_json_formatter::BatchInclusionJsonStruct, +// inclusion_json_formatter_legacy::BatchInclusionJsonStruct as BatchInclusionJsonStructLegacy, +// non_inclusion_json_formatter::BatchNonInclusionJsonStruct, +// non_inclusion_json_formatter_legacy::BatchNonInclusionJsonStruct as BatchNonInclusionJsonStructLegacy, +// proof_helpers::{compress_proof, deserialize_gnark_proof_json, proof_from_json_struct}, +// }, +// helpers::bigint_to_u8_32, +// inclusion::merkle_inclusion_proof_inputs::{InclusionMerkleProofInputs, InclusionProofInputs}, +// inclusion_legacy::merkle_inclusion_proof_inputs::InclusionProofInputs as InclusionProofInputsLegacy, +// non_inclusion::merkle_non_inclusion_proof_inputs::{ +// get_non_inclusion_proof_inputs, NonInclusionProofInputs, +// }, +// non_inclusion_legacy::merkle_non_inclusion_proof_inputs::NonInclusionProofInputs as NonInclusionProofInputsLegacy, +// }; +// use light_sdk::{ +// compressed_account::CompressedAccountWithMerkleContext, +// event::PublicTransactionEvent, +// merkle_context::MerkleContext, +// proof::{CompressedProof, ProofRpcResult}, +// token::{TokenData, TokenDataWithMerkleContext}, +// ADDRESS_MERKLE_TREE_CANOPY_DEPTH, ADDRESS_MERKLE_TREE_HEIGHT, PROGRAM_ID_LIGHT_SYSTEM, +// STATE_MERKLE_TREE_CANOPY_DEPTH, STATE_MERKLE_TREE_HEIGHT, +// TOKEN_COMPRESSED_ACCOUNT_DISCRIMINATOR, +// }; +// use light_utils::hashchain::create_hash_chain_from_slice; +// use log::warn; +// use num_bigint::BigInt; +// use num_traits::FromBytes; +// use reqwest::Client; +// use solana_sdk::pubkey::Pubkey; +// +// #[derive(Debug)] +// pub struct TestIndexer +// where +// R: RpcConnection + MerkleTreeExt, +// { +// pub state_merkle_trees: Vec, +// pub address_merkle_trees: Vec, +// pub compressed_accounts: Vec, +// pub nullified_compressed_accounts: Vec, +// pub token_compressed_accounts: Vec, +// pub token_nullified_compressed_accounts: Vec, +// pub events: Vec, +// _rpc: PhantomData, +// } +// +// impl Indexer for TestIndexer +// where +// R: RpcConnection + MerkleTreeExt, +// { +// fn add_event_and_compressed_accounts( +// &mut self, +// event: &PublicTransactionEvent, +// ) -> ( +// Vec, +// Vec, +// ) { +// for hash in event.input_compressed_account_hashes.iter() { +// let index = self.compressed_accounts.iter().position(|x| { +// x.compressed_account +// .hash::( +// &x.merkle_context.merkle_tree_pubkey, +// &x.merkle_context.leaf_index, +// ) +// .unwrap() +// == *hash +// }); +// if let Some(index) = index { +// self.nullified_compressed_accounts +// .push(self.compressed_accounts[index].clone()); +// self.compressed_accounts.remove(index); +// continue; +// }; +// if index.is_none() { +// let index = self +// .token_compressed_accounts +// .iter() +// .position(|x| { +// x.compressed_account +// .compressed_account +// .hash::( +// &x.compressed_account.merkle_context.merkle_tree_pubkey, +// &x.compressed_account.merkle_context.leaf_index, +// ) +// .unwrap() +// == *hash +// }) +// .expect("input compressed account not found"); +// self.token_nullified_compressed_accounts +// .push(self.token_compressed_accounts[index].clone()); +// self.token_compressed_accounts.remove(index); +// } +// } +// +// let mut compressed_accounts = Vec::new(); +// let mut token_compressed_accounts = Vec::new(); +// for (i, compressed_account) in event.output_compressed_accounts.iter().enumerate() { +// let 
nullifier_queue_pubkey = self +// .state_merkle_trees +// .iter() +// .find(|x| { +// x.accounts.merkle_tree +// == event.pubkey_array +// [event.output_compressed_accounts[i].merkle_tree_index as usize] +// }) +// .unwrap() +// .accounts +// .nullifier_queue; +// // if data is some, try to deserialize token data, if it fails, add to compressed_accounts +// // if data is none add to compressed_accounts +// // new accounts are inserted in front so that the newest accounts are found first +// match compressed_account.compressed_account.data.as_ref() { +// Some(data) => { +// if compressed_account.compressed_account.owner == PROGRAM_ID_LIGHT_SYSTEM +// && data.discriminator == TOKEN_COMPRESSED_ACCOUNT_DISCRIMINATOR +// { +// if let Ok(token_data) = TokenData::deserialize(&mut data.data.as_slice()) { +// let token_account = TokenDataWithMerkleContext { +// token_data, +// compressed_account: CompressedAccountWithMerkleContext { +// compressed_account: compressed_account +// .compressed_account +// .clone(), +// merkle_context: MerkleContext { +// leaf_index: event.output_leaf_indices[i], +// merkle_tree_pubkey: event.pubkey_array[event +// .output_compressed_accounts[i] +// .merkle_tree_index +// as usize], +// nullifier_queue_pubkey, +// queue_index: None, +// }, +// }, +// }; +// token_compressed_accounts.push(token_account.clone()); +// self.token_compressed_accounts.insert(0, token_account); +// } +// } else { +// let compressed_account = CompressedAccountWithMerkleContext { +// compressed_account: compressed_account.compressed_account.clone(), +// merkle_context: MerkleContext { +// leaf_index: event.output_leaf_indices[i], +// merkle_tree_pubkey: event.pubkey_array[event +// .output_compressed_accounts[i] +// .merkle_tree_index +// as usize], +// nullifier_queue_pubkey, +// queue_index: None, +// }, +// }; +// compressed_accounts.push(compressed_account.clone()); +// self.compressed_accounts.insert(0, compressed_account); +// } +// } +// None => { +// let compressed_account = CompressedAccountWithMerkleContext { +// compressed_account: compressed_account.compressed_account.clone(), +// merkle_context: MerkleContext { +// leaf_index: event.output_leaf_indices[i], +// merkle_tree_pubkey: event.pubkey_array +// [event.output_compressed_accounts[i].merkle_tree_index as usize], +// nullifier_queue_pubkey, +// queue_index: None, +// }, +// }; +// compressed_accounts.push(compressed_account.clone()); +// self.compressed_accounts.insert(0, compressed_account); +// } +// }; +// let merkle_tree = &mut self +// .state_merkle_trees +// .iter_mut() +// .find(|x| { +// x.accounts.merkle_tree +// == event.pubkey_array +// [event.output_compressed_accounts[i].merkle_tree_index as usize] +// }) +// .unwrap() +// .merkle_tree; +// merkle_tree +// .append( +// &compressed_account +// .compressed_account +// .hash::( +// &event.pubkey_array +// [event.output_compressed_accounts[i].merkle_tree_index as usize], +// &event.output_leaf_indices[i], +// ) +// .unwrap(), +// ) +// .expect("insert failed"); +// } +// +// self.events.push(event.clone()); +// (compressed_accounts, token_compressed_accounts) +// } +// +// async fn create_proof_for_compressed_accounts( +// &mut self, +// compressed_accounts: Option<&[[u8; 32]]>, +// state_merkle_tree_pubkeys: Option<&[solana_sdk::pubkey::Pubkey]>, +// new_addresses: Option<&[[u8; 32]]>, +// address_merkle_tree_pubkeys: Option>, +// rpc: &mut R, +// ) -> ProofRpcResult { +// if compressed_accounts.is_some() +// && ![1usize, 2usize, 3usize, 4usize, 8usize] +// 
.contains(&compressed_accounts.unwrap().len()) +// { +// panic!( +// "compressed_accounts must be of length 1, 2, 3, 4 or 8 != {}", +// compressed_accounts.unwrap().len() +// ) +// } +// if new_addresses.is_some() && ![1usize, 2usize].contains(&new_addresses.unwrap().len()) { +// panic!("new_addresses must be of length 1, 2") +// } +// let client = Client::new(); +// let (root_indices, address_root_indices, json_payload) = +// match (compressed_accounts, new_addresses) { +// (Some(accounts), None) => { +// let (payload, payload_legacy, indices) = self +// .process_inclusion_proofs(state_merkle_tree_pubkeys.unwrap(), accounts, rpc) +// .await; +// +// if let Some(payload) = payload { +// (indices, Vec::new(), payload.to_string()) +// } else { +// (indices, Vec::new(), payload_legacy.unwrap().to_string()) +// } +// } +// (None, Some(addresses)) => { +// let (payload, payload_legacy, indices) = self +// .process_non_inclusion_proofs( +// address_merkle_tree_pubkeys.unwrap().as_slice(), +// addresses, +// rpc, +// ) +// .await; +// let payload_string = if let Some(payload) = payload { +// payload.to_string() +// } else { +// payload_legacy.unwrap().to_string() +// }; +// (Vec::::new(), indices, payload_string) +// } +// (Some(accounts), Some(addresses)) => { +// let (inclusion_payload, inclusion_payload_legacy, inclusion_indices) = self +// .process_inclusion_proofs(state_merkle_tree_pubkeys.unwrap(), accounts, rpc) +// .await; +// +// let ( +// non_inclusion_payload, +// non_inclusion_payload_legacy, +// non_inclusion_indices, +// ) = self +// .process_non_inclusion_proofs( +// address_merkle_tree_pubkeys.unwrap().as_slice(), +// addresses, +// rpc, +// ) +// .await; +// let json_payload = if let Some(non_inclusion_payload) = non_inclusion_payload { +// let public_input_hash = BigInt::from_bytes_be( +// num_bigint::Sign::Plus, +// &create_hash_chain_from_slice(&[ +// bigint_to_u8_32( +// &string_to_big_int( +// &inclusion_payload.as_ref().unwrap().public_input_hash, +// ) +// .unwrap(), +// ) +// .unwrap(), +// bigint_to_u8_32( +// &string_to_big_int(&non_inclusion_payload.public_input_hash) +// .unwrap(), +// ) +// .unwrap(), +// ]) +// .unwrap(), +// ); +// +// CombinedJsonStruct { +// circuit_type: ProofType::Combined.to_string(), +// state_tree_height: DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, +// address_tree_height: DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, +// public_input_hash: big_int_to_string(&public_input_hash), +// inclusion: inclusion_payload.unwrap().inputs, +// non_inclusion: non_inclusion_payload.inputs, +// } +// .to_string() +// } else if let Some(non_inclusion_payload) = non_inclusion_payload_legacy { +// CombinedJsonStructLegacy { +// circuit_type: ProofType::Combined.to_string(), +// state_tree_height: 26, +// address_tree_height: 26, +// inclusion: inclusion_payload_legacy.unwrap().inputs, +// non_inclusion: non_inclusion_payload.inputs, +// } +// .to_string() +// } else { +// panic!("Unsupported tree height") +// }; +// (inclusion_indices, non_inclusion_indices, json_payload) +// } +// _ => { +// panic!("At least one of compressed_accounts or new_addresses must be provided") +// } +// }; +// +// let mut retries = 3; +// while retries > 0 { +// let response_result = client +// .post(format!("{}{}", SERVER_ADDRESS, PROVE_PATH)) +// .header("Content-Type", "text/plain; charset=utf-8") +// .body(json_payload.clone()) +// .send() +// .await +// .expect("Failed to execute request."); +// if response_result.status().is_success() { +// let body = response_result.text().await.unwrap(); +// 
let proof_json = deserialize_gnark_proof_json(&body).unwrap(); +// let (proof_a, proof_b, proof_c) = proof_from_json_struct(proof_json); +// let (proof_a, proof_b, proof_c) = compress_proof(&proof_a, &proof_b, &proof_c); +// let root_indices = root_indices.iter().map(|x| Some(*x)).collect(); +// return ProofRpcResult { +// root_indices, +// address_root_indices, +// proof: CompressedProof { +// a: proof_a, +// b: proof_b, +// c: proof_c, +// }, +// }; +// } else { +// warn!("Error: {}", response_result.text().await.unwrap()); +// tokio::time::sleep(Duration::from_secs(1)).await; +// retries -= 1; +// } +// } +// panic!("Failed to get proof from server"); +// } +// +// /// Returns compressed accounts owned by the given `owner`. +// fn get_compressed_accounts_by_owner( +// &self, +// owner: &Pubkey, +// ) -> Vec { +// self.compressed_accounts +// .iter() +// .filter(|x| x.compressed_account.owner == *owner) +// .cloned() +// .collect() +// } +// } +// +// impl TestIndexer +// where +// R: RpcConnection + MerkleTreeExt, +// { +// pub async fn new( +// state_merkle_tree_accounts: &[StateMerkleTreeAccounts], +// address_merkle_tree_accounts: &[AddressMerkleTreeAccounts], +// inclusion: bool, +// non_inclusion: bool, +// ) -> Self { +// let state_merkle_trees = state_merkle_tree_accounts +// .iter() +// .map(|accounts| { +// let merkle_tree = Box::new(MerkleTree::::new( +// STATE_MERKLE_TREE_HEIGHT, +// STATE_MERKLE_TREE_CANOPY_DEPTH, +// )); +// StateMerkleTreeBundle { +// accounts: *accounts, +// merkle_tree, +// rollover_fee: FeeConfig::default().state_merkle_tree_rollover, +// } +// }) +// .collect::>(); +// +// let address_merkle_trees = address_merkle_tree_accounts +// .iter() +// .map(|accounts| Self::add_address_merkle_tree_bundle(accounts)) +// .collect::>(); +// +// let mut prover_config = ProverConfig { +// circuits: vec![], +// run_mode: None, +// }; +// +// if inclusion { +// prover_config.circuits.push(ProofType::Inclusion); +// } +// if non_inclusion { +// prover_config.circuits.push(ProofType::NonInclusion); +// } +// +// spawn_prover(true, prover_config).await; +// +// health_check(20, 1).await; +// +// Self { +// state_merkle_trees, +// address_merkle_trees, +// compressed_accounts: Vec::new(), +// nullified_compressed_accounts: Vec::new(), +// token_compressed_accounts: Vec::new(), +// token_nullified_compressed_accounts: Vec::new(), +// events: Vec::new(), +// _rpc: PhantomData, +// } +// } +// +// pub fn add_address_merkle_tree_bundle( +// accounts: &AddressMerkleTreeAccounts, +// // TODO: add config here +// ) -> AddressMerkleTreeBundle { +// let mut merkle_tree = Box::new( +// IndexedMerkleTree::::new( +// ADDRESS_MERKLE_TREE_HEIGHT, +// ADDRESS_MERKLE_TREE_CANOPY_DEPTH, +// ) +// .unwrap(), +// ); +// merkle_tree.init().unwrap(); +// let mut indexed_array = Box::>::default(); +// indexed_array.init().unwrap(); +// AddressMerkleTreeBundle { +// merkle_tree, +// indexed_array, +// accounts: *accounts, +// rollover_fee: FeeConfig::default().address_queue_rollover, +// } +// } +// +// async fn process_inclusion_proofs( +// &self, +// merkle_tree_pubkeys: &[Pubkey], +// accounts: &[[u8; 32]], +// rpc: &mut R, +// ) -> ( +// Option, +// Option, +// Vec, +// ) { +// let mut inclusion_proofs = Vec::new(); +// let mut root_indices = Vec::new(); +// let mut height = 0; +// +// for (i, account) in accounts.iter().enumerate() { +// let bundle = &self +// .state_merkle_trees +// .iter() +// .find(|x| x.accounts.merkle_tree == merkle_tree_pubkeys[i]) +// .unwrap(); +// let merkle_tree 
= &bundle.merkle_tree; +// let leaf_index = merkle_tree.get_leaf_index(account).unwrap(); +// println!("merkle_tree height {:?}", merkle_tree.height); +// let proof = merkle_tree.get_proof_of_leaf(leaf_index, true).unwrap(); +// println!("proof length {:?}", proof.len()); +// let merkle_tree_account = rpc +// .get_account(merkle_tree_pubkeys[i]) +// .await +// .unwrap() +// .unwrap(); +// +// let discriminator = merkle_tree_account.data[0..8].try_into().unwrap(); +// let version = match discriminator { +// StateMerkleTreeAccount::DISCRIMINATOR => 1, +// BatchedMerkleTreeMetadata::DISCRIMINATOR => 2, +// _ => panic!("Unsupported discriminator"), +// }; +// println!("bundle.version {:?}", version); +// if height == 0 { +// height = merkle_tree.height; +// } else { +// assert_eq!(height, merkle_tree.height); +// } +// inclusion_proofs.push(InclusionMerkleProofInputs { +// root: BigInt::from_be_bytes(merkle_tree.root().as_slice()), +// leaf: BigInt::from_be_bytes(account), +// path_index: BigInt::from_be_bytes(leaf_index.to_be_bytes().as_slice()), +// path_elements: proof.iter().map(|x| BigInt::from_be_bytes(x)).collect(), +// }); +// let (root_index, root) = if version == 1 { +// let fetched_merkle_tree = +// get_concurrent_merkle_tree::( +// rpc, +// merkle_tree_pubkeys[i], +// ) +// .await; +// // for i in 0..fetched_merkle_tree.roots.len() { +// // inf!("roots {:?} {:?}", i, fetched_merkle_tree.roots[i]); +// // } +// // info!( +// // "sequence number {:?}", +// // fetched_merkle_tree.sequence_number() +// // ); +// // info!("root index {:?}", fetched_merkle_tree.root_index()); +// // info!("local sequence number {:?}", merkle_tree.sequence_number); +// ( +// fetched_merkle_tree.root_index() as u32, +// fetched_merkle_tree.root(), +// ) +// } else { +// let mut merkle_tree_account = rpc +// .get_account(merkle_tree_pubkeys[i]) +// .await +// .unwrap() +// .unwrap(); +// let merkle_tree = BatchedMerkleTreeAccount::state_tree_from_bytes_mut( +// merkle_tree_account.data.as_mut_slice(), +// ) +// .unwrap(); +// ( +// merkle_tree.get_root_index(), +// merkle_tree.get_root().unwrap(), +// ) +// }; +// assert_eq!(merkle_tree.root(), root, "Merkle tree root mismatch"); +// +// root_indices.push(root_index as u16); +// } +// +// let (batch_inclusion_proof_inputs, legacy) = if height +// == DEFAULT_BATCH_STATE_TREE_HEIGHT as usize +// { +// let inclusion_proof_inputs = +// InclusionProofInputs::new(inclusion_proofs.as_slice()).unwrap(); +// ( +// Some(BatchInclusionJsonStruct::from_inclusion_proof_inputs( +// &inclusion_proof_inputs, +// )), +// None, +// ) +// } else if height == STATE_MERKLE_TREE_HEIGHT { +// let inclusion_proof_inputs = InclusionProofInputsLegacy(inclusion_proofs.as_slice()); +// ( +// None, +// Some(BatchInclusionJsonStructLegacy::from_inclusion_proof_inputs( +// &inclusion_proof_inputs, +// )), +// ) +// } else { +// panic!("Unsupported tree height") +// }; +// +// (batch_inclusion_proof_inputs, legacy, root_indices) +// } +// +// async fn process_non_inclusion_proofs( +// &self, +// address_merkle_tree_pubkeys: &[Pubkey], +// addresses: &[[u8; 32]], +// rpc: &mut R, +// ) -> ( +// Option, +// Option, +// Vec, +// ) { +// let mut non_inclusion_proofs = Vec::new(); +// let mut address_root_indices = Vec::new(); +// let mut tree_heights = Vec::new(); +// for (i, address) in addresses.iter().enumerate() { +// let address_tree = &self +// .address_merkle_trees +// .iter() +// .find(|x| x.accounts.merkle_tree == address_merkle_tree_pubkeys[i]) +// .unwrap(); +// 
tree_heights.push(address_tree.merkle_tree.merkle_tree.height); +// let proof_inputs = get_non_inclusion_proof_inputs( +// address, +// &address_tree.merkle_tree, +// &address_tree.indexed_array, +// ); +// non_inclusion_proofs.push(proof_inputs); +// let onchain_address_merkle_tree = rpc +// .get_address_merkle_tree(address_merkle_tree_pubkeys[i]) +// .await +// .unwrap(); +// address_root_indices.push(onchain_address_merkle_tree.root_index() as u16); +// } +// // if tree heights are not the same, panic +// if tree_heights.iter().any(|&x| x != tree_heights[0]) { +// panic!( +// "All address merkle trees must have the same height {:?}", +// tree_heights +// ); +// } +// +// let (batch_non_inclusion_proof_inputs, batch_non_inclusion_proof_inputs_legacy) = +// if tree_heights[0] == 26 { +// let non_inclusion_proof_inputs = +// NonInclusionProofInputsLegacy::new(non_inclusion_proofs.as_slice()); +// ( +// None, +// Some( +// BatchNonInclusionJsonStructLegacy::from_non_inclusion_proof_inputs( +// &non_inclusion_proof_inputs, +// ), +// ), +// ) +// } else if tree_heights[0] == 40 { +// let non_inclusion_proof_inputs = +// NonInclusionProofInputs::new(non_inclusion_proofs.as_slice()).unwrap(); +// ( +// Some( +// BatchNonInclusionJsonStruct::from_non_inclusion_proof_inputs( +// &non_inclusion_proof_inputs, +// ), +// ), +// None, +// ) +// } else { +// panic!("Unsupported tree height") +// }; +// ( +// batch_non_inclusion_proof_inputs, +// batch_non_inclusion_proof_inputs_legacy, +// address_root_indices, +// ) +// } +// +// /// deserializes an event +// /// adds the output_compressed_accounts to the compressed_accounts +// /// removes the input_compressed_accounts from the compressed_accounts +// /// adds the input_compressed_accounts to the nullified_compressed_accounts +// /// deserialiazes token data from the output_compressed_accounts +// /// adds the token_compressed_accounts to the token_compressed_accounts +// pub fn add_compressed_accounts_with_token_data(&mut self, event: &PublicTransactionEvent) { +// self.add_event_and_compressed_accounts(event); +// } +// } diff --git a/sdk-libs/sdk/src/proof.rs b/sdk-libs/sdk/src/proof.rs index def692137..c4fac7fa3 100644 --- a/sdk-libs/sdk/src/proof.rs +++ b/sdk-libs/sdk/src/proof.rs @@ -41,3 +41,11 @@ pub struct ProofRpcResult { pub root_indices: Vec>, pub address_root_indices: Vec, } + +#[derive(Debug, Default)] +pub struct BatchedTreeProofRpcResult { + pub proof: Option, + // If none -> proof by index, else included in zkp + pub root_indices: Vec>, + pub address_root_indices: Vec, +} \ No newline at end of file From 99b3e60448c483d02b5f7a1a99114da1ca56c4cc Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Thu, 9 Jan 2025 10:13:55 +0000 Subject: [PATCH 07/27] - spl with conversions - impl Default for sdk::CompressedProof --- program-tests/utils/src/spl.rs | 98 +++++++++++++++++++--------------- sdk-libs/sdk/src/proof.rs | 10 ++++ 2 files changed, 65 insertions(+), 43 deletions(-) diff --git a/program-tests/utils/src/spl.rs b/program-tests/utils/src/spl.rs index 05ff9bafe..0237f8979 100644 --- a/program-tests/utils/src/spl.rs +++ b/program-tests/utils/src/spl.rs @@ -44,7 +44,7 @@ use crate::{ assert_compressed_tx::get_merkle_tree_snapshots, assert_token_tx::{assert_create_mint, assert_mint_to, assert_transfer}, }; -use crate::conversions::{program_to_sdk_public_transaction_event, sdk_to_program_compressed_account, sdk_to_program_compressed_account_with_merkle_context, sdk_to_program_compressed_proof, sdk_to_program_merkle_context, 
sdk_to_program_token_data}; +use crate::conversions::{program_to_sdk_public_transaction_event, program_to_sdk_token_data, sdk_to_program_compressed_account, sdk_to_program_compressed_account_with_merkle_context, sdk_to_program_compressed_proof, sdk_to_program_merkle_context, sdk_to_program_token_data}; pub async fn mint_tokens_helper + TestIndexerExtensions>( rpc: &mut R, @@ -107,7 +107,7 @@ pub async fn mint_spl_tokens( } #[allow(clippy::too_many_arguments)] -pub async fn mint_tokens_helper_with_lamports>( +pub async fn mint_tokens_helper_with_lamports + TestIndexerExtensions>( rpc: &mut R, test_indexer: &mut I, merkle_tree_pubkey: &Pubkey, @@ -131,7 +131,7 @@ pub async fn mint_tokens_helper_with_lamports>( .await; } #[allow(clippy::too_many_arguments)] -pub async fn mint_tokens_22_helper_with_lamports>( +pub async fn mint_tokens_22_helper_with_lamports + TestIndexerExtensions>( rpc: &mut R, test_indexer: &mut I, merkle_tree_pubkey: &Pubkey, @@ -158,7 +158,7 @@ pub async fn mint_tokens_22_helper_with_lamports } #[allow(clippy::too_many_arguments)] -pub async fn mint_tokens_22_helper_with_lamports_and_bump>( +pub async fn mint_tokens_22_helper_with_lamports_and_bump + TestIndexerExtensions>( rpc: &mut R, test_indexer: &mut I, merkle_tree_pubkey: &Pubkey, @@ -208,7 +208,7 @@ pub async fn mint_tokens_22_helper_with_lamports_and_bump( } #[allow(clippy::too_many_arguments)] -pub async fn compressed_transfer_test>( +pub async fn compressed_transfer_test + TestIndexerExtensions>( payer: &Keypair, rpc: &mut R, test_indexer: &mut I, @@ -486,7 +486,7 @@ pub async fn compressed_transfer_test>( recipients: &[Pubkey], amounts: &[u64], lamports: Option>>, - input_compressed_accounts: &[TokenDataWithContext], + input_compressed_accounts: &[TokenDataWithMerkleContext], output_merkle_tree_pubkeys: &[Pubkey], delegate_change_account_index: Option, delegate_is_signer: bool, @@ -624,7 +624,7 @@ pub async fn compressed_transfer_22_test + TestI &input_merkle_tree_context, &output_compressed_accounts, &rpc_result.root_indices, - &Some(sdk_to_program_compressed_proof(rpc_result.proof)), + &Some(sdk_to_program_compressed_proof(rpc_result.proof.unwrap_or_default())), &input_compressed_account_token_data .into_iter() .map(sdk_to_program_token_data) @@ -697,7 +697,7 @@ pub async fn compressed_transfer_22_test + TestI .unwrap(); let slot = rpc.get_slot().await.unwrap(); let (created_change_output_account, created_token_output_accounts) = - test_indexer.add_event_and_compressed_accounts(slot, &program_to_sdk_public_transaction_event(event)); + test_indexer.add_event_and_compressed_accounts(slot, &program_to_sdk_public_transaction_event(event.clone())); let delegates = if let Some(index) = delegate_change_account_index { let mut delegates = vec![None; created_token_output_accounts.len()]; delegates[index as usize] = Some(payer.pubkey()); @@ -788,7 +788,7 @@ pub async fn decompress_test + TestIndexerExtens .collect::>(), // input_compressed_account_merkle_tree_pubkeys &[change_out_compressed_account], // output_compressed_accounts &proof_rpc_result.root_indices, // root_indices - &Some(sdk_to_program_compressed_proof(proof_rpc_result.proof)), + &Some(sdk_to_program_compressed_proof(proof_rpc_result.proof.unwrap_or_default())), input_compressed_accounts .iter() .map(|x| sdk_to_program_token_data(x.token_data.clone())) @@ -798,7 +798,6 @@ pub async fn decompress_test + TestIndexerExtens .iter() .map(|x| &x.compressed_account.compressed_account) .map(|x| sdk_to_program_compressed_account(x.clone())) - .cloned() 
.collect::>(), mint, // mint None, // owner_if_delegate_change_account_index @@ -990,7 +989,7 @@ pub async fn perform_compress_spl_token_account }; assert_eq!( created_compressed_token_account.token_data, - expected_token_data + program_to_sdk_token_data(expected_token_data) ); assert_eq!( created_compressed_token_account @@ -1011,7 +1010,7 @@ pub async fn perform_compress_spl_token_account } #[allow(clippy::too_many_arguments)] -pub async fn compress_test>( +pub async fn compress_test + TestIndexerExtensions>( payer: &Keypair, rpc: &mut R, test_indexer: &mut I, @@ -1079,7 +1078,7 @@ pub async fn compress_test>( .unwrap() .unwrap(); let slot = rpc.get_slot().await.unwrap(); - let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(slot, &event); + let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(slot, &program_to_sdk_public_transaction_event(event.clone())); assert_transfer( rpc, @@ -1088,6 +1087,7 @@ pub async fn compress_test>( created_output_accounts .iter() .map(|x| x.compressed_account.clone()) + .map(sdk_to_program_compressed_account_with_merkle_context) .collect::>() .as_slice(), None, @@ -1114,11 +1114,11 @@ pub async fn compress_test>( } #[allow(clippy::too_many_arguments)] -pub async fn approve_test>( +pub async fn approve_test + TestIndexerExtensions>( authority: &Keypair, rpc: &mut R, test_indexer: &mut I, - input_compressed_accounts: Vec, + input_compressed_accounts: Vec, delegated_amount: u64, delegate_lamports: Option, delegate: &Pubkey, @@ -1155,15 +1155,17 @@ pub async fn approve_test>( input_merkle_contexts: input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context) + .map(sdk_to_program_merkle_context) .collect(), input_token_data: input_compressed_accounts .iter() .map(|x| x.token_data.clone()) + .map(sdk_to_program_token_data) .collect(), input_compressed_accounts: input_compressed_accounts .iter() .map(|x| &x.compressed_account.compressed_account) - .cloned() + .map(|x| sdk_to_program_compressed_account(x.clone())) .collect::>(), mint, delegated_amount, @@ -1172,7 +1174,7 @@ pub async fn approve_test>( change_compressed_account_merkle_tree: *change_compressed_account_merkle_tree, delegate: *delegate, root_indices: proof_rpc_result.root_indices, - proof: proof_rpc_result.proof.unwrap_or_default(), + proof: sdk_to_program_compressed_proof(proof_rpc_result.proof.unwrap_or_default()), }; let instruction = create_approve_instruction(inputs).unwrap(); @@ -1228,7 +1230,7 @@ pub async fn approve_test>( .unwrap() .unwrap(); let slot = rpc.get_slot().await.unwrap(); - let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(slot, &event); + let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(slot, &program_to_sdk_public_transaction_event(event.clone())); let expected_delegated_token_data = TokenData { mint, @@ -1241,7 +1243,7 @@ pub async fn approve_test>( assert_eq!( expected_delegated_token_data, - created_output_accounts[0].token_data + sdk_to_program_token_data(created_output_accounts[0].token_data.clone()) ); let mut expected_token_data = vec![expected_delegated_token_data]; let mut delegates = vec![Some(*delegate)]; @@ -1256,7 +1258,7 @@ pub async fn approve_test>( }; assert_eq!( expected_change_token_data, - created_output_accounts[1].token_data + sdk_to_program_token_data(created_output_accounts[1].token_data.clone()) ); expected_token_data.push(expected_change_token_data); delegates.push(None); @@ -1272,6 +1274,7 @@ pub async fn 
approve_test>( created_output_accounts .iter() .map(|x| x.compressed_account.clone()) + .map(sdk_to_program_compressed_account_with_merkle_context) .collect::>() .as_slice(), change_lamports, @@ -1285,11 +1288,11 @@ pub async fn approve_test>( } #[allow(clippy::too_many_arguments)] -pub async fn revoke_test>( +pub async fn revoke_test + TestIndexerExtensions>( authority: &Keypair, rpc: &mut R, test_indexer: &mut I, - input_compressed_accounts: Vec, + input_compressed_accounts: Vec, output_account_merkle_tree: &Pubkey, transaction_params: Option, ) { @@ -1317,20 +1320,22 @@ pub async fn revoke_test>( input_merkle_contexts: input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context) + .map(sdk_to_program_merkle_context) .collect(), input_token_data: input_compressed_accounts .iter() .map(|x| x.token_data.clone()) + .map(sdk_to_program_token_data) .collect(), input_compressed_accounts: input_compressed_accounts .iter() .map(|x| &x.compressed_account.compressed_account) - .cloned() + .map(|x| sdk_to_program_compressed_account(x.clone())) .collect::>(), mint, output_account_merkle_tree: *output_account_merkle_tree, root_indices: proof_rpc_result.root_indices, - proof: proof_rpc_result.proof.unwrap_or_default(), + proof: sdk_to_program_compressed_proof(proof_rpc_result.proof.unwrap_or_default()), }; let instruction = create_revoke_instruction(inputs).unwrap(); @@ -1355,7 +1360,7 @@ pub async fn revoke_test>( .unwrap() .unwrap(); let slot = rpc.get_slot().await.unwrap(); - let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(slot, &event); + let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(slot, &program_to_sdk_public_transaction_event(event.clone())); let input_amount = input_compressed_accounts .iter() .map(|x| x.token_data.amount) @@ -1368,7 +1373,7 @@ pub async fn revoke_test>( state: AccountState::Initialized, tlv: None, }; - assert_eq!(expected_token_data, created_output_accounts[0].token_data); + assert_eq!(expected_token_data, sdk_to_program_token_data(created_output_accounts[0].token_data.clone())); let expected_compressed_output_accounts = create_expected_token_output_data(vec![expected_token_data], &output_merkle_tree_pubkeys); let sum_inputs = input_compressed_accounts @@ -1387,6 +1392,7 @@ pub async fn revoke_test>( created_output_accounts .iter() .map(|x| x.compressed_account.clone()) + .map(sdk_to_program_compressed_account_with_merkle_context) .collect::>() .as_slice(), change_lamports, @@ -1399,11 +1405,11 @@ pub async fn revoke_test>( .await; } -pub async fn freeze_test>( +pub async fn freeze_test + TestIndexerExtensions>( authority: &Keypair, rpc: &mut R, test_indexer: &mut I, - input_compressed_accounts: Vec, + input_compressed_accounts: Vec, outputs_merkle_tree: &Pubkey, transaction_params: Option, ) { @@ -1418,11 +1424,11 @@ pub async fn freeze_test>( .await; } -pub async fn thaw_test>( +pub async fn thaw_test + TestIndexerExtensions>( authority: &Keypair, rpc: &mut R, test_indexer: &mut I, - input_compressed_accounts: Vec, + input_compressed_accounts: Vec, outputs_merkle_tree: &Pubkey, transaction_params: Option, ) { @@ -1437,11 +1443,11 @@ pub async fn thaw_test>( .await; } -pub async fn freeze_or_thaw_test>( +pub async fn freeze_or_thaw_test + TestIndexerExtensions>( authority: &Keypair, rpc: &mut R, test_indexer: &mut I, - input_compressed_accounts: Vec, + input_compressed_accounts: Vec, outputs_merkle_tree: &Pubkey, transaction_params: Option, ) { @@ -1469,19 +1475,21 @@ pub async fn 
freeze_or_thaw_test>(), outputs_merkle_tree: *outputs_merkle_tree, root_indices: proof_rpc_result.root_indices, - proof: proof_rpc_result.proof.unwrap_or_default(), + proof: sdk_to_program_compressed_proof(proof_rpc_result.proof.unwrap_or_default()), }; let instruction = create_instruction::(inputs).unwrap(); @@ -1507,7 +1515,7 @@ pub async fn freeze_or_thaw_test>() .as_slice(), change_lamports, @@ -1571,11 +1580,11 @@ pub async fn freeze_or_thaw_test>( +pub async fn burn_test + TestIndexerExtensions>( authority: &Keypair, rpc: &mut R, test_indexer: &mut I, - input_compressed_accounts: Vec, + input_compressed_accounts: Vec, change_account_merkle_tree: &Pubkey, burn_amount: u64, signer_is_delegate: bool, @@ -1639,7 +1648,7 @@ pub async fn burn_test>( .unwrap() .unwrap(); let slot = rpc.get_slot().await.unwrap(); - let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(slot, &event); + let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(slot, &program_to_sdk_public_transaction_event(event.clone())); let mut delegates = Vec::new(); let mut expected_output_accounts = Vec::new(); @@ -1682,6 +1691,7 @@ pub async fn burn_test>( created_output_accounts .iter() .map(|x| x.compressed_account.clone()) + .map(sdk_to_program_compressed_account_with_merkle_context) .collect::>() .as_slice(), change_lamports, @@ -1714,11 +1724,11 @@ pub enum BurnInstructionMode { } #[allow(clippy::too_many_arguments)] -pub async fn create_burn_test_instruction>( +pub async fn create_burn_test_instruction + TestIndexerExtensions>( authority: &Keypair, rpc: &mut R, test_indexer: &mut I, - input_compressed_accounts: &[TokenDataWithContext], + input_compressed_accounts: &[TokenDataWithMerkleContext], change_account_merkle_tree: &Pubkey, burn_amount: u64, signer_is_delegate: bool, @@ -1756,7 +1766,7 @@ pub async fn create_burn_test_instruction>( c: proof_rpc_result.proof.as_ref().unwrap().a, // flip c to make proof invalid but not run into decompress errors } } else { - proof_rpc_result.proof.unwrap_or_default() + sdk_to_program_compressed_proof(proof_rpc_result.proof.unwrap_or_default()) }; let inputs = CreateBurnInstructionInputs { fee_payer: rpc.get_payer().pubkey(), @@ -1764,15 +1774,17 @@ pub async fn create_burn_test_instruction>( input_merkle_contexts: input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context) + .map(sdk_to_program_merkle_context) .collect(), input_token_data: input_compressed_accounts .iter() .map(|x| x.token_data.clone()) + .map(sdk_to_program_token_data) .collect(), input_compressed_accounts: input_compressed_accounts .iter() .map(|x| &x.compressed_account.compressed_account) - .cloned() + .map(|x| sdk_to_program_compressed_account(x.clone())) .collect::>(), change_account_merkle_tree: *change_account_merkle_tree, root_indices: proof_rpc_result.root_indices, diff --git a/sdk-libs/sdk/src/proof.rs b/sdk-libs/sdk/src/proof.rs index c4fac7fa3..9f579faf9 100644 --- a/sdk-libs/sdk/src/proof.rs +++ b/sdk-libs/sdk/src/proof.rs @@ -35,6 +35,16 @@ pub struct CompressedProof { pub c: [u8; 32], } +impl Default for CompressedProof { + fn default() -> Self { + Self { + a: [0; 32], + b: [0; 64], + c: [0; 32], + } + } +} + #[derive(Debug, AnchorDeserialize, AnchorSerialize)] pub struct ProofRpcResult { pub proof: CompressedProof, From ad8dd64bd425d23b39b85e53d3fa132c9574e879 Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Thu, 9 Jan 2025 10:18:19 +0000 Subject: [PATCH 08/27] - removed `TokenDataWithMerkleContext` from the 
client/indexer module a - updated `e2e_test_env.rs` accordingly --- program-tests/utils/src/e2e_test_env.rs | 4 ++-- sdk-libs/client/src/indexer/mod.rs | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/program-tests/utils/src/e2e_test_env.rs b/program-tests/utils/src/e2e_test_env.rs index 0b21d30f9..0dab68a64 100644 --- a/program-tests/utils/src/e2e_test_env.rs +++ b/program-tests/utils/src/e2e_test_env.rs @@ -87,7 +87,6 @@ use light_client::{ // context trait: send_transaction -> return transaction result, get_account_info -> return account info // indexer trait: get_compressed_accounts_by_owner -> return compressed accounts, // refactor all tests to work with that so that we can run all tests with a test validator and concurrency -use light_compressed_token::token_data::AccountState; use light_hasher::Poseidon; use light_indexed_merkle_tree::{ array::IndexedArray, reference::IndexedMerkleTree, HIGHEST_ADDRESS_PLUS_ONE, @@ -121,9 +120,10 @@ use solana_sdk::{ signer::{SeedDerivable, Signer}, }; use spl_token::solana_program::native_token::LAMPORTS_PER_SOL; -use light_client::indexer::{AddressMerkleTreeAccounts, AddressMerkleTreeBundle, Indexer, StateMerkleTreeAccounts, StateMerkleTreeBundle, TokenDataWithMerkleContext}; +use light_client::indexer::{AddressMerkleTreeAccounts, AddressMerkleTreeBundle, Indexer, StateMerkleTreeAccounts, StateMerkleTreeBundle}; use light_client::rpc::merkle_tree::MerkleTreeExt; use light_program_test::indexer::{TestIndexer, TestIndexerExtensions}; +use light_sdk::token::{AccountState, TokenDataWithMerkleContext}; use crate::{ address_tree_rollover::{ assert_rolled_over_address_merkle_tree_and_queue, diff --git a/sdk-libs/client/src/indexer/mod.rs b/sdk-libs/client/src/indexer/mod.rs index c471c13df..ebc1db0a4 100644 --- a/sdk-libs/client/src/indexer/mod.rs +++ b/sdk-libs/client/src/indexer/mod.rs @@ -113,11 +113,11 @@ pub trait Indexer: Sync + Send + Debug + 'static { fn get_address_merkle_trees(&self) -> &Vec; } -#[derive(Debug, Clone)] -pub struct TokenDataWithMerkleContext { - pub token_data: TokenData, - pub compressed_account: CompressedAccountWithMerkleContext, -} +// #[derive(Debug, Clone)] +// pub struct TokenDataWithMerkleContext { +// pub token_data: TokenData, +// pub compressed_account: CompressedAccountWithMerkleContext, +// } #[derive(Debug, Clone)] pub struct MerkleProof { From 0b318e2424eece7645cde33a2f4ef95363d93689 Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Thu, 9 Jan 2025 10:28:42 +0000 Subject: [PATCH 09/27] refactor program-test/utils* --- program-tests/utils/src/assert_token_tx.rs | 6 +++--- .../src/create_address_test_program_sdk.rs | 14 ++++++++------ program-tests/utils/src/system_program.rs | 17 ++++++++++++----- .../program-test/src/indexer/test_indexer.rs | 16 ++++++++-------- 4 files changed, 31 insertions(+), 22 deletions(-) diff --git a/program-tests/utils/src/assert_token_tx.rs b/program-tests/utils/src/assert_token_tx.rs index 82846ca42..0da145faf 100644 --- a/program-tests/utils/src/assert_token_tx.rs +++ b/program-tests/utils/src/assert_token_tx.rs @@ -1,12 +1,12 @@ use anchor_lang::AnchorSerialize; -use forester_utils::indexer::{Indexer, TokenDataWithContext}; use light_client::rpc::RpcConnection; use light_compressed_token::process_transfer::{get_cpi_authority_pda, TokenTransferOutputData}; use light_system_program::sdk::{ compressed_account::CompressedAccountWithMerkleContext, event::PublicTransactionEvent, }; use solana_sdk::{program_pack::Pack, pubkey::Pubkey}; - +use 
light_client::indexer::Indexer; +use light_sdk::token::TokenDataWithMerkleContext; use crate::assert_compressed_tx::{ assert_merkle_tree_after_tx, assert_nullifiers_exist_in_hash_sets, assert_public_transaction_event, MerkleTreeTestSnapShot, @@ -195,7 +195,7 @@ pub async fn assert_mint_to<'a, R: RpcConnection, I: Indexer>( mint: Pubkey, amounts: &[u64], snapshots: &[MerkleTreeTestSnapShot], - created_token_accounts: &[TokenDataWithContext], + created_token_accounts: &[TokenDataWithMerkleContext], previous_mint_supply: u64, previous_sol_pool_amount: u64, token_pool_pda: Pubkey, diff --git a/program-tests/utils/src/create_address_test_program_sdk.rs b/program-tests/utils/src/create_address_test_program_sdk.rs index b9b962a51..b89483869 100644 --- a/program-tests/utils/src/create_address_test_program_sdk.rs +++ b/program-tests/utils/src/create_address_test_program_sdk.rs @@ -11,8 +11,10 @@ use light_system_program::{ NewAddressParams, }; use solana_sdk::{instruction::Instruction, pubkey::Pubkey, signature::Keypair, signer::Signer}; - -use crate::{indexer::TestIndexer, Indexer}; +use light_client::indexer::Indexer; +use light_client::rpc::merkle_tree::MerkleTreeExt; +use light_program_test::indexer::{TestIndexer, TestIndexerExtensions}; +use crate::conversions::sdk_to_program_compressed_proof; #[derive(Debug, Clone)] pub struct CreateCompressedPdaInstructionInputs<'a> { @@ -68,7 +70,7 @@ pub fn create_pda_instruction(input_params: CreateCompressedPdaInstructionInputs } } -pub async fn perform_create_pda_with_event_rnd( +pub async fn perform_create_pda_with_event_rnd( test_indexer: &mut TestIndexer, rpc: &mut R, env: &EnvAccounts, @@ -78,8 +80,8 @@ pub async fn perform_create_pda_with_event_rnd( let data = rand::random(); perform_create_pda_with_event(test_indexer, rpc, env, payer, seed, &data).await } -pub async fn perform_create_pda_with_event( - test_indexer: &mut TestIndexer, +pub async fn perform_create_pda_with_event + TestIndexerExtensions>( + test_indexer: &mut I, rpc: &mut R, env: &EnvAccounts, payer: &Keypair, @@ -126,7 +128,7 @@ pub async fn perform_create_pda_with_event( data: *data, signer: &payer.pubkey(), output_compressed_account_merkle_tree_pubkey: &env.merkle_tree_pubkey, - proof: &rpc_result.proof, + proof: &sdk_to_program_compressed_proof(rpc_result.proof), new_address_params, registered_program_pda: &env.registered_program_pda, diff --git a/program-tests/utils/src/system_program.rs b/program-tests/utils/src/system_program.rs index 2ee1c636d..3e4c6e4c9 100644 --- a/program-tests/utils/src/system_program.rs +++ b/program-tests/utils/src/system_program.rs @@ -23,6 +23,7 @@ use light_program_test::indexer::TestIndexerExtensions; use crate::assert_compressed_tx::{ assert_compressed_transaction, get_merkle_tree_snapshots, AssertCompressedTransactionInputs, }; +use crate::conversions::{program_to_sdk_public_transaction_event, sdk_to_program_compressed_account_with_merkle_context, sdk_to_program_compressed_proof}; #[allow(clippy::too_many_arguments)] pub async fn create_addresses_test + TestIndexerExtensions>( @@ -104,7 +105,7 @@ pub async fn create_addresses_test + TestIndexer } #[allow(clippy::too_many_arguments)] -pub async fn compress_sol_test>( +pub async fn compress_sol_test + TestIndexerExtensions>( rpc: &mut R, test_indexer: &mut I, authority: &Keypair, @@ -163,7 +164,7 @@ pub async fn compress_sol_test>( } #[allow(clippy::too_many_arguments)] -pub async fn decompress_sol_test>( +pub async fn decompress_sol_test + TestIndexerExtensions>( rpc: &mut R, test_indexer: &mut I, 
authority: &Keypair, @@ -207,7 +208,7 @@ pub async fn decompress_sol_test>( } #[allow(clippy::too_many_arguments)] -pub async fn transfer_compressed_sol_test>( +pub async fn transfer_compressed_sol_test + TestIndexerExtensions>( rpc: &mut R, test_indexer: &mut I, authority: &Keypair, @@ -351,7 +352,7 @@ pub async fn compressed_transaction_test + TestI .await; root_indices = proof_rpc_res.root_indices; - proof = proof_rpc_res.proof; + proof = Some(sdk_to_program_compressed_proof(proof_rpc_res.proof.unwrap_or_default())); let input_merkle_tree_accounts = inputs .test_indexer .get_state_merkle_tree_accounts(state_input_merkle_trees.unwrap_or(vec![]).as_slice()); @@ -432,7 +433,13 @@ pub async fn compressed_transaction_test + TestI let slot = inputs.rpc.get_transaction_slot(&event.1).await.unwrap(); let (created_output_compressed_accounts, _) = inputs .test_indexer - .add_event_and_compressed_accounts(slot, &event.0); + .add_event_and_compressed_accounts(slot, &program_to_sdk_public_transaction_event(event.0.clone())); + + let created_output_compressed_accounts = created_output_compressed_accounts + .into_iter() + .map(sdk_to_program_compressed_account_with_merkle_context) + .collect::>(); + let input = AssertCompressedTransactionInputs { rpc: inputs.rpc, test_indexer: inputs.test_indexer, diff --git a/sdk-libs/program-test/src/indexer/test_indexer.rs b/sdk-libs/program-test/src/indexer/test_indexer.rs index dcb4d84d3..d191f0899 100644 --- a/sdk-libs/program-test/src/indexer/test_indexer.rs +++ b/sdk-libs/program-test/src/indexer/test_indexer.rs @@ -1876,12 +1876,12 @@ where Ok(proofs) } - // pub(crate) fn get_address_merkle_tree( - // &self, - // merkle_tree_pubkey: Pubkey, - // ) -> Option<&AddressMerkleTreeBundle> { - // self.address_merkle_trees - // .iter() - // .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) - // } + pub fn get_address_merkle_tree( + &self, + merkle_tree_pubkey: Pubkey, + ) -> Option<&AddressMerkleTreeBundle> { + self.address_merkle_trees + .iter() + .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) + } } From a42d4e39796073564f37ba3cb4d35d7ee1bb324d Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Thu, 9 Jan 2025 10:36:41 +0000 Subject: [PATCH 10/27] Add `TestIndexerExtensions` methods and update type constraints Introduces `get_address_merkle_tree` and `add_compressed_accounts_with_token_data` methods to `TestIndexerExtensions`. Removes redundant method definitions from `TestIndexer`. Adjusts type constraints in utility functions to depend on the updated `TestIndexerExtensions` trait. 
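For orientation, a minimal sketch of how a test helper can consume the two new trait methods while staying generic over the indexer. The helper name `record_event` and the `<R>` parameters on `Indexer` and `TestIndexerExtensions` are assumptions for illustration; the import paths and method signatures follow the diff below.

    use light_client::indexer::Indexer;
    use light_client::rpc::RpcConnection;
    use light_program_test::indexer::TestIndexerExtensions;
    use light_system_program::sdk::event::PublicTransactionEvent;

    // Hypothetical helper: bounds on the extension trait replace a concrete
    // TestIndexer parameter, matching the constraint changes in this patch.
    fn record_event<R, I>(_rpc: &mut R, indexer: &mut I, slot: u64, event: &PublicTransactionEvent)
    where
        R: RpcConnection,
        I: Indexer<R> + TestIndexerExtensions<R>,
    {
        // Forwards to add_event_and_compressed_accounts in the TestIndexer
        // impl (see the test_indexer.rs hunk below).
        indexer.add_compressed_accounts_with_token_data(slot, event);

        // The companion lookup added here returns a bundle by tree pubkey:
        // indexer.get_address_merkle_tree(some_pubkey)
    }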
--- .../src/create_address_test_program_sdk.rs | 9 ++---
 .../program-test/src/indexer/extensions.rs | 7 ++++
 .../program-test/src/indexer/test_indexer.rs | 39 ++++++++++---------
 3 files changed, 32 insertions(+), 23 deletions(-)

diff --git a/program-tests/utils/src/create_address_test_program_sdk.rs b/program-tests/utils/src/create_address_test_program_sdk.rs
index b89483869..3a1ecb771 100644
--- a/program-tests/utils/src/create_address_test_program_sdk.rs
+++ b/program-tests/utils/src/create_address_test_program_sdk.rs
@@ -12,8 +12,7 @@ use light_system_program::{
 };
 use solana_sdk::{instruction::Instruction, pubkey::Pubkey, signature::Keypair, signer::Signer};
 use light_client::indexer::Indexer;
-use light_client::rpc::merkle_tree::MerkleTreeExt;
-use light_program_test::indexer::{TestIndexer, TestIndexerExtensions};
+use light_program_test::indexer::TestIndexerExtensions;
 use crate::conversions::sdk_to_program_compressed_proof;

 #[derive(Debug, Clone)]
@@ -70,8 +69,8 @@ pub fn create_pda_instruction(input_params: CreateCompressedPdaInstructionInputs
 }
 }
-pub async fn perform_create_pda_with_event_rnd(
-    test_indexer: &mut TestIndexer,
+pub async fn perform_create_pda_with_event_rnd + TestIndexerExtensions>(
+    test_indexer: &mut I,
     rpc: &mut R,
     env: &EnvAccounts,
     payer: &Keypair,
@@ -80,7 +79,7 @@ pub async fn perform_create_pda_with_event_rnd
     let data = rand::random();
     perform_create_pda_with_event(test_indexer, rpc, env, payer, seed, &data).await
 }
-pub async fn perform_create_pda_with_event + TestIndexerExtensions>(
+pub async fn perform_create_pda_with_event + TestIndexerExtensions>(
     test_indexer: &mut I,
     rpc: &mut R,
     env: &EnvAccounts,
diff --git a/sdk-libs/program-test/src/indexer/extensions.rs b/sdk-libs/program-test/src/indexer/extensions.rs
index bbe947ae3..a3be164fd 100644
--- a/sdk-libs/program-test/src/indexer/extensions.rs
+++ b/sdk-libs/program-test/src/indexer/extensions.rs
@@ -10,6 +10,13 @@ use solana_sdk::signature::Keypair;

 #[async_trait]
 pub trait TestIndexerExtensions: Indexer {
+    fn get_address_merkle_tree(
+        &self,
+        merkle_tree_pubkey: Pubkey,
+    ) -> Option<&AddressMerkleTreeBundle>;
+
+    fn add_compressed_accounts_with_token_data(&mut self, slot: u64, event: &PublicTransactionEvent);
+
     fn account_nullified(&mut self, merkle_tree_pubkey: Pubkey, account_hash: &str);

     fn address_tree_updated(
diff --git a/sdk-libs/program-test/src/indexer/test_indexer.rs b/sdk-libs/program-test/src/indexer/test_indexer.rs
index d191f0899..7d65fcebe 100644
--- a/sdk-libs/program-test/src/indexer/test_indexer.rs
+++ b/sdk-libs/program-test/src/indexer/test_indexer.rs
@@ -581,6 +581,27 @@ impl TestIndexerExtensions for TestIndexer
 where
     R: RpcConnection + MerkleTreeExt,
 {
+
+    fn get_address_merkle_tree(
+        &self,
+        merkle_tree_pubkey: Pubkey,
+    ) -> Option<&AddressMerkleTreeBundle> {
+        self.address_merkle_trees
+            .iter()
+            .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey)
+    }
+
+    /// deserializes an event
+    /// adds the output_compressed_accounts to the compressed_accounts
+    /// removes the input_compressed_accounts from the compressed_accounts
+    /// adds the input_compressed_accounts to the nullified_compressed_accounts
+    /// deserializes token data from the output_compressed_accounts
+    /// adds the token_compressed_accounts to the token_compressed_accounts
+    fn add_compressed_accounts_with_token_data(&mut self, slot: u64, event: &PublicTransactionEvent) {
+        self.add_event_and_compressed_accounts(slot, event);
+    }
+
+
    fn account_nullified(&mut self, merkle_tree_pubkey: Pubkey,
account_hash: &str) { let decoded_hash: [u8; 32] = bs58::decode(account_hash) .into_vec() @@ -1529,15 +1550,6 @@ where self.add_event_and_compressed_accounts(slot, &event); } - /// deserializes an event - /// adds the output_compressed_accounts to the compressed_accounts - /// removes the input_compressed_accounts from the compressed_accounts - /// adds the input_compressed_accounts to the nullified_compressed_accounts - /// deserialiazes token data from the output_compressed_accounts - /// adds the token_compressed_accounts to the token_compressed_accounts - pub fn add_compressed_accounts_with_token_data(&mut self, slot: u64, event: &PublicTransactionEvent) { - self.add_event_and_compressed_accounts(slot, event); - } /// returns the compressed sol balance of the owner pubkey pub fn get_compressed_balance(&self, owner: &Pubkey) -> u64 { @@ -1875,13 +1887,4 @@ where } Ok(proofs) } - - pub fn get_address_merkle_tree( - &self, - merkle_tree_pubkey: Pubkey, - ) -> Option<&AddressMerkleTreeBundle> { - self.address_merkle_trees - .iter() - .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) - } } From 86ac4d2dca5689de27f3c449a6c145bea0b0b2c0 Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Thu, 9 Jan 2025 10:43:40 +0000 Subject: [PATCH 11/27] program-tests/* refactor to include TestIndexerExtensions --- program-tests/utils/src/assert_compressed_tx.rs | 9 +++++---- program-tests/utils/src/assert_token_tx.rs | 7 ++++--- program-tests/utils/src/e2e_test_env.rs | 11 +++++++++-- program-tests/utils/src/spl.rs | 2 +- 4 files changed, 19 insertions(+), 10 deletions(-) diff --git a/program-tests/utils/src/assert_compressed_tx.rs b/program-tests/utils/src/assert_compressed_tx.rs index fd18b06aa..1ad68f6d9 100644 --- a/program-tests/utils/src/assert_compressed_tx.rs +++ b/program-tests/utils/src/assert_compressed_tx.rs @@ -2,7 +2,6 @@ use account_compression::{state::QueueAccount, StateMerkleTreeAccount}; use anchor_lang::Discriminator; use forester_utils::{ get_concurrent_merkle_tree, get_hash_set, - indexer::{Indexer, StateMerkleTreeAccounts}, AccountZeroCopy, }; use light_batched_merkle_tree::{ @@ -19,8 +18,10 @@ use light_system_program::sdk::{ use num_bigint::BigUint; use num_traits::FromBytes; use solana_sdk::{account::ReadableAccount, pubkey::Pubkey}; +use light_client::indexer::{Indexer, StateMerkleTreeAccounts}; +use light_program_test::indexer::TestIndexerExtensions; -pub struct AssertCompressedTransactionInputs<'a, R: RpcConnection, I: Indexer> { +pub struct AssertCompressedTransactionInputs<'a, R: RpcConnection, I: Indexer + TestIndexerExtensions> { pub rpc: &'a mut R, pub test_indexer: &'a mut I, pub output_compressed_accounts: &'a [CompressedAccount], @@ -48,7 +49,7 @@ pub struct AssertCompressedTransactionInputs<'a, R: RpcConnection, I: Indexer /// 5. Merkle tree was updated correctly /// 6. TODO: Fees have been paid (after fee refactor) /// 7. Check compression amount was transferred -pub async fn assert_compressed_transaction>( +pub async fn assert_compressed_transaction + TestIndexerExtensions>( input: AssertCompressedTransactionInputs<'_, R, I>, ) { // CHECK 1 @@ -316,7 +317,7 @@ pub struct MerkleTreeTestSnapShot { /// Asserts: /// 1. The root has been updated /// 2. 
The next index has been updated -pub async fn assert_merkle_tree_after_tx>( +pub async fn assert_merkle_tree_after_tx + TestIndexerExtensions>( rpc: &mut R, snapshots: &[MerkleTreeTestSnapShot], test_indexer: &mut I, diff --git a/program-tests/utils/src/assert_token_tx.rs b/program-tests/utils/src/assert_token_tx.rs index 0da145faf..77cfb735a 100644 --- a/program-tests/utils/src/assert_token_tx.rs +++ b/program-tests/utils/src/assert_token_tx.rs @@ -6,6 +6,7 @@ use light_system_program::sdk::{ }; use solana_sdk::{program_pack::Pack, pubkey::Pubkey}; use light_client::indexer::Indexer; +use light_program_test::indexer::TestIndexerExtensions; use light_sdk::token::TokenDataWithMerkleContext; use crate::assert_compressed_tx::{ assert_merkle_tree_after_tx, assert_nullifiers_exist_in_hash_sets, @@ -21,7 +22,7 @@ use crate::assert_compressed_tx::{ /// 6. Check compression amount was transferred (outside of this function) /// No addresses in token transactions #[allow(clippy::too_many_arguments)] -pub async fn assert_transfer>( +pub async fn assert_transfer + TestIndexerExtensions>( context: &mut R, test_indexer: &mut I, out_compressed_accounts: &[TokenTransferOutputData], @@ -78,7 +79,7 @@ pub async fn assert_transfer>( ); } -pub fn assert_compressed_token_accounts>( +pub fn assert_compressed_token_accounts + TestIndexerExtensions>( test_indexer: &mut I, out_compressed_accounts: &[TokenTransferOutputData], lamports: Option>>, @@ -188,7 +189,7 @@ pub fn assert_compressed_token_accounts>( } #[allow(clippy::too_many_arguments)] -pub async fn assert_mint_to<'a, R: RpcConnection, I: Indexer>( +pub async fn assert_mint_to<'a, R: RpcConnection, I: Indexer + TestIndexerExtensions>( rpc: &mut R, test_indexer: &'a mut I, recipients: &[Pubkey], diff --git a/program-tests/utils/src/e2e_test_env.rs b/program-tests/utils/src/e2e_test_env.rs index 0dab68a64..a745d0951 100644 --- a/program-tests/utils/src/e2e_test_env.rs +++ b/program-tests/utils/src/e2e_test_env.rs @@ -145,6 +145,7 @@ use crate::{ }, test_forester::{empty_address_queue_test, nullify_compressed_accounts}, }; +use crate::conversions::sdk_to_program_compressed_account_with_merkle_context; pub struct User { pub keypair: Keypair, @@ -2199,7 +2200,10 @@ where ) -> Vec { let input_compressed_accounts = self .indexer - .get_compressed_accounts_by_owner(&self.users[user_index].keypair.pubkey()); + .get_compressed_accounts_with_merkle_context_by_owner(&self.users[user_index].keypair.pubkey()) + .into_iter() + .map(sdk_to_program_compressed_account_with_merkle_context) + .collect::>(); if input_compressed_accounts.is_empty() { return vec![]; } @@ -2236,7 +2240,10 @@ where &self, pubkey: &Pubkey, ) -> Vec { - self.indexer.get_compressed_accounts_by_owner(pubkey) + self.indexer.get_compressed_accounts_with_merkle_context_by_owner(pubkey) + .into_iter() + .map(sdk_to_program_compressed_account_with_merkle_context) + .collect() } pub fn get_merkle_tree_pubkeys(&mut self, num: u64) -> Vec { diff --git a/program-tests/utils/src/spl.rs b/program-tests/utils/src/spl.rs index 0237f8979..6fbd87198 100644 --- a/program-tests/utils/src/spl.rs +++ b/program-tests/utils/src/spl.rs @@ -1566,7 +1566,7 @@ pub async fn freeze_or_thaw_test>() .as_slice(), change_lamports, From 742147e5aabccd3db7133dc367fa0ef550db8b33 Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Thu, 9 Jan 2025 13:44:43 +0000 Subject: [PATCH 12/27] program-tests/* refactoring wip --- Cargo.lock | 14 +- js/compressed-token/src/idl.ts | 306 ++---------------- .../account-compression-test/Cargo.toml | 
1 + .../tests/address_merkle_tree_tests.rs | 3 +- .../compressed-token-test/Cargo.toml | 9 +- .../compressed-token-test/tests/test.rs | 198 ++++++++---- program-tests/e2e-test/tests/test.rs | 2 +- program-tests/registry-test/Cargo.toml | 4 +- program-tests/registry-test/tests/tests.rs | 14 +- .../programs/sdk-test/tests/test.rs | 15 +- program-tests/utils/src/lib.rs | 2 +- sdk-libs/sdk/src/lib.rs | 2 +- 12 files changed, 181 insertions(+), 389 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 483f6da9e..dbc067d7b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -53,6 +53,7 @@ dependencies = [ "light-batched-merkle-tree", "light-bloom-filter", "light-bounded-vec", + "light-client", "light-compressed-token", "light-concurrent-merkle-tree", "light-hash-set", @@ -1243,21 +1244,16 @@ dependencies = [ "account-compression", "anchor-lang", "anchor-spl", + "light-client", "light-compressed-token", - "light-concurrent-merkle-tree", - "light-hasher", "light-program-test", "light-prover-client", + "light-sdk", "light-system-program", "light-test-utils", - "light-utils 1.1.0", "light-verifier", - "num-bigint 0.4.6", - "num-traits", "rand 0.8.5", - "reqwest 0.11.27", "serial_test", - "solana-program-test", "solana-sdk", "spl-token", "tokio", @@ -4512,6 +4508,7 @@ dependencies = [ "anchor-spl", "forester-utils", "light-batched-merkle-tree", + "light-client", "light-compressed-token", "light-concurrent-merkle-tree", "light-hasher", @@ -4519,17 +4516,16 @@ dependencies = [ "light-program-test", "light-prover-client", "light-registry", + "light-sdk", "light-system-program", "light-test-utils", "light-utils 1.1.0", "light-verifier", "num-bigint 0.4.6", "num-traits", - "reqwest 0.11.27", "serde_json", "serial_test", "solana-cli-output", - "solana-program-test", "solana-sdk", "spl-token", "tokio", diff --git a/js/compressed-token/src/idl.ts b/js/compressed-token/src/idl.ts index 3dc084e1b..909301341 100644 --- a/js/compressed-token/src/idl.ts +++ b/js/compressed-token/src/idl.ts @@ -91,7 +91,7 @@ export type LightCompressedToken = { ]; args: [ { - name: 'tokenPoolBump'; + name: 'tokenPoolIndex'; type: 'u8'; }, ]; @@ -1581,158 +1581,23 @@ export type LightCompressedToken = { errors: [ { code: 6000; - name: 'PublicKeyAmountMissmatch'; - msg: 'public keys and amounts must be of same length'; + name: 'SignerCheckFailed'; + msg: 'Signer check failed'; }, { code: 6001; - name: 'ComputeInputSumFailed'; - msg: 'ComputeInputSumFailed'; + name: 'CreateTransferInstructionFailed'; + msg: 'Create transfer instruction failed'; }, { code: 6002; - name: 'ComputeOutputSumFailed'; - msg: 'ComputeOutputSumFailed'; + name: 'AccountNotFound'; + msg: 'Account not found'; }, { code: 6003; - name: 'ComputeCompressSumFailed'; - msg: 'ComputeCompressSumFailed'; - }, - { - code: 6004; - name: 'ComputeDecompressSumFailed'; - msg: 'ComputeDecompressSumFailed'; - }, - { - code: 6005; - name: 'SumCheckFailed'; - msg: 'SumCheckFailed'; - }, - { - code: 6006; - name: 'DecompressRecipientUndefinedForDecompress'; - msg: 'DecompressRecipientUndefinedForDecompress'; - }, - { - code: 6007; - name: 'CompressedPdaUndefinedForDecompress'; - msg: 'CompressedPdaUndefinedForDecompress'; - }, - { - code: 6008; - name: 'DeCompressAmountUndefinedForDecompress'; - msg: 'DeCompressAmountUndefinedForDecompress'; - }, - { - code: 6009; - name: 'CompressedPdaUndefinedForCompress'; - msg: 'CompressedPdaUndefinedForCompress'; - }, - { - code: 6010; - name: 'DeCompressAmountUndefinedForCompress'; - msg: 'DeCompressAmountUndefinedForCompress'; - }, - { - code: 
6011; - name: 'DelegateSignerCheckFailed'; - msg: 'DelegateSignerCheckFailed'; - }, - { - code: 6012; - name: 'MintTooLarge'; - msg: 'Minted amount greater than u64::MAX'; - }, - { - code: 6013; - name: 'SplTokenSupplyMismatch'; - msg: 'SplTokenSupplyMismatch'; - }, - { - code: 6014; - name: 'HeapMemoryCheckFailed'; - msg: 'HeapMemoryCheckFailed'; - }, - { - code: 6015; - name: 'InstructionNotCallable'; - msg: 'The instruction is not callable'; - }, - { - code: 6016; - name: 'ArithmeticUnderflow'; - msg: 'ArithmeticUnderflow'; - }, - { - code: 6017; - name: 'HashToFieldError'; - msg: 'HashToFieldError'; - }, - { - code: 6018; - name: 'InvalidAuthorityMint'; - msg: 'Expected the authority to be also a mint authority'; - }, - { - code: 6019; - name: 'InvalidFreezeAuthority'; - msg: 'Provided authority is not the freeze authority'; - }, - { - code: 6020; - name: 'InvalidDelegateIndex'; - }, - { - code: 6021; - name: 'TokenPoolPdaUndefined'; - }, - { - code: 6022; - name: 'IsTokenPoolPda'; - msg: 'Compress or decompress recipient is the same account as the token pool pda.'; - }, - { - code: 6023; - name: 'InvalidTokenPoolPda'; - }, - { - code: 6024; - name: 'NoInputTokenAccountsProvided'; - }, - { - code: 6025; - name: 'NoInputsProvided'; - }, - { - code: 6026; - name: 'MintHasNoFreezeAuthority'; - }, - { - code: 6027; - name: 'MintWithInvalidExtension'; - }, - { - code: 6028; - name: 'InsufficientTokenAccountBalance'; - msg: 'The token account balance is less than the remaining amount.'; - }, - { - code: 6029; - name: 'InvalidTokenPoolBump'; - msg: 'Max number of token pools reached.'; - }, - { - code: 6030; - name: 'FailedToDecompress'; - }, - { - code: 6031; - name: 'FailedToBurnSplTokensFromTokenPool'; - }, - { - code: 6032; - name: 'NoMatchingBumpFound'; + name: 'SerializationError'; + msg: 'Serialization error'; }, ]; }; @@ -1829,7 +1694,7 @@ export const IDL: LightCompressedToken = { ], args: [ { - name: 'tokenPoolBump', + name: 'tokenPoolIndex', type: 'u8', }, ], @@ -3324,158 +3189,23 @@ export const IDL: LightCompressedToken = { errors: [ { code: 6000, - name: 'PublicKeyAmountMissmatch', - msg: 'public keys and amounts must be of same length', + name: 'SignerCheckFailed', + msg: 'Signer check failed', }, { code: 6001, - name: 'ComputeInputSumFailed', - msg: 'ComputeInputSumFailed', + name: 'CreateTransferInstructionFailed', + msg: 'Create transfer instruction failed', }, { code: 6002, - name: 'ComputeOutputSumFailed', - msg: 'ComputeOutputSumFailed', + name: 'AccountNotFound', + msg: 'Account not found', }, { code: 6003, - name: 'ComputeCompressSumFailed', - msg: 'ComputeCompressSumFailed', - }, - { - code: 6004, - name: 'ComputeDecompressSumFailed', - msg: 'ComputeDecompressSumFailed', - }, - { - code: 6005, - name: 'SumCheckFailed', - msg: 'SumCheckFailed', - }, - { - code: 6006, - name: 'DecompressRecipientUndefinedForDecompress', - msg: 'DecompressRecipientUndefinedForDecompress', - }, - { - code: 6007, - name: 'CompressedPdaUndefinedForDecompress', - msg: 'CompressedPdaUndefinedForDecompress', - }, - { - code: 6008, - name: 'DeCompressAmountUndefinedForDecompress', - msg: 'DeCompressAmountUndefinedForDecompress', - }, - { - code: 6009, - name: 'CompressedPdaUndefinedForCompress', - msg: 'CompressedPdaUndefinedForCompress', - }, - { - code: 6010, - name: 'DeCompressAmountUndefinedForCompress', - msg: 'DeCompressAmountUndefinedForCompress', - }, - { - code: 6011, - name: 'DelegateSignerCheckFailed', - msg: 'DelegateSignerCheckFailed', - }, - { - code: 6012, - name: 'MintTooLarge', 
- msg: 'Minted amount greater than u64::MAX', - }, - { - code: 6013, - name: 'SplTokenSupplyMismatch', - msg: 'SplTokenSupplyMismatch', - }, - { - code: 6014, - name: 'HeapMemoryCheckFailed', - msg: 'HeapMemoryCheckFailed', - }, - { - code: 6015, - name: 'InstructionNotCallable', - msg: 'The instruction is not callable', - }, - { - code: 6016, - name: 'ArithmeticUnderflow', - msg: 'ArithmeticUnderflow', - }, - { - code: 6017, - name: 'HashToFieldError', - msg: 'HashToFieldError', - }, - { - code: 6018, - name: 'InvalidAuthorityMint', - msg: 'Expected the authority to be also a mint authority', - }, - { - code: 6019, - name: 'InvalidFreezeAuthority', - msg: 'Provided authority is not the freeze authority', - }, - { - code: 6020, - name: 'InvalidDelegateIndex', - }, - { - code: 6021, - name: 'TokenPoolPdaUndefined', - }, - { - code: 6022, - name: 'IsTokenPoolPda', - msg: 'Compress or decompress recipient is the same account as the token pool pda.', - }, - { - code: 6023, - name: 'InvalidTokenPoolPda', - }, - { - code: 6024, - name: 'NoInputTokenAccountsProvided', - }, - { - code: 6025, - name: 'NoInputsProvided', - }, - { - code: 6026, - name: 'MintHasNoFreezeAuthority', - }, - { - code: 6027, - name: 'MintWithInvalidExtension', - }, - { - code: 6028, - name: 'InsufficientTokenAccountBalance', - msg: 'The token account balance is less than the remaining amount.', - }, - { - code: 6029, - name: 'InvalidTokenPoolBump', - msg: 'Max number of token pools reached.', - }, - { - code: 6030, - name: 'FailedToDecompress', - }, - { - code: 6031, - name: 'FailedToBurnSplTokensFromTokenPool', - }, - { - code: 6032, - name: 'NoMatchingBumpFound', + name: 'SerializationError', + msg: 'Serialization error', }, ], }; diff --git a/program-tests/account-compression-test/Cargo.toml b/program-tests/account-compression-test/Cargo.toml index f902ce92b..a1e948d08 100644 --- a/program-tests/account-compression-test/Cargo.toml +++ b/program-tests/account-compression-test/Cargo.toml @@ -25,6 +25,7 @@ ark-ff = "0.4.0" solana-program-test = { workspace = true} light-test-utils = { workspace = true, features=["devenv"] } light-program-test = { workspace = true, features = ["devenv"] } +light-client = { workspace = true } reqwest = "0.11.26" tokio = { workspace = true } light-prover-client = {workspace = true } diff --git a/program-tests/account-compression-test/tests/address_merkle_tree_tests.rs b/program-tests/account-compression-test/tests/address_merkle_tree_tests.rs index 1d67a42ba..6aabbc415 100644 --- a/program-tests/account-compression-test/tests/address_merkle_tree_tests.rs +++ b/program-tests/account-compression-test/tests/address_merkle_tree_tests.rs @@ -28,7 +28,7 @@ use light_test_utils::{ create_address_merkle_tree_and_queue_account_with_assert, get_hash_set, get_indexed_merkle_tree, test_forester::{empty_address_queue_test, update_merkle_tree}, - AddressMerkleTreeAccounts, AddressMerkleTreeBundle, FeeConfig, RpcConnection, RpcError, + FeeConfig, RpcConnection, RpcError, }; use light_utils::{bigint::bigint_to_be_bytes_array, UtilsError}; use num_bigint::ToBigUint; @@ -39,6 +39,7 @@ use solana_sdk::{ signature::{Keypair, Signature, Signer}, transaction::Transaction, }; +use light_client::indexer::{AddressMerkleTreeAccounts, AddressMerkleTreeBundle}; /// Tests insertion of addresses to the queue, dequeuing and Merkle tree update. /// 1. 
create address Merkle tree and queue accounts diff --git a/program-tests/compressed-token-test/Cargo.toml b/program-tests/compressed-token-test/Cargo.toml index 99be0fe0b..1775c35a1 100644 --- a/program-tests/compressed-token-test/Cargo.toml +++ b/program-tests/compressed-token-test/Cargo.toml @@ -22,23 +22,18 @@ anchor-lang = { workspace = true } light-compressed-token = { workspace = true } light-system-program = { workspace = true } account-compression = { workspace = true } -light-hasher = {workspace = true} -light-concurrent-merkle-tree = {workspace = true} -light-utils = {workspace = true} +light-client = { workspace = true } +light-sdk = { workspace = true } light-verifier = {workspace = true} [target.'cfg(not(target_os = "solana"))'.dependencies] solana-sdk = { workspace = true } [dev-dependencies] -solana-program-test = { workspace = true } light-test-utils = { workspace = true, features=["devenv"] } light-program-test = { workspace = true, features = ["devenv"] } -reqwest = "0.11.26" tokio = { workspace = true } light-prover-client = {workspace = true } -num-bigint = "0.4.6" -num-traits = "0.2.19" spl-token = { workspace = true } anchor-spl = { workspace = true } rand = "0.8" diff --git a/program-tests/compressed-token-test/tests/test.rs b/program-tests/compressed-token-test/tests/test.rs index ef9820066..ef13572e5 100644 --- a/program-tests/compressed-token-test/tests/test.rs +++ b/program-tests/compressed-token-test/tests/test.rs @@ -9,6 +9,7 @@ use anchor_spl::{ token::{Mint, TokenAccount}, token_2022::{spl_token_2022, spl_token_2022::extension::ExtensionType}, }; +use light_test_utils::conversions::{sdk_to_program_compressed_account, sdk_to_program_compressed_account_with_merkle_context, sdk_to_program_compressed_proof, sdk_to_program_merkle_context, sdk_to_program_token_data}; use light_compressed_token::{ constants::NUM_MAX_POOL_ACCOUNTS, delegation::sdk::{ @@ -22,7 +23,7 @@ use light_compressed_token::{ get_cpi_authority_pda, transfer_sdk::create_transfer_instruction, TokenTransferOutputData, }, spl_compression::check_spl_token_pool_derivation_with_index, - token_data::{AccountState, TokenData}, + token_data::TokenData, ErrorCode, }; use light_program_test::{ @@ -36,7 +37,6 @@ use light_system_program::{ use light_test_utils::{ airdrop_lamports, assert_custom_error_or_program_error, assert_rpc_error, create_account_instruction, - indexer::TestIndexer, spl::{ approve_test, burn_test, compress_test, compressed_transfer_22_test, compressed_transfer_test, create_additional_token_pools, create_burn_test_instruction, @@ -46,7 +46,7 @@ use light_test_utils::{ mint_tokens_helper_with_lamports, mint_wrapped_sol, perform_compress_spl_token_account, revoke_test, thaw_test, BurnInstructionMode, }, - Indexer, RpcConnection, RpcError, TokenDataWithContext, + RpcConnection, RpcError, }; use light_verifier::VerifierError; use rand::{seq::SliceRandom, thread_rng, Rng}; @@ -60,6 +60,9 @@ use solana_sdk::{ transaction::{Transaction, TransactionError}, }; use spl_token::{error::TokenError, instruction::initialize_mint}; +use light_client::indexer::Indexer; +use light_program_test::indexer::{TestIndexer, TestIndexerExtensions}; +use light_sdk::token::{AccountState, TokenDataWithMerkleContext}; #[serial] #[tokio::test] @@ -1550,9 +1553,9 @@ async fn test_decompression() { kill_prover(); } -pub async fn mint_tokens_to_all_token_pools( +pub async fn mint_tokens_to_all_token_pools + TestIndexerExtensions>( rpc: &mut R, - test_indexer: &mut TestIndexer, + test_indexer: &mut I, merkle_tree_pubkey: 
&Pubkey, mint_authority: &Keypair, mint: &Pubkey, @@ -1925,7 +1928,7 @@ async fn test_delegation( .iter() .filter(|x| x.token_data.delegate.is_some()) .cloned() - .collect::>(); + .collect::>(); compressed_transfer_test( &delegate, &mut rpc, @@ -1951,7 +1954,7 @@ async fn test_delegation( .iter() .filter(|x| x.token_data.delegate.is_some()) .cloned() - .collect::>(); + .collect::>(); compressed_transfer_test( &delegate, &mut rpc, @@ -2059,7 +2062,7 @@ async fn test_delegation_mixed() { .iter() .filter(|x| x.token_data.delegate.is_some()) .cloned() - .collect::>(); + .collect::>(); let delegate_input_compressed_accounts = test_indexer.get_compressed_token_accounts_by_owner(&delegate.pubkey()); input_compressed_accounts @@ -2098,7 +2101,7 @@ async fn test_delegation_mixed() { .iter() .filter(|x| x.token_data.delegate.is_some()) .cloned() - .collect::>(); + .collect::>(); let delegate_input_compressed_accounts = test_indexer.get_compressed_token_accounts_by_owner(&delegate.pubkey()); input_compressed_accounts @@ -2139,7 +2142,7 @@ async fn test_delegation_mixed() { .iter() .filter(|x| x.token_data.delegate.is_some()) .cloned() - .collect::>(); + .collect::>(); let delegate_input_compressed_accounts = test_indexer.get_compressed_token_accounts_by_owner(&delegate.pubkey()); @@ -2281,15 +2284,17 @@ async fn test_approve_failing() { input_merkle_contexts: input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context) + .map(sdk_to_program_merkle_context) .collect(), input_token_data: input_compressed_accounts .iter() .map(|x| x.token_data.clone()) + .map(sdk_to_program_token_data) .collect(), input_compressed_accounts: input_compressed_accounts .iter() .map(|x| &x.compressed_account.compressed_account) - .cloned() + .map(|x| sdk_to_program_compressed_account(x.clone())) .collect::>(), mint, delegated_amount, @@ -2298,7 +2303,7 @@ async fn test_approve_failing() { change_compressed_account_merkle_tree: delegated_compressed_account_merkle_tree, delegate: delegate.pubkey(), root_indices: proof_rpc_result.root_indices.clone(), - proof: proof_rpc_result.proof.clone(), + proof: sdk_to_program_compressed_proof(proof_rpc_result.proof.clone()), }; let instruction = create_approve_instruction(inputs).unwrap(); let context_payer = rpc.get_payer().insecure_clone(); @@ -2328,15 +2333,17 @@ async fn test_approve_failing() { input_merkle_contexts: input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context) + .map(sdk_to_program_merkle_context) .collect(), input_token_data: input_compressed_accounts .iter() .map(|x| x.token_data.clone()) + .map(sdk_to_program_token_data) .collect(), input_compressed_accounts: input_compressed_accounts .iter() .map(|x| &x.compressed_account.compressed_account) - .cloned() + .map(|x| sdk_to_program_compressed_account(x.clone())) .collect::>(), mint, delegated_amount, @@ -2345,7 +2352,7 @@ async fn test_approve_failing() { change_compressed_account_merkle_tree: invalid_change_merkle_tree.pubkey(), delegate: delegate.pubkey(), root_indices: proof_rpc_result.root_indices.clone(), - proof: proof_rpc_result.proof.clone(), + proof: sdk_to_program_compressed_proof(proof_rpc_result.proof.clone()), }; let instruction = create_approve_instruction(inputs).unwrap(); let context_payer = rpc.get_payer().insecure_clone(); @@ -2379,15 +2386,17 @@ async fn test_approve_failing() { input_merkle_contexts: input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context) + .map(sdk_to_program_merkle_context) .collect(), input_token_data: 
input_compressed_accounts .iter() .map(|x| x.token_data.clone()) + .map(sdk_to_program_token_data) .collect(), input_compressed_accounts: input_compressed_accounts .iter() .map(|x| &x.compressed_account.compressed_account) - .cloned() + .map(|x| sdk_to_program_compressed_account(x.clone())) .collect::>(), mint, delegated_amount, @@ -2419,15 +2428,17 @@ async fn test_approve_failing() { input_merkle_contexts: input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context) + .map(sdk_to_program_merkle_context) .collect(), input_token_data: input_compressed_accounts .iter() .map(|x| x.token_data.clone()) + .map(sdk_to_program_token_data) .collect(), input_compressed_accounts: input_compressed_accounts .iter() .map(|x| &x.compressed_account.compressed_account) - .cloned() + .map(|x| sdk_to_program_compressed_account(x.clone())) .collect::>(), mint: invalid_mint.pubkey(), delegated_amount, @@ -2436,7 +2447,7 @@ async fn test_approve_failing() { change_compressed_account_merkle_tree: delegated_compressed_account_merkle_tree, delegate: delegate.pubkey(), root_indices: proof_rpc_result.root_indices.clone(), - proof: proof_rpc_result.proof.clone(), + proof: sdk_to_program_compressed_proof(proof_rpc_result.proof.clone()), }; let instruction = create_approve_instruction(inputs).unwrap(); let context_payer = rpc.get_payer().insecure_clone(); @@ -2462,15 +2473,17 @@ async fn test_approve_failing() { input_merkle_contexts: input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context) + .map(sdk_to_program_merkle_context) .collect(), input_token_data: input_compressed_accounts .iter() .map(|x| x.token_data.clone()) + .map(sdk_to_program_token_data) .collect(), input_compressed_accounts: input_compressed_accounts .iter() .map(|x| &x.compressed_account.compressed_account) - .cloned() + .map(|x| sdk_to_program_compressed_account(x.clone())) .collect::>(), mint, delegated_amount, @@ -2479,7 +2492,7 @@ async fn test_approve_failing() { change_compressed_account_merkle_tree: delegated_compressed_account_merkle_tree, delegate: delegate.pubkey(), root_indices: proof_rpc_result.root_indices.clone(), - proof: proof_rpc_result.proof.clone(), + proof: sdk_to_program_compressed_proof(proof_rpc_result.proof.clone()), }; let instruction = create_approve_instruction(inputs).unwrap(); let context_payer = rpc.get_payer().insecure_clone(); @@ -2562,12 +2575,12 @@ async fn test_revoke(num_inputs: usize, mint_amount: u64, delegated_amount: u64) .iter() .filter(|x| x.token_data.delegate.is_some()) .cloned() - .collect::>(); + .collect::>(); let input_compressed_accounts = input_compressed_accounts .iter() .filter(|x| x.token_data.delegate.is_some()) .cloned() - .collect::>(); + .collect::>(); let delegated_compressed_account_merkle_tree = input_compressed_accounts[0] .compressed_account .merkle_context @@ -2680,7 +2693,7 @@ async fn test_revoke_failing() { .iter() .filter(|x| x.token_data.delegate.is_some()) .cloned() - .collect::>(); + .collect::>(); let input_compressed_account_hashes = input_compressed_accounts .iter() @@ -2710,20 +2723,22 @@ async fn test_revoke_failing() { input_merkle_contexts: input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context) + .map(sdk_to_program_merkle_context) .collect(), input_token_data: input_compressed_accounts .iter() .map(|x| x.token_data.clone()) + .map(sdk_to_program_token_data) .collect(), input_compressed_accounts: input_compressed_accounts .iter() .map(|x| &x.compressed_account.compressed_account) - .cloned() + .map(|x| 
sdk_to_program_compressed_account(x.clone())) .collect::>(), mint, output_account_merkle_tree: merkle_tree_pubkey, root_indices: invalid_root_indices, - proof: proof_rpc_result.proof.clone(), + proof: sdk_to_program_compressed_proof(proof_rpc_result.proof.clone()), }; let instruction = create_revoke_instruction(inputs).unwrap(); let context_payer = rpc.get_payer().insecure_clone(); @@ -2746,20 +2761,22 @@ async fn test_revoke_failing() { input_merkle_contexts: input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context) + .map(sdk_to_program_merkle_context) .collect(), input_token_data: input_compressed_accounts .iter() .map(|x| x.token_data.clone()) + .map(sdk_to_program_token_data) .collect(), input_compressed_accounts: input_compressed_accounts .iter() .map(|x| &x.compressed_account.compressed_account) - .cloned() + .map(|x| sdk_to_program_compressed_account(x.clone())) .collect::>(), mint, output_account_merkle_tree: invalid_merkle_tree.pubkey(), root_indices: proof_rpc_result.root_indices.clone(), - proof: proof_rpc_result.proof.clone(), + proof: sdk_to_program_compressed_proof(proof_rpc_result.proof.clone()), }; let instruction = create_revoke_instruction(inputs).unwrap(); let context_payer = rpc.get_payer().insecure_clone(); @@ -2791,20 +2808,22 @@ async fn test_revoke_failing() { input_merkle_contexts: input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context) + .map(sdk_to_program_merkle_context) .collect(), input_token_data: input_compressed_accounts .iter() .map(|x| x.token_data.clone()) + .map(sdk_to_program_token_data) .collect(), input_compressed_accounts: input_compressed_accounts .iter() .map(|x| &x.compressed_account.compressed_account) - .cloned() + .map(|x| sdk_to_program_compressed_account(x.clone())) .collect::>(), mint: invalid_mint.pubkey(), output_account_merkle_tree: merkle_tree_pubkey, root_indices: proof_rpc_result.root_indices, - proof: proof_rpc_result.proof, + proof: sdk_to_program_compressed_proof(proof_rpc_result.proof), }; let instruction = create_revoke_instruction(inputs).unwrap(); let context_payer = rpc.get_payer().insecure_clone(); @@ -2924,7 +2943,7 @@ async fn test_burn() { .iter() .filter(|x| x.token_data.delegate.is_some()) .cloned() - .collect::>(); + .collect::>(); let burn_amount = 100; let change_account_merkle_tree = input_compressed_accounts[0] .compressed_account @@ -2952,7 +2971,7 @@ async fn test_burn() { .iter() .filter(|x| x.token_data.delegate.is_some()) .cloned() - .collect::>(); + .collect::>(); let burn_amount = input_compressed_accounts .iter() .map(|x| x.token_data.amount) @@ -3210,7 +3229,7 @@ async fn failing_tests_burn() { .iter() .filter(|x| x.token_data.delegate.is_some()) .cloned() - .collect::>(); + .collect::>(); let burn_amount = 1; let change_account_merkle_tree = input_compressed_accounts[0] .compressed_account @@ -3468,7 +3487,7 @@ async fn test_freeze_and_thaw(mint_amount: u64, delegated_amount: u64) { .iter() .filter(|x| x.token_data.state == AccountState::Frozen) .cloned() - .collect::>(); + .collect::>(); let output_merkle_tree = input_compressed_accounts[0] .compressed_account .merkle_context @@ -3532,7 +3551,7 @@ async fn test_freeze_and_thaw(mint_amount: u64, delegated_amount: u64) { .iter() .filter(|x| x.token_data.state == AccountState::Frozen) .cloned() - .collect::>(); + .collect::>(); let output_merkle_tree = input_compressed_accounts[0] .compressed_account .merkle_context @@ -3648,19 +3667,21 @@ async fn test_failing_freeze() { input_merkle_contexts: 
input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context) + .map(sdk_to_program_merkle_context) .collect(), input_token_data: input_compressed_accounts .iter() .map(|x| x.token_data.clone()) + .map(sdk_to_program_token_data) .collect(), input_compressed_accounts: input_compressed_accounts .iter() .map(|x| &x.compressed_account.compressed_account) - .cloned() + .map(|x| sdk_to_program_compressed_account(x.clone())) .collect::>(), outputs_merkle_tree, root_indices: proof_rpc_result.root_indices.clone(), - proof: proof_rpc_result.proof.clone(), + proof: sdk_to_program_compressed_proof(proof_rpc_result.proof.clone()), }; let instruction = create_instruction::(inputs).unwrap(); let result = rpc @@ -3682,19 +3703,21 @@ async fn test_failing_freeze() { input_merkle_contexts: input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context) + .map(sdk_to_program_merkle_context) .collect(), input_token_data: input_compressed_accounts .iter() .map(|x| x.token_data.clone()) + .map(sdk_to_program_token_data) .collect(), input_compressed_accounts: input_compressed_accounts .iter() .map(|x| &x.compressed_account.compressed_account) - .cloned() + .map(|x| sdk_to_program_compressed_account(x.clone())) .collect::>(), outputs_merkle_tree: invalid_merkle_tree.pubkey(), root_indices: proof_rpc_result.root_indices.clone(), - proof: proof_rpc_result.proof.clone(), + proof: sdk_to_program_compressed_proof(proof_rpc_result.proof.clone()), }; let instruction = create_instruction::(inputs).unwrap(); let result = rpc @@ -3730,15 +3753,17 @@ async fn test_failing_freeze() { input_merkle_contexts: input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context) + .map(sdk_to_program_merkle_context) .collect(), input_token_data: input_compressed_accounts .iter() .map(|x| x.token_data.clone()) + .map(sdk_to_program_token_data) .collect(), input_compressed_accounts: input_compressed_accounts .iter() .map(|x| &x.compressed_account.compressed_account) - .cloned() + .map(|x| sdk_to_program_compressed_account(x.clone())) .collect::>(), outputs_merkle_tree, root_indices: proof_rpc_result.root_indices.clone(), @@ -3770,7 +3795,7 @@ async fn test_failing_freeze() { .iter() .filter(|x| x.token_data.state == AccountState::Frozen) .cloned() - .collect::>()[0] + .collect::>()[0] .clone()]; let outputs_merkle_tree = input_compressed_accounts[0] .compressed_account @@ -3800,19 +3825,21 @@ async fn test_failing_freeze() { input_merkle_contexts: input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context) + .map(sdk_to_program_merkle_context) .collect(), input_token_data: input_compressed_accounts .iter() .map(|x| x.token_data.clone()) + .map(sdk_to_program_token_data) .collect(), input_compressed_accounts: input_compressed_accounts .iter() .map(|x| &x.compressed_account.compressed_account) - .cloned() + .map(|x| sdk_to_program_compressed_account(x.clone())) .collect::>(), outputs_merkle_tree, root_indices: proof_rpc_result.root_indices.clone(), - proof: proof_rpc_result.proof.clone(), + proof: sdk_to_program_compressed_proof(proof_rpc_result.proof.clone()), }; let instruction = create_instruction::(inputs).unwrap(); let result = rpc @@ -3903,7 +3930,7 @@ async fn test_failing_thaw() { .iter() .filter(|x| x.token_data.state == AccountState::Frozen) .cloned() - .collect::>(); + .collect::>(); let outputs_merkle_tree = input_compressed_accounts[0] .compressed_account .merkle_context @@ -3938,19 +3965,21 @@ async fn test_failing_thaw() { input_merkle_contexts: 
input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context) + .map(sdk_to_program_merkle_context) .collect(), input_token_data: input_compressed_accounts .iter() .map(|x| x.token_data.clone()) + .map(sdk_to_program_token_data) .collect(), input_compressed_accounts: input_compressed_accounts .iter() .map(|x| &x.compressed_account.compressed_account) - .cloned() + .map(|x| sdk_to_program_compressed_account(x.clone())) .collect::>(), outputs_merkle_tree, root_indices: proof_rpc_result.root_indices.clone(), - proof: proof_rpc_result.proof.clone(), + proof: sdk_to_program_compressed_proof(proof_rpc_result.proof.clone()), }; let instruction = create_instruction::(inputs).unwrap(); let result = rpc @@ -3972,19 +4001,21 @@ async fn test_failing_thaw() { input_merkle_contexts: input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context) + .map(sdk_to_program_merkle_context) .collect(), input_token_data: input_compressed_accounts .iter() .map(|x| x.token_data.clone()) + .map(sdk_to_program_token_data) .collect(), input_compressed_accounts: input_compressed_accounts .iter() .map(|x| &x.compressed_account.compressed_account) - .cloned() + .map(|x| sdk_to_program_compressed_account(x.clone())) .collect::>(), outputs_merkle_tree: invalid_merkle_tree.pubkey(), root_indices: proof_rpc_result.root_indices.clone(), - proof: proof_rpc_result.proof.clone(), + proof: sdk_to_program_compressed_proof(proof_rpc_result.proof.clone()), }; let instruction = create_instruction::(inputs).unwrap(); let result = rpc @@ -4020,15 +4051,17 @@ async fn test_failing_thaw() { input_merkle_contexts: input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context) + .map(sdk_to_program_merkle_context) .collect(), input_token_data: input_compressed_accounts .iter() .map(|x| x.token_data.clone()) + .map(sdk_to_program_token_data) .collect(), input_compressed_accounts: input_compressed_accounts .iter() .map(|x| &x.compressed_account.compressed_account) - .cloned() + .map(|x| sdk_to_program_compressed_account(x.clone())) .collect::>(), outputs_merkle_tree, root_indices: proof_rpc_result.root_indices.clone(), @@ -4052,7 +4085,7 @@ async fn test_failing_thaw() { .iter() .filter(|x| x.token_data.state == AccountState::Initialized) .cloned() - .collect::>(); + .collect::>(); let outputs_merkle_tree = input_compressed_accounts[0] .compressed_account .merkle_context @@ -4081,19 +4114,21 @@ async fn test_failing_thaw() { input_merkle_contexts: input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context) + .map(sdk_to_program_merkle_context) .collect(), input_token_data: input_compressed_accounts .iter() .map(|x| x.token_data.clone()) + .map(sdk_to_program_token_data) .collect(), input_compressed_accounts: input_compressed_accounts .iter() .map(|x| &x.compressed_account.compressed_account) - .cloned() + .map(|x| sdk_to_program_compressed_account(x.clone())) .collect::>(), outputs_merkle_tree, root_indices: proof_rpc_result.root_indices.clone(), - proof: proof_rpc_result.proof.clone(), + proof: sdk_to_program_compressed_proof(proof_rpc_result.proof.clone()), }; let instruction = create_instruction::(inputs).unwrap(); let result = rpc @@ -4651,11 +4686,11 @@ async fn test_failing_decompression() { } #[allow(clippy::too_many_arguments)] -pub async fn failing_compress_decompress( +pub async fn failing_compress_decompress + TestIndexerExtensions>( payer: &Keypair, rpc: &mut R, - test_indexer: &mut TestIndexer, - input_compressed_accounts: Vec, + test_indexer: &mut I, + 
input_compressed_accounts: Vec, amount: u64, output_merkle_tree_pubkey: &Pubkey, compression_amount: u64, @@ -4709,25 +4744,33 @@ pub async fn failing_compress_decompress( } else { (Vec::new(), None) }; + + let mut _proof = None; + if let Some(proof) = proof { + _proof = Some(sdk_to_program_compressed_proof(proof)); + } + let instruction = create_transfer_instruction( &rpc.get_payer().pubkey(), &payer.pubkey(), &input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context) + .map(sdk_to_program_merkle_context) .collect::>(), &[change_out_compressed_account], &root_indices, - &proof, + &_proof, input_compressed_accounts .iter() .map(|x| x.token_data.clone()) + .map(sdk_to_program_token_data) .collect::>() .as_slice(), &input_compressed_accounts .iter() .map(|x| &x.compressed_account.compressed_account) - .cloned() + .map(|x| sdk_to_program_compressed_account(x.clone())) .collect::>(), *mint, None, @@ -4852,6 +4895,15 @@ async fn test_invalid_inputs() { &mut rpc, ) .await; + let proof = Some(sdk_to_program_compressed_proof( + proof_rpc_result.proof.clone(), + )); + + let input_compressed_accounts = input_compressed_accounts + .iter() + .map(|x| sdk_to_program_compressed_account_with_merkle_context(x.clone())) + .collect::>(); + let change_out_compressed_account_0 = TokenTransferOutputData { amount: input_compressed_account_token_data.amount - 1000, owner: recipient_keypair.pubkey(), @@ -4868,6 +4920,7 @@ async fn test_invalid_inputs() { let mut transfer_recipient_out_compressed_account_0 = transfer_recipient_out_compressed_account_0; transfer_recipient_out_compressed_account_0.amount += 1; + // Test 1: invalid token data amount (+ 1) let res = perform_transfer_failing_test( &mut rpc, @@ -4876,7 +4929,7 @@ async fn test_invalid_inputs() { &merkle_tree_pubkey, &nullifier_queue_pubkey, &recipient_keypair, - &Some(proof_rpc_result.proof.clone()), + &Some(sdk_to_program_compressed_proof(proof_rpc_result.proof.clone())), &proof_rpc_result.root_indices, &input_compressed_accounts, false, @@ -4901,7 +4954,7 @@ async fn test_invalid_inputs() { &merkle_tree_pubkey, &nullifier_queue_pubkey, &recipient_keypair, - &Some(proof_rpc_result.proof.clone()), + &Some(sdk_to_program_compressed_proof(proof_rpc_result.proof.clone())), &proof_rpc_result.root_indices, &input_compressed_accounts, false, @@ -4926,7 +4979,7 @@ async fn test_invalid_inputs() { &merkle_tree_pubkey, &nullifier_queue_pubkey, &recipient_keypair, - &Some(proof_rpc_result.proof.clone()), + &proof, &proof_rpc_result.root_indices, &input_compressed_accounts, false, @@ -4950,7 +5003,7 @@ async fn test_invalid_inputs() { &merkle_tree_pubkey, &nullifier_queue_pubkey, &recipient_keypair, - &Some(proof_rpc_result.proof.clone()), + &Some(sdk_to_program_compressed_proof(proof_rpc_result.proof.clone())), &proof_rpc_result.root_indices, &input_compressed_accounts, false, @@ -4974,7 +5027,7 @@ async fn test_invalid_inputs() { &merkle_tree_pubkey, &nullifier_queue_pubkey, &recipient_keypair, - &Some(proof_rpc_result.proof.clone()), + &Some(sdk_to_program_compressed_proof(proof_rpc_result.proof.clone())), &proof_rpc_result.root_indices, &input_compressed_accounts, false, @@ -4988,6 +5041,8 @@ async fn test_invalid_inputs() { let mut input_compressed_account_token_data_invalid_amount = test_indexer.token_compressed_accounts[0].token_data.clone(); input_compressed_account_token_data_invalid_amount.amount = 0; + let input_compressed_account_token_data_invalid_amount = + 
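    // Shadowing rebind: the SDK-side TokenData mutated above is converted to
    // the program-side TokenData before it is re-serialized into the
    // compressed account's data below, so the bytes match what the on-chain
    // program expects.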
sdk_to_program_token_data(input_compressed_account_token_data_invalid_amount); let mut input_compressed_accounts = vec![test_indexer.token_compressed_accounts[0] .compressed_account .clone()]; @@ -5002,6 +5057,10 @@ async fn test_invalid_inputs() { .as_mut_slice(), ) .unwrap(); + let input_compressed_accounts = input_compressed_accounts + .iter() + .map(|x| sdk_to_program_compressed_account_with_merkle_context(x.clone())) + .collect::>(); let change_out_compressed_account_0 = TokenTransferOutputData { amount: input_compressed_account_token_data.amount - 1000, owner: recipient_keypair.pubkey(), @@ -5022,7 +5081,7 @@ async fn test_invalid_inputs() { &merkle_tree_pubkey, &nullifier_queue_pubkey, &recipient_keypair, - &Some(proof_rpc_result.proof.clone()), + &proof, &proof_rpc_result.root_indices, &input_compressed_accounts, false, @@ -5039,6 +5098,13 @@ async fn test_invalid_inputs() { let mut input_compressed_accounts = vec![test_indexer.token_compressed_accounts[0] .compressed_account .clone()]; + let mut input_compressed_accounts = input_compressed_accounts + .iter() + .map(|x| sdk_to_program_compressed_account_with_merkle_context(x.clone())) + .collect::>(); + let input_compressed_account_token_data = + sdk_to_program_token_data(input_compressed_account_token_data); + let mut vec = Vec::new(); crate::TokenData::serialize(&input_compressed_account_token_data, &mut vec).unwrap(); input_compressed_accounts[0] @@ -5054,7 +5120,7 @@ async fn test_invalid_inputs() { &merkle_tree_pubkey, &nullifier_queue_pubkey, &recipient_keypair, - &Some(proof_rpc_result.proof.clone()), + &proof, &proof_rpc_result.root_indices, &input_compressed_accounts, false, @@ -5073,7 +5139,7 @@ async fn test_invalid_inputs() { &merkle_tree_pubkey, &nullifier_queue_pubkey, &invalid_payer, - &Some(proof_rpc_result.proof.clone()), + &proof, &proof_rpc_result.root_indices, &input_compressed_accounts, false, @@ -5094,7 +5160,7 @@ async fn test_invalid_inputs() { &merkle_tree_pubkey, &nullifier_queue_pubkey, &payer, - &Some(proof_rpc_result.proof.clone()), + &proof, &root_indices, &input_compressed_accounts, false, @@ -5112,7 +5178,7 @@ async fn test_invalid_inputs() { &merkle_tree_pubkey, &nullifier_queue_pubkey, &payer, - &Some(proof_rpc_result.proof.clone()), + &proof, &proof_rpc_result.root_indices, &input_compressed_accounts, true, @@ -5130,7 +5196,7 @@ async fn test_invalid_inputs() { &nullifier_queue_pubkey, &nullifier_queue_pubkey, &payer, - &Some(proof_rpc_result.proof.clone()), + &proof, &proof_rpc_result.root_indices, &input_compressed_accounts, false, diff --git a/program-tests/e2e-test/tests/test.rs b/program-tests/e2e-test/tests/test.rs index 14bf08268..58544f427 100644 --- a/program-tests/e2e-test/tests/test.rs +++ b/program-tests/e2e-test/tests/test.rs @@ -8,11 +8,11 @@ use light_program_test::{ test_env::setup_test_programs_with_accounts_with_protocol_config_and_batched_tree_params, test_rpc::ProgramTestRpcConnection, }; +use light_program_test::indexer::TestIndexer; use light_prover_client::gnark::helpers::{ProofType, ProverConfig}; use light_registry::protocol_config::state::ProtocolConfig; use light_test_utils::{ e2e_test_env::{E2ETestEnv, GeneralActionConfig, KeypairActionConfig}, - indexer::TestIndexer, }; #[tokio::test] diff --git a/program-tests/registry-test/Cargo.toml b/program-tests/registry-test/Cargo.toml index f1419087f..7d127c879 100644 --- a/program-tests/registry-test/Cargo.toml +++ b/program-tests/registry-test/Cargo.toml @@ -21,12 +21,12 @@ default = ["custom-heap"] [dev-dependencies] 
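# The dev-dependency changes below follow the same pattern as the other test
# crates in this series: direct solana-program-test and reqwest dependencies
# are dropped, while light-sdk and light-client are added for the shared
# Indexer trait and the SDK-side account types.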
-solana-program-test = { workspace = true } light-test-utils = { workspace = true, features=["devenv"] } light-program-test = { workspace = true, features = ["devenv"] } -reqwest = "0.11.26" tokio = { workspace = true } light-prover-client = {workspace = true, features = ["devenv"] } +light-sdk = { workspace = true } +light-client = { workspace = true } num-bigint = "0.4.6" num-traits = "0.2.19" spl-token = { workspace = true } diff --git a/program-tests/registry-test/tests/tests.rs b/program-tests/registry-test/tests/tests.rs index 6b167afc8..a66613f4b 100644 --- a/program-tests/registry-test/tests/tests.rs +++ b/program-tests/registry-test/tests/tests.rs @@ -1,7 +1,7 @@ #![cfg(feature = "test-sbf")] use std::collections::HashSet; - +use std::hash::Hash; use account_compression::{ errors::AccountCompressionErrorCode, AddressMerkleTreeConfig, AddressQueueConfig, MigrateLeafParams, NullifierQueueConfig, StateMerkleTreeAccount, StateMerkleTreeConfig, @@ -9,7 +9,6 @@ use account_compression::{ use anchor_lang::{AnchorSerialize, InstructionData, ToAccountMetas}; use forester_utils::{ airdrop_lamports, forester_epoch::get_epoch_phases, get_concurrent_merkle_tree, - indexer::Indexer, }; use light_batched_merkle_tree::{ initialize_address_tree::InitAddressTreeAccountsInstructionData, @@ -70,7 +69,6 @@ use light_test_utils::{ create_rollover_address_merkle_tree_instructions, create_rollover_state_merkle_tree_instructions, e2e_test_env::{init_program_test_env, init_program_test_env_forester}, - indexer::TestIndexer, register_test_forester, test_forester::{empty_address_queue_test, nullify_compressed_accounts}, update_test_forester, Epoch, RpcConnection, RpcError, SolanaRpcConnection, SolanaRpcUrl, @@ -86,6 +84,8 @@ use solana_sdk::{ signature::{read_keypair_file, Keypair, Signature}, signer::Signer, }; +use light_client::indexer::Indexer; +use light_program_test::indexer::{TestIndexer, TestIndexerExtensions}; #[test] fn test_protocol_config_active_phase_continuity() { @@ -1427,7 +1427,7 @@ async fn test_migrate_state() { 26, >(&mut rpc, env_accounts.merkle_tree_pubkey) .await; - let compressed_account = &test_indexer.get_compressed_accounts_by_owner(&payer.pubkey())[0]; + let compressed_account = &test_indexer.get_compressed_accounts_with_merkle_context_by_owner(&payer.pubkey())[0]; let hash = compressed_account.hash().unwrap(); let bundle = &test_indexer .get_state_merkle_trees() @@ -1511,7 +1511,7 @@ async fn test_migrate_state() { 26, >(&mut rpc, env_accounts.merkle_tree_pubkey) .await; - let compressed_account = &test_indexer.get_compressed_accounts_by_owner(&payer.pubkey())[1]; + let compressed_account = &test_indexer.get_compressed_accounts_with_merkle_context_by_owner(&payer.pubkey())[1]; let hash = compressed_account.hash().unwrap(); let bundle = &test_indexer .get_state_merkle_trees() @@ -1957,9 +1957,9 @@ async fn test_batch_address_tree() { .await; } -pub async fn perform_batch_address_merkle_tree_update( +pub async fn perform_batch_address_merkle_tree_update + TestIndexerExtensions>( rpc: &mut R, - test_indexer: &mut TestIndexer, + test_indexer: &mut I, forester: &Keypair, derivation_pubkey: &Pubkey, merkle_tree_pubkey: &Pubkey, diff --git a/program-tests/sdk-test-program/programs/sdk-test/tests/test.rs b/program-tests/sdk-test-program/programs/sdk-test/tests/test.rs index 92d4110a8..92af5d81c 100644 --- a/program-tests/sdk-test-program/programs/sdk-test/tests/test.rs +++ b/program-tests/sdk-test-program/programs/sdk-test/tests/test.rs @@ -7,7 +7,6 @@ use light_client::{ }; use 
light_program_test::{ test_env::{setup_test_programs_with_accounts_v2, EnvAccounts}, - test_indexer::TestIndexer, test_rpc::ProgramTestRpcConnection, }; use light_sdk::{ @@ -27,6 +26,7 @@ use solana_sdk::{ pubkey::Pubkey, signature::{Keypair, Signer}, }; +use light_program_test::indexer::{TestIndexer, TestIndexerExtensions}; #[tokio::test] async fn test_sdk_test() { @@ -139,10 +139,10 @@ async fn test_sdk_test() { assert_eq!(record.nested.one, 2); } -async fn with_nested_data( +async fn with_nested_data( name: String, rpc: &mut R, - test_indexer: &mut TestIndexer, + test_indexer: &mut I, env: &EnvAccounts, remaining_accounts: &mut RemainingAccounts, payer: &Keypair, @@ -153,6 +153,7 @@ async fn with_nested_data( ) -> Result<(), RpcError> where R: RpcConnection + MerkleTreeExt, + I: Indexer + TestIndexerExtensions { let rpc_result = test_indexer .create_proof_for_compressed_accounts( @@ -213,9 +214,9 @@ where Ok(()) } -async fn update_nested_data( +async fn update_nested_data( rpc: &mut R, - test_indexer: &mut TestIndexer, + test_indexer: &mut I, remaining_accounts: &mut RemainingAccounts, nested_data: NestedData, payer: &Keypair, @@ -226,6 +227,7 @@ async fn update_nested_data( ) -> Result<(), RpcError> where R: RpcConnection + MerkleTreeExt, + I: Indexer + TestIndexerExtensions { let hash = compressed_account.hash().unwrap(); let merkle_tree_pubkey = compressed_account.merkle_context.merkle_tree_pubkey; @@ -282,6 +284,7 @@ where let event = rpc .create_and_send_transaction_with_event(&[instruction], &payer.pubkey(), &[payer], None) .await?; - test_indexer.add_compressed_accounts_with_token_data(&event.unwrap().0); + let slot = rpc.get_slot().await.unwrap(); + test_indexer.add_compressed_accounts_with_token_data(slot, &event.unwrap().0); Ok(()) } diff --git a/program-tests/utils/src/lib.rs b/program-tests/utils/src/lib.rs index 46ac8e2fe..078b9ac54 100644 --- a/program-tests/utils/src/lib.rs +++ b/program-tests/utils/src/lib.rs @@ -29,7 +29,7 @@ pub mod state_tree_rollover; pub mod system_program; #[allow(unused)] pub mod test_forester; -mod conversions; +pub mod conversions; pub use create_address_test_program::ID as CREATE_ADDRESS_TEST_PROGRAM_ID; pub use forester_utils::{ diff --git a/sdk-libs/sdk/src/lib.rs b/sdk-libs/sdk/src/lib.rs index cf0fab912..5d064b210 100644 --- a/sdk-libs/sdk/src/lib.rs +++ b/sdk-libs/sdk/src/lib.rs @@ -21,4 +21,4 @@ pub mod token; pub mod traits; pub mod transfer; pub mod utils; -pub mod verify; +pub mod verify; \ No newline at end of file From 9a5c6904d1237db54ce5d0779b16d8ecfaa46856 Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Thu, 9 Jan 2025 14:22:32 +0000 Subject: [PATCH 13/27] program-tests/* refactoring wip --- Cargo.lock | 2 + .../programs/sdk-test/tests/test.rs | 20 ++- .../tests/test_program_owned_trees.rs | 3 +- program-tests/system-test/Cargo.toml | 2 + program-tests/system-test/tests/test.rs | 154 ++++++++++++------ program-tests/utils/src/conversions.rs | 8 +- .../program-test/src/indexer/test_indexer.rs | 4 +- 7 files changed, 128 insertions(+), 65 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dbc067d7b..b14ce4f87 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6998,6 +6998,7 @@ dependencies = [ "anchor-lang", "anchor-spl", "light-batched-merkle-tree", + "light-client", "light-compressed-token", "light-concurrent-merkle-tree", "light-hasher", @@ -7006,6 +7007,7 @@ dependencies = [ "light-program-test", "light-prover-client", "light-registry", + "light-sdk", "light-system-program", "light-test-utils", "light-utils 1.1.0", diff 
--git a/program-tests/sdk-test-program/programs/sdk-test/tests/test.rs b/program-tests/sdk-test-program/programs/sdk-test/tests/test.rs index 92af5d81c..49759c999 100644 --- a/program-tests/sdk-test-program/programs/sdk-test/tests/test.rs +++ b/program-tests/sdk-test-program/programs/sdk-test/tests/test.rs @@ -36,17 +36,18 @@ async fn test_sdk_test() { let payer = rpc.get_payer().insecure_clone(); let mut test_indexer: TestIndexer = TestIndexer::new( - &[StateMerkleTreeAccounts { + vec![StateMerkleTreeAccounts { merkle_tree: env.merkle_tree_pubkey, nullifier_queue: env.nullifier_queue_pubkey, cpi_context: env.cpi_context_account_pubkey, }], - &[AddressMerkleTreeAccounts { + vec![AddressMerkleTreeAccounts { merkle_tree: env.address_merkle_tree_pubkey, queue: env.address_merkle_tree_queue_pubkey, }], - true, - true, + payer.insecure_clone(), + env.group_pda, + None ) .await; @@ -86,7 +87,7 @@ async fn test_sdk_test() { .unwrap(); // Check that it was created correctly. - let compressed_accounts = test_indexer.get_compressed_accounts_by_owner(&sdk_test::ID); + let compressed_accounts = test_indexer.get_compressed_accounts_with_merkle_context_by_owner(&sdk_test::ID); assert_eq!(compressed_accounts.len(), 1); let compressed_account = &compressed_accounts[0]; let record = &compressed_account @@ -126,7 +127,7 @@ async fn test_sdk_test() { .unwrap(); // Check that it was updated correctly. - let compressed_accounts = test_indexer.get_compressed_accounts_by_owner(&sdk_test::ID); + let compressed_accounts = test_indexer.get_compressed_accounts_with_merkle_context_by_owner(&sdk_test::ID); assert_eq!(compressed_accounts.len(), 1); let compressed_account = &compressed_accounts[0]; let record = &compressed_account @@ -210,7 +211,8 @@ where let event = rpc .create_and_send_transaction_with_event(&[instruction], &payer.pubkey(), &[payer], None) .await?; - test_indexer.add_compressed_accounts_with_token_data(&event.unwrap().0); + let slot = rpc.get_slot().await.unwrap(); + test_indexer.add_compressed_accounts_with_token_data(slot, &event.unwrap().0); Ok(()) } @@ -234,8 +236,8 @@ where let rpc_result = test_indexer .create_proof_for_compressed_accounts( - Some(&[hash]), - Some(&[merkle_tree_pubkey]), + Some(vec![hash]), + Some(vec![merkle_tree_pubkey]), None, None, rpc, diff --git a/program-tests/system-cpi-test/tests/test_program_owned_trees.rs b/program-tests/system-cpi-test/tests/test_program_owned_trees.rs index e270f45ab..d073c9f57 100644 --- a/program-tests/system-cpi-test/tests/test_program_owned_trees.rs +++ b/program-tests/system-cpi-test/tests/test_program_owned_trees.rs @@ -29,7 +29,7 @@ use light_registry::{ }; use light_test_utils::{ airdrop_lamports, assert_custom_error_or_program_error, assert_rpc_error, - create_account_instruction, get_concurrent_merkle_tree, indexer::TestIndexer, + create_account_instruction, get_concurrent_merkle_tree, spl::create_mint_helper, FeeConfig, RpcConnection, RpcError, TransactionParams, }; use serial_test::serial; @@ -40,6 +40,7 @@ use solana_sdk::{ signer::Signer, transaction::Transaction, }; +use light_program_test::indexer::{TestIndexer, TestIndexerExtensions}; use system_cpi_test::sdk::{ create_initialize_address_merkle_tree_and_queue_instruction, create_initialize_merkle_tree_instruction, diff --git a/program-tests/system-test/Cargo.toml b/program-tests/system-test/Cargo.toml index 9cbbe55c3..9644d0c3b 100644 --- a/program-tests/system-test/Cargo.toml +++ b/program-tests/system-test/Cargo.toml @@ -39,6 +39,8 @@ light-hasher = {workspace = true} 
light-concurrent-merkle-tree = {workspace = true} light-indexed-merkle-tree = {workspace = true} light-utils = {workspace = true} +light-client = { workspace = true } +light-sdk = { workspace = true } light-verifier = {workspace = true} light-registry = { workspace = true} solana-cli-output = { workspace = true } diff --git a/program-tests/system-test/tests/test.rs b/program-tests/system-test/tests/test.rs index b0c0c525c..c31465af7 100644 --- a/program-tests/system-test/tests/test.rs +++ b/program-tests/system-test/tests/test.rs @@ -1,4 +1,6 @@ #![cfg(feature = "test-sbf")] + +use std::ops::Index; use account_compression::errors::AccountCompressionErrorCode; use anchor_lang::{error::ErrorCode, AnchorSerialize, InstructionData, ToAccountMetas}; use light_batched_merkle_tree::{ @@ -25,7 +27,7 @@ use light_system_program::{ address::{derive_address, derive_address_legacy}, compressed_account::{ CompressedAccount, CompressedAccountData, CompressedAccountWithMerkleContext, - MerkleContext, QueueIndex, + MerkleContext, }, invoke::{ create_invoke_instruction, create_invoke_instruction_data_and_remaining_accounts, @@ -38,11 +40,10 @@ use light_test_utils::{ airdrop_lamports, assert_compressed_tx::assert_created_compressed_accounts, assert_custom_error_or_program_error, assert_rpc_error, - indexer::TestIndexer, system_program::{ compress_sol_test, create_addresses_test, decompress_sol_test, transfer_compressed_sol_test, }, - FeeConfig, Indexer, RpcConnection, RpcError, TransactionParams, + FeeConfig, RpcConnection, RpcError, TransactionParams, }; use light_utils::{hash_to_bn254_field_size_be, UtilsError}; use light_verifier::VerifierError; @@ -57,6 +58,11 @@ use solana_sdk::{ transaction::{Transaction, TransactionError}, }; use tokio::fs::write as async_write; +use light_client::indexer::Indexer; +use light_program_test::indexer::{TestIndexer, TestIndexerExtensions}; +use light_sdk::merkle_context::QueueIndex as SdkQueueIndex; +use light_system_program::sdk::compressed_account::QueueIndex; +use light_test_utils::conversions::{sdk_to_program_compressed_account, sdk_to_program_compressed_account_with_merkle_context, sdk_to_program_compressed_proof, sdk_to_program_merkle_context}; // TODO: use lazy_static to spawn the server once /// invoke_failing_test @@ -174,9 +180,9 @@ async fn invoke_failing_test() { } #[allow(clippy::too_many_arguments)] -pub async fn failing_transaction_inputs( - context: &mut ProgramTestRpcConnection, - test_indexer: &mut TestIndexer, +pub async fn failing_transaction_inputs + TestIndexerExtensions>( + context: &mut R, + test_indexer: &mut I, payer: &Keypair, env: &EnvAccounts, num_inputs: usize, @@ -203,7 +209,7 @@ pub async fn failing_transaction_inputs( let (mut new_address_params, derived_addresses) = create_address_test_inputs(env, num_addresses); let input_compressed_accounts = - test_indexer.get_compressed_accounts_by_owner(&payer.pubkey())[0..num_inputs].to_vec(); + test_indexer.get_compressed_accounts_with_merkle_context_by_owner(&payer.pubkey())[0..num_inputs].to_vec(); let hashes = input_compressed_accounts .iter() .map(|x| x.hash().unwrap()) @@ -239,7 +245,7 @@ pub async fn failing_transaction_inputs( for (i, root_index) in proof_rpc_res.address_root_indices.iter().enumerate() { new_address_params[i].address_merkle_tree_root_index = *root_index; } - (proof_rpc_res.root_indices, Some(proof_rpc_res.proof)) + (proof_rpc_res.root_indices, Some(sdk_to_program_compressed_proof(proof_rpc_res.proof))) } else { (Vec::new(), None) }; @@ -276,10 +282,12 @@ pub async fn 
failing_transaction_inputs( &input_compressed_accounts .iter() .map(|x| x.merkle_context) + .map(|x| sdk_to_program_merkle_context(x)) .collect::>(), &input_compressed_accounts .iter() .map(|x| x.compressed_account.clone()) + .map(|x| sdk_to_program_compressed_account(x)) .collect::>(), &root_indices, &output_merkle_tree_pubkeys, @@ -321,8 +329,8 @@ pub async fn failing_transaction_inputs( Ok(()) } -pub async fn failing_transaction_inputs_inner( - context: &mut ProgramTestRpcConnection, +pub async fn failing_transaction_inputs_inner( + context: &mut R, payer: &Keypair, env: &EnvAccounts, inputs_struct: &InstructionDataInvoke, @@ -581,8 +589,8 @@ fn create_address_test_inputs( (new_address_params, derived_addresses) } -pub async fn failing_transaction_address( - context: &mut ProgramTestRpcConnection, +pub async fn failing_transaction_address( + context: &mut R, payer: &Keypair, env: &EnvAccounts, inputs_struct: &InstructionDataInvoke, @@ -701,8 +709,8 @@ pub async fn failing_transaction_address( /// 2. data but signer is not a program /// 3. invalid output Merkle tree /// 4. address that doesn't exist -pub async fn failing_transaction_output( - context: &mut ProgramTestRpcConnection, +pub async fn failing_transaction_output( + context: &mut R, payer: &Keypair, env: &EnvAccounts, inputs_struct: InstructionDataInvoke, @@ -838,8 +846,8 @@ pub async fn perform_tx_with_output_compressed_accounts( assert_rpc_error(result, 0, expected_error_code) } -pub async fn create_instruction_and_failing_transaction( - context: &mut ProgramTestRpcConnection, +pub async fn create_instruction_and_failing_transaction( + context: &mut R, payer: &Keypair, inputs_struct: InstructionDataInvoke, remaining_accounts: Vec, @@ -943,6 +951,10 @@ async fn invoke_test() { let slot: u64 = context.get_slot().await.unwrap(); let (created_compressed_accounts, _) = test_indexer.add_event_and_compressed_accounts(slot, &event.0); + let created_compressed_accounts = created_compressed_accounts + .into_iter() + .map(sdk_to_program_compressed_account_with_merkle_context) + .collect::>(); assert_created_compressed_accounts( output_compressed_accounts.as_slice(), output_merkle_tree_pubkeys.as_slice(), @@ -1039,7 +1051,11 @@ async fn invoke_test() { &mut context, ) .await; - let input_compressed_accounts = vec![compressed_account_with_context.compressed_account]; + let proof = sdk_to_program_compressed_proof(proof_rpc_res.proof.clone()); + let input_compressed_accounts = vec![sdk_to_program_compressed_account( + compressed_account_with_context.compressed_account, + )]; + let instruction = create_invoke_instruction( &payer_pubkey, &payer_pubkey, @@ -1054,7 +1070,7 @@ async fn invoke_test() { &[merkle_tree_pubkey], &proof_rpc_res.root_indices, &Vec::new(), - Some(proof_rpc_res.proof.clone()), + Some(proof.clone()), None, false, None, @@ -1103,7 +1119,7 @@ async fn invoke_test() { &[merkle_tree_pubkey], &proof_rpc_res.root_indices, &Vec::new(), - Some(proof_rpc_res.proof.clone()), + Some(proof.clone()), None, false, None, @@ -1134,7 +1150,7 @@ async fn invoke_test() { &[merkle_tree_pubkey], &proof_rpc_res.root_indices, &Vec::new(), - Some(proof_rpc_res.proof.clone()), + Some(proof.clone()), None, false, None, @@ -1319,7 +1335,9 @@ async fn test_with_address() { // transfer with address println!("transfer with address-------------------------"); - let compressed_account_with_context = test_indexer.compressed_accounts[0].clone(); + let compressed_account_with_context = sdk_to_program_compressed_account_with_merkle_context( + 
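        // Conversion boundary: the indexer returns light-sdk types, while the
        // instruction builders still take light-system-program types. The
        // sdk_to_program_* helpers in program-tests/utils/src/conversions.rs
        // re-wrap one into the other; the fields correspond one to one, so each
        // helper is mechanical. A sketch of the shape (field names taken from
        // the MerkleContext usages in these tests, not a verbatim copy):
        //
        //     pub fn sdk_to_program_merkle_context(
        //         ctx: sdk::merkle_context::MerkleContext,
        //     ) -> ProgramMerkleContext {
        //         ProgramMerkleContext {
        //             merkle_tree_pubkey: ctx.merkle_tree_pubkey,
        //             nullifier_queue_pubkey: ctx.nullifier_queue_pubkey,
        //             leaf_index: ctx.leaf_index,
        //             queue_index: ctx.queue_index.map(sdk_to_program_queue_index),
        //         }
        //     }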
test_indexer.compressed_accounts[0].clone(), + ); let recipient_pubkey = Keypair::new().pubkey(); transfer_compressed_sol_test( &mut context, @@ -1413,8 +1431,12 @@ async fn test_with_address() { ]; for (n_input_compressed_accounts, n_new_addresses) in test_inputs { let compressed_input_accounts = test_indexer - .get_compressed_accounts_by_owner(&payer_pubkey)[0..n_input_compressed_accounts] + .get_compressed_accounts_with_merkle_context_by_owner(&payer_pubkey)[0..n_input_compressed_accounts] .to_vec(); + let compressed_input_accounts = compressed_input_accounts + .into_iter() + .map(sdk_to_program_compressed_account_with_merkle_context) + .collect::>(); let mut address_vec = Vec::new(); // creates multiple seeds by taking the number of input accounts and zeroing out the jth byte for j in 0..n_new_addresses { @@ -1559,8 +1581,10 @@ async fn test_with_compression() { &mut context, ) .await; - let input_compressed_accounts = - vec![compressed_account_with_context.clone().compressed_account]; + let proof = sdk_to_program_compressed_proof(proof_rpc_res.proof.clone()); + let input_compressed_accounts = vec![sdk_to_program_compressed_account( + compressed_account_with_context.clone().compressed_account, + )]; let recipient_pubkey = Keypair::new().pubkey(); let output_compressed_accounts = vec![CompressedAccount { lamports: 0, @@ -1583,7 +1607,7 @@ async fn test_with_compression() { &[merkle_tree_pubkey], &proof_rpc_res.root_indices, &Vec::new(), - Some(proof_rpc_res.proof.clone()), + Some(proof.clone()), Some(compress_amount), true, Some(recipient), @@ -1603,7 +1627,11 @@ async fn test_with_compression() { .unwrap(); let compressed_account_with_context = - test_indexer.get_compressed_accounts_by_owner(&payer_pubkey)[0].clone(); + test_indexer.get_compressed_accounts_with_merkle_context_by_owner(&payer_pubkey)[0].clone(); + let compressed_account_with_context = + sdk_to_program_compressed_account_with_merkle_context(compressed_account_with_context); + + decompress_sol_test( &mut context, &mut test_indexer, @@ -1892,7 +1920,11 @@ async fn batch_invoke_test() { assert!(proof_rpc_result.proof.is_none()); // No root index since value is in output queue assert!(proof_rpc_result.root_indices[0].is_none()); - let input_compressed_accounts = vec![compressed_account_with_context.compressed_account]; + + let input_compressed_accounts = vec![sdk_to_program_compressed_account( + compressed_account_with_context.compressed_account, + )]; + let instruction = create_invoke_instruction( &payer_pubkey, &payer_pubkey, @@ -1980,10 +2012,10 @@ async fn batch_invoke_test() { ) .unwrap(); } - // 6. Should fail: invalid leaf index + // 6. 
Should fail: invalid leaf index { let input_compressed_account = test_indexer - .get_compressed_accounts_by_owner(&payer_pubkey) + .get_compressed_accounts_with_merkle_context_by_owner(&payer_pubkey) .iter() .filter(|x| x.merkle_context.nullifier_queue_pubkey == output_queue_pubkey) .last() @@ -1998,7 +2030,7 @@ async fn batch_invoke_test() { let instruction = create_invoke_instruction( &payer_pubkey, &payer_pubkey, - &[input_compressed_account.compressed_account], + &[sdk_to_program_compressed_account(input_compressed_account.compressed_account)], &output_compressed_accounts, &[MerkleContext { merkle_tree_pubkey, @@ -2085,10 +2117,28 @@ async fn batch_invoke_test() { &mut context, ) .await; + + + let mut proof = None; + if let Some(proof_rpc) = proof_rpc_result.proof { + proof = Some(sdk_to_program_compressed_proof(proof_rpc)); + } + let input_compressed_accounts = vec![ compressed_account_with_context_1.compressed_account, compressed_account_with_context_2.compressed_account, - ]; + ] + .iter() + .map(|x| sdk_to_program_compressed_account(x.clone())) + .collect::>(); + + let merkle_context = vec![ + compressed_account_with_context_1.merkle_context, + compressed_account_with_context_2.merkle_context, + ] + .iter() + .map(|x| sdk_to_program_merkle_context(x.clone())) + .collect::>(); let output_compressed_accounts = vec![ CompressedAccount { lamports: 0, @@ -2108,16 +2158,16 @@ async fn batch_invoke_test() { let instruction = create_invoke_instruction( &payer_pubkey, &payer_pubkey, - &input_compressed_accounts, + input_compressed_accounts.as_slice(), &output_compressed_accounts, - &[merkle_context_1, merkle_context_2], + merkle_context.as_slice(), &[ merkle_context_1.nullifier_queue_pubkey, // output queue merkle_context_2.merkle_tree_pubkey, ], &proof_rpc_result.root_indices, &Vec::new(), - proof_rpc_result.proof, + proof, None, false, None, @@ -2163,7 +2213,7 @@ async fn batch_invoke_test() { &mut test_indexer, &payer, TestMode::ByZkpThenIndex, - compressed_account_with_context_1.clone(), + sdk_to_program_compressed_account_with_merkle_context(compressed_account_with_context_1.clone()), ) .await; assert_rpc_error( @@ -2194,7 +2244,7 @@ async fn batch_invoke_test() { &mut test_indexer, &payer, TestMode::ByIndexThenZkp, - compressed_account_with_context_1.clone(), + sdk_to_program_compressed_account_with_merkle_context(compressed_account_with_context_1.clone()), ) .await; assert_rpc_error( @@ -2224,7 +2274,7 @@ async fn batch_invoke_test() { &mut test_indexer, &payer, TestMode::ByIndexThenIndex, - compressed_account_with_context_1.clone(), + sdk_to_program_compressed_account_with_merkle_context(compressed_account_with_context_1.clone()), ) .await; assert_rpc_error( @@ -2254,7 +2304,7 @@ async fn batch_invoke_test() { &mut test_indexer, &payer, TestMode::ByZkpThenZkp, - compressed_account_with_context_1.clone(), + sdk_to_program_compressed_account_with_merkle_context(compressed_account_with_context_1.clone()), ) .await; assert_rpc_error( @@ -2311,18 +2361,23 @@ async fn batch_invoke_test() { &mut context, ) .await; - let mut merkle_context = compressed_account_with_context_1.merkle_context; + let mut merkle_context = sdk_to_program_merkle_context(compressed_account_with_context_1.merkle_context); merkle_context.queue_index = Some(QueueIndex::default()); + let mut proof = None; + if let Some(proof_rpc) = proof_rpc_result.proof { + proof = Some(sdk_to_program_compressed_proof(proof_rpc)); + } + let instruction = create_invoke_instruction( &payer_pubkey, &payer_pubkey, - 
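        // Every SDK-typed value is converted at this call boundary: accounts via
        // sdk_to_program_compressed_account, merkle contexts via
        // sdk_to_program_merkle_context, and proofs via
        // sdk_to_program_compressed_proof, before the program-typed
        // instruction builder sees them.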
&[compressed_account_with_context_1.compressed_account], + &[sdk_to_program_compressed_account(compressed_account_with_context_1.compressed_account)], &output_compressed_accounts, &[merkle_context], &[merkle_context.nullifier_queue_pubkey], &[None], &Vec::new(), - proof_rpc_result.proof, + proof, None, false, None, @@ -2355,13 +2410,13 @@ async fn batch_invoke_test() { .clone(); let mut merkle_context = compressed_account_with_context_1.merkle_context; - merkle_context.queue_index = Some(QueueIndex::default()); + merkle_context.queue_index = Some(SdkQueueIndex::default()); let instruction = create_invoke_instruction( &payer_pubkey, &payer_pubkey, &input_compressed_accounts, &output_compressed_accounts, - &[merkle_context], + &[sdk_to_program_merkle_context(merkle_context)], &[merkle_context.merkle_tree_pubkey], &[None], &Vec::new(), @@ -2388,9 +2443,9 @@ pub enum TestMode { ByZkpThenZkp, } -pub async fn double_spend_compressed_account( - context: &mut ProgramTestRpcConnection, - test_indexer: &mut TestIndexer, +pub async fn double_spend_compressed_account + TestIndexerExtensions>( + context: &mut R, + test_indexer: &mut I, payer: &Keypair, mode: TestMode, compressed_account_with_context_1: CompressedAccountWithMerkleContext, @@ -2408,6 +2463,7 @@ pub async fn double_spend_compressed_account( context, ) .await; + let mut proof = Some(sdk_to_program_compressed_proof(proof_rpc_result.proof)); let input_compressed_accounts = vec![compressed_account_with_context_1.compressed_account]; let output_compressed_accounts = vec![CompressedAccount { lamports: 0, @@ -2425,7 +2481,7 @@ pub async fn double_spend_compressed_account( &[merkle_context_1.nullifier_queue_pubkey], &proof_rpc_result.root_indices, &Vec::new(), - Some(proof_rpc_result.proof), + proof, None, false, None, @@ -2569,6 +2625,10 @@ pub async fn create_output_accounts( let slot: u64 = context.get_slot().await.unwrap(); let (created_compressed_accounts, _) = test_indexer.add_event_and_compressed_accounts(slot, &event); + let created_compressed_accounts = created_compressed_accounts + .into_iter() + .map(sdk_to_program_compressed_account_with_merkle_context) + .collect::>(); assert_created_compressed_accounts( output_compressed_accounts.as_slice(), output_merkle_tree_pubkeys.as_slice(), diff --git a/program-tests/utils/src/conversions.rs b/program-tests/utils/src/conversions.rs index eaa0777a3..c094e4e74 100644 --- a/program-tests/utils/src/conversions.rs +++ b/program-tests/utils/src/conversions.rs @@ -1,18 +1,14 @@ use light_sdk::{self as sdk, proof::CompressedProof}; use light_system_program::invoke::processor::CompressedProof as ProgramCompressedProof; use light_system_program::invoke::OutputCompressedAccountWithPackedContext as ProgramOutputCompressedAccountWithPackedContext; -use light_system_program::sdk::compressed_account::{ - CompressedAccount as ProgramCompressedAccount, - CompressedAccountData as ProgramCompressedAccountData, - CompressedAccountWithMerkleContext as ProgramCompressedAccountWithMerkleContext, - MerkleContext as ProgramMerkleContext, QueueIndex as ProgramQueueIndex, -}; +use light_system_program::sdk::compressed_account::{CompressedAccount as ProgramCompressedAccount, CompressedAccountData as ProgramCompressedAccountData, CompressedAccountWithMerkleContext as ProgramCompressedAccountWithMerkleContext, MerkleContext as ProgramMerkleContext, QueueIndex as ProgramQueueIndex, QueueIndex}; use light_compressed_token::{ token_data::AccountState as ProgramAccountState, TokenData as ProgramTokenData, }; use 
light_system_program::sdk::event::MerkleTreeSequenceNumber as ProgramMerkleTreeSequenceNumber; use light_system_program::sdk::event::PublicTransactionEvent as ProgramPublicTransactionEvent; + pub fn sdk_to_program_queue_index( sdk_queue_index: sdk::merkle_context::QueueIndex, ) -> ProgramQueueIndex { diff --git a/sdk-libs/program-test/src/indexer/test_indexer.rs b/sdk-libs/program-test/src/indexer/test_indexer.rs index 7d65fcebe..b80a14595 100644 --- a/sdk-libs/program-test/src/indexer/test_indexer.rs +++ b/sdk-libs/program-test/src/indexer/test_indexer.rs @@ -581,7 +581,7 @@ impl TestIndexerExtensions for TestIndexer where R: RpcConnection + MerkleTreeExt, { - + fn get_address_merkle_tree( &self, merkle_tree_pubkey: Pubkey, @@ -590,7 +590,7 @@ where .iter() .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) } - + /// deserializes an event /// adds the output_compressed_accounts to the compressed_accounts /// removes the input_compressed_accounts from the compressed_accounts From 1839e693423bf9cacf66c57f4aa13ad1a0751e3e Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Thu, 9 Jan 2025 14:33:41 +0000 Subject: [PATCH 14/27] format + imports --- Cargo.lock | 30 --- .../src/address_merkle_tree_config.rs | 12 +- forester-utils/src/indexer/mod.rs | 88 ++++---- forester-utils/src/instructions.rs | 8 +- forester-utils/src/registry.rs | 7 +- forester/src/batch_processor/address.rs | 17 +- forester/src/batch_processor/common.rs | 8 +- forester/src/batch_processor/mod.rs | 3 +- forester/src/batch_processor/state.rs | 37 +-- forester/src/epoch_manager.rs | 13 +- forester/src/indexer_type.rs | 125 ++++++---- forester/src/lib.rs | 12 +- forester/src/photon_indexer.rs | 24 +- forester/src/rollover/mod.rs | 4 +- forester/src/rollover/operations.rs | 10 +- forester/src/send_transaction.rs | 7 +- .../account-compression-test/Cargo.toml | 9 - .../tests/address_merkle_tree_tests.rs | 2 +- .../compressed-token-test/tests/test.rs | 40 +++- program-tests/e2e-test/tests/test.rs | 6 +- program-tests/registry-test/Cargo.toml | 13 -- program-tests/registry-test/tests/tests.rs | 19 +- .../programs/sdk-test/tests/test.rs | 14 +- .../tests/test_program_owned_trees.rs | 6 +- program-tests/system-test/Cargo.toml | 9 - program-tests/system-test/tests/test.rs | 81 ++++--- .../utils/src/assert_compressed_tx.rs | 29 ++- program-tests/utils/src/assert_token_tx.rs | 13 +- program-tests/utils/src/conversions.rs | 28 ++- .../src/create_address_test_program_sdk.rs | 20 +- program-tests/utils/src/e2e_test_env.rs | 22 +- program-tests/utils/src/lib.rs | 2 +- program-tests/utils/src/spl.rs | 118 +++++++--- program-tests/utils/src/system_program.rs | 38 +++- program-tests/utils/src/test_forester.rs | 8 +- sdk-libs/client/src/indexer/mod.rs | 14 +- .../program-test/src/indexer/extensions.rs | 27 ++- .../program-test/src/indexer/test_indexer.rs | 213 ++++++++++-------- sdk-libs/program-test/src/indexer/utils.rs | 106 ++++----- sdk-libs/program-test/src/lib.rs | 2 +- .../program-test/src/test_batch_forester.rs | 5 +- sdk-libs/program-test/src/test_indexer.rs | 56 ++--- sdk-libs/sdk/src/lib.rs | 2 +- sdk-libs/sdk/src/proof.rs | 2 +- 44 files changed, 745 insertions(+), 564 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b14ce4f87..e843619d1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -54,7 +54,6 @@ dependencies = [ "light-bloom-filter", "light-bounded-vec", "light-client", - "light-compressed-token", "light-concurrent-merkle-tree", "light-hash-set", "light-hasher", @@ -63,22 +62,14 @@ dependencies = [ 
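# (Regenerated Cargo.lock: the "format + imports" pass prunes dev-dependencies
# the refactored test crates no longer use directly, so their dependency lists
# shrink here and below.)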
"light-merkle-tree-reference", "light-program-test", "light-prover-client", - "light-system-program", "light-test-utils", "light-utils 1.1.0", "light-verifier", - "memoffset 0.9.1", "num-bigint 0.4.6", - "num-traits", "rand 0.8.5", - "reqwest 0.11.27", - "serde_json", "serial_test", - "solana-cli-output", "solana-program-test", "solana-sdk", - "spl-token", - "thiserror", "tokio", ] @@ -4505,29 +4496,17 @@ version = "1.1.0" dependencies = [ "account-compression", "anchor-lang", - "anchor-spl", "forester-utils", "light-batched-merkle-tree", "light-client", - "light-compressed-token", - "light-concurrent-merkle-tree", "light-hasher", - "light-indexed-merkle-tree", "light-program-test", "light-prover-client", "light-registry", - "light-sdk", - "light-system-program", "light-test-utils", "light-utils 1.1.0", - "light-verifier", - "num-bigint 0.4.6", - "num-traits", - "serde_json", "serial_test", - "solana-cli-output", "solana-sdk", - "spl-token", "tokio", ] @@ -6996,13 +6975,9 @@ version = "1.1.0" dependencies = [ "account-compression", "anchor-lang", - "anchor-spl", "light-batched-merkle-tree", "light-client", - "light-compressed-token", - "light-concurrent-merkle-tree", "light-hasher", - "light-indexed-merkle-tree", "light-merkle-tree-metadata", "light-program-test", "light-prover-client", @@ -7012,16 +6987,11 @@ dependencies = [ "light-test-utils", "light-utils 1.1.0", "light-verifier", - "num-bigint 0.4.6", - "num-traits", "quote", - "reqwest 0.11.27", "serde_json", "serial_test", "solana-cli-output", - "solana-program-test", "solana-sdk", - "spl-token", "tokio", ] diff --git a/forester-utils/src/address_merkle_tree_config.rs b/forester-utils/src/address_merkle_tree_config.rs index ac190d0af..968affa37 100644 --- a/forester-utils/src/address_merkle_tree_config.rs +++ b/forester-utils/src/address_merkle_tree_config.rs @@ -4,15 +4,15 @@ use account_compression::{ }; use anchor_lang::Discriminator; use light_batched_merkle_tree::merkle_tree::BatchedMerkleTreeMetadata; -use light_client::rpc::RpcConnection; +use light_client::{ + indexer::{AddressMerkleTreeAccounts, StateMerkleTreeAccounts}, + rpc::RpcConnection, +}; use light_hasher::{Discriminator as LightDiscriminator, Poseidon}; use num_traits::Zero; use solana_sdk::pubkey::Pubkey; -use light_client::indexer::{AddressMerkleTreeAccounts, StateMerkleTreeAccounts}; -use crate::{ - get_concurrent_merkle_tree, get_hash_set, get_indexed_merkle_tree, - AccountZeroCopy, -}; + +use crate::{get_concurrent_merkle_tree, get_hash_set, get_indexed_merkle_tree, AccountZeroCopy}; pub async fn get_address_bundle_config( rpc: &mut R, diff --git a/forester-utils/src/indexer/mod.rs b/forester-utils/src/indexer/mod.rs index e2b309a84..2cbf037b2 100644 --- a/forester-utils/src/indexer/mod.rs +++ b/forester-utils/src/indexer/mod.rs @@ -1,5 +1,5 @@ // use std::fmt::Debug; -// +// // use account_compression::initialize_address_merkle_tree::{ // Error as AccountCompressionError, Pubkey, // }; @@ -21,13 +21,13 @@ // use photon_api::apis::{default_api::GetCompressedAccountProofPostError, Error as PhotonApiError}; // use solana_sdk::signature::Keypair; // use thiserror::Error; -// +// // #[derive(Debug, Clone)] // pub struct TokenDataWithContext { // pub token_data: TokenData, // pub compressed_account: CompressedAccountWithMerkleContext, // } -// +// // #[derive(Debug, Default)] // pub struct BatchedTreeProofRpcResult { // pub proof: Option, @@ -35,27 +35,27 @@ // pub root_indices: Vec>, // pub address_root_indices: Vec, // } -// +// // #[derive(Debug, Default)] // 
pub struct ProofRpcResult { // pub proof: CompressedProof, // pub root_indices: Vec>, // pub address_root_indices: Vec, // } -// +// // #[derive(Debug, Clone, Copy, Ord, PartialOrd, Eq, PartialEq)] // pub struct StateMerkleTreeAccounts { // pub merkle_tree: Pubkey, // pub nullifier_queue: Pubkey, // pub cpi_context: Pubkey, // } -// +// // #[derive(Debug, Clone, Copy)] // pub struct AddressMerkleTreeAccounts { // pub merkle_tree: Pubkey, // pub queue: Pubkey, // } -// +// // #[derive(Debug, Clone)] // pub struct StateMerkleTreeBundle { // pub rollover_fee: i64, @@ -66,7 +66,7 @@ // /// leaf index, leaf, tx hash // pub input_leaf_indices: Vec<(u32, [u8; 32], [u8; 32])>, // } -// +// // #[derive(Debug, Clone)] // pub struct AddressMerkleTreeBundle { // pub rollover_fee: i64, @@ -75,19 +75,19 @@ // pub accounts: AddressMerkleTreeAccounts, // pub queue_elements: Vec<[u8; 32]>, // } -// +// // pub struct ProofOfLeaf { // pub leaf: [u8; 32], // pub proof: Vec<[u8; 32]>, // } -// +// // #[async_trait] // pub trait Indexer: Sync + Send + Debug + 'static { // /// Returns queue elements from the queue with the given pubkey. For input // /// queues account compression program does not store queue elements in the // /// account data but only emits these in the public transaction event. The // /// indexer needs the queue elements to create batch update proofs. -// +// // // i // async fn get_queue_elements( // &self, @@ -96,12 +96,12 @@ // start_offset: u64, // end_offset: u64, // ) -> Result, IndexerError>; -// +// // // e // fn get_proof_by_index(&mut self, _merkle_tree_pubkey: Pubkey, _index: u64) -> ProofOfLeaf { // unimplemented!("get_proof_by_index not implemented") // } -// +// // // e // fn get_proofs_by_indices( // &mut self, @@ -110,7 +110,7 @@ // ) -> Vec { // unimplemented!("get_proof_by_index not implemented") // } -// +// // // e // fn get_leaf_indices_tx_hashes( // &mut self, @@ -119,41 +119,41 @@ // ) -> Vec<(u32, [u8; 32], [u8; 32])> { // unimplemented!(); // } -// +// // // i // async fn get_subtrees( // &self, // merkle_tree_pubkey: [u8; 32], // ) -> Result, IndexerError>; -// +// // // i // async fn get_multiple_compressed_account_proofs( // &self, // hashes: Vec, // ) -> Result, IndexerError>; -// +// // async fn get_rpc_compressed_accounts_by_owner( // &self, // owner: &Pubkey, // ) -> Result, IndexerError>; -// +// // // i // async fn get_multiple_new_address_proofs( // &self, // merkle_tree_pubkey: [u8; 32], // addresses: Vec<[u8; 32]>, // ) -> Result>, IndexerError>; -// +// // // i // async fn get_multiple_new_address_proofs_full( // &self, // merkle_tree_pubkey: [u8; 32], // addresses: Vec<[u8; 32]>, // ) -> Result>, IndexerError>; -// +// // // e // fn account_nullified(&mut self, _merkle_tree_pubkey: Pubkey, _account_hash: &str) {} -// +// // // e // fn address_tree_updated( // &mut self, @@ -161,12 +161,12 @@ // _context: &NewAddressProofWithContext<16>, // ) { // } -// +// // // e // fn get_state_merkle_tree_accounts(&self, _pubkeys: &[Pubkey]) -> Vec { // unimplemented!() // } -// +// // // e // fn add_event_and_compressed_accounts( // &mut self, @@ -178,42 +178,42 @@ // ) { // unimplemented!() // } -// +// // // e // fn get_state_merkle_trees(&self) -> &Vec { // unimplemented!() // } -// +// // // e // fn get_state_merkle_trees_mut(&mut self) -> &mut Vec { // unimplemented!() // } -// +// // // e // fn get_address_merkle_trees(&self) -> &Vec { // unimplemented!() // } -// +// // // e // fn get_address_merkle_trees_mut(&mut self) -> &mut Vec { // unimplemented!() // } 
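(The commented-out trait preserved in this file survives only as a reference; the live definition now ships from `light_client::indexer`, which is what the rest of this patch imports everywhere. A minimal sketch of a caller written against the relocated trait, assuming the method shapes shown in this commented-out copy, e.g. `get_multiple_compressed_account_proofs`, carry over unchanged:)

```rust
// Sketch only: assumes light_client::indexer keeps the signatures
// listed in the commented-out copy in this file.
use light_client::{
    indexer::{Indexer, IndexerError, MerkleProof},
    rpc::RpcConnection,
};

async fn fetch_proofs<R, I>(
    indexer: &I,
    hashes: Vec<String>,
) -> Result<Vec<MerkleProof>, IndexerError>
where
    R: RpcConnection,
    I: Indexer<R>,
{
    // Works for any backend: TestIndexer in tests, PhotonIndexer in prod.
    indexer.get_multiple_compressed_account_proofs(hashes).await
}
```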
-// +// // // e // fn get_token_compressed_accounts(&self) -> &Vec { // unimplemented!() // } -// +// // // e // fn get_payer(&self) -> &Keypair { // unimplemented!() // } -// +// // // e // fn get_group_pda(&self) -> &Pubkey { // unimplemented!() // } -// +// // // i + e // async fn create_proof_for_compressed_accounts( // &mut self, @@ -225,7 +225,7 @@ // ) -> ProofRpcResult { // unimplemented!() // } -// +// // // e // async fn create_proof_for_compressed_accounts2( // &mut self, @@ -237,7 +237,7 @@ // ) -> BatchedTreeProofRpcResult { // unimplemented!() // } -// +// // // e // fn add_address_merkle_tree_accounts( // &mut self, @@ -247,7 +247,7 @@ // ) -> AddressMerkleTreeAccounts { // unimplemented!() // } -// +// // // i // fn get_compressed_accounts_by_owner( // &self, @@ -255,17 +255,17 @@ // ) -> Vec { // unimplemented!() // } -// +// // // e // fn get_compressed_token_accounts_by_owner(&self, _owner: &Pubkey) -> Vec { // unimplemented!() // } -// +// // // e // fn add_state_bundle(&mut self, _state_bundle: StateMerkleTreeBundle) { // unimplemented!() // } -// +// // // e // async fn update_test_indexer_after_append( // &mut self, @@ -276,7 +276,7 @@ // ) { // unimplemented!() // } -// +// // // e // async fn update_test_indexer_after_nullification( // &mut self, @@ -286,7 +286,7 @@ // ) { // unimplemented!() // } -// +// // // e // async fn finalize_batched_address_tree_update( // &mut self, @@ -296,7 +296,7 @@ // unimplemented!() // } // } -// +// // #[derive(Debug, Clone)] // pub struct MerkleProof { // pub hash: String, @@ -305,7 +305,7 @@ // pub proof: Vec<[u8; 32]>, // pub root_seq: u64, // } -// +// // // For consistency with the Photon API. // #[derive(Clone, Debug, PartialEq)] // pub struct NewAddressProofWithContext { @@ -321,7 +321,7 @@ // pub new_element: Option>, // pub new_element_next_value: Option, // } -// +// // #[derive(Error, Debug)] // pub enum IndexerError { // #[error("RPC Error: {0}")] @@ -341,13 +341,13 @@ // #[error("unknown error")] // Unknown, // } -// +// // #[derive(Error, Debug)] // pub enum PhotonApiErrorWrapper { // #[error(transparent)] // GetCompressedAccountProofPostError(#[from] PhotonApiError), // } -// +// // impl From> for IndexerError { // fn from(err: PhotonApiError) -> Self { // IndexerError::PhotonApiError(PhotonApiErrorWrapper::GetCompressedAccountProofPostError( diff --git a/forester-utils/src/instructions.rs b/forester-utils/src/instructions.rs index 039391888..2828dfecf 100644 --- a/forester-utils/src/instructions.rs +++ b/forester-utils/src/instructions.rs @@ -6,7 +6,7 @@ use light_batched_merkle_tree::{ }, queue::BatchedQueueAccount, }; -use light_client::rpc::RpcConnection; +use light_client::{indexer::Indexer, rpc::RpcConnection}; use light_hasher::{Hasher, Poseidon}; use light_prover_client::{ batch_address_append::get_batch_address_append_circuit_inputs, @@ -26,7 +26,6 @@ use log::{error, info}; use reqwest::Client; use solana_sdk::pubkey::Pubkey; use thiserror::Error; -use light_client::indexer::Indexer; #[derive(Error, Debug)] pub enum ForesterUtilsError { @@ -45,9 +44,10 @@ pub async fn create_batch_update_address_tree_instruction_data( rpc: &mut R, indexer: &mut I, merkle_tree_pubkey: Pubkey, -) -> Result<(InstructionDataBatchNullifyInputs, usize), ForesterUtilsError> where +) -> Result<(InstructionDataBatchNullifyInputs, usize), ForesterUtilsError> +where R: RpcConnection, - I: Indexer //+ TestIndexerExtensions, + I: Indexer, //+ TestIndexerExtensions, { let mut merkle_tree_account = 
rpc.get_account(merkle_tree_pubkey).await .map_err(|e| { diff --git a/forester-utils/src/registry.rs b/forester-utils/src/registry.rs index 5989ba880..38cd4c90e 100644 --- a/forester-utils/src/registry.rs +++ b/forester-utils/src/registry.rs @@ -2,7 +2,10 @@ use account_compression::{ AddressMerkleTreeConfig, AddressQueueConfig, NullifierQueueConfig, QueueAccount, StateMerkleTreeConfig, }; -use light_client::rpc::{RpcConnection, RpcError}; +use light_client::{ + indexer::{AddressMerkleTreeAccounts, StateMerkleTreeAccounts}, + rpc::{RpcConnection, RpcError}, +}; use light_registry::{ account_compression_cpi::sdk::{ create_rollover_state_merkle_tree_instruction, CreateRolloverMerkleTreeInstructionInputs, @@ -17,7 +20,7 @@ use solana_sdk::{ pubkey::Pubkey, signature::{Keypair, Signer}, }; -use light_client::indexer::{AddressMerkleTreeAccounts, StateMerkleTreeAccounts}; + use crate::{ address_merkle_tree_config::{get_address_bundle_config, get_state_bundle_config}, create_account_instruction, diff --git a/forester/src/batch_processor/address.rs b/forester/src/batch_processor/address.rs index 83148d499..339a6abe7 100644 --- a/forester/src/batch_processor/address.rs +++ b/forester/src/batch_processor/address.rs @@ -1,16 +1,16 @@ use borsh::BorshSerialize; -use forester_utils::{ - instructions::create_batch_update_address_tree_instruction_data, -}; +use forester_utils::instructions::create_batch_update_address_tree_instruction_data; use light_batched_merkle_tree::event::BatchNullifyEvent; -use light_client::rpc::RpcConnection; +use light_client::{indexer::Indexer, rpc::RpcConnection}; use light_registry::account_compression_cpi::sdk::create_batch_update_address_tree_instruction; use solana_sdk::signer::Signer; use tracing::{info, instrument}; -use light_client::indexer::Indexer; + use super::common::BatchContext; -use crate::batch_processor::error::{BatchProcessError, Result}; -use crate::indexer_type::{finalize_batch_address_tree_update, IndexerType}; +use crate::{ + batch_processor::error::{BatchProcessError, Result}, + indexer_type::{finalize_batch_address_tree_update, IndexerType}, +}; #[instrument(level = "debug", skip(context), fields(tree = %context.merkle_tree))] pub(crate) async fn process_batch + IndexerType>( @@ -51,7 +51,8 @@ pub(crate) async fn process_batch + IndexerType< .await?; finalize_batch_address_tree_update(&mut *rpc, context.indexer.clone(), context.merkle_tree) - .await.expect("Failed to finalize batch address tree update"); + .await + .expect("Failed to finalize batch address tree update"); info!( "Address batch processing completed successfully. 
Batch size: {}", diff --git a/forester/src/batch_processor/common.rs b/forester/src/batch_processor/common.rs index 083998b4a..8cf9b9a00 100644 --- a/forester/src/batch_processor/common.rs +++ b/forester/src/batch_processor/common.rs @@ -1,19 +1,19 @@ use std::sync::Arc; -use forester_utils::{forester_epoch::TreeType}; +use forester_utils::forester_epoch::TreeType; use light_batched_merkle_tree::{ batch::{Batch, BatchState}, merkle_tree::BatchedMerkleTreeAccount, queue::BatchedQueueAccount, }; -use light_client::{rpc::RpcConnection, rpc_pool::SolanaRpcPool}; +use light_client::{indexer::Indexer, rpc::RpcConnection, rpc_pool::SolanaRpcPool}; use solana_program::pubkey::Pubkey; use solana_sdk::signature::Keypair; use tokio::sync::Mutex; use tracing::info; -use light_client::indexer::Indexer; -use crate::indexer_type::IndexerType; + use super::{address, error::Result, state, BatchProcessError}; +use crate::indexer_type::IndexerType; #[derive(Debug)] pub struct BatchContext> { diff --git a/forester/src/batch_processor/mod.rs b/forester/src/batch_processor/mod.rs index 75546e93f..9e392aee3 100644 --- a/forester/src/batch_processor/mod.rs +++ b/forester/src/batch_processor/mod.rs @@ -5,7 +5,7 @@ mod state; use common::BatchProcessor; use error::Result; -use forester_utils::{forester_epoch::TreeType}; +use forester_utils::forester_epoch::TreeType; use light_client::rpc::RpcConnection; use tracing::{info, instrument}; @@ -29,4 +29,5 @@ pub async fn process_batched_operations + Indexe pub use common::BatchContext; pub use error::BatchProcessError; use light_client::indexer::Indexer; + use crate::indexer_type::IndexerType; diff --git a/forester/src/batch_processor/state.rs b/forester/src/batch_processor/state.rs index 57a65724b..85e1fb3b0 100644 --- a/forester/src/batch_processor/state.rs +++ b/forester/src/batch_processor/state.rs @@ -1,17 +1,19 @@ use borsh::BorshSerialize; -use forester_utils::{ - instructions::{create_append_batch_ix_data, create_nullify_batch_ix_data}, -}; +use forester_utils::instructions::{create_append_batch_ix_data, create_nullify_batch_ix_data}; use light_batched_merkle_tree::event::{BatchAppendEvent, BatchNullifyEvent}; -use light_client::rpc::RpcConnection; +use light_client::{indexer::Indexer, rpc::RpcConnection}; use light_registry::account_compression_cpi::sdk::{ create_batch_append_instruction, create_batch_nullify_instruction, }; use solana_sdk::signer::Signer; -use light_client::indexer::Indexer; + use super::common::BatchContext; -use crate::batch_processor::error::{BatchProcessError, Result}; -use crate::indexer_type::{update_test_indexer_after_append, update_test_indexer_after_nullification, IndexerType}; +use crate::{ + batch_processor::error::{BatchProcessError, Result}, + indexer_type::{ + update_test_indexer_after_append, update_test_indexer_after_nullification, IndexerType, + }, +}; pub(crate) async fn perform_append + IndexerType>( context: &BatchContext, @@ -46,14 +48,15 @@ pub(crate) async fn perform_append + IndexerType ) .await?; - update_test_indexer_after_append( - rpc, - context.indexer.clone(), - context.merkle_tree, - context.output_queue, - num_inserted_zkps, - ) - .await.expect("Failed to update test indexer after append"); + update_test_indexer_after_append( + rpc, + context.indexer.clone(), + context.merkle_tree, + context.output_queue, + num_inserted_zkps, + ) + .await + .expect("Failed to update test indexer after append"); Ok(()) } @@ -87,14 +90,14 @@ pub(crate) async fn perform_nullify + IndexerTyp ) .await?; - 
update_test_indexer_after_nullification( rpc, context.indexer.clone(), context.merkle_tree, batch_index, ) - .await.expect("Failed to update test indexer after nullification"); + .await + .expect("Failed to update test indexer after nullification"); Ok(()) } diff --git a/forester/src/epoch_manager.rs b/forester/src/epoch_manager.rs index 2772e1355..067a01f04 100644 --- a/forester/src/epoch_manager.rs +++ b/forester/src/epoch_manager.rs @@ -9,11 +9,12 @@ use std::{ use anyhow::Context; use dashmap::DashMap; -use forester_utils::{ - forester_epoch::{get_epoch_phases, Epoch, TreeAccounts, TreeForesterSchedule, TreeType}, +use forester_utils::forester_epoch::{ + get_epoch_phases, Epoch, TreeAccounts, TreeForesterSchedule, TreeType, }; use futures::future::join_all; use light_client::{ + indexer::{Indexer, MerkleProof, NewAddressProofWithContext}, rpc::{RetryConfig, RpcConnection, RpcError, SolanaRpcConnection}, rpc_pool::SolanaRpcPool, }; @@ -31,19 +32,18 @@ use tokio::{ time::{sleep, Instant}, }; use tracing::{debug, error, info, info_span, instrument, warn}; -use light_client::indexer::{Indexer, MerkleProof, NewAddressProofWithContext}; + use crate::{ batch_processor::{process_batched_operations, BatchContext}, errors::{ ChannelError, ConfigurationError, ForesterError, InitializationError, RegistrationError, WorkReportError, }, + indexer_type::{rollover_address_merkle_tree, rollover_state_merkle_tree, IndexerType}, metrics::{push_metrics, queue_metric_update, update_forester_sol_balance}, pagerduty::send_pagerduty_alert, queue_helpers::QueueItemData, - rollover::{ - is_tree_ready_for_rollover, - }, + rollover::is_tree_ready_for_rollover, send_transaction::{ send_batched_transactions, BuildTransactionBatchConfig, EpochManagerTransactions, SendBatchedTransactionsConfig, @@ -53,7 +53,6 @@ use crate::{ tree_finder::TreeFinder, ForesterConfig, ForesterEpochInfo, Result, }; -use crate::indexer_type::{rollover_address_merkle_tree, rollover_state_merkle_tree, IndexerType}; #[derive(Copy, Clone, Debug)] pub struct WorkReport { diff --git a/forester/src/indexer_type.rs b/forester/src/indexer_type.rs index 2aa5572f4..68d891d76 100644 --- a/forester/src/indexer_type.rs +++ b/forester/src/indexer_type.rs @@ -1,25 +1,30 @@ -use std::any::Any; -use std::sync::Arc; +use std::{any::Any, sync::Arc}; + use async_trait::async_trait; -use solana_program::pubkey::Pubkey; -use solana_sdk::signature::Keypair; -use solana_sdk::signer::Signer; -use tokio::sync::Mutex; -use tracing::info; use forester_utils::forester_epoch::TreeAccounts; -use light_client::indexer::{Indexer, StateMerkleTreeAccounts, StateMerkleTreeBundle}; -use light_client::rpc::RpcConnection; +use light_client::{ + indexer::{Indexer, StateMerkleTreeAccounts, StateMerkleTreeBundle}, + rpc::RpcConnection, +}; use light_hasher::Poseidon; use light_merkle_tree_reference::MerkleTree; use light_program_test::indexer::{TestIndexer, TestIndexerExtensions}; use light_sdk::{STATE_MERKLE_TREE_CANOPY_DEPTH, STATE_MERKLE_TREE_HEIGHT}; -use crate::errors::ForesterError; -use crate::ForesterConfig; -use crate::photon_indexer::PhotonIndexer; -use crate::rollover::{perform_address_merkle_tree_rollover, perform_state_merkle_tree_rollover_forester}; +use solana_program::pubkey::Pubkey; +use solana_sdk::{signature::Keypair, signer::Signer}; +use tokio::sync::Mutex; +use tracing::info; + +use crate::{ + errors::ForesterError, + photon_indexer::PhotonIndexer, + rollover::{perform_address_merkle_tree_rollover, perform_state_merkle_tree_rollover_forester}, + 
ForesterConfig, +}; mod sealed { use light_client::rpc::merkle_tree::MerkleTreeExt; + use super::*; pub trait Sealed {} impl Sealed for TestIndexer {} @@ -69,7 +74,9 @@ pub trait IndexerType: sealed::Sealed { } #[async_trait] -impl IndexerType for TestIndexer { +impl IndexerType + for TestIndexer +{ fn handle_state_bundle( indexer: &mut impl Indexer, new_merkle_tree: Pubkey, @@ -112,25 +119,45 @@ impl IndexerTy new_merkle_tree_pubkey: Pubkey, ) { if let Some(test_indexer) = (indexer as &mut dyn Any).downcast_mut::>() { - test_indexer.finalize_batched_address_tree_update(rpc, new_merkle_tree_pubkey).await; + test_indexer + .finalize_batched_address_tree_update(rpc, new_merkle_tree_pubkey) + .await; } } - async fn update_test_indexer_after_nullification(rpc: &mut R, indexer: &mut impl Indexer, merkle_tree_pubkey: Pubkey, batch_index: usize) - where - Self: Sized + async fn update_test_indexer_after_nullification( + rpc: &mut R, + indexer: &mut impl Indexer, + merkle_tree_pubkey: Pubkey, + batch_index: usize, + ) where + Self: Sized, { if let Some(test_indexer) = (indexer as &mut dyn Any).downcast_mut::>() { - test_indexer.update_test_indexer_after_nullification(rpc, merkle_tree_pubkey, batch_index).await; + test_indexer + .update_test_indexer_after_nullification(rpc, merkle_tree_pubkey, batch_index) + .await; } } - async fn update_test_indexer_after_append(rpc: &mut R, indexer: &mut impl Indexer, merkle_tree_pubkey: Pubkey, output_queue: Pubkey, num_inserted_zkps: u64) - where - Self: Sized + async fn update_test_indexer_after_append( + rpc: &mut R, + indexer: &mut impl Indexer, + merkle_tree_pubkey: Pubkey, + output_queue: Pubkey, + num_inserted_zkps: u64, + ) where + Self: Sized, { if let Some(test_indexer) = (indexer as &mut dyn Any).downcast_mut::>() { - test_indexer.update_test_indexer_after_append(rpc, merkle_tree_pubkey, output_queue, num_inserted_zkps).await; + test_indexer + .update_test_indexer_after_append( + rpc, + merkle_tree_pubkey, + output_queue, + num_inserted_zkps, + ) + .await; } } } @@ -167,7 +194,7 @@ impl IndexerType for PhotonIndexer { _rpc: &mut R, _indexer: &mut impl Indexer, _merkle_tree_pubkey: Pubkey, - _batch_index: usize + _batch_index: usize, ) { // No-op for production indexer } @@ -177,7 +204,7 @@ impl IndexerType for PhotonIndexer { _indexer: &mut impl Indexer, _merkle_tree_pubkey: Pubkey, _output_queue: Pubkey, - _num_inserted_zkps: u64 + _num_inserted_zkps: u64, ) { // No-op for production indexer } @@ -204,8 +231,9 @@ pub async fn rollover_state_merkle_tree + Indexe &tree_accounts.merkle_tree, &tree_accounts.queue, &Pubkey::default(), - epoch) - .await?; + epoch, + ) + .await?; info!("State rollover signature: {:?}", rollover_signature); @@ -229,8 +257,17 @@ pub async fn rollover_address_merkle_tree + Inde let new_nullifier_queue_keypair = Keypair::new(); let new_merkle_tree_keypair = Keypair::new(); - let rollover_signature = perform_address_merkle_tree_rollover(&config.payer_keypair, &config.derivation_pubkey, rpc, &new_nullifier_queue_keypair, &new_merkle_tree_keypair, &tree_accounts.merkle_tree, &tree_accounts.queue, epoch) - .await?; + let rollover_signature = perform_address_merkle_tree_rollover( + &config.payer_keypair, + &config.derivation_pubkey, + rpc, + &new_nullifier_queue_keypair, + &new_merkle_tree_keypair, + &tree_accounts.merkle_tree, + &tree_accounts.queue, + epoch, + ) + .await?; info!("Address rollover signature: {:?}", rollover_signature); @@ -243,21 +280,28 @@ pub async fn rollover_address_merkle_tree + Inde Ok(()) } -pub async fn 
finalize_batch_address_tree_update<
+    R: RpcConnection,
+    I: Indexer<R> + IndexerType<R>,
+>(
+    rpc: &mut R,
+    indexer: Arc<Mutex<I>>,
+    new_merkle_tree_pubkey: Pubkey,
 ) -> Result<(), ForesterError> {
     I::finalize_batch_address_tree_update(
         &mut *rpc,
         &mut *indexer.lock().await,
         new_merkle_tree_pubkey,
-    ).await;
+    )
+    .await;
     Ok(())
 }
 
-pub async fn update_test_indexer_after_nullification<R: RpcConnection, I: Indexer<R> + IndexerType<R>>(
+pub async fn update_test_indexer_after_nullification<
+    R: RpcConnection,
+    I: Indexer<R> + IndexerType<R>,
+>(
     rpc: &mut R,
     indexer: Arc<Mutex<I>>,
     merkle_tree_pubkey: Pubkey,
@@ -268,12 +312,12 @@ pub async fn update_test_indexer_after_nullification<R: RpcConnection, I: Indexer<R> + IndexerType<R>>(
         rpc,
         &mut *indexer.lock().await,
         merkle_tree_pubkey,
-        batch_index
-    ).await;
+        batch_index,
+    )
+    .await;
     Ok(())
 }
 
-pub async fn update_test_indexer_after_append<R: RpcConnection, I: Indexer<R> + IndexerType<R>>(
+pub async fn update_test_indexer_after_append<R: RpcConnection, I: Indexer<R> + IndexerType<R>>(
     rpc: &mut R,
     indexer: Arc<Mutex<I>>,
@@ -286,8 +330,9 @@ pub async fn update_test_indexer_after_append<R: RpcConnection, I: Indexer<R> +
         &mut *indexer.lock().await,
         merkle_tree_pubkey,
         output_queue,
-        num_inserted_zkps
-    ).await;
+        num_inserted_zkps,
+    )
+    .await;
     Ok(())
-}
\ No newline at end of file
+}
diff --git a/forester/src/lib.rs b/forester/src/lib.rs
index e891acf4e..64e9f5fdf 100644
--- a/forester/src/lib.rs
+++ b/forester/src/lib.rs
@@ -6,6 +6,7 @@ pub mod config;
 pub mod epoch_manager;
 pub mod errors;
 pub mod forester_status;
+mod indexer_type;
 pub mod metrics;
 pub mod pagerduty;
 pub mod photon_indexer;
@@ -18,32 +19,29 @@ pub mod telemetry;
 pub mod tree_data_sync;
 pub mod tree_finder;
 pub mod utils;
-mod indexer_type;
 
 use std::{sync::Arc, time::Duration};
 
 use account_compression::utils::constants::{ADDRESS_QUEUE_VALUES, STATE_NULLIFIER_QUEUE_VALUES};
 pub use config::{ForesterConfig, ForesterEpochInfo};
-use forester_utils::{
-    forester_epoch::{TreeAccounts, TreeType},
-};
+use forester_utils::forester_epoch::{TreeAccounts, TreeType};
 use light_client::{
+    indexer::Indexer,
     rpc::{RpcConnection, SolanaRpcConnection},
     rpc_pool::SolanaRpcPool,
 };
 use solana_sdk::commitment_config::CommitmentConfig;
 use tokio::sync::{mpsc, oneshot, Mutex};
 use tracing::debug;
-use light_client::indexer::Indexer;
+
 use crate::{
     epoch_manager::{run_service, WorkReport},
+    indexer_type::IndexerType,
     metrics::QUEUE_LENGTH,
     queue_helpers::fetch_queue_item_data,
     slot_tracker::SlotTracker,
     utils::get_protocol_config,
 };
-use crate::indexer_type::IndexerType;
-
 
 pub async fn run_queue_info(
     config: Arc<ForesterConfig>,
diff --git a/forester/src/photon_indexer.rs b/forester/src/photon_indexer.rs
index cad5e3b16..f404203ed 100644
--- a/forester/src/photon_indexer.rs
+++ b/forester/src/photon_indexer.rs
@@ -2,15 +2,21 @@ use std::fmt::Debug;
 
 use account_compression::initialize_address_merkle_tree::Pubkey;
 use async_trait::async_trait;
-use light_client::rpc::RpcConnection;
+use light_client::{
+    indexer::{
+        AddressMerkleTreeBundle, Indexer, IndexerError, MerkleProof, NewAddressProofWithContext,
+        ProofOfLeaf,
+    },
+    rpc::RpcConnection,
+};
+use light_sdk::proof::ProofRpcResult;
 use photon_api::{
     apis::configuration::{ApiKey, Configuration},
     models::{AddressWithTree, GetCompressedAccountsByOwnerPostRequestParams},
 };
 use solana_sdk::bs58;
 use tracing::debug;
-use light_client::indexer::{AddressMerkleTreeBundle, Indexer, IndexerError, MerkleProof, NewAddressProofWithContext, ProofOfLeaf};
-use light_sdk::proof::ProofRpcResult;
+
 use crate::utils::decode_hash;
 
 pub struct PhotonIndexer<R: RpcConnection> {
@@ -44,7 +50,6 @@ impl<R: RpcConnection> Debug for PhotonIndexer<R> {
 
 #[async_trait]
 impl<R: RpcConnection> Indexer<R> for PhotonIndexer<R> {
-
     async fn get_queue_elements(
         &self,
         _pubkey: [u8; 32],
@@ -55,10 +60,7 @@ impl<R: RpcConnection> Indexer<R> for PhotonIndexer<R> {
         unimplemented!()
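(The `IndexerType` machinery above dispatches through `std::any::Any`: test-only hooks downcast to the concrete `TestIndexer` and fall through to no-ops for the Photon indexer, which is why the corresponding methods in this file are empty or unimplemented. A self-contained sketch of that pattern; the names here, `MockIndexer` and `on_nullify`, are illustrative stand-ins, not code from this patch:)

```rust
use std::any::Any;

struct MockIndexer {
    nullified: u64,
}
struct ProdIndexer;

// Test-only bookkeeping runs iff the concrete type is the test double.
fn on_nullify(indexer: &mut dyn Any) {
    if let Some(mock) = indexer.downcast_mut::<MockIndexer>() {
        mock.nullified += 1; // keep the local mirror in sync
    }
    // Any other indexer (e.g. a production client): nothing to do,
    // on-chain state is the source of truth.
}

fn main() {
    let mut mock = MockIndexer { nullified: 0 };
    on_nullify(&mut mock);
    assert_eq!(mock.nullified, 1);

    let mut prod = ProdIndexer;
    on_nullify(&mut prod); // silently falls through
}
```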
} - fn get_subtrees( - &self, - _merkle_tree_pubkey: [u8; 32], - ) -> Result, IndexerError> { + fn get_subtrees(&self, _merkle_tree_pubkey: [u8; 32]) -> Result, IndexerError> { unimplemented!() } @@ -233,14 +235,16 @@ impl Indexer for PhotonIndexer { fn get_proofs_by_indices( &mut self, _merkle_tree_pubkey: Pubkey, - _indices: &[u64]) -> Vec { + _indices: &[u64], + ) -> Vec { todo!() } fn get_leaf_indices_tx_hashes( &mut self, _merkle_tree_pubkey: Pubkey, - _zkp_batch_size: usize) -> Vec<(u32, [u8; 32], [u8; 32])> { + _zkp_batch_size: usize, + ) -> Vec<(u32, [u8; 32], [u8; 32])> { todo!() } diff --git a/forester/src/rollover/mod.rs b/forester/src/rollover/mod.rs index 3fa5b107a..0c4985de2 100644 --- a/forester/src/rollover/mod.rs +++ b/forester/src/rollover/mod.rs @@ -2,7 +2,7 @@ mod operations; mod state; pub use operations::{ - get_tree_fullness, is_tree_ready_for_rollover, - perform_address_merkle_tree_rollover, perform_state_merkle_tree_rollover_forester, + get_tree_fullness, is_tree_ready_for_rollover, perform_address_merkle_tree_rollover, + perform_state_merkle_tree_rollover_forester, }; pub use state::RolloverState; diff --git a/forester/src/rollover/operations.rs b/forester/src/rollover/operations.rs index d63cd478c..408122444 100644 --- a/forester/src/rollover/operations.rs +++ b/forester/src/rollover/operations.rs @@ -10,7 +10,10 @@ use forester_utils::{ registry::RentExemption, }; use light_batched_merkle_tree::merkle_tree::BatchedMerkleTreeAccount; -use light_client::rpc::{RpcConnection, RpcError}; +use light_client::{ + indexer::{AddressMerkleTreeAccounts, StateMerkleTreeAccounts}, + rpc::{RpcConnection, RpcError}, +}; use light_hasher::Poseidon; use light_registry::{ account_compression_cpi::sdk::{ @@ -24,8 +27,8 @@ use solana_sdk::{ transaction::Transaction, }; use tracing::info; -use light_client::indexer::{AddressMerkleTreeAccounts, StateMerkleTreeAccounts}; -use crate::{errors::ForesterError}; + +use crate::errors::ForesterError; enum TreeAccount { State(StateMerkleTreeAccount), @@ -320,7 +323,6 @@ pub async fn perform_state_merkle_tree_rollover_forester( context.process_transaction(transaction).await } - #[allow(clippy::too_many_arguments)] pub async fn perform_address_merkle_tree_rollover( payer: &Keypair, diff --git a/forester/src/send_transaction.rs b/forester/src/send_transaction.rs index 1c4b9653e..63ac5e437 100644 --- a/forester/src/send_transaction.rs +++ b/forester/src/send_transaction.rs @@ -5,11 +5,10 @@ use account_compression::utils::constants::{ STATE_MERKLE_TREE_CHANGELOG, STATE_NULLIFIER_QUEUE_VALUES, }; use async_trait::async_trait; -use forester_utils::{ - forester_epoch::{TreeAccounts, TreeType}, -}; +use forester_utils::forester_epoch::{TreeAccounts, TreeType}; use futures::future::join_all; use light_client::{ + indexer::Indexer, rpc::{RetryConfig, RpcConnection}, rpc_pool::SolanaRpcPool, }; @@ -31,7 +30,7 @@ use tokio::{ time::{sleep, Instant}, }; use tracing::{debug, warn}; -use light_client::indexer::Indexer; + use crate::{ config::QueueConfig, epoch_manager::{MerkleProofType, WorkItem}, diff --git a/program-tests/account-compression-test/Cargo.toml b/program-tests/account-compression-test/Cargo.toml index a1e948d08..709228460 100644 --- a/program-tests/account-compression-test/Cargo.toml +++ b/program-tests/account-compression-test/Cargo.toml @@ -26,16 +26,11 @@ solana-program-test = { workspace = true} light-test-utils = { workspace = true, features=["devenv"] } light-program-test = { workspace = true, features = ["devenv"] } light-client = { 
workspace = true } -reqwest = "0.11.26" tokio = { workspace = true } light-prover-client = {workspace = true } num-bigint = "0.4.6" -num-traits = "0.2.19" -spl-token = { workspace = true } anchor-spl = { workspace = true } anchor-lang = { workspace = true } -light-compressed-token = { workspace = true } -light-system-program = { workspace = true } account-compression = { workspace = true } light-hasher = {workspace = true} light-hash-set = { workspace = true} @@ -46,11 +41,7 @@ light-bounded-vec = {workspace = true} light-utils = {workspace = true} light-verifier = {workspace = true} rand = "0.8" -solana-cli-output = { workspace = true } -serde_json = "1.0.114" solana-sdk = { workspace = true } -thiserror = "1.0" -memoffset = "0.9.1" serial_test = "3.1.1" light-bloom-filter = { workspace = true } light-batched-merkle-tree = { workspace = true } diff --git a/program-tests/account-compression-test/tests/address_merkle_tree_tests.rs b/program-tests/account-compression-test/tests/address_merkle_tree_tests.rs index 6aabbc415..45cda27e3 100644 --- a/program-tests/account-compression-test/tests/address_merkle_tree_tests.rs +++ b/program-tests/account-compression-test/tests/address_merkle_tree_tests.rs @@ -12,6 +12,7 @@ use anchor_lang::error::ErrorCode; use ark_bn254::Fr; use ark_ff::{BigInteger, PrimeField, UniformRand}; use light_bounded_vec::BoundedVecError; +use light_client::indexer::{AddressMerkleTreeAccounts, AddressMerkleTreeBundle}; use light_concurrent_merkle_tree::errors::ConcurrentMerkleTreeError; use light_hash_set::{HashSet, HashSetError}; use light_hasher::Poseidon; @@ -39,7 +40,6 @@ use solana_sdk::{ signature::{Keypair, Signature, Signer}, transaction::Transaction, }; -use light_client::indexer::{AddressMerkleTreeAccounts, AddressMerkleTreeBundle}; /// Tests insertion of addresses to the queue, dequeuing and Merkle tree update. /// 1. 
create address Merkle tree and queue accounts diff --git a/program-tests/compressed-token-test/tests/test.rs b/program-tests/compressed-token-test/tests/test.rs index ef13572e5..f954719e0 100644 --- a/program-tests/compressed-token-test/tests/test.rs +++ b/program-tests/compressed-token-test/tests/test.rs @@ -9,7 +9,7 @@ use anchor_spl::{ token::{Mint, TokenAccount}, token_2022::{spl_token_2022, spl_token_2022::extension::ExtensionType}, }; -use light_test_utils::conversions::{sdk_to_program_compressed_account, sdk_to_program_compressed_account_with_merkle_context, sdk_to_program_compressed_proof, sdk_to_program_merkle_context, sdk_to_program_token_data}; +use light_client::indexer::Indexer; use light_compressed_token::{ constants::NUM_MAX_POOL_ACCOUNTS, delegation::sdk::{ @@ -27,15 +27,22 @@ use light_compressed_token::{ ErrorCode, }; use light_program_test::{ - test_env::setup_test_programs_with_accounts, test_rpc::ProgramTestRpcConnection, + indexer::{TestIndexer, TestIndexerExtensions}, + test_env::setup_test_programs_with_accounts, + test_rpc::ProgramTestRpcConnection, }; use light_prover_client::gnark::helpers::{kill_prover, spawn_prover, ProofType, ProverConfig}; +use light_sdk::token::{AccountState, TokenDataWithMerkleContext}; use light_system_program::{ invoke::processor::CompressedProof, sdk::compressed_account::{CompressedAccountWithMerkleContext, MerkleContext}, }; use light_test_utils::{ airdrop_lamports, assert_custom_error_or_program_error, assert_rpc_error, + conversions::{ + sdk_to_program_compressed_account, sdk_to_program_compressed_account_with_merkle_context, + sdk_to_program_compressed_proof, sdk_to_program_merkle_context, sdk_to_program_token_data, + }, create_account_instruction, spl::{ approve_test, burn_test, compress_test, compressed_transfer_22_test, @@ -60,9 +67,6 @@ use solana_sdk::{ transaction::{Transaction, TransactionError}, }; use spl_token::{error::TokenError, instruction::initialize_mint}; -use light_client::indexer::Indexer; -use light_program_test::indexer::{TestIndexer, TestIndexerExtensions}; -use light_sdk::token::{AccountState, TokenDataWithMerkleContext}; #[serial] #[tokio::test] @@ -1553,7 +1557,10 @@ async fn test_decompression() { kill_prover(); } -pub async fn mint_tokens_to_all_token_pools + TestIndexerExtensions>( +pub async fn mint_tokens_to_all_token_pools< + R: RpcConnection, + I: Indexer + TestIndexerExtensions, +>( rpc: &mut R, test_indexer: &mut I, merkle_tree_pubkey: &Pubkey, @@ -4686,7 +4693,10 @@ async fn test_failing_decompression() { } #[allow(clippy::too_many_arguments)] -pub async fn failing_compress_decompress + TestIndexerExtensions>( +pub async fn failing_compress_decompress< + R: RpcConnection, + I: Indexer + TestIndexerExtensions, +>( payer: &Keypair, rpc: &mut R, test_indexer: &mut I, @@ -4929,7 +4939,9 @@ async fn test_invalid_inputs() { &merkle_tree_pubkey, &nullifier_queue_pubkey, &recipient_keypair, - &Some(sdk_to_program_compressed_proof(proof_rpc_result.proof.clone())), + &Some(sdk_to_program_compressed_proof( + proof_rpc_result.proof.clone(), + )), &proof_rpc_result.root_indices, &input_compressed_accounts, false, @@ -4954,7 +4966,9 @@ async fn test_invalid_inputs() { &merkle_tree_pubkey, &nullifier_queue_pubkey, &recipient_keypair, - &Some(sdk_to_program_compressed_proof(proof_rpc_result.proof.clone())), + &Some(sdk_to_program_compressed_proof( + proof_rpc_result.proof.clone(), + )), &proof_rpc_result.root_indices, &input_compressed_accounts, false, @@ -5003,7 +5017,9 @@ async fn test_invalid_inputs() { 
&merkle_tree_pubkey, &nullifier_queue_pubkey, &recipient_keypair, - &Some(sdk_to_program_compressed_proof(proof_rpc_result.proof.clone())), + &Some(sdk_to_program_compressed_proof( + proof_rpc_result.proof.clone(), + )), &proof_rpc_result.root_indices, &input_compressed_accounts, false, @@ -5027,7 +5043,9 @@ async fn test_invalid_inputs() { &merkle_tree_pubkey, &nullifier_queue_pubkey, &recipient_keypair, - &Some(sdk_to_program_compressed_proof(proof_rpc_result.proof.clone())), + &Some(sdk_to_program_compressed_proof( + proof_rpc_result.proof.clone(), + )), &proof_rpc_result.root_indices, &input_compressed_accounts, false, diff --git a/program-tests/e2e-test/tests/test.rs b/program-tests/e2e-test/tests/test.rs index 58544f427..f58cd9106 100644 --- a/program-tests/e2e-test/tests/test.rs +++ b/program-tests/e2e-test/tests/test.rs @@ -5,15 +5,13 @@ use light_batched_merkle_tree::{ initialize_state_tree::InitStateTreeAccountsInstructionData, }; use light_program_test::{ + indexer::TestIndexer, test_env::setup_test_programs_with_accounts_with_protocol_config_and_batched_tree_params, test_rpc::ProgramTestRpcConnection, }; -use light_program_test::indexer::TestIndexer; use light_prover_client::gnark::helpers::{ProofType, ProverConfig}; use light_registry::protocol_config::state::ProtocolConfig; -use light_test_utils::{ - e2e_test_env::{E2ETestEnv, GeneralActionConfig, KeypairActionConfig}, -}; +use light_test_utils::e2e_test_env::{E2ETestEnv, GeneralActionConfig, KeypairActionConfig}; #[tokio::test] async fn test_10_all() { diff --git a/program-tests/registry-test/Cargo.toml b/program-tests/registry-test/Cargo.toml index 7d127c879..37f5cb1da 100644 --- a/program-tests/registry-test/Cargo.toml +++ b/program-tests/registry-test/Cargo.toml @@ -19,31 +19,18 @@ default = ["custom-heap"] [dependencies] - [dev-dependencies] light-test-utils = { workspace = true, features=["devenv"] } light-program-test = { workspace = true, features = ["devenv"] } tokio = { workspace = true } light-prover-client = {workspace = true, features = ["devenv"] } -light-sdk = { workspace = true } light-client = { workspace = true } -num-bigint = "0.4.6" -num-traits = "0.2.19" -spl-token = { workspace = true } -anchor-spl = { workspace = true } anchor-lang = { workspace = true } forester-utils = { workspace = true } light-registry = { workspace = true } -light-compressed-token = { workspace = true } -light-system-program = { workspace = true } account-compression = { workspace = true } light-hasher = {workspace = true} -light-concurrent-merkle-tree = {workspace = true} -light-indexed-merkle-tree = {workspace = true} light-utils = {workspace = true} -light-verifier = {workspace = true} -solana-cli-output = { workspace = true } -serde_json = "1.0.133" solana-sdk = { workspace = true } serial_test = { workspace = true } light-batched-merkle-tree = { workspace = true } diff --git a/program-tests/registry-test/tests/tests.rs b/program-tests/registry-test/tests/tests.rs index a66613f4b..22874486c 100644 --- a/program-tests/registry-test/tests/tests.rs +++ b/program-tests/registry-test/tests/tests.rs @@ -1,7 +1,7 @@ #![cfg(feature = "test-sbf")] -use std::collections::HashSet; -use std::hash::Hash; +use std::{collections::HashSet, hash::Hash}; + use account_compression::{ errors::AccountCompressionErrorCode, AddressMerkleTreeConfig, AddressQueueConfig, MigrateLeafParams, NullifierQueueConfig, StateMerkleTreeAccount, StateMerkleTreeConfig, @@ -18,8 +18,10 @@ use light_batched_merkle_tree::{ merkle_tree::{BatchedMerkleTreeAccount, 
BatchedMerkleTreeMetadata, CreateTreeParams}, queue::BatchedQueueAccount, }; +use light_client::indexer::Indexer; use light_hasher::Poseidon; use light_program_test::{ + indexer::{TestIndexer, TestIndexerExtensions}, test_batch_forester::{ assert_perform_state_mt_roll_over, create_append_batch_ix_data, create_batch_address_merkle_tree, @@ -84,8 +86,6 @@ use solana_sdk::{ signature::{read_keypair_file, Keypair, Signature}, signer::Signer, }; -use light_client::indexer::Indexer; -use light_program_test::indexer::{TestIndexer, TestIndexerExtensions}; #[test] fn test_protocol_config_active_phase_continuity() { @@ -1427,7 +1427,8 @@ async fn test_migrate_state() { 26, >(&mut rpc, env_accounts.merkle_tree_pubkey) .await; - let compressed_account = &test_indexer.get_compressed_accounts_with_merkle_context_by_owner(&payer.pubkey())[0]; + let compressed_account = + &test_indexer.get_compressed_accounts_with_merkle_context_by_owner(&payer.pubkey())[0]; let hash = compressed_account.hash().unwrap(); let bundle = &test_indexer .get_state_merkle_trees() @@ -1511,7 +1512,8 @@ async fn test_migrate_state() { 26, >(&mut rpc, env_accounts.merkle_tree_pubkey) .await; - let compressed_account = &test_indexer.get_compressed_accounts_with_merkle_context_by_owner(&payer.pubkey())[1]; + let compressed_account = + &test_indexer.get_compressed_accounts_with_merkle_context_by_owner(&payer.pubkey())[1]; let hash = compressed_account.hash().unwrap(); let bundle = &test_indexer .get_state_merkle_trees() @@ -1957,7 +1959,10 @@ async fn test_batch_address_tree() { .await; } -pub async fn perform_batch_address_merkle_tree_update + TestIndexerExtensions>( +pub async fn perform_batch_address_merkle_tree_update< + R: RpcConnection, + I: Indexer + TestIndexerExtensions, +>( rpc: &mut R, test_indexer: &mut I, forester: &Keypair, diff --git a/program-tests/sdk-test-program/programs/sdk-test/tests/test.rs b/program-tests/sdk-test-program/programs/sdk-test/tests/test.rs index 49759c999..ff3f2c21d 100644 --- a/program-tests/sdk-test-program/programs/sdk-test/tests/test.rs +++ b/program-tests/sdk-test-program/programs/sdk-test/tests/test.rs @@ -6,6 +6,7 @@ use light_client::{ rpc::merkle_tree::MerkleTreeExt, }; use light_program_test::{ + indexer::{TestIndexer, TestIndexerExtensions}, test_env::{setup_test_programs_with_accounts_v2, EnvAccounts}, test_rpc::ProgramTestRpcConnection, }; @@ -26,7 +27,6 @@ use solana_sdk::{ pubkey::Pubkey, signature::{Keypair, Signer}, }; -use light_program_test::indexer::{TestIndexer, TestIndexerExtensions}; #[tokio::test] async fn test_sdk_test() { @@ -47,7 +47,7 @@ async fn test_sdk_test() { }], payer.insecure_clone(), env.group_pda, - None + None, ) .await; @@ -87,7 +87,8 @@ async fn test_sdk_test() { .unwrap(); // Check that it was created correctly. - let compressed_accounts = test_indexer.get_compressed_accounts_with_merkle_context_by_owner(&sdk_test::ID); + let compressed_accounts = + test_indexer.get_compressed_accounts_with_merkle_context_by_owner(&sdk_test::ID); assert_eq!(compressed_accounts.len(), 1); let compressed_account = &compressed_accounts[0]; let record = &compressed_account @@ -127,7 +128,8 @@ async fn test_sdk_test() { .unwrap(); // Check that it was updated correctly. 
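(The test refactors in this patch converge on a single bound pattern: helpers take `I: Indexer<R> + TestIndexerExtensions<R>`, so queries that production code also needs go through `Indexer`, while test-only accessors such as `get_compressed_accounts_with_merkle_context_by_owner` stay on the extension trait. A minimal sketch under that assumption; the helper name is illustrative:)

```rust
use light_client::{indexer::Indexer, rpc::RpcConnection};
use light_program_test::indexer::TestIndexerExtensions;
use solana_sdk::pubkey::Pubkey;

// Sketch: counts compressed accounts for an owner via the
// extension-trait accessor used throughout these tests.
fn count_owned_accounts<R, I>(indexer: &mut I, owner: &Pubkey) -> usize
where
    R: RpcConnection,
    I: Indexer<R> + TestIndexerExtensions<R>,
{
    indexer
        .get_compressed_accounts_with_merkle_context_by_owner(owner)
        .len()
}
```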
- let compressed_accounts = test_indexer.get_compressed_accounts_with_merkle_context_by_owner(&sdk_test::ID); + let compressed_accounts = + test_indexer.get_compressed_accounts_with_merkle_context_by_owner(&sdk_test::ID); assert_eq!(compressed_accounts.len(), 1); let compressed_account = &compressed_accounts[0]; let record = &compressed_account @@ -154,7 +156,7 @@ async fn with_nested_data( ) -> Result<(), RpcError> where R: RpcConnection + MerkleTreeExt, - I: Indexer + TestIndexerExtensions + I: Indexer + TestIndexerExtensions, { let rpc_result = test_indexer .create_proof_for_compressed_accounts( @@ -229,7 +231,7 @@ async fn update_nested_data( ) -> Result<(), RpcError> where R: RpcConnection + MerkleTreeExt, - I: Indexer + TestIndexerExtensions + I: Indexer + TestIndexerExtensions, { let hash = compressed_account.hash().unwrap(); let merkle_tree_pubkey = compressed_account.merkle_context.merkle_tree_pubkey; diff --git a/program-tests/system-cpi-test/tests/test_program_owned_trees.rs b/program-tests/system-cpi-test/tests/test_program_owned_trees.rs index d073c9f57..0687edb00 100644 --- a/program-tests/system-cpi-test/tests/test_program_owned_trees.rs +++ b/program-tests/system-cpi-test/tests/test_program_owned_trees.rs @@ -10,6 +10,7 @@ use anchor_lang::{system_program, InstructionData, ToAccountMetas}; use light_compressed_token::mint_sdk::create_mint_to_instruction; use light_hasher::Poseidon; use light_program_test::{ + indexer::{TestIndexer, TestIndexerExtensions}, test_env::{ initialize_new_group, register_program_with_registry_program, setup_test_programs_with_accounts, NOOP_PROGRAM_ID, @@ -29,8 +30,8 @@ use light_registry::{ }; use light_test_utils::{ airdrop_lamports, assert_custom_error_or_program_error, assert_rpc_error, - create_account_instruction, get_concurrent_merkle_tree, - spl::create_mint_helper, FeeConfig, RpcConnection, RpcError, TransactionParams, + create_account_instruction, get_concurrent_merkle_tree, spl::create_mint_helper, FeeConfig, + RpcConnection, RpcError, TransactionParams, }; use serial_test::serial; use solana_sdk::{ @@ -40,7 +41,6 @@ use solana_sdk::{ signer::Signer, transaction::Transaction, }; -use light_program_test::indexer::{TestIndexer, TestIndexerExtensions}; use system_cpi_test::sdk::{ create_initialize_address_merkle_tree_and_queue_instruction, create_initialize_merkle_tree_instruction, diff --git a/program-tests/system-test/Cargo.toml b/program-tests/system-test/Cargo.toml index 9644d0c3b..8107f1a8b 100644 --- a/program-tests/system-test/Cargo.toml +++ b/program-tests/system-test/Cargo.toml @@ -21,23 +21,14 @@ default = ["custom-heap"] [dev-dependencies] -solana-program-test = { workspace = true } light-program-test = { workspace = true, features=["devenv"] } light-test-utils = { workspace = true, features=["devenv"]} -reqwest = "0.11.26" tokio = { workspace = true } light-prover-client = {workspace = true } -num-bigint = "0.4.6" -num-traits = "0.2.19" -spl-token = { workspace = true } -anchor-spl = { workspace = true } anchor-lang = { workspace = true } -light-compressed-token = { workspace = true } light-system-program = { workspace = true } account-compression = { workspace = true } light-hasher = {workspace = true} -light-concurrent-merkle-tree = {workspace = true} -light-indexed-merkle-tree = {workspace = true} light-utils = {workspace = true} light-client = { workspace = true } light-sdk = { workspace = true } diff --git a/program-tests/system-test/tests/test.rs b/program-tests/system-test/tests/test.rs index c31465af7..e941da0ba 100644 
--- a/program-tests/system-test/tests/test.rs +++ b/program-tests/system-test/tests/test.rs @@ -1,6 +1,5 @@ #![cfg(feature = "test-sbf")] -use std::ops::Index; use account_compression::errors::AccountCompressionErrorCode; use anchor_lang::{error::ErrorCode, AnchorSerialize, InstructionData, ToAccountMetas}; use light_batched_merkle_tree::{ @@ -8,9 +7,11 @@ use light_batched_merkle_tree::{ initialize_address_tree::InitAddressTreeAccountsInstructionData, initialize_state_tree::InitStateTreeAccountsInstructionData, queue::BatchedQueueAccount, }; +use light_client::indexer::Indexer; use light_hasher::Poseidon; use light_merkle_tree_metadata::errors::MerkleTreeMetadataError; use light_program_test::{ + indexer::{TestIndexer, TestIndexerExtensions}, test_batch_forester::perform_batch_append, test_env::{ initialize_accounts, setup_test_programs, setup_test_programs_with_accounts, @@ -20,6 +21,7 @@ use light_program_test::{ }; use light_prover_client::gnark::helpers::{spawn_prover, ProofType, ProverConfig, ProverMode}; use light_registry::protocol_config::state::ProtocolConfig; +use light_sdk::merkle_context::QueueIndex as SdkQueueIndex; use light_system_program::{ errors::SystemProgramError, invoke::processor::CompressedProof, @@ -27,7 +29,7 @@ use light_system_program::{ address::{derive_address, derive_address_legacy}, compressed_account::{ CompressedAccount, CompressedAccountData, CompressedAccountWithMerkleContext, - MerkleContext, + MerkleContext, QueueIndex, }, invoke::{ create_invoke_instruction, create_invoke_instruction_data_and_remaining_accounts, @@ -40,6 +42,10 @@ use light_test_utils::{ airdrop_lamports, assert_compressed_tx::assert_created_compressed_accounts, assert_custom_error_or_program_error, assert_rpc_error, + conversions::{ + sdk_to_program_compressed_account, sdk_to_program_compressed_account_with_merkle_context, + sdk_to_program_compressed_proof, sdk_to_program_merkle_context, + }, system_program::{ compress_sol_test, create_addresses_test, decompress_sol_test, transfer_compressed_sol_test, }, @@ -58,11 +64,6 @@ use solana_sdk::{ transaction::{Transaction, TransactionError}, }; use tokio::fs::write as async_write; -use light_client::indexer::Indexer; -use light_program_test::indexer::{TestIndexer, TestIndexerExtensions}; -use light_sdk::merkle_context::QueueIndex as SdkQueueIndex; -use light_system_program::sdk::compressed_account::QueueIndex; -use light_test_utils::conversions::{sdk_to_program_compressed_account, sdk_to_program_compressed_account_with_merkle_context, sdk_to_program_compressed_proof, sdk_to_program_merkle_context}; // TODO: use lazy_static to spawn the server once /// invoke_failing_test @@ -180,7 +181,10 @@ async fn invoke_failing_test() { } #[allow(clippy::too_many_arguments)] -pub async fn failing_transaction_inputs + TestIndexerExtensions>( +pub async fn failing_transaction_inputs< + R: RpcConnection, + I: Indexer + TestIndexerExtensions, +>( context: &mut R, test_indexer: &mut I, payer: &Keypair, @@ -208,8 +212,9 @@ pub async fn failing_transaction_inputs + TestIn } let (mut new_address_params, derived_addresses) = create_address_test_inputs(env, num_addresses); - let input_compressed_accounts = - test_indexer.get_compressed_accounts_with_merkle_context_by_owner(&payer.pubkey())[0..num_inputs].to_vec(); + let input_compressed_accounts = test_indexer + .get_compressed_accounts_with_merkle_context_by_owner(&payer.pubkey())[0..num_inputs] + .to_vec(); let hashes = input_compressed_accounts .iter() .map(|x| x.hash().unwrap()) @@ -245,7 +250,10 @@ pub 
async fn failing_transaction_inputs + TestIn for (i, root_index) in proof_rpc_res.address_root_indices.iter().enumerate() { new_address_params[i].address_merkle_tree_root_index = *root_index; } - (proof_rpc_res.root_indices, Some(sdk_to_program_compressed_proof(proof_rpc_res.proof))) + ( + proof_rpc_res.root_indices, + Some(sdk_to_program_compressed_proof(proof_rpc_res.proof)), + ) } else { (Vec::new(), None) }; @@ -1431,7 +1439,8 @@ async fn test_with_address() { ]; for (n_input_compressed_accounts, n_new_addresses) in test_inputs { let compressed_input_accounts = test_indexer - .get_compressed_accounts_with_merkle_context_by_owner(&payer_pubkey)[0..n_input_compressed_accounts] + .get_compressed_accounts_with_merkle_context_by_owner(&payer_pubkey) + [0..n_input_compressed_accounts] .to_vec(); let compressed_input_accounts = compressed_input_accounts .into_iter() @@ -1631,7 +1640,6 @@ async fn test_with_compression() { let compressed_account_with_context = sdk_to_program_compressed_account_with_merkle_context(compressed_account_with_context); - decompress_sol_test( &mut context, &mut test_indexer, @@ -2012,7 +2020,7 @@ async fn batch_invoke_test() { ) .unwrap(); } - // 6. Should fail: invalid leaf index + // 6. Should fail: invalid leaf index { let input_compressed_account = test_indexer .get_compressed_accounts_with_merkle_context_by_owner(&payer_pubkey) @@ -2030,7 +2038,9 @@ async fn batch_invoke_test() { let instruction = create_invoke_instruction( &payer_pubkey, &payer_pubkey, - &[sdk_to_program_compressed_account(input_compressed_account.compressed_account)], + &[sdk_to_program_compressed_account( + input_compressed_account.compressed_account, + )], &output_compressed_accounts, &[MerkleContext { merkle_tree_pubkey, @@ -2118,7 +2128,6 @@ async fn batch_invoke_test() { ) .await; - let mut proof = None; if let Some(proof_rpc) = proof_rpc_result.proof { proof = Some(sdk_to_program_compressed_proof(proof_rpc)); @@ -2128,17 +2137,17 @@ async fn batch_invoke_test() { compressed_account_with_context_1.compressed_account, compressed_account_with_context_2.compressed_account, ] - .iter() - .map(|x| sdk_to_program_compressed_account(x.clone())) - .collect::>(); + .iter() + .map(|x| sdk_to_program_compressed_account(x.clone())) + .collect::>(); let merkle_context = vec![ compressed_account_with_context_1.merkle_context, compressed_account_with_context_2.merkle_context, ] - .iter() - .map(|x| sdk_to_program_merkle_context(x.clone())) - .collect::>(); + .iter() + .map(|x| sdk_to_program_merkle_context(x.clone())) + .collect::>(); let output_compressed_accounts = vec![ CompressedAccount { lamports: 0, @@ -2213,7 +2222,9 @@ async fn batch_invoke_test() { &mut test_indexer, &payer, TestMode::ByZkpThenIndex, - sdk_to_program_compressed_account_with_merkle_context(compressed_account_with_context_1.clone()), + sdk_to_program_compressed_account_with_merkle_context( + compressed_account_with_context_1.clone(), + ), ) .await; assert_rpc_error( @@ -2244,7 +2255,9 @@ async fn batch_invoke_test() { &mut test_indexer, &payer, TestMode::ByIndexThenZkp, - sdk_to_program_compressed_account_with_merkle_context(compressed_account_with_context_1.clone()), + sdk_to_program_compressed_account_with_merkle_context( + compressed_account_with_context_1.clone(), + ), ) .await; assert_rpc_error( @@ -2274,7 +2287,9 @@ async fn batch_invoke_test() { &mut test_indexer, &payer, TestMode::ByIndexThenIndex, - sdk_to_program_compressed_account_with_merkle_context(compressed_account_with_context_1.clone()), + 
sdk_to_program_compressed_account_with_merkle_context( + compressed_account_with_context_1.clone(), + ), ) .await; assert_rpc_error( @@ -2304,7 +2319,9 @@ async fn batch_invoke_test() { &mut test_indexer, &payer, TestMode::ByZkpThenZkp, - sdk_to_program_compressed_account_with_merkle_context(compressed_account_with_context_1.clone()), + sdk_to_program_compressed_account_with_merkle_context( + compressed_account_with_context_1.clone(), + ), ) .await; assert_rpc_error( @@ -2361,7 +2378,8 @@ async fn batch_invoke_test() { &mut context, ) .await; - let mut merkle_context = sdk_to_program_merkle_context(compressed_account_with_context_1.merkle_context); + let mut merkle_context = + sdk_to_program_merkle_context(compressed_account_with_context_1.merkle_context); merkle_context.queue_index = Some(QueueIndex::default()); let mut proof = None; if let Some(proof_rpc) = proof_rpc_result.proof { @@ -2371,7 +2389,9 @@ async fn batch_invoke_test() { let instruction = create_invoke_instruction( &payer_pubkey, &payer_pubkey, - &[sdk_to_program_compressed_account(compressed_account_with_context_1.compressed_account)], + &[sdk_to_program_compressed_account( + compressed_account_with_context_1.compressed_account, + )], &output_compressed_accounts, &[merkle_context], &[merkle_context.nullifier_queue_pubkey], @@ -2443,7 +2463,10 @@ pub enum TestMode { ByZkpThenZkp, } -pub async fn double_spend_compressed_account + TestIndexerExtensions>( +pub async fn double_spend_compressed_account< + R: RpcConnection, + I: Indexer + TestIndexerExtensions, +>( context: &mut R, test_indexer: &mut I, payer: &Keypair, diff --git a/program-tests/utils/src/assert_compressed_tx.rs b/program-tests/utils/src/assert_compressed_tx.rs index 1ad68f6d9..467c2fcb0 100644 --- a/program-tests/utils/src/assert_compressed_tx.rs +++ b/program-tests/utils/src/assert_compressed_tx.rs @@ -1,15 +1,16 @@ use account_compression::{state::QueueAccount, StateMerkleTreeAccount}; use anchor_lang::Discriminator; -use forester_utils::{ - get_concurrent_merkle_tree, get_hash_set, - AccountZeroCopy, -}; +use forester_utils::{get_concurrent_merkle_tree, get_hash_set, AccountZeroCopy}; use light_batched_merkle_tree::{ merkle_tree::{BatchedMerkleTreeAccount, BatchedMerkleTreeMetadata}, queue::BatchedQueueMetadata, }; -use light_client::rpc::RpcConnection; +use light_client::{ + indexer::{Indexer, StateMerkleTreeAccounts}, + rpc::RpcConnection, +}; use light_hasher::{Discriminator as LightDiscriminator, Poseidon}; +use light_program_test::indexer::TestIndexerExtensions; use light_system_program::sdk::{ compressed_account::{CompressedAccount, CompressedAccountWithMerkleContext}, event::{MerkleTreeSequenceNumber, PublicTransactionEvent}, @@ -18,10 +19,12 @@ use light_system_program::sdk::{ use num_bigint::BigUint; use num_traits::FromBytes; use solana_sdk::{account::ReadableAccount, pubkey::Pubkey}; -use light_client::indexer::{Indexer, StateMerkleTreeAccounts}; -use light_program_test::indexer::TestIndexerExtensions; -pub struct AssertCompressedTransactionInputs<'a, R: RpcConnection, I: Indexer + TestIndexerExtensions> { +pub struct AssertCompressedTransactionInputs< + 'a, + R: RpcConnection, + I: Indexer + TestIndexerExtensions, +> { pub rpc: &'a mut R, pub test_indexer: &'a mut I, pub output_compressed_accounts: &'a [CompressedAccount], @@ -49,7 +52,10 @@ pub struct AssertCompressedTransactionInputs<'a, R: RpcConnection, I: Indexer /// 5. Merkle tree was updated correctly /// 6. TODO: Fees have been paid (after fee refactor) /// 7. 
Check compression amount was transferred -pub async fn assert_compressed_transaction + TestIndexerExtensions>( +pub async fn assert_compressed_transaction< + R: RpcConnection, + I: Indexer + TestIndexerExtensions, +>( input: AssertCompressedTransactionInputs<'_, R, I>, ) { // CHECK 1 @@ -317,7 +323,10 @@ pub struct MerkleTreeTestSnapShot { /// Asserts: /// 1. The root has been updated /// 2. The next index has been updated -pub async fn assert_merkle_tree_after_tx + TestIndexerExtensions>( +pub async fn assert_merkle_tree_after_tx< + R: RpcConnection, + I: Indexer + TestIndexerExtensions, +>( rpc: &mut R, snapshots: &[MerkleTreeTestSnapShot], test_indexer: &mut I, diff --git a/program-tests/utils/src/assert_token_tx.rs b/program-tests/utils/src/assert_token_tx.rs index 77cfb735a..69c6596ff 100644 --- a/program-tests/utils/src/assert_token_tx.rs +++ b/program-tests/utils/src/assert_token_tx.rs @@ -1,13 +1,13 @@ use anchor_lang::AnchorSerialize; -use light_client::rpc::RpcConnection; +use light_client::{indexer::Indexer, rpc::RpcConnection}; use light_compressed_token::process_transfer::{get_cpi_authority_pda, TokenTransferOutputData}; +use light_program_test::indexer::TestIndexerExtensions; +use light_sdk::token::TokenDataWithMerkleContext; use light_system_program::sdk::{ compressed_account::CompressedAccountWithMerkleContext, event::PublicTransactionEvent, }; use solana_sdk::{program_pack::Pack, pubkey::Pubkey}; -use light_client::indexer::Indexer; -use light_program_test::indexer::TestIndexerExtensions; -use light_sdk::token::TokenDataWithMerkleContext; + use crate::assert_compressed_tx::{ assert_merkle_tree_after_tx, assert_nullifiers_exist_in_hash_sets, assert_public_transaction_event, MerkleTreeTestSnapShot, @@ -79,7 +79,10 @@ pub async fn assert_transfer + TestIndexerExtens ); } -pub fn assert_compressed_token_accounts + TestIndexerExtensions>( +pub fn assert_compressed_token_accounts< + R: RpcConnection, + I: Indexer + TestIndexerExtensions, +>( test_indexer: &mut I, out_compressed_accounts: &[TokenTransferOutputData], lamports: Option>>, diff --git a/program-tests/utils/src/conversions.rs b/program-tests/utils/src/conversions.rs index c094e4e74..97ee92a77 100644 --- a/program-tests/utils/src/conversions.rs +++ b/program-tests/utils/src/conversions.rs @@ -1,13 +1,25 @@ -use light_sdk::{self as sdk, proof::CompressedProof}; -use light_system_program::invoke::processor::CompressedProof as ProgramCompressedProof; -use light_system_program::invoke::OutputCompressedAccountWithPackedContext as ProgramOutputCompressedAccountWithPackedContext; -use light_system_program::sdk::compressed_account::{CompressedAccount as ProgramCompressedAccount, CompressedAccountData as ProgramCompressedAccountData, CompressedAccountWithMerkleContext as ProgramCompressedAccountWithMerkleContext, MerkleContext as ProgramMerkleContext, QueueIndex as ProgramQueueIndex, QueueIndex}; - use light_compressed_token::{ token_data::AccountState as ProgramAccountState, TokenData as ProgramTokenData, }; -use light_system_program::sdk::event::MerkleTreeSequenceNumber as ProgramMerkleTreeSequenceNumber; -use light_system_program::sdk::event::PublicTransactionEvent as ProgramPublicTransactionEvent; +use light_sdk::{self as sdk, proof::CompressedProof}; +use light_system_program::{ + invoke::{ + processor::CompressedProof as ProgramCompressedProof, + OutputCompressedAccountWithPackedContext as ProgramOutputCompressedAccountWithPackedContext, + }, + sdk::{ + compressed_account::{ + CompressedAccount as ProgramCompressedAccount, 
+ CompressedAccountData as ProgramCompressedAccountData, + CompressedAccountWithMerkleContext as ProgramCompressedAccountWithMerkleContext, + MerkleContext as ProgramMerkleContext, QueueIndex as ProgramQueueIndex, + }, + event::{ + MerkleTreeSequenceNumber as ProgramMerkleTreeSequenceNumber, + PublicTransactionEvent as ProgramPublicTransactionEvent, + }, + }, +}; pub fn sdk_to_program_queue_index( sdk_queue_index: sdk::merkle_context::QueueIndex, @@ -232,4 +244,4 @@ pub fn program_to_sdk_public_transaction_event( pubkey_array: event.pubkey_array, message: event.message, } -} \ No newline at end of file +} diff --git a/program-tests/utils/src/create_address_test_program_sdk.rs b/program-tests/utils/src/create_address_test_program_sdk.rs index 3a1ecb771..e69159e65 100644 --- a/program-tests/utils/src/create_address_test_program_sdk.rs +++ b/program-tests/utils/src/create_address_test_program_sdk.rs @@ -2,17 +2,19 @@ use std::collections::HashMap; use account_compression::utils::constants::CPI_AUTHORITY_PDA_SEED; use anchor_lang::{InstructionData, ToAccountMetas}; -use light_client::rpc::{RpcConnection, RpcError}; +use light_client::{ + indexer::Indexer, + rpc::{RpcConnection, RpcError}, +}; use light_compressed_token::process_transfer::transfer_sdk::to_account_metas; -use light_program_test::test_env::EnvAccounts; +use light_program_test::{indexer::TestIndexerExtensions, test_env::EnvAccounts}; use light_system_program::{ invoke::processor::CompressedProof, sdk::address::{derive_address, pack_new_address_params}, NewAddressParams, }; use solana_sdk::{instruction::Instruction, pubkey::Pubkey, signature::Keypair, signer::Signer}; -use light_client::indexer::Indexer; -use light_program_test::indexer::{TestIndexerExtensions}; + use crate::conversions::sdk_to_program_compressed_proof; #[derive(Debug, Clone)] @@ -69,7 +71,10 @@ pub fn create_pda_instruction(input_params: CreateCompressedPdaInstructionInputs } } -pub async fn perform_create_pda_with_event_rnd + TestIndexerExtensions>( +pub async fn perform_create_pda_with_event_rnd< + R: RpcConnection, + I: Indexer + TestIndexerExtensions, +>( test_indexer: &mut I, rpc: &mut R, env: &EnvAccounts, @@ -79,7 +84,10 @@ pub async fn perform_create_pda_with_event_rnd + let data = rand::random(); perform_create_pda_with_event(test_indexer, rpc, env, payer, seed, &data).await } -pub async fn perform_create_pda_with_event + TestIndexerExtensions>( +pub async fn perform_create_pda_with_event< + R: RpcConnection, + I: Indexer + TestIndexerExtensions, +>( test_indexer: &mut I, rpc: &mut R, env: &EnvAccounts, diff --git a/program-tests/utils/src/e2e_test_env.rs b/program-tests/utils/src/e2e_test_env.rs index a745d0951..a0564f6e6 100644 --- a/program-tests/utils/src/e2e_test_env.rs +++ b/program-tests/utils/src/e2e_test_env.rs @@ -80,7 +80,11 @@ use light_batched_merkle_tree::{ queue::BatchedQueueAccount, }; use light_client::{ - rpc::{errors::RpcError, RpcConnection}, + indexer::{ + AddressMerkleTreeAccounts, AddressMerkleTreeBundle, Indexer, StateMerkleTreeAccounts, + StateMerkleTreeBundle, + }, + rpc::{errors::RpcError, merkle_tree::MerkleTreeExt, RpcConnection}, transaction_params::{FeeConfig, TransactionParams}, }; // TODO: implement traits for context object and indexer that we can implement with an rpc as well @@ -92,6 +96,7 @@ use light_indexed_merkle_tree::{ array::IndexedArray, reference::IndexedMerkleTree, HIGHEST_ADDRESS_PLUS_ONE, }; use light_program_test::{ + indexer::{TestIndexer, TestIndexerExtensions}, test_batch_forester::{perform_batch_append, 
perform_batch_nullify}, test_env::{create_state_merkle_tree_and_queue_account, EnvAccounts}, test_rpc::ProgramTestRpcConnection, @@ -103,6 +108,7 @@ use light_registry::{ utils::get_protocol_config_pda_address, ForesterConfig, }; +use light_sdk::token::{AccountState, TokenDataWithMerkleContext}; use light_system_program::sdk::compressed_account::CompressedAccountWithMerkleContext; use light_utils::{bigint::bigint_to_be_bytes_array, rand::gen_prime}; use log::info; @@ -120,10 +126,7 @@ use solana_sdk::{ signer::{SeedDerivable, Signer}, }; use spl_token::solana_program::native_token::LAMPORTS_PER_SOL; -use light_client::indexer::{AddressMerkleTreeAccounts, AddressMerkleTreeBundle, Indexer, StateMerkleTreeAccounts, StateMerkleTreeBundle}; -use light_client::rpc::merkle_tree::MerkleTreeExt; -use light_program_test::indexer::{TestIndexer, TestIndexerExtensions}; -use light_sdk::token::{AccountState, TokenDataWithMerkleContext}; + use crate::{ address_tree_rollover::{ assert_rolled_over_address_merkle_tree_and_queue, @@ -133,6 +136,7 @@ use crate::{ assert_epoch::{ assert_finalized_epoch_registration, assert_report_work, fetch_epoch_and_forester_pdas, }, + conversions::sdk_to_program_compressed_account_with_merkle_context, create_address_merkle_tree_and_queue_account_with_assert, spl::{ approve_test, burn_test, compress_test, compressed_transfer_test, create_mint_helper, @@ -145,7 +149,6 @@ use crate::{ }, test_forester::{empty_address_queue_test, nullify_compressed_accounts}, }; -use crate::conversions::sdk_to_program_compressed_account_with_merkle_context; pub struct User { pub keypair: Keypair, @@ -2200,7 +2203,9 @@ where ) -> Vec { let input_compressed_accounts = self .indexer - .get_compressed_accounts_with_merkle_context_by_owner(&self.users[user_index].keypair.pubkey()) + .get_compressed_accounts_with_merkle_context_by_owner( + &self.users[user_index].keypair.pubkey(), + ) .into_iter() .map(sdk_to_program_compressed_account_with_merkle_context) .collect::>(); @@ -2240,7 +2245,8 @@ where &self, pubkey: &Pubkey, ) -> Vec { - self.indexer.get_compressed_accounts_with_merkle_context_by_owner(pubkey) + self.indexer + .get_compressed_accounts_with_merkle_context_by_owner(pubkey) .into_iter() .map(sdk_to_program_compressed_account_with_merkle_context) .collect() diff --git a/program-tests/utils/src/lib.rs b/program-tests/utils/src/lib.rs index 078b9ac54..efe4f6028 100644 --- a/program-tests/utils/src/lib.rs +++ b/program-tests/utils/src/lib.rs @@ -20,6 +20,7 @@ pub mod assert_merkle_tree; pub mod assert_queue; pub mod assert_rollover; pub mod assert_token_tx; +pub mod conversions; pub mod create_address_test_program_sdk; pub mod e2e_test_env; #[allow(unused)] @@ -29,7 +30,6 @@ pub mod state_tree_rollover; pub mod system_program; #[allow(unused)] pub mod test_forester; -pub mod conversions; pub use create_address_test_program::ID as CREATE_ADDRESS_TEST_PROGRAM_ID; pub use forester_utils::{ diff --git a/program-tests/utils/src/spl.rs b/program-tests/utils/src/spl.rs index 6fbd87198..7b8e73bb4 100644 --- a/program-tests/utils/src/spl.rs +++ b/program-tests/utils/src/spl.rs @@ -1,8 +1,7 @@ use anchor_spl::token::{Mint, TokenAccount}; -use forester_utils::{ - create_account_instruction, -}; +use forester_utils::create_account_instruction; use light_client::{ + indexer::Indexer, rpc::{errors::RpcError, RpcConnection}, transaction_params::TransactionParams, }; @@ -25,6 +24,8 @@ use light_compressed_token::{ TokenData, }; use light_hasher::Poseidon; +use 
light_program_test::indexer::TestIndexerExtensions; +use light_sdk::token::TokenDataWithMerkleContext; use light_system_program::{ invoke::processor::CompressedProof, sdk::{compressed_account::MerkleContext, event::PublicTransactionEvent}, @@ -37,14 +38,16 @@ use solana_sdk::{ signature::{Keypair, Signature, Signer}, }; use spl_token::instruction::initialize_mint; -use light_client::indexer::Indexer; -use light_program_test::indexer::TestIndexerExtensions; -use light_sdk::token::TokenDataWithMerkleContext; + use crate::{ assert_compressed_tx::get_merkle_tree_snapshots, assert_token_tx::{assert_create_mint, assert_mint_to, assert_transfer}, + conversions::{ + program_to_sdk_public_transaction_event, program_to_sdk_token_data, + sdk_to_program_compressed_account, sdk_to_program_compressed_account_with_merkle_context, + sdk_to_program_compressed_proof, sdk_to_program_merkle_context, sdk_to_program_token_data, + }, }; -use crate::conversions::{program_to_sdk_public_transaction_event, program_to_sdk_token_data, sdk_to_program_compressed_account, sdk_to_program_compressed_account_with_merkle_context, sdk_to_program_compressed_proof, sdk_to_program_merkle_context, sdk_to_program_token_data}; pub async fn mint_tokens_helper + TestIndexerExtensions>( rpc: &mut R, @@ -107,7 +110,10 @@ pub async fn mint_spl_tokens( } #[allow(clippy::too_many_arguments)] -pub async fn mint_tokens_helper_with_lamports + TestIndexerExtensions>( +pub async fn mint_tokens_helper_with_lamports< + R: RpcConnection, + I: Indexer + TestIndexerExtensions, +>( rpc: &mut R, test_indexer: &mut I, merkle_tree_pubkey: &Pubkey, @@ -131,7 +137,10 @@ pub async fn mint_tokens_helper_with_lamports + .await; } #[allow(clippy::too_many_arguments)] -pub async fn mint_tokens_22_helper_with_lamports + TestIndexerExtensions>( +pub async fn mint_tokens_22_helper_with_lamports< + R: RpcConnection, + I: Indexer + TestIndexerExtensions, +>( rpc: &mut R, test_indexer: &mut I, merkle_tree_pubkey: &Pubkey, @@ -158,7 +167,10 @@ pub async fn mint_tokens_22_helper_with_lamports } #[allow(clippy::too_many_arguments)] -pub async fn mint_tokens_22_helper_with_lamports_and_bump + TestIndexerExtensions>( +pub async fn mint_tokens_22_helper_with_lamports_and_bump< + R: RpcConnection, + I: Indexer + TestIndexerExtensions, +>( rpc: &mut R, test_indexer: &mut I, merkle_tree_pubkey: &Pubkey, @@ -208,7 +220,10 @@ pub async fn mint_tokens_22_helper_with_lamports_and_bump( } #[allow(clippy::too_many_arguments)] -pub async fn compressed_transfer_test + TestIndexerExtensions>( +pub async fn compressed_transfer_test< + R: RpcConnection, + I: Indexer + TestIndexerExtensions, +>( payer: &Keypair, rpc: &mut R, test_indexer: &mut I, @@ -512,7 +530,10 @@ pub async fn compressed_transfer_test + TestInde } #[allow(clippy::too_many_arguments)] -pub async fn compressed_transfer_22_test + TestIndexerExtensions>( +pub async fn compressed_transfer_22_test< + R: RpcConnection, + I: Indexer + TestIndexerExtensions, +>( payer: &Keypair, rpc: &mut R, test_indexer: &mut I, @@ -624,7 +645,9 @@ pub async fn compressed_transfer_22_test + TestI &input_merkle_tree_context, &output_compressed_accounts, &rpc_result.root_indices, - &Some(sdk_to_program_compressed_proof(rpc_result.proof.unwrap_or_default())), + &Some(sdk_to_program_compressed_proof( + rpc_result.proof.unwrap_or_default(), + )), &input_compressed_account_token_data .into_iter() .map(sdk_to_program_token_data) @@ -696,8 +719,11 @@ pub async fn compressed_transfer_22_test + TestI .unwrap() .unwrap(); let slot = 
rpc.get_slot().await.unwrap(); - let (created_change_output_account, created_token_output_accounts) = - test_indexer.add_event_and_compressed_accounts(slot, &program_to_sdk_public_transaction_event(event.clone())); + let (created_change_output_account, created_token_output_accounts) = test_indexer + .add_event_and_compressed_accounts( + slot, + &program_to_sdk_public_transaction_event(event.clone()), + ); let delegates = if let Some(index) = delegate_change_account_index { let mut delegates = vec![None; created_token_output_accounts.len()]; delegates[index as usize] = Some(payer.pubkey()); @@ -788,7 +814,9 @@ pub async fn decompress_test + TestIndexerExtens .collect::>(), // input_compressed_account_merkle_tree_pubkeys &[change_out_compressed_account], // output_compressed_accounts &proof_rpc_result.root_indices, // root_indices - &Some(sdk_to_program_compressed_proof(proof_rpc_result.proof.unwrap_or_default())), + &Some(sdk_to_program_compressed_proof( + proof_rpc_result.proof.unwrap_or_default(), + )), input_compressed_accounts .iter() .map(|x| sdk_to_program_token_data(x.token_data.clone())) @@ -868,7 +896,10 @@ pub async fn decompress_test + TestIndexerExtens .unwrap() .unwrap(); let slot = rpc.get_slot().await.unwrap(); - let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(slot, &program_to_sdk_public_transaction_event(event.clone())); + let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts( + slot, + &program_to_sdk_public_transaction_event(event.clone()), + ); assert_transfer( rpc, test_indexer, @@ -936,7 +967,10 @@ pub async fn decompress_test + TestIndexerExtens } #[allow(clippy::too_many_arguments)] -pub async fn perform_compress_spl_token_account + TestIndexerExtensions>( +pub async fn perform_compress_spl_token_account< + R: RpcConnection, + I: Indexer + TestIndexerExtensions, +>( rpc: &mut R, test_indexer: &mut I, payer: &Keypair, @@ -976,7 +1010,10 @@ pub async fn perform_compress_spl_token_account .unwrap(); // TODO: replace with get_transaction_slot() this only works with Program test let slot = rpc.get_slot().await.unwrap(); - test_indexer.add_event_and_compressed_accounts(slot, &program_to_sdk_public_transaction_event(event.clone())); + test_indexer.add_event_and_compressed_accounts( + slot, + &program_to_sdk_public_transaction_event(event.clone()), + ); let created_compressed_token_account = test_indexer.get_compressed_token_accounts_by_owner(&token_owner.pubkey())[0].clone(); let expected_token_data = TokenData { @@ -1078,7 +1115,10 @@ pub async fn compress_test + TestIndexerExtensio .unwrap() .unwrap(); let slot = rpc.get_slot().await.unwrap(); - let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(slot, &program_to_sdk_public_transaction_event(event.clone())); + let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts( + slot, + &program_to_sdk_public_transaction_event(event.clone()), + ); assert_transfer( rpc, @@ -1230,7 +1270,10 @@ pub async fn approve_test + TestIndexerExtension .unwrap() .unwrap(); let slot = rpc.get_slot().await.unwrap(); - let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(slot, &program_to_sdk_public_transaction_event(event.clone())); + let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts( + slot, + &program_to_sdk_public_transaction_event(event.clone()), + ); let expected_delegated_token_data = TokenData { mint, @@ -1360,7 +1403,10 @@ pub async fn revoke_test + 
TestIndexerExtensions .unwrap() .unwrap(); let slot = rpc.get_slot().await.unwrap(); - let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(slot, &program_to_sdk_public_transaction_event(event.clone())); + let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts( + slot, + &program_to_sdk_public_transaction_event(event.clone()), + ); let input_amount = input_compressed_accounts .iter() .map(|x| x.token_data.amount) @@ -1373,7 +1419,10 @@ pub async fn revoke_test + TestIndexerExtensions state: AccountState::Initialized, tlv: None, }; - assert_eq!(expected_token_data, sdk_to_program_token_data(created_output_accounts[0].token_data.clone())); + assert_eq!( + expected_token_data, + sdk_to_program_token_data(created_output_accounts[0].token_data.clone()) + ); let expected_compressed_output_accounts = create_expected_token_output_data(vec![expected_token_data], &output_merkle_tree_pubkeys); let sum_inputs = input_compressed_accounts @@ -1443,7 +1492,11 @@ pub async fn thaw_test + TestIndexerExtensions + TestIndexerExtensions>( +pub async fn freeze_or_thaw_test< + R: RpcConnection, + const FREEZE: bool, + I: Indexer + TestIndexerExtensions, +>( authority: &Keypair, rpc: &mut R, test_indexer: &mut I, @@ -1515,7 +1568,10 @@ pub async fn freeze_or_thaw_test + TestIndexerExtensions + TestIndexerExtensions>( +pub async fn create_burn_test_instruction< + R: RpcConnection, + I: Indexer + TestIndexerExtensions, +>( authority: &Keypair, rpc: &mut R, test_indexer: &mut I, diff --git a/program-tests/utils/src/system_program.rs b/program-tests/utils/src/system_program.rs index 3e4c6e4c9..3bc763af9 100644 --- a/program-tests/utils/src/system_program.rs +++ b/program-tests/utils/src/system_program.rs @@ -1,8 +1,10 @@ use light_client::{ + indexer::Indexer, rpc::{errors::RpcError, RpcConnection}, transaction_params::TransactionParams, }; use light_hasher::Poseidon; +use light_program_test::indexer::TestIndexerExtensions; use light_system_program::{ sdk::{ address::derive_address_legacy, @@ -18,12 +20,16 @@ use solana_sdk::{ pubkey::Pubkey, signature::{Keypair, Signature, Signer}, }; -use light_client::indexer::Indexer; -use light_program_test::indexer::TestIndexerExtensions; -use crate::assert_compressed_tx::{ - assert_compressed_transaction, get_merkle_tree_snapshots, AssertCompressedTransactionInputs, + +use crate::{ + assert_compressed_tx::{ + assert_compressed_transaction, get_merkle_tree_snapshots, AssertCompressedTransactionInputs, + }, + conversions::{ + program_to_sdk_public_transaction_event, + sdk_to_program_compressed_account_with_merkle_context, sdk_to_program_compressed_proof, + }, }; -use crate::conversions::{program_to_sdk_public_transaction_event, sdk_to_program_compressed_account_with_merkle_context, sdk_to_program_compressed_proof}; #[allow(clippy::too_many_arguments)] pub async fn create_addresses_test + TestIndexerExtensions>( @@ -208,7 +214,10 @@ pub async fn decompress_sol_test + TestIndexerEx } #[allow(clippy::too_many_arguments)] -pub async fn transfer_compressed_sol_test + TestIndexerExtensions>( +pub async fn transfer_compressed_sol_test< + R: RpcConnection, + I: Indexer + TestIndexerExtensions, +>( rpc: &mut R, test_indexer: &mut I, authority: &Keypair, @@ -293,7 +302,10 @@ pub struct CompressedTransactionTestInputs<'a, R: RpcConnection, I: Indexer> } #[allow(clippy::too_many_arguments)] -pub async fn compressed_transaction_test + TestIndexerExtensions>( +pub async fn compressed_transaction_test< + R: RpcConnection, + I: Indexer + 
TestIndexerExtensions, +>( inputs: CompressedTransactionTestInputs<'_, R, I>, ) -> Result { let mut compressed_account_hashes = Vec::new(); @@ -352,7 +364,9 @@ pub async fn compressed_transaction_test + TestI .await; root_indices = proof_rpc_res.root_indices; - proof = Some(sdk_to_program_compressed_proof(proof_rpc_res.proof.unwrap_or_default())); + proof = Some(sdk_to_program_compressed_proof( + proof_rpc_res.proof.unwrap_or_default(), + )); let input_merkle_tree_accounts = inputs .test_indexer .get_state_merkle_tree_accounts(state_input_merkle_trees.unwrap_or(vec![]).as_slice()); @@ -431,9 +445,11 @@ pub async fn compressed_transaction_test + TestI .unwrap(); let slot = inputs.rpc.get_transaction_slot(&event.1).await.unwrap(); - let (created_output_compressed_accounts, _) = inputs - .test_indexer - .add_event_and_compressed_accounts(slot, &program_to_sdk_public_transaction_event(event.0.clone())); + let (created_output_compressed_accounts, _) = + inputs.test_indexer.add_event_and_compressed_accounts( + slot, + &program_to_sdk_public_transaction_event(event.0.clone()), + ); let created_output_compressed_accounts = created_output_compressed_accounts .into_iter() diff --git a/program-tests/utils/src/test_forester.rs b/program-tests/utils/src/test_forester.rs index f32704456..900dee482 100644 --- a/program-tests/utils/src/test_forester.rs +++ b/program-tests/utils/src/test_forester.rs @@ -5,10 +5,11 @@ use account_compression::{ AddressMerkleTreeAccount, StateMerkleTreeAccount, ID, SAFETY_MARGIN, }; use anchor_lang::{system_program, InstructionData, ToAccountMetas}; -use forester_utils::{ - get_concurrent_merkle_tree, get_hash_set, get_indexed_merkle_tree, +use forester_utils::{get_concurrent_merkle_tree, get_hash_set, get_indexed_merkle_tree}; +use light_client::{ + indexer::{AddressMerkleTreeBundle, StateMerkleTreeBundle}, + rpc::{errors::RpcError, RpcConnection}, }; -use light_client::rpc::{errors::RpcError, RpcConnection}; use light_concurrent_merkle_tree::event::MerkleTreeEvent; use light_hasher::Poseidon; use light_indexed_merkle_tree::copy::IndexedMerkleTreeCopy; @@ -30,7 +31,6 @@ use solana_sdk::{ transaction::Transaction, }; use thiserror::Error; -use light_client::indexer::{AddressMerkleTreeBundle, StateMerkleTreeBundle}; // doesn't keep its own Merkle tree but gets it from the indexer // can also get all the state and Address Merkle trees from the indexer // the lightweight version is just a function diff --git a/sdk-libs/client/src/indexer/mod.rs b/sdk-libs/client/src/indexer/mod.rs index ebc1db0a4..9866167e8 100644 --- a/sdk-libs/client/src/indexer/mod.rs +++ b/sdk-libs/client/src/indexer/mod.rs @@ -1,4 +1,5 @@ use std::fmt::Debug; + use async_trait::async_trait; use light_concurrent_merkle_tree::light_hasher::Poseidon; use light_indexed_merkle_tree::{ @@ -6,14 +7,11 @@ use light_indexed_merkle_tree::{ reference::IndexedMerkleTree, }; use light_merkle_tree_reference::MerkleTree; -use light_sdk::{ - compressed_account::CompressedAccountWithMerkleContext, - proof::ProofRpcResult -}; +use light_sdk::proof::ProofRpcResult; use num_bigint::BigUint; use solana_sdk::pubkey::Pubkey; use thiserror::Error; -use light_sdk::token::TokenData; + use crate::rpc::RpcConnection; #[derive(Error, Debug)] @@ -37,7 +35,6 @@ pub struct ProofOfLeaf { #[async_trait] pub trait Indexer: Sync + Send + Debug + 'static { - /// Returns queue elements from the queue with the given pubkey. 
For input
 /// queues, the account compression program does not store queue elements in the
 /// account data but only emits these in the public transaction event. The
@@ -50,10 +47,7 @@ pub trait Indexer<R: RpcConnection>: Sync + Send + Debug + 'static {
         end_offset: u64,
     ) -> Result<Vec<[u8; 32]>, IndexerError>;
 
-    fn get_subtrees(
-        &self,
-        merkle_tree_pubkey: [u8; 32],
-    ) -> Result<Vec<[u8; 32]>, IndexerError>;
+    fn get_subtrees(&self, merkle_tree_pubkey: [u8; 32]) -> Result<Vec<[u8; 32]>, IndexerError>;
 
     // fn add_event_and_compressed_accounts(
     //     &mut self,
diff --git a/sdk-libs/program-test/src/indexer/extensions.rs b/sdk-libs/program-test/src/indexer/extensions.rs
index a3be164fd..1d333f20f 100644
--- a/sdk-libs/program-test/src/indexer/extensions.rs
+++ b/sdk-libs/program-test/src/indexer/extensions.rs
@@ -1,11 +1,16 @@
-use async_trait::async_trait;
 use account_compression::initialize_address_merkle_tree::Pubkey;
-use light_client::indexer::{AddressMerkleTreeAccounts, AddressMerkleTreeBundle, Indexer, NewAddressProofWithContext, ProofOfLeaf, StateMerkleTreeAccounts, StateMerkleTreeBundle};
-use light_client::rpc::RpcConnection;
-use light_sdk::compressed_account::CompressedAccountWithMerkleContext;
-use light_sdk::event::PublicTransactionEvent;
-use light_sdk::proof::{BatchedTreeProofRpcResult};
-use light_sdk::token::TokenDataWithMerkleContext;
+use async_trait::async_trait;
+use light_client::{
+    indexer::{
+        AddressMerkleTreeAccounts, AddressMerkleTreeBundle, Indexer, NewAddressProofWithContext,
+        ProofOfLeaf, StateMerkleTreeAccounts, StateMerkleTreeBundle,
+    },
+    rpc::RpcConnection,
+};
+use light_sdk::{
+    compressed_account::CompressedAccountWithMerkleContext, event::PublicTransactionEvent,
+    proof::BatchedTreeProofRpcResult, token::TokenDataWithMerkleContext,
+};
 use solana_sdk::signature::Keypair;
 
 #[async_trait]
@@ -15,7 +20,11 @@ pub trait TestIndexerExtensions<R: RpcConnection>: Indexer<R> {
         merkle_tree_pubkey: Pubkey,
     ) -> Option<&AddressMerkleTreeBundle>;
 
-    fn add_compressed_accounts_with_token_data(&mut self, slot: u64, event: &PublicTransactionEvent);
+    fn add_compressed_accounts_with_token_data(
+        &mut self,
+        slot: u64,
+        event: &PublicTransactionEvent,
+    );
 
     fn account_nullified(&mut self, merkle_tree_pubkey: Pubkey, account_hash: &str);
 
@@ -112,4 +121,4 @@ pub trait TestIndexerExtensions<R: RpcConnection>: Indexer<R> {
         rpc: &mut R,
         merkle_tree_pubkey: Pubkey,
     );
-}
\ No newline at end of file
+}
diff --git a/sdk-libs/program-test/src/indexer/test_indexer.rs b/sdk-libs/program-test/src/indexer/test_indexer.rs
index b80a14595..3e4704c04 100644
--- a/sdk-libs/program-test/src/indexer/test_indexer.rs
+++ b/sdk-libs/program-test/src/indexer/test_indexer.rs
@@ -1,17 +1,24 @@
-use crate::indexer::TestIndexerExtensions;
-use crate::test_env::{create_state_merkle_tree_and_queue_account, EnvAccounts, BATCHED_OUTPUT_QUEUE_TEST_KEYPAIR};
-use account_compression::{AddressMerkleTreeAccount, AddressMerkleTreeConfig, AddressQueueConfig, NullifierQueueConfig, StateMerkleTreeAccount, StateMerkleTreeConfig};
+use std::{marker::PhantomData, time::Duration};
+
+use account_compression::{
+    AddressMerkleTreeAccount, AddressMerkleTreeConfig, AddressQueueConfig, NullifierQueueConfig,
+    StateMerkleTreeAccount, StateMerkleTreeConfig,
+};
 use async_trait::async_trait;
+use borsh::BorshDeserialize;
 use forester_utils::{get_concurrent_merkle_tree, get_indexed_merkle_tree, AccountZeroCopy};
 use light_batched_merkle_tree::{
+    batch::BatchState,
     constants::{DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, DEFAULT_BATCH_STATE_TREE_HEIGHT},
-    merkle_tree::{BatchedMerkleTreeAccount},
+    
initialize_address_tree::InitAddressTreeAccountsInstructionData, + initialize_state_tree::InitStateTreeAccountsInstructionData, + merkle_tree::BatchedMerkleTreeAccount, + queue::{BatchedQueueAccount, BatchedQueueMetadata}, }; -use light_client::indexer::{IndexerError, MerkleProof, NewAddressProofWithContext, ProofOfLeaf}; use light_client::{ indexer::{ - AddressMerkleTreeAccounts, AddressMerkleTreeBundle, Indexer, StateMerkleTreeAccounts, - StateMerkleTreeBundle, + AddressMerkleTreeAccounts, AddressMerkleTreeBundle, Indexer, IndexerError, MerkleProof, + NewAddressProofWithContext, ProofOfLeaf, StateMerkleTreeAccounts, StateMerkleTreeBundle, }, rpc::{merkle_tree::MerkleTreeExt, RpcConnection}, transaction_params::FeeConfig, @@ -19,26 +26,21 @@ use light_client::{ use light_hasher::{Hasher, Poseidon}; use light_indexed_merkle_tree::{array::IndexedArray, reference::IndexedMerkleTree}; use light_merkle_tree_reference::MerkleTree; -use light_prover_client::inclusion_legacy::merkle_inclusion_proof_inputs::InclusionProofInputs as InclusionProofInputsLegacy; -use light_prover_client::{ - gnark::helpers::{big_int_to_string, spawn_prover, string_to_big_int, ProofType, ProverConfig}, - helpers::bigint_to_u8_32, -}; -use light_prover_client::{ - gnark::inclusion_json_formatter_legacy::BatchInclusionJsonStruct as BatchInclusionJsonStructLegacy, - inclusion::merkle_inclusion_proof_inputs::InclusionProofInputs, -}; use light_prover_client::{ gnark::{ combined_json_formatter::CombinedJsonStruct, combined_json_formatter_legacy::CombinedJsonStruct as CombinedJsonStructLegacy, constants::{PROVE_PATH, SERVER_ADDRESS}, + helpers::{big_int_to_string, spawn_prover, string_to_big_int, ProofType, ProverConfig}, inclusion_json_formatter::BatchInclusionJsonStruct, + inclusion_json_formatter_legacy::BatchInclusionJsonStruct as BatchInclusionJsonStructLegacy, non_inclusion_json_formatter::BatchNonInclusionJsonStruct, non_inclusion_json_formatter_legacy::BatchNonInclusionJsonStruct as BatchNonInclusionJsonStructLegacy, proof_helpers::{compress_proof, deserialize_gnark_proof_json, proof_from_json_struct}, }, - inclusion::merkle_inclusion_proof_inputs::InclusionMerkleProofInputs, + helpers::bigint_to_u8_32, + inclusion::merkle_inclusion_proof_inputs::{InclusionMerkleProofInputs, InclusionProofInputs}, + inclusion_legacy::merkle_inclusion_proof_inputs::InclusionProofInputs as InclusionProofInputsLegacy, non_inclusion::merkle_non_inclusion_proof_inputs::{ get_non_inclusion_proof_inputs, NonInclusionProofInputs, }, @@ -48,28 +50,33 @@ use light_sdk::{ compressed_account::CompressedAccountWithMerkleContext, event::PublicTransactionEvent, merkle_context::MerkleContext, - proof::{CompressedProof, ProofRpcResult}, + proof::{BatchedTreeProofRpcResult, CompressedProof, ProofRpcResult}, token::{TokenData, TokenDataWithMerkleContext}, STATE_MERKLE_TREE_CANOPY_DEPTH, }; -use light_utils::bigint::bigint_to_be_bytes_array; -use light_utils::hashchain::{create_hash_chain_from_slice, create_tx_hash}; +use light_utils::{ + bigint::bigint_to_be_bytes_array, + hashchain::{create_hash_chain_from_slice, create_tx_hash}, +}; use log::{info, warn}; use num_bigint::{BigInt, BigUint}; use num_traits::FromBytes; use reqwest::Client; -use solana_sdk::bs58; -use solana_sdk::pubkey::Pubkey; -use solana_sdk::signature::{Keypair, Signer}; -use std::{marker::PhantomData, time::Duration}; -use borsh::BorshDeserialize; -use light_batched_merkle_tree::batch::BatchState; -use 
light_batched_merkle_tree::initialize_address_tree::InitAddressTreeAccountsInstructionData; -use light_batched_merkle_tree::initialize_state_tree::InitStateTreeAccountsInstructionData; -use light_batched_merkle_tree::queue::{BatchedQueueAccount, BatchedQueueMetadata}; -use light_sdk::proof::BatchedTreeProofRpcResult; -use crate::indexer::utils::create_address_merkle_tree_and_queue_account_with_assert; -use crate::test_batch_forester::{create_batch_address_merkle_tree, create_batched_state_merkle_tree}; +use solana_sdk::{ + bs58, + pubkey::Pubkey, + signature::{Keypair, Signer}, +}; + +use crate::{ + indexer::{ + utils::create_address_merkle_tree_and_queue_account_with_assert, TestIndexerExtensions, + }, + test_batch_forester::{create_batch_address_merkle_tree, create_batched_state_merkle_tree}, + test_env::{ + create_state_merkle_tree_and_queue_account, EnvAccounts, BATCHED_OUTPUT_QUEUE_TEST_KEYPAIR, + }, +}; #[derive(Debug)] pub struct TestIndexer @@ -123,10 +130,7 @@ where Err(IndexerError::Custom("Merkle tree not found".to_string())) } - fn get_subtrees( - &self, - merkle_tree_pubkey: [u8; 32], - ) -> Result, IndexerError> { + fn get_subtrees(&self, merkle_tree_pubkey: [u8; 32]) -> Result, IndexerError> { let merkle_tree_pubkey = Pubkey::new_from_array(merkle_tree_pubkey); let address_tree_bundle = self .address_merkle_trees @@ -294,7 +298,6 @@ where // (compressed_accounts, token_compressed_accounts) // } - async fn create_proof_for_compressed_accounts( &mut self, compressed_accounts: Option>, @@ -305,7 +308,7 @@ where ) -> ProofRpcResult { if compressed_accounts.is_some() && ![1usize, 2usize, 3usize, 4usize, 8usize] - .contains(&compressed_accounts.as_ref().unwrap().len()) + .contains(&compressed_accounts.as_ref().unwrap().len()) { panic!( "compressed_accounts must be of length 1, 2, 3, 4 or 8 != {}", @@ -375,16 +378,16 @@ where &string_to_big_int( &inclusion_payload.as_ref().unwrap().public_input_hash, ) - .unwrap(), - ) .unwrap(), + ) + .unwrap(), bigint_to_u8_32( &string_to_big_int(&non_inclusion_payload.public_input_hash) .unwrap(), ) - .unwrap(), - ]) .unwrap(), + ]) + .unwrap(), ); println!( "inclusion public input hash offchain {:?}", @@ -392,9 +395,9 @@ where &string_to_big_int( &inclusion_payload.as_ref().unwrap().public_input_hash, ) - .unwrap(), + .unwrap(), ) - .unwrap() + .unwrap() ); println!( "non inclusion public input hash offchain {:?}", @@ -402,7 +405,7 @@ where &string_to_big_int(&non_inclusion_payload.public_input_hash) .unwrap() ) - .unwrap() + .unwrap() ); println!( @@ -418,7 +421,7 @@ where inclusion: inclusion_payload.unwrap().inputs, non_inclusion: non_inclusion_payload.inputs, } - .to_string() + .to_string() } else if let Some(non_inclusion_payload) = non_inclusion_payload_legacy { CombinedJsonStructLegacy { circuit_type: ProofType::Combined.to_string(), @@ -427,7 +430,7 @@ where inclusion: inclusion_payload_legacy.unwrap().inputs, non_inclusion: non_inclusion_payload.inputs, } - .to_string() + .to_string() } else { panic!("Unsupported tree height") }; @@ -552,15 +555,22 @@ where .await } - - fn get_proofs_by_indices(&mut self, merkle_tree_pubkey: Pubkey, indices: &[u64]) -> Vec { + fn get_proofs_by_indices( + &mut self, + merkle_tree_pubkey: Pubkey, + indices: &[u64], + ) -> Vec { indices .iter() .map(|&index| self.get_proof_by_index(merkle_tree_pubkey, index)) .collect() } - fn get_leaf_indices_tx_hashes(&mut self, merkle_tree_pubkey: Pubkey, zkp_batch_size: usize) -> Vec<(u32, [u8; 32], [u8; 32])> { + fn get_leaf_indices_tx_hashes( + &mut self, + 
merkle_tree_pubkey: Pubkey,
+        zkp_batch_size: usize,
+    ) -> Vec<(u32, [u8; 32], [u8; 32])> {
        let state_merkle_tree_bundle = self
            .state_merkle_trees
            .iter_mut()
@@ -573,7 +583,6 @@ where
    fn get_address_merkle_trees(&self) -> &Vec<AddressMerkleTreeBundle> {
        &self.address_merkle_trees
    }
-
}

#[async_trait]
@@ -581,7 +590,6 @@ impl<R> TestIndexerExtensions<R> for TestIndexer<R>
where
    R: RpcConnection + MerkleTreeExt,
{
-
    fn get_address_merkle_tree(
        &self,
        merkle_tree_pubkey: Pubkey,
@@ -597,11 +605,14 @@ where
    /// adds the input_compressed_accounts to the nullified_compressed_accounts
    /// deserializes token data from the output_compressed_accounts
    /// adds the token_compressed_accounts to the token_compressed_accounts
-    fn add_compressed_accounts_with_token_data(&mut self, slot: u64, event: &PublicTransactionEvent) {
+    fn add_compressed_accounts_with_token_data(
+        &mut self,
+        slot: u64,
+        event: &PublicTransactionEvent,
+    ) {
        self.add_event_and_compressed_accounts(slot, event);
    }

-
    fn account_nullified(&mut self, merkle_tree_pubkey: Pubkey, account_hash: &str) {
        let decoded_hash: [u8; 32] = bs58::decode(account_hash)
            .into_vec()
@@ -693,7 +704,7 @@ where
        state_merkle_tree_pubkeys: Option<Vec<Pubkey>>,
        new_addresses: Option<&[[u8; 32]]>,
        address_merkle_tree_pubkeys: Option<Vec<Pubkey>>,
-        rpc: &mut R
+        rpc: &mut R,
    ) -> BatchedTreeProofRpcResult {
        let mut indices_to_remove = Vec::new();

@@ -718,7 +729,7 @@ where
                let queue_zero_copy = BatchedQueueAccount::output_queue_from_bytes_mut(
                    queue.account.data.as_mut_slice(),
                )
-                    .unwrap();
+                .unwrap();
                for value_array in queue_zero_copy.value_vecs.iter() {
                    let index = value_array.iter().position(|x| *x == *compressed_account);
                    if index.is_some() {
@@ -759,7 +770,7 @@ where
                        address_merkle_tree_pubkeys,
                        rpc,
                    )
-                        .await,
+                    .await,
                )
            } else {
                None
@@ -803,9 +814,9 @@ where
            address_merkle_tree_accounts,
        ));
        info!(
-            "Address merkle tree accounts added. 
Total: {}", + self.address_merkle_trees.len() + ); address_merkle_tree_accounts } @@ -862,7 +873,7 @@ where } fn get_proof_by_index(&mut self, merkle_tree_pubkey: Pubkey, index: u64) -> ProofOfLeaf { - let mut bundle = self + let bundle = self .state_merkle_trees .iter_mut() .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) @@ -889,8 +900,13 @@ where ProofOfLeaf { leaf, proof } } - - async fn update_test_indexer_after_append(&mut self, rpc: &mut R, merkle_tree_pubkey: Pubkey, output_queue_pubkey: Pubkey, num_inserted_zkps: u64) { + async fn update_test_indexer_after_append( + &mut self, + rpc: &mut R, + merkle_tree_pubkey: Pubkey, + output_queue_pubkey: Pubkey, + num_inserted_zkps: u64, + ) { let state_merkle_tree_bundle = self .state_merkle_trees .iter_mut() @@ -903,7 +919,7 @@ where let merkle_tree = BatchedMerkleTreeAccount::state_tree_from_bytes_mut( merkle_tree_account.data.as_mut_slice(), ) - .unwrap(); + .unwrap(); ( merkle_tree.get_metadata().next_index as usize, *merkle_tree.root_history.last().unwrap(), @@ -916,7 +932,7 @@ where let output_queue = BatchedQueueAccount::output_queue_from_bytes_mut( output_queue_account.data.as_mut_slice(), ) - .unwrap(); + .unwrap(); let output_queue_account = output_queue.get_metadata(); let max_num_zkp_updates = output_queue_account.batch_metadata.get_num_zkp_batches(); @@ -962,7 +978,12 @@ where } } - async fn update_test_indexer_after_nullification(&mut self, rpc: &mut R, merkle_tree_pubkey: Pubkey, batch_index: usize) { + async fn update_test_indexer_after_nullification( + &mut self, + rpc: &mut R, + merkle_tree_pubkey: Pubkey, + batch_index: usize, + ) { let state_merkle_tree_bundle = self .state_merkle_trees .iter_mut() @@ -973,7 +994,7 @@ where let merkle_tree = BatchedMerkleTreeAccount::state_tree_from_bytes_mut( merkle_tree_account.data.as_mut_slice(), ) - .unwrap(); + .unwrap(); let batch = &merkle_tree.batches[batch_index]; if batch.get_state() == BatchState::Inserted || batch.get_state() == BatchState::Full { @@ -996,7 +1017,11 @@ where } } - async fn finalize_batched_address_tree_update(&mut self, rpc: &mut R, merkle_tree_pubkey: Pubkey) { + async fn finalize_batched_address_tree_update( + &mut self, + rpc: &mut R, + merkle_tree_pubkey: Pubkey, + ) { let mut account = rpc.get_account(merkle_tree_pubkey).await.unwrap().unwrap(); let onchain_account = BatchedMerkleTreeAccount::address_tree_from_bytes_mut(account.data.as_mut_slice()) @@ -1035,7 +1060,6 @@ impl TestIndexer where R: RpcConnection + MerkleTreeExt, { - pub async fn init_from_env( payer: &Keypair, env: &EnvAccounts, @@ -1068,7 +1092,7 @@ where env.group_pda, prover_config, ) - .await + .await } pub async fn new( @@ -1135,7 +1159,6 @@ where } } - pub fn add_address_merkle_tree_bundle( address_merkle_tree_accounts: AddressMerkleTreeAccounts, // TODO: add config here @@ -1179,8 +1202,8 @@ where &AddressQueueConfig::default(), 0, ) - .await - .unwrap(); + .await + .unwrap(); self.add_address_merkle_tree_accounts(merkle_tree_keypair, queue_keypair, owning_program_id) } @@ -1228,7 +1251,7 @@ where queue_keypair, owning_program_id, ) - .await + .await } else if version == 2 { self.add_address_merkle_tree_v2( rpc, @@ -1236,7 +1259,7 @@ where queue_keypair, owning_program_id, ) - .await + .await } else { panic!( "add_address_merkle_tree: Version not supported, {}. 
Versions: 1, 2", @@ -1245,7 +1268,6 @@ where } } - #[allow(clippy::too_many_arguments)] pub async fn add_state_merkle_tree( &mut self, @@ -1382,8 +1404,7 @@ where get_concurrent_merkle_tree::( rpc, pubkey, ) - .await - ; + .await; ( fetched_merkle_tree.root_index() as u32, fetched_merkle_tree.root(), @@ -1393,7 +1414,7 @@ where let merkle_tree = BatchedMerkleTreeAccount::state_tree_from_bytes_mut( merkle_tree_account.data.as_mut_slice(), ) - .unwrap(); + .unwrap(); ( merkle_tree.get_root_index(), merkle_tree.get_root().unwrap(), @@ -1415,7 +1436,8 @@ where )), None, ) - } else if height == account_compression::utils::constants::STATE_MERKLE_TREE_HEIGHT as usize { + } else if height == account_compression::utils::constants::STATE_MERKLE_TREE_HEIGHT as usize + { let inclusion_proof_inputs = InclusionProofInputsLegacy(inclusion_proofs.as_slice()); ( None, @@ -1481,7 +1503,7 @@ where let account = BatchedMerkleTreeAccount::address_tree_from_bytes_mut( account.data.as_mut_slice(), ) - .unwrap(); + .unwrap(); address_root_indices.push(account.get_root_index() as u16); } else { panic!( @@ -1489,13 +1511,17 @@ where ); } } else { - let fetched_address_merkle_tree = - get_indexed_merkle_tree::( - rpc, - address_merkle_tree_pubkeys[i], - ) - .await - ; + let fetched_address_merkle_tree = get_indexed_merkle_tree::< + AddressMerkleTreeAccount, + R, + Poseidon, + usize, + 26, + 16, + >( + rpc, address_merkle_tree_pubkeys[i] + ) + .await; address_root_indices.push(fetched_address_merkle_tree.root_index() as u16); } } @@ -1550,7 +1576,6 @@ where self.add_event_and_compressed_accounts(slot, &event); } - /// returns the compressed sol balance of the owner pubkey pub fn get_compressed_balance(&self, owner: &Pubkey) -> u64 { self.compressed_accounts @@ -1587,7 +1612,7 @@ where &event.output_compressed_account_hashes, slot, ) - .unwrap(); + .unwrap(); println!("tx_hash {:?}", tx_hash); println!("slot {:?}", slot); let hash = event.input_compressed_account_hashes[i]; @@ -1666,7 +1691,7 @@ where let merkle_tree = self.state_merkle_trees.iter().find(|x| { x.accounts.merkle_tree == event.pubkey_array - [event.output_compressed_accounts[i].merkle_tree_index as usize] + [event.output_compressed_accounts[i].merkle_tree_index as usize] }); // Check for output queue let merkle_tree = if let Some(merkle_tree) = merkle_tree { @@ -1677,7 +1702,7 @@ where .find(|x| { x.accounts.nullifier_queue == event.pubkey_array - [event.output_compressed_accounts[i].merkle_tree_index as usize] + [event.output_compressed_accounts[i].merkle_tree_index as usize] }) .unwrap() }; @@ -1761,7 +1786,7 @@ where .find(|x| { x.accounts.merkle_tree == event.pubkey_array - [event.output_compressed_accounts[i].merkle_tree_index as usize] + [event.output_compressed_accounts[i].merkle_tree_index as usize] }) .unwrap(); merkle_tree @@ -1785,7 +1810,7 @@ where .find(|x| { x.accounts.nullifier_queue == event.pubkey_array - [event.output_compressed_accounts[i].merkle_tree_index as usize] + [event.output_compressed_accounts[i].merkle_tree_index as usize] }) .unwrap(); diff --git a/sdk-libs/program-test/src/indexer/utils.rs b/sdk-libs/program-test/src/indexer/utils.rs index 68a64090f..35c54c385 100644 --- a/sdk-libs/program-test/src/indexer/utils.rs +++ b/sdk-libs/program-test/src/indexer/utils.rs @@ -1,17 +1,21 @@ use std::cmp; -use anchor_lang::solana_program::instruction::InstructionError; -use solana_sdk::signature::{Keypair, Signature, Signer}; -use solana_sdk::transaction; -use account_compression::{AddressMerkleTreeConfig, AddressQueueConfig, 
QueueAccount, RegisteredProgram}; -use account_compression::initialize_address_merkle_tree::Pubkey; + +use account_compression::{ + initialize_address_merkle_tree::Pubkey, AddressMerkleTreeConfig, AddressQueueConfig, + QueueAccount, RegisteredProgram, +}; use forester_utils::{get_hash_set, get_indexed_merkle_tree, AccountZeroCopy}; use light_client::rpc::{RpcConnection, RpcError}; use light_hasher::Poseidon; -use light_merkle_tree_metadata::access::AccessMetadata; -use light_merkle_tree_metadata::queue::{QueueMetadata, QueueType}; -use light_merkle_tree_metadata::rollover::RolloverMetadata; +use light_merkle_tree_metadata::{ + access::AccessMetadata, + queue::{QueueMetadata, QueueType}, + rollover::RolloverMetadata, +}; use light_registry::account_compression_cpi::sdk::get_registered_program_pda; use light_utils::fee::compute_rollover_fee; +use solana_sdk::signature::{Keypair, Signature, Signer}; + use crate::test_env::create_address_merkle_tree_and_queue_account; #[allow(clippy::too_many_arguments)] @@ -40,7 +44,7 @@ pub async fn create_address_merkle_tree_and_queue_account_with_assert, - error_code: u32, -) -> Result<(), RpcError> { - let accepted_errors = [ - (0, InstructionError::ProgramFailedToComplete), - (0, InstructionError::Custom(error_code)), - ]; - - let is_accepted = accepted_errors.iter().any(|(index, error)| { - matches!(result, Err(RpcError::TransactionError(transaction::TransactionError::InstructionError(i, ref e))) if i == (*index as u8) && e == error) - }); - - if !is_accepted { - println!("result {:?}", result); - println!("error_code {:?}", error_code); - return Err(RpcError::AssertRpcError(format!( - "Expected error code {} or program error, got {:?}", - error_code, result - ))); - } - - Ok(()) -} - +// /// Asserts that the given `BanksTransactionResultWithMetadata` is an error with a custom error code +// /// or a program error. +// /// Unfortunately BanksTransactionResultWithMetadata does not reliably expose the custom error code, so +// /// we allow program error as well. 
+// // TODO: unify with assert_rpc_error +// pub fn assert_custom_error_or_program_error( +// result: Result, +// error_code: u32, +// ) -> Result<(), RpcError> { +// let accepted_errors = [ +// (0, InstructionError::ProgramFailedToComplete), +// (0, InstructionError::Custom(error_code)), +// ]; +// +// let is_accepted = accepted_errors.iter().any(|(index, error)| { +// matches!(result, Err(RpcError::TransactionError(transaction::TransactionError::InstructionError(i, ref e))) if i == (*index as u8) && e == error) +// }); +// +// if !is_accepted { +// println!("result {:?}", result); +// println!("error_code {:?}", error_code); +// return Err(RpcError::AssertRpcError(format!( +// "Expected error code {} or program error, got {:?}", +// error_code, result +// ))); +// } +// +// Ok(()) +// } #[allow(clippy::too_many_arguments)] pub async fn assert_address_merkle_tree_initialized( @@ -169,7 +172,7 @@ pub async fn assert_address_merkle_tree_initialized( rpc, *merkle_tree_pubkey, ) - .await; + .await; let merkle_tree_account = merkle_tree.deserialized(); assert_eq!( @@ -232,7 +235,7 @@ pub async fn assert_address_merkle_tree_initialized( 26, 16, >(rpc, *merkle_tree_pubkey) - .await; + .await; assert_eq!(merkle_tree.height, merkle_tree_config.height as usize); assert_eq!( @@ -273,7 +276,6 @@ pub async fn assert_address_merkle_tree_initialized( ); } - #[allow(clippy::too_many_arguments)] pub async fn assert_address_queue_initialized( rpc: &mut R, @@ -301,10 +303,9 @@ pub async fn assert_address_queue_initialized( None, payer_pubkey, ) - .await; + .await; } - #[allow(clippy::too_many_arguments)] pub async fn assert_address_queue( rpc: &mut R, @@ -337,10 +338,10 @@ pub async fn assert_address_queue( Some(threshold) => { compute_rollover_fee(threshold, associated_tree_config.height, balance_queue).unwrap() + compute_rollover_fee( - threshold, - associated_tree_config.height, - balance_merkle_tree, - ) + threshold, + associated_tree_config.height, + balance_merkle_tree, + ) .unwrap() } None => 0, @@ -360,7 +361,7 @@ pub async fn assert_address_queue( expected_next_queue, payer_pubkey, ) - .await; + .await; } #[allow(clippy::too_many_arguments)] pub async fn assert_queue( @@ -413,4 +414,3 @@ pub async fn assert_queue( queue_config.sequence_threshold as usize ); } - diff --git a/sdk-libs/program-test/src/lib.rs b/sdk-libs/program-test/src/lib.rs index 98029f4b8..5ec9f5724 100644 --- a/sdk-libs/program-test/src/lib.rs +++ b/sdk-libs/program-test/src/lib.rs @@ -1,6 +1,6 @@ pub mod env_accounts; +pub mod indexer; pub mod test_batch_forester; pub mod test_env; pub mod test_indexer; pub mod test_rpc; -pub mod indexer; diff --git a/sdk-libs/program-test/src/test_batch_forester.rs b/sdk-libs/program-test/src/test_batch_forester.rs index 7d70e92e1..bb08cce3b 100644 --- a/sdk-libs/program-test/src/test_batch_forester.rs +++ b/sdk-libs/program-test/src/test_batch_forester.rs @@ -1,9 +1,6 @@ use anchor_lang::AnchorDeserialize; use borsh::BorshSerialize; -use forester_utils::{ - create_account_instruction, - AccountZeroCopy, -}; +use forester_utils::{create_account_instruction, AccountZeroCopy}; use light_batched_merkle_tree::{ constants::{DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, DEFAULT_BATCH_STATE_TREE_HEIGHT}, event::{BatchAppendEvent, BatchNullifyEvent}, diff --git a/sdk-libs/program-test/src/test_indexer.rs b/sdk-libs/program-test/src/test_indexer.rs index 0932d1343..d28ffdd50 100644 --- a/sdk-libs/program-test/src/test_indexer.rs +++ b/sdk-libs/program-test/src/test_indexer.rs @@ -1,5 +1,5 @@ // use 
std::{marker::PhantomData, time::Duration}; -// +// // use account_compression::StateMerkleTreeAccount; // use anchor_lang::Discriminator; // use borsh::BorshDeserialize; @@ -58,7 +58,7 @@ // use num_traits::FromBytes; // use reqwest::Client; // use solana_sdk::pubkey::Pubkey; -// +// // #[derive(Debug)] // pub struct TestIndexer // where @@ -73,7 +73,7 @@ // pub events: Vec, // _rpc: PhantomData, // } -// +// // impl Indexer for TestIndexer // where // R: RpcConnection + MerkleTreeExt, @@ -121,7 +121,7 @@ // self.token_compressed_accounts.remove(index); // } // } -// +// // let mut compressed_accounts = Vec::new(); // let mut token_compressed_accounts = Vec::new(); // for (i, compressed_account) in event.output_compressed_accounts.iter().enumerate() { @@ -220,11 +220,11 @@ // ) // .expect("insert failed"); // } -// +// // self.events.push(event.clone()); // (compressed_accounts, token_compressed_accounts) // } -// +// // async fn create_proof_for_compressed_accounts( // &mut self, // compressed_accounts: Option<&[[u8; 32]]>, @@ -252,7 +252,7 @@ // let (payload, payload_legacy, indices) = self // .process_inclusion_proofs(state_merkle_tree_pubkeys.unwrap(), accounts, rpc) // .await; -// +// // if let Some(payload) = payload { // (indices, Vec::new(), payload.to_string()) // } else { @@ -278,7 +278,7 @@ // let (inclusion_payload, inclusion_payload_legacy, inclusion_indices) = self // .process_inclusion_proofs(state_merkle_tree_pubkeys.unwrap(), accounts, rpc) // .await; -// +// // let ( // non_inclusion_payload, // non_inclusion_payload_legacy, @@ -309,7 +309,7 @@ // ]) // .unwrap(), // ); -// +// // CombinedJsonStruct { // circuit_type: ProofType::Combined.to_string(), // state_tree_height: DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, @@ -337,7 +337,7 @@ // panic!("At least one of compressed_accounts or new_addresses must be provided") // } // }; -// +// // let mut retries = 3; // while retries > 0 { // let response_result = client @@ -370,7 +370,7 @@ // } // panic!("Failed to get proof from server"); // } -// +// // /// Returns compressed accounts owned by the given `owner`. 
// fn get_compressed_accounts_by_owner( // &self, @@ -383,7 +383,7 @@ // .collect() // } // } -// +// // impl TestIndexer // where // R: RpcConnection + MerkleTreeExt, @@ -408,28 +408,28 @@ // } // }) // .collect::>(); -// +// // let address_merkle_trees = address_merkle_tree_accounts // .iter() // .map(|accounts| Self::add_address_merkle_tree_bundle(accounts)) // .collect::>(); -// +// // let mut prover_config = ProverConfig { // circuits: vec![], // run_mode: None, // }; -// +// // if inclusion { // prover_config.circuits.push(ProofType::Inclusion); // } // if non_inclusion { // prover_config.circuits.push(ProofType::NonInclusion); // } -// +// // spawn_prover(true, prover_config).await; -// +// // health_check(20, 1).await; -// +// // Self { // state_merkle_trees, // address_merkle_trees, @@ -441,7 +441,7 @@ // _rpc: PhantomData, // } // } -// +// // pub fn add_address_merkle_tree_bundle( // accounts: &AddressMerkleTreeAccounts, // // TODO: add config here @@ -463,7 +463,7 @@ // rollover_fee: FeeConfig::default().address_queue_rollover, // } // } -// +// // async fn process_inclusion_proofs( // &self, // merkle_tree_pubkeys: &[Pubkey], @@ -477,7 +477,7 @@ // let mut inclusion_proofs = Vec::new(); // let mut root_indices = Vec::new(); // let mut height = 0; -// +// // for (i, account) in accounts.iter().enumerate() { // let bundle = &self // .state_merkle_trees @@ -494,7 +494,7 @@ // .await // .unwrap() // .unwrap(); -// +// // let discriminator = merkle_tree_account.data[0..8].try_into().unwrap(); // let version = match discriminator { // StateMerkleTreeAccount::DISCRIMINATOR => 1, @@ -549,10 +549,10 @@ // ) // }; // assert_eq!(merkle_tree.root(), root, "Merkle tree root mismatch"); -// +// // root_indices.push(root_index as u16); // } -// +// // let (batch_inclusion_proof_inputs, legacy) = if height // == DEFAULT_BATCH_STATE_TREE_HEIGHT as usize // { @@ -575,10 +575,10 @@ // } else { // panic!("Unsupported tree height") // }; -// +// // (batch_inclusion_proof_inputs, legacy, root_indices) // } -// +// // async fn process_non_inclusion_proofs( // &self, // address_merkle_tree_pubkeys: &[Pubkey], @@ -618,7 +618,7 @@ // tree_heights // ); // } -// +// // let (batch_non_inclusion_proof_inputs, batch_non_inclusion_proof_inputs_legacy) = // if tree_heights[0] == 26 { // let non_inclusion_proof_inputs = @@ -651,7 +651,7 @@ // address_root_indices, // ) // } -// +// // /// deserializes an event // /// adds the output_compressed_accounts to the compressed_accounts // /// removes the input_compressed_accounts from the compressed_accounts diff --git a/sdk-libs/sdk/src/lib.rs b/sdk-libs/sdk/src/lib.rs index 5d064b210..cf0fab912 100644 --- a/sdk-libs/sdk/src/lib.rs +++ b/sdk-libs/sdk/src/lib.rs @@ -21,4 +21,4 @@ pub mod token; pub mod traits; pub mod transfer; pub mod utils; -pub mod verify; \ No newline at end of file +pub mod verify; diff --git a/sdk-libs/sdk/src/proof.rs b/sdk-libs/sdk/src/proof.rs index 9f579faf9..c31c64b90 100644 --- a/sdk-libs/sdk/src/proof.rs +++ b/sdk-libs/sdk/src/proof.rs @@ -58,4 +58,4 @@ pub struct BatchedTreeProofRpcResult { // If none -> proof by index, else included in zkp pub root_indices: Vec>, pub address_root_indices: Vec, -} \ No newline at end of file +} From 1fdf0dde269dbdd0f95dacfda474945cb2192928 Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Thu, 9 Jan 2025 15:23:48 +0000 Subject: [PATCH 15/27] refactor examples/* --- Cargo.lock | 1 + .../name-service-without-macros/tests/test.rs | 6 +- .../programs/name-service/tests/test.rs | 6 +- 
.../programs/token-escrow/Cargo.toml | 1 + .../programs/token-escrow/tests/test.rs | 74 ++++++----- .../token-escrow/tests/test_compressed_pda.rs | 121 +++++++++--------- 6 files changed, 115 insertions(+), 94 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e843619d1..f85893d61 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7248,6 +7248,7 @@ dependencies = [ "account-compression", "anchor-lang", "anchor-spl", + "light-client", "light-compressed-token", "light-hasher", "light-program-test", diff --git a/examples/name-service/programs/name-service-without-macros/tests/test.rs b/examples/name-service/programs/name-service-without-macros/tests/test.rs index 3d01c2902..f98d5f20d 100644 --- a/examples/name-service/programs/name-service-without-macros/tests/test.rs +++ b/examples/name-service/programs/name-service-without-macros/tests/test.rs @@ -348,7 +348,8 @@ where let event = rpc .create_and_send_transaction_with_event(&[instruction], &payer.pubkey(), &[payer], None) .await?; - test_indexer.add_compressed_accounts_with_token_data(&event.unwrap().0); + let slot = rpc.get_slot().await.unwrap(); + test_indexer.add_compressed_accounts_with_token_data(slot, &event.unwrap().0); Ok(()) } @@ -421,7 +422,8 @@ where let event = rpc .create_and_send_transaction_with_event(&[instruction], &payer.pubkey(), &[payer], None) .await?; - test_indexer.add_compressed_accounts_with_token_data(&event.unwrap().0); + let slot = rpc.get_slot().await.unwrap(); + test_indexer.add_compressed_accounts_with_token_data(slot, &event.unwrap().0); Ok(()) } diff --git a/examples/name-service/programs/name-service/tests/test.rs b/examples/name-service/programs/name-service/tests/test.rs index 17a75eefa..35dd036a5 100644 --- a/examples/name-service/programs/name-service/tests/test.rs +++ b/examples/name-service/programs/name-service/tests/test.rs @@ -356,7 +356,8 @@ where let event = rpc .create_and_send_transaction_with_event(&[instruction], &payer.pubkey(), &[payer], None) .await?; - test_indexer.add_compressed_accounts_with_token_data(&event.unwrap().0); + let slot = rpc.get_slot().await.unwrap(); + test_indexer.add_compressed_accounts_with_token_data(slot, &event.unwrap().0); Ok(()) } @@ -435,7 +436,8 @@ where let event = rpc .create_and_send_transaction_with_event(&[instruction], &payer.pubkey(), &[payer], None) .await?; - test_indexer.add_compressed_accounts_with_token_data(&event.unwrap().0); + let slot = rpc.get_slot().await.unwrap(); + test_indexer.add_compressed_accounts_with_token_data(slot, &event.unwrap().0); Ok(()) } diff --git a/examples/token-escrow/programs/token-escrow/Cargo.toml b/examples/token-escrow/programs/token-escrow/Cargo.toml index 9d0ca7213..3243b78bb 100644 --- a/examples/token-escrow/programs/token-escrow/Cargo.toml +++ b/examples/token-escrow/programs/token-escrow/Cargo.toml @@ -27,6 +27,7 @@ account-compression = { workspace = true } light-hasher = { workspace = true } light-verifier = { workspace = true } light-sdk = { workspace = true, features = ["legacy"] } +light-client = { workspace = true } [target.'cfg(not(target_os = "solana"))'.dependencies] solana-sdk = { workspace = true } diff --git a/examples/token-escrow/programs/token-escrow/tests/test.rs b/examples/token-escrow/programs/token-escrow/tests/test.rs index bd0b92504..b6caecc03 100644 --- a/examples/token-escrow/programs/token-escrow/tests/test.rs +++ b/examples/token-escrow/programs/token-escrow/tests/test.rs @@ -16,15 +16,17 @@ use light_prover_client::gnark::helpers::{ProofType, ProverConfig}; use 
light_system_program::sdk::{compressed_account::MerkleContext, event::PublicTransactionEvent}; use light_test_utils::{ airdrop_lamports, assert_rpc_error, - indexer::TestIndexer, spl::{create_mint_helper, mint_tokens_helper}, - FeeConfig, Indexer, RpcConnection, RpcError, TransactionParams, + FeeConfig, RpcConnection, RpcError, TransactionParams, }; use light_verifier::VerifierError; use solana_sdk::{ instruction::Instruction, pubkey::Pubkey, signature::Keypair, signer::Signer, transaction::Transaction, }; +use light_client::indexer::Indexer; +use light_program_test::indexer::{TestIndexer, TestIndexerExtensions}; +use light_test_utils::conversions::{program_to_sdk_public_transaction_event, sdk_to_program_compressed_account, sdk_to_program_compressed_proof, sdk_to_program_token_data}; use token_escrow::{ escrow_with_compressed_pda::sdk::get_token_owner_pda, escrow_with_pda::sdk::{ @@ -205,16 +207,16 @@ async fn test_escrow_pda() { ); } -pub async fn perform_escrow( +pub async fn perform_escrow + TestIndexerExtensions>( rpc: &mut R, - test_indexer: &mut TestIndexer, + test_indexer: &mut I, env: &EnvAccounts, payer: &Keypair, escrow_amount: &u64, lock_up_time: &u64, ) -> Instruction { let input_compressed_token_account_data = test_indexer - .token_compressed_accounts + .get_token_compressed_accounts() .iter() .find(|x| { println!("searching token account: {:?}", x.token_data); @@ -253,7 +255,9 @@ pub async fn perform_escrow( .await; let create_ix_inputs = CreateEscrowInstructionInputs { - input_token_data: &[input_compressed_token_account_data.token_data.clone()], + input_token_data: &[sdk_to_program_token_data( + input_compressed_token_account_data.token_data.clone(), + )], lock_up_time: *lock_up_time, signer: &payer_pubkey, input_merkle_context: &[MerkleContext { @@ -270,16 +274,18 @@ pub async fn perform_escrow( ], output_compressed_accounts: &Vec::new(), root_indices: &rpc_result.root_indices, - proof: &Some(rpc_result.proof), + proof: &Some(sdk_to_program_compressed_proof(rpc_result.proof)), mint: &input_compressed_token_account_data.token_data.mint, - input_compressed_accounts: &[compressed_input_account_with_context.compressed_account], + input_compressed_accounts: &[sdk_to_program_compressed_account( + compressed_input_account_with_context.compressed_account, + )], }; create_escrow_instruction(create_ix_inputs, *escrow_amount) } -pub async fn perform_escrow_with_event( +pub async fn perform_escrow_with_event + TestIndexerExtensions>( rpc: &mut R, - test_indexer: &mut TestIndexer, + test_indexer: &mut I, env: &EnvAccounts, payer: &Keypair, escrow_amount: &u64, @@ -307,13 +313,13 @@ pub async fn perform_escrow_with_event( .await? 
.unwrap(); let slot = rpc.get_slot().await.unwrap(); - test_indexer.add_compressed_accounts_with_token_data(slot, &event.0); + test_indexer.add_compressed_accounts_with_token_data(slot, &program_to_sdk_public_transaction_event(event.0)); Ok(()) } -pub async fn perform_escrow_failing( +pub async fn perform_escrow_failing + TestIndexerExtensions>( rpc: &mut R, - test_indexer: &mut TestIndexer, + test_indexer: &mut I, env: &EnvAccounts, payer: &Keypair, escrow_amount: &u64, @@ -330,9 +336,9 @@ pub async fn perform_escrow_failing( rpc.process_transaction(transaction).await } -pub async fn assert_escrow( +pub async fn assert_escrow + TestIndexerExtensions> ( rpc: &mut R, - test_indexer: &TestIndexer, + test_indexer: &I, payer_pubkey: &Pubkey, amount: u64, escrow_amount: u64, @@ -340,7 +346,7 @@ pub async fn assert_escrow( ) { let token_owner_pda = get_token_owner_pda(payer_pubkey).0; let token_data_escrow = test_indexer - .token_compressed_accounts + .get_token_compressed_accounts() .iter() .find(|x| x.token_data.owner == token_owner_pda) .unwrap() @@ -350,7 +356,7 @@ pub async fn assert_escrow( assert_eq!(token_data_escrow.owner, token_owner_pda); let token_data_change_compressed_token_account = - test_indexer.token_compressed_accounts[0].token_data.clone(); + test_indexer.get_token_compressed_accounts()[0].token_data.clone(); assert_eq!( token_data_change_compressed_token_account.amount, amount - escrow_amount @@ -369,9 +375,9 @@ pub async fn assert_escrow( assert_eq!(timelock_account.slot, *lock_up_time + current_slot); } -pub async fn perform_withdrawal( +pub async fn perform_withdrawal + TestIndexerExtensions>( context: &mut R, - test_indexer: &mut TestIndexer, + test_indexer: &mut I, env: &EnvAccounts, payer: &Keypair, withdrawal_amount: &u64, @@ -380,7 +386,7 @@ pub async fn perform_withdrawal( let payer_pubkey = payer.pubkey(); let token_owner_pda = get_token_owner_pda(&invalid_signer.unwrap_or(payer_pubkey)).0; let escrow_token_data_with_context = test_indexer - .token_compressed_accounts + .get_token_compressed_accounts() .iter() .find(|x| { x.token_data.owner == token_owner_pda && x.token_data.amount >= *withdrawal_amount @@ -414,7 +420,9 @@ pub async fn perform_withdrawal( .await; let create_ix_inputs = CreateEscrowInstructionInputs { - input_token_data: &[escrow_token_data_with_context.token_data.clone()], + input_token_data: &[sdk_to_program_token_data( + escrow_token_data_with_context.token_data.clone(), + )], lock_up_time: 0, signer: &payer_pubkey, input_merkle_context: &[MerkleContext { @@ -431,17 +439,19 @@ pub async fn perform_withdrawal( ], output_compressed_accounts: &Vec::new(), root_indices: &rpc_result.root_indices, - proof: &Some(rpc_result.proof), + proof: &Some(sdk_to_program_compressed_proof(rpc_result.proof)), mint: &escrow_token_data_with_context.token_data.mint, - input_compressed_accounts: &[compressed_input_account_with_context.compressed_account], + input_compressed_accounts: &[sdk_to_program_compressed_account( + compressed_input_account_with_context.compressed_account, + )], }; create_withdrawal_escrow_instruction(create_ix_inputs, *withdrawal_amount) } -pub async fn perform_withdrawal_with_event( +pub async fn perform_withdrawal_with_event + TestIndexerExtensions>( rpc: &mut R, - test_indexer: &mut TestIndexer, + test_indexer: &mut I, env: &EnvAccounts, payer: &Keypair, withdrawal_amount: &u64, @@ -466,13 +476,13 @@ pub async fn perform_withdrawal_with_event( .await? 
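Because the indexer and prover now return SDK-side types while the instruction builders keep their program-side input types, these helpers convert at the boundary with the `sdk_to_program_*` functions, and convert the other way with `program_to_sdk_public_transaction_event` when feeding emitted events back into the indexer. A condensed sketch of the pattern, with field paths abbreviated from the diff and `account`, `rpc_result`, `slot`, and `event` assumed from the surrounding code:

// SDK values are converted right before instruction construction:
let token_data = sdk_to_program_token_data(account.token_data.clone());
let proof = Some(sdk_to_program_compressed_proof(rpc_result.proof));
let input_account = sdk_to_program_compressed_account(input_with_context.compressed_account);

// And events flow back the other way when recorded by the test indexer:
test_indexer.add_compressed_accounts_with_token_data(
    slot,
    &program_to_sdk_public_transaction_event(event.0),
);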
.unwrap(); let slot = rpc.get_slot().await.unwrap(); - test_indexer.add_compressed_accounts_with_token_data(slot, &event.0); + test_indexer.add_compressed_accounts_with_token_data(slot, &program_to_sdk_public_transaction_event(event.0)); Ok(()) } -pub async fn perform_withdrawal_failing( +pub async fn perform_withdrawal_failing + TestIndexerExtensions>( rpc: &mut R, - test_indexer: &mut TestIndexer, + test_indexer: &mut I, env: &EnvAccounts, payer: &Keypair, withdrawal_amount: &u64, @@ -495,15 +505,15 @@ pub async fn perform_withdrawal_failing( ); rpc.process_transaction(transaction).await } -pub fn assert_withdrawal( - test_indexer: &TestIndexer, +pub fn assert_withdrawal + TestIndexerExtensions>( + test_indexer: &I, payer_pubkey: &Pubkey, withdrawal_amount: u64, escrow_amount: u64, ) { let token_owner_pda = get_token_owner_pda(payer_pubkey).0; let token_data_withdrawal = test_indexer - .token_compressed_accounts + .get_token_compressed_accounts() .iter() .any(|x| x.token_data.owner == *payer_pubkey && x.token_data.amount == withdrawal_amount); @@ -512,7 +522,7 @@ pub fn assert_withdrawal( "Withdrawal compressed account doesn't exist or has incorrect amount {} expected amount", withdrawal_amount ); - let token_data_escrow_change = test_indexer.token_compressed_accounts.iter().any(|x| { + let token_data_escrow_change = test_indexer.get_token_compressed_accounts().iter().any(|x| { x.token_data.owner == token_owner_pda && x.token_data.amount == escrow_amount - withdrawal_amount }); diff --git a/examples/token-escrow/programs/token-escrow/tests/test_compressed_pda.rs b/examples/token-escrow/programs/token-escrow/tests/test_compressed_pda.rs index f1f654fbd..49b30267c 100644 --- a/examples/token-escrow/programs/token-escrow/tests/test_compressed_pda.rs +++ b/examples/token-escrow/programs/token-escrow/tests/test_compressed_pda.rs @@ -15,33 +15,29 @@ use anchor_lang::AnchorDeserialize; use light_hasher::{Hasher, Poseidon}; +use light_program_test::indexer::{TestIndexer, TestIndexerExtensions}; use light_program_test::test_env::{setup_test_programs_with_accounts, EnvAccounts}; use light_prover_client::gnark::helpers::{ProverConfig, ProverMode}; -use light_system_program::{ - sdk::{ - address::derive_address_legacy, compressed_account::MerkleContext, - event::PublicTransactionEvent, - }, - NewAddressParams, +use light_system_program::sdk::address::{derive_address, derive_address_legacy}; +use light_system_program::sdk::compressed_account::MerkleContext; +use light_system_program::sdk::event::PublicTransactionEvent; +use light_system_program::NewAddressParams; +use light_test_utils::conversions::{ + program_to_sdk_public_transaction_event, sdk_to_program_compressed_account, + sdk_to_program_compressed_proof, sdk_to_program_token_data, }; -use light_test_utils::{ - indexer::TestIndexer, - spl::{create_mint_helper, mint_tokens_helper}, - FeeConfig, Indexer, RpcConnection, RpcError, TransactionParams, -}; -use solana_sdk::{ - instruction::{Instruction, InstructionError}, - signature::Keypair, - signer::Signer, - transaction::Transaction, -}; -use token_escrow::{ - escrow_with_compressed_pda::sdk::{ - create_escrow_instruction, create_withdrawal_instruction, get_token_owner_pda, - CreateCompressedPdaEscrowInstructionInputs, CreateCompressedPdaWithdrawalInstructionInputs, - }, - EscrowError, EscrowTimeLock, +use light_test_utils::spl::{create_mint_helper, mint_tokens_helper}; +use light_test_utils::{FeeConfig, RpcConnection, RpcError, TransactionParams}; +use solana_sdk::instruction::{Instruction, 
InstructionError}; +use solana_sdk::signature::Keypair; +use solana_sdk::{signer::Signer, transaction::Transaction}; +use light_client::indexer::Indexer; +use light_client::rpc::merkle_tree::MerkleTreeExt; +use token_escrow::escrow_with_compressed_pda::sdk::{ + create_escrow_instruction, create_withdrawal_instruction, get_token_owner_pda, + CreateCompressedPdaEscrowInstructionInputs, CreateCompressedPdaWithdrawalInstructionInputs, }; +use token_escrow::{EscrowError, EscrowTimeLock}; #[tokio::test] async fn test_escrow_with_compressed_pda() { @@ -49,7 +45,7 @@ async fn test_escrow_with_compressed_pda() { String::from("token_escrow"), token_escrow::ID, )])) - .await; + .await; let payer = rpc.get_payer().insecure_clone(); let test_indexer = TestIndexer::init_from_env( @@ -62,7 +58,7 @@ async fn test_escrow_with_compressed_pda() { ); let mint = create_mint_helper(&mut rpc, &payer).await; let mut test_indexer = test_indexer.await; - test_indexer.state_merkle_trees.remove(1); + let amount = 10000u64; mint_tokens_helper( &mut rpc, @@ -73,7 +69,7 @@ async fn test_escrow_with_compressed_pda() { vec![amount], vec![payer.pubkey()], ) - .await; + .await; let seed = [1u8; 32]; let escrow_amount = 100u64; @@ -88,8 +84,8 @@ async fn test_escrow_with_compressed_pda() { escrow_amount, seed, ) - .await - .unwrap(); + .await + .unwrap(); let current_slot = rpc.get_slot().await.unwrap(); let lockup_end = lock_up_time + current_slot; @@ -102,7 +98,7 @@ async fn test_escrow_with_compressed_pda() { &seed, &lockup_end, ) - .await; + .await; println!("withdrawal _----------------------------------------------------------------"); let withdrawal_amount = escrow_amount; @@ -116,7 +112,7 @@ async fn test_escrow_with_compressed_pda() { new_lock_up_time, withdrawal_amount, ) - .await; + .await; let instruction_error = InstructionError::Custom(EscrowError::EscrowLocked.into()); let transaction_error = @@ -134,8 +130,8 @@ async fn test_escrow_with_compressed_pda() { new_lock_up_time, withdrawal_amount, ) - .await - .unwrap(); + .await + .unwrap(); assert_withdrawal( &mut rpc, @@ -147,10 +143,10 @@ async fn test_escrow_with_compressed_pda() { &seed, new_lock_up_time, ) - .await; + .await; } -pub async fn perform_escrow_failing( +pub async fn perform_escrow_failing( test_indexer: &mut TestIndexer, rpc: &mut R, env: &EnvAccounts, @@ -168,7 +164,7 @@ pub async fn perform_escrow_failing( lock_up_time, escrow_amount, ) - .await; + .await; let latest_blockhash = rpc.get_latest_blockhash().await.unwrap(); let transaction = Transaction::new_signed_with_payer( &[instruction], @@ -180,7 +176,7 @@ pub async fn perform_escrow_failing( rpc.process_transaction(transaction).await } -pub async fn perform_escrow_with_event( +pub async fn perform_escrow_with_event( test_indexer: &mut TestIndexer, rpc: &mut R, env: &EnvAccounts, @@ -198,7 +194,7 @@ pub async fn perform_escrow_with_event( lock_up_time, escrow_amount, ) - .await; + .await; let event = rpc .create_and_send_transaction_with_event::( &[instruction], @@ -214,11 +210,13 @@ pub async fn perform_escrow_with_event( ) .await?; let slot = rpc.get_slot().await.unwrap(); - test_indexer.add_compressed_accounts_with_token_data(slot, &event.unwrap().0); + test_indexer.add_compressed_accounts_with_token_data(slot, &program_to_sdk_public_transaction_event( + event.unwrap().0, + )); Ok(()) } -async fn create_escrow_ix( +async fn create_escrow_ix( payer: &Keypair, test_indexer: &mut TestIndexer, env: &EnvAccounts, @@ -248,11 +246,9 @@ async fn create_escrow_ix( let rpc_result = test_indexer 
.create_proof_for_compressed_accounts( Some(vec![input_compressed_account_hash]), - Some(vec![ - compressed_input_account_with_context - .merkle_context - .merkle_tree_pubkey, - ]), + Some(vec![compressed_input_account_with_context + .merkle_context + .merkle_tree_pubkey]), Some(&[address]), Some(vec![env.address_merkle_tree_pubkey]), context, @@ -266,7 +262,9 @@ async fn create_escrow_ix( address_merkle_tree_root_index: rpc_result.address_root_indices[0], }; let create_ix_inputs = CreateCompressedPdaEscrowInstructionInputs { - input_token_data: &[input_compressed_token_account_data.token_data.clone()], + input_token_data: &[sdk_to_program_token_data( + input_compressed_token_account_data.token_data.clone(), + )], lock_up_time, signer: &payer_pubkey, input_merkle_context: &[MerkleContext { @@ -283,17 +281,19 @@ async fn create_escrow_ix( ], output_compressed_accounts: &Vec::new(), root_indices: &rpc_result.root_indices, - proof: &Some(rpc_result.proof), + proof: &Some(sdk_to_program_compressed_proof(rpc_result.proof)), mint: &input_compressed_token_account_data.token_data.mint, new_address_params, cpi_context_account: &env.cpi_context_account_pubkey, - input_compressed_accounts: &[compressed_input_account_with_context.compressed_account], + input_compressed_accounts: &[sdk_to_program_compressed_account( + compressed_input_account_with_context.compressed_account, + )], }; let instruction = create_escrow_instruction(create_ix_inputs.clone(), escrow_amount); (payer_pubkey, instruction) } -pub async fn assert_escrow( +pub async fn assert_escrow( test_indexer: &mut TestIndexer, env: &EnvAccounts, payer: &Keypair, @@ -327,6 +327,7 @@ pub async fn assert_escrow( .unwrap() .clone(); let address = derive_address_legacy(&env.address_merkle_tree_pubkey, seed).unwrap(); + assert_eq!( compressed_escrow_pda.compressed_account.address.unwrap(), address @@ -357,7 +358,7 @@ pub async fn assert_escrow( Poseidon::hash(&compressed_escrow_pda_data.slot.to_le_bytes()).unwrap(), ); } -pub async fn perform_withdrawal_with_event( +pub async fn perform_withdrawal_with_event( rpc: &mut R, test_indexer: &mut TestIndexer, env: &EnvAccounts, @@ -375,7 +376,7 @@ pub async fn perform_withdrawal_with_event( new_lock_up_time, escrow_amount, ) - .await; + .await; let event = rpc .create_and_send_transaction_with_event::( &[instruction], @@ -385,11 +386,13 @@ pub async fn perform_withdrawal_with_event( ) .await?; let slot = rpc.get_slot().await.unwrap(); - test_indexer.add_compressed_accounts_with_token_data(slot, &event.unwrap().0); + test_indexer.add_compressed_accounts_with_token_data(slot, &program_to_sdk_public_transaction_event( + event.unwrap().0, + )); Ok(()) } -pub async fn perform_withdrawal_failing( +pub async fn perform_withdrawal_failing( rpc: &mut R, test_indexer: &mut TestIndexer, env: &EnvAccounts, @@ -407,7 +410,7 @@ pub async fn perform_withdrawal_failing( new_lock_up_time, escrow_amount, ) - .await; + .await; let latest_blockhash = rpc.get_latest_blockhash().await.unwrap(); let transaction = Transaction::new_signed_with_payer( &[instruction], @@ -417,7 +420,7 @@ pub async fn perform_withdrawal_failing( ); rpc.process_transaction(transaction).await } -pub async fn perform_withdrawal( +pub async fn perform_withdrawal( rpc: &mut R, test_indexer: &mut TestIndexer, env: &EnvAccounts, @@ -476,7 +479,7 @@ pub async fn perform_withdrawal( .await; let create_withdrawal_ix_inputs = CreateCompressedPdaWithdrawalInstructionInputs { - input_token_data: &[token_escrow.token_data.clone()], + input_token_data: 
&[sdk_to_program_token_data(token_escrow.token_data.clone())], signer: &payer_pubkey, input_token_escrow_merkle_context: MerkleContext { leaf_index: token_escrow_account.merkle_context.leaf_index, @@ -496,13 +499,15 @@ pub async fn perform_withdrawal( ], output_compressed_accounts: &Vec::new(), root_indices: &rpc_result.root_indices, - proof: &Some(rpc_result.proof), + proof: &Some(sdk_to_program_compressed_proof(rpc_result.proof)), mint: &token_escrow.token_data.mint, cpi_context_account: &env.cpi_context_account_pubkey, old_lock_up_time, new_lock_up_time, address: compressed_escrow_pda.compressed_account.address.unwrap(), - input_compressed_accounts: &[compressed_escrow_pda.compressed_account], + input_compressed_accounts: &[sdk_to_program_compressed_account( + compressed_escrow_pda.compressed_account, + )], }; create_withdrawal_instruction(create_withdrawal_ix_inputs.clone(), escrow_amount) } @@ -511,7 +516,7 @@ pub async fn perform_withdrawal( /// 2. Withdrawal token account exists /// 3. Compressed pda with update lock-up time exists #[allow(clippy::too_many_arguments)] -pub async fn assert_withdrawal( +pub async fn assert_withdrawal( rpc: &mut R, test_indexer: &mut TestIndexer, env: &EnvAccounts, From 7195829358cd2847b148f6ed9dc42d2b2c641199 Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Thu, 9 Jan 2025 16:55:19 +0000 Subject: [PATCH 16/27] refactor forester/* tests --- forester/src/photon_indexer.rs | 30 +-- forester/tests/batched_address_test.rs | 5 +- forester/tests/batched_state_test.rs | 3 +- forester/tests/e2e_test.rs | 5 +- forester/tests/test_utils.rs | 24 +- js/compressed-token/src/idl.ts | 302 +++++++++++++++++++++++-- sdk-libs/client/src/rpc/solana_rpc.rs | 3 + 7 files changed, 322 insertions(+), 50 deletions(-) diff --git a/forester/src/photon_indexer.rs b/forester/src/photon_indexer.rs index f404203ed..78e0fa2ad 100644 --- a/forester/src/photon_indexer.rs +++ b/forester/src/photon_indexer.rs @@ -64,12 +64,15 @@ impl Indexer for PhotonIndexer { unimplemented!() } - async fn get_multiple_new_address_proofs_full( - &self, - _merkle_tree_pubkey: [u8; 32], - _addresses: Vec<[u8; 32]>, - ) -> Result>, IndexerError> { - unimplemented!() + async fn create_proof_for_compressed_accounts( + &mut self, + _compressed_accounts: Option>, + _state_merkle_tree_pubkeys: Option>, + _new_addresses: Option<&[[u8; 32]]>, + _address_merkle_tree_pubkeys: Option>, + _rpc: &mut R, + ) -> ProofRpcResult { + todo!() } async fn get_multiple_compressed_account_proofs( &self, @@ -221,15 +224,12 @@ impl Indexer for PhotonIndexer { Ok(proofs) } - async fn create_proof_for_compressed_accounts( - &mut self, - _compressed_accounts: Option>, - _state_merkle_tree_pubkeys: Option>, - _new_addresses: Option<&[[u8; 32]]>, - _address_merkle_tree_pubkeys: Option>, - _rpc: &mut R, - ) -> ProofRpcResult { - todo!() + async fn get_multiple_new_address_proofs_full( + &self, + _merkle_tree_pubkey: [u8; 32], + _addresses: Vec<[u8; 32]>, + ) -> Result>, IndexerError> { + unimplemented!() } fn get_proofs_by_indices( diff --git a/forester/tests/batched_address_test.rs b/forester/tests/batched_address_test.rs index ab6b3317c..3ca0ec266 100644 --- a/forester/tests/batched_address_test.rs +++ b/forester/tests/batched_address_test.rs @@ -2,7 +2,6 @@ use std::{sync::Arc, time::Duration}; use forester::run_pipeline; use forester_utils::{ - indexer::AddressMerkleTreeAccounts, registry::{register_test_forester, update_test_forester}, }; use light_batched_merkle_tree::{ @@ -17,7 +16,6 @@ use 
light_program_test::test_env::EnvAccounts; use light_prover_client::gnark::helpers::{LightValidatorConfig, ProverConfig, ProverMode}; use light_test_utils::{ create_address_test_program_sdk::perform_create_pda_with_event_rnd, e2e_test_env::E2ETestEnv, - indexer::TestIndexer, }; use serial_test::serial; use solana_program::native_token::LAMPORTS_PER_SOL; @@ -27,7 +25,8 @@ use tokio::{ time::{sleep, timeout}, }; use tracing::log::info; - +use light_client::indexer::AddressMerkleTreeAccounts; +use light_program_test::indexer::TestIndexer; use crate::test_utils::{forester_config, general_action_config, init, keypair_action_config}; mod test_utils; diff --git a/forester/tests/batched_state_test.rs b/forester/tests/batched_state_test.rs index d19aa0898..74cdb57d6 100644 --- a/forester/tests/batched_state_test.rs +++ b/forester/tests/batched_state_test.rs @@ -14,7 +14,6 @@ use light_program_test::test_env::EnvAccounts; use light_prover_client::gnark::helpers::LightValidatorConfig; use light_test_utils::{ e2e_test_env::{init_program_test_env, E2ETestEnv}, - indexer::TestIndexer, }; use serial_test::serial; use solana_program::native_token::LAMPORTS_PER_SOL; @@ -26,7 +25,7 @@ use tokio::{ time::timeout, }; use tracing::log::info; - +use light_program_test::indexer::TestIndexer; use crate::test_utils::{forester_config, init}; mod test_utils; diff --git a/forester/tests/e2e_test.rs b/forester/tests/e2e_test.rs index f30045dd1..9fd58c40a 100644 --- a/forester/tests/e2e_test.rs +++ b/forester/tests/e2e_test.rs @@ -6,7 +6,6 @@ use account_compression::{ }; use forester::{queue_helpers::fetch_queue_item_data, run_pipeline, utils::get_protocol_config}; use forester_utils::{ - indexer::{AddressMerkleTreeAccounts, StateMerkleTreeAccounts}, registry::register_test_forester, }; use light_client::{ @@ -21,7 +20,7 @@ use light_registry::{ utils::{get_epoch_pda_address, get_forester_epoch_pda_from_authority}, EpochPda, ForesterEpochPda, }; -use light_test_utils::{e2e_test_env::E2ETestEnv, indexer::TestIndexer, update_test_forester}; +use light_test_utils::{e2e_test_env::E2ETestEnv, update_test_forester}; use serial_test::serial; use solana_sdk::{ commitment_config::CommitmentConfig, native_token::LAMPORTS_PER_SOL, pubkey::Pubkey, @@ -31,6 +30,8 @@ use tokio::{ sync::{mpsc, oneshot, Mutex}, time::{sleep, timeout}, }; +use light_client::indexer::{AddressMerkleTreeAccounts, StateMerkleTreeAccounts}; +use light_program_test::indexer::TestIndexer; mod test_utils; use test_utils::*; diff --git a/forester/tests/test_utils.rs b/forester/tests/test_utils.rs index f1a96cca0..b6f11106c 100644 --- a/forester/tests/test_utils.rs +++ b/forester/tests/test_utils.rs @@ -6,16 +6,16 @@ use forester::{ telemetry::setup_telemetry, ForesterConfig, }; -use forester_utils::indexer::{Indexer, IndexerError, NewAddressProofWithContext}; -use light_client::rpc::{RpcConnection, SolanaRpcConnection}; +use light_client::rpc::RpcConnection; use light_program_test::test_env::get_test_env_accounts; use light_prover_client::gnark::helpers::{spawn_validator, LightValidatorConfig}; use light_test_utils::{ e2e_test_env::{GeneralActionConfig, KeypairActionConfig, User}, - indexer::TestIndexer, }; use solana_sdk::signature::{Keypair, Signer}; use tracing::debug; +use light_client::indexer::{Indexer, IndexerError, NewAddressProofWithContext}; +use light_program_test::indexer::TestIndexerExtensions; #[allow(dead_code)] pub async fn init(config: Option) { @@ -108,8 +108,8 @@ pub fn generate_pubkey_254() -> Pubkey { } #[allow(dead_code)] -pub async fn 
assert_new_address_proofs_for_photon_and_test_indexer( - indexer: &mut TestIndexer, +pub async fn assert_new_address_proofs_for_photon_and_test_indexer + TestIndexerExtensions>( + indexer: &mut I, trees: &[Pubkey], addresses: &[Pubkey], photon_indexer: &PhotonIndexer, @@ -176,19 +176,19 @@ pub async fn assert_new_address_proofs_for_photon_and_test_indexer( - indexer: &mut TestIndexer, +pub async fn assert_accounts_by_owner + TestIndexerExtensions>( + indexer: &mut I, user: &User, photon_indexer: &PhotonIndexer, ) { let mut photon_accs = photon_indexer - .get_rpc_compressed_accounts_by_owner(&user.keypair.pubkey()) + .get_compressed_accounts_by_owner(&user.keypair.pubkey()) .await .unwrap(); photon_accs.sort(); let mut test_accs = indexer - .get_rpc_compressed_accounts_by_owner(&user.keypair.pubkey()) + .get_compressed_accounts_by_owner(&user.keypair.pubkey()) .await .unwrap(); test_accs.sort(); @@ -210,13 +210,13 @@ pub async fn assert_accounts_by_owner( } #[allow(dead_code)] -pub async fn assert_account_proofs_for_photon_and_test_indexer( - indexer: &mut TestIndexer, +pub async fn assert_account_proofs_for_photon_and_test_indexer + TestIndexerExtensions>( + indexer: &mut I, user_pubkey: &Pubkey, photon_indexer: &PhotonIndexer, ) { let accs: Result, IndexerError> = indexer - .get_rpc_compressed_accounts_by_owner(user_pubkey) + .get_compressed_accounts_by_owner(user_pubkey) .await; for account_hash in accs.unwrap() { let photon_result = photon_indexer diff --git a/js/compressed-token/src/idl.ts b/js/compressed-token/src/idl.ts index 909301341..5ce45b666 100644 --- a/js/compressed-token/src/idl.ts +++ b/js/compressed-token/src/idl.ts @@ -1581,23 +1581,158 @@ export type LightCompressedToken = { errors: [ { code: 6000; - name: 'SignerCheckFailed'; - msg: 'Signer check failed'; + name: 'PublicKeyAmountMissmatch'; + msg: 'public keys and amounts must be of same length'; }, { code: 6001; - name: 'CreateTransferInstructionFailed'; - msg: 'Create transfer instruction failed'; + name: 'ComputeInputSumFailed'; + msg: 'ComputeInputSumFailed'; }, { code: 6002; - name: 'AccountNotFound'; - msg: 'Account not found'; + name: 'ComputeOutputSumFailed'; + msg: 'ComputeOutputSumFailed'; }, { code: 6003; - name: 'SerializationError'; - msg: 'Serialization error'; + name: 'ComputeCompressSumFailed'; + msg: 'ComputeCompressSumFailed'; + }, + { + code: 6004; + name: 'ComputeDecompressSumFailed'; + msg: 'ComputeDecompressSumFailed'; + }, + { + code: 6005; + name: 'SumCheckFailed'; + msg: 'SumCheckFailed'; + }, + { + code: 6006; + name: 'DecompressRecipientUndefinedForDecompress'; + msg: 'DecompressRecipientUndefinedForDecompress'; + }, + { + code: 6007; + name: 'CompressedPdaUndefinedForDecompress'; + msg: 'CompressedPdaUndefinedForDecompress'; + }, + { + code: 6008; + name: 'DeCompressAmountUndefinedForDecompress'; + msg: 'DeCompressAmountUndefinedForDecompress'; + }, + { + code: 6009; + name: 'CompressedPdaUndefinedForCompress'; + msg: 'CompressedPdaUndefinedForCompress'; + }, + { + code: 6010; + name: 'DeCompressAmountUndefinedForCompress'; + msg: 'DeCompressAmountUndefinedForCompress'; + }, + { + code: 6011; + name: 'DelegateSignerCheckFailed'; + msg: 'DelegateSignerCheckFailed'; + }, + { + code: 6012; + name: 'MintTooLarge'; + msg: 'Minted amount greater than u64::MAX'; + }, + { + code: 6013; + name: 'SplTokenSupplyMismatch'; + msg: 'SplTokenSupplyMismatch'; + }, + { + code: 6014; + name: 'HeapMemoryCheckFailed'; + msg: 'HeapMemoryCheckFailed'; + }, + { + code: 6015; + name: 'InstructionNotCallable'; + msg: 
'The instruction is not callable'; + }, + { + code: 6016; + name: 'ArithmeticUnderflow'; + msg: 'ArithmeticUnderflow'; + }, + { + code: 6017; + name: 'HashToFieldError'; + msg: 'HashToFieldError'; + }, + { + code: 6018; + name: 'InvalidAuthorityMint'; + msg: 'Expected the authority to be also a mint authority'; + }, + { + code: 6019; + name: 'InvalidFreezeAuthority'; + msg: 'Provided authority is not the freeze authority'; + }, + { + code: 6020; + name: 'InvalidDelegateIndex'; + }, + { + code: 6021; + name: 'TokenPoolPdaUndefined'; + }, + { + code: 6022; + name: 'IsTokenPoolPda'; + msg: 'Compress or decompress recipient is the same account as the token pool pda.'; + }, + { + code: 6023; + name: 'InvalidTokenPoolPda'; + }, + { + code: 6024; + name: 'NoInputTokenAccountsProvided'; + }, + { + code: 6025; + name: 'NoInputsProvided'; + }, + { + code: 6026; + name: 'MintHasNoFreezeAuthority'; + }, + { + code: 6027; + name: 'MintWithInvalidExtension'; + }, + { + code: 6028; + name: 'InsufficientTokenAccountBalance'; + msg: 'The token account balance is less than the remaining amount.'; + }, + { + code: 6029; + name: 'InvalidTokenPoolBump'; + msg: 'Max number of token pools reached.'; + }, + { + code: 6030; + name: 'FailedToDecompress'; + }, + { + code: 6031; + name: 'FailedToBurnSplTokensFromTokenPool'; + }, + { + code: 6032; + name: 'NoMatchingBumpFound'; }, ]; }; @@ -3189,23 +3324,158 @@ export const IDL: LightCompressedToken = { errors: [ { code: 6000, - name: 'SignerCheckFailed', - msg: 'Signer check failed', + name: 'PublicKeyAmountMissmatch', + msg: 'public keys and amounts must be of same length', }, { code: 6001, - name: 'CreateTransferInstructionFailed', - msg: 'Create transfer instruction failed', + name: 'ComputeInputSumFailed', + msg: 'ComputeInputSumFailed', }, { code: 6002, - name: 'AccountNotFound', - msg: 'Account not found', + name: 'ComputeOutputSumFailed', + msg: 'ComputeOutputSumFailed', }, { code: 6003, - name: 'SerializationError', - msg: 'Serialization error', + name: 'ComputeCompressSumFailed', + msg: 'ComputeCompressSumFailed', + }, + { + code: 6004, + name: 'ComputeDecompressSumFailed', + msg: 'ComputeDecompressSumFailed', + }, + { + code: 6005, + name: 'SumCheckFailed', + msg: 'SumCheckFailed', + }, + { + code: 6006, + name: 'DecompressRecipientUndefinedForDecompress', + msg: 'DecompressRecipientUndefinedForDecompress', + }, + { + code: 6007, + name: 'CompressedPdaUndefinedForDecompress', + msg: 'CompressedPdaUndefinedForDecompress', + }, + { + code: 6008, + name: 'DeCompressAmountUndefinedForDecompress', + msg: 'DeCompressAmountUndefinedForDecompress', + }, + { + code: 6009, + name: 'CompressedPdaUndefinedForCompress', + msg: 'CompressedPdaUndefinedForCompress', + }, + { + code: 6010, + name: 'DeCompressAmountUndefinedForCompress', + msg: 'DeCompressAmountUndefinedForCompress', + }, + { + code: 6011, + name: 'DelegateSignerCheckFailed', + msg: 'DelegateSignerCheckFailed', + }, + { + code: 6012, + name: 'MintTooLarge', + msg: 'Minted amount greater than u64::MAX', + }, + { + code: 6013, + name: 'SplTokenSupplyMismatch', + msg: 'SplTokenSupplyMismatch', + }, + { + code: 6014, + name: 'HeapMemoryCheckFailed', + msg: 'HeapMemoryCheckFailed', + }, + { + code: 6015, + name: 'InstructionNotCallable', + msg: 'The instruction is not callable', + }, + { + code: 6016, + name: 'ArithmeticUnderflow', + msg: 'ArithmeticUnderflow', + }, + { + code: 6017, + name: 'HashToFieldError', + msg: 'HashToFieldError', + }, + { + code: 6018, + name: 'InvalidAuthorityMint', + msg: 'Expected the 
authority to be also a mint authority', + }, + { + code: 6019, + name: 'InvalidFreezeAuthority', + msg: 'Provided authority is not the freeze authority', + }, + { + code: 6020, + name: 'InvalidDelegateIndex', + }, + { + code: 6021, + name: 'TokenPoolPdaUndefined', + }, + { + code: 6022, + name: 'IsTokenPoolPda', + msg: 'Compress or decompress recipient is the same account as the token pool pda.', + }, + { + code: 6023, + name: 'InvalidTokenPoolPda', + }, + { + code: 6024, + name: 'NoInputTokenAccountsProvided', + }, + { + code: 6025, + name: 'NoInputsProvided', + }, + { + code: 6026, + name: 'MintHasNoFreezeAuthority', + }, + { + code: 6027, + name: 'MintWithInvalidExtension', + }, + { + code: 6028, + name: 'InsufficientTokenAccountBalance', + msg: 'The token account balance is less than the remaining amount.', + }, + { + code: 6029, + name: 'InvalidTokenPoolBump', + msg: 'Max number of token pools reached.', + }, + { + code: 6030, + name: 'FailedToDecompress', + }, + { + code: 6031, + name: 'FailedToBurnSplTokensFromTokenPool', + }, + { + code: 6032, + name: 'NoMatchingBumpFound', }, ], }; diff --git a/sdk-libs/client/src/rpc/solana_rpc.rs b/sdk-libs/client/src/rpc/solana_rpc.rs index ce8e0ed38..e2ec99a23 100644 --- a/sdk-libs/client/src/rpc/solana_rpc.rs +++ b/sdk-libs/client/src/rpc/solana_rpc.rs @@ -30,6 +30,7 @@ use crate::{ rpc::{errors::RpcError, rpc_connection::RpcConnection}, transaction_params::TransactionParams, }; +use crate::rpc::merkle_tree::MerkleTreeExt; pub enum SolanaRpcUrl { Testnet, @@ -461,3 +462,5 @@ impl RpcConnection for SolanaRpcConnection { .await } } + +impl MerkleTreeExt for SolanaRpcConnection {} \ No newline at end of file From 68f5e488632fa3a793c867f04212e0275fc8c15d Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Thu, 9 Jan 2025 17:08:27 +0000 Subject: [PATCH 17/27] program-tests/system_program.rs: fix proof Some/None --- program-tests/utils/src/system_program.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/program-tests/utils/src/system_program.rs b/program-tests/utils/src/system_program.rs index 3bc763af9..c54fc7fca 100644 --- a/program-tests/utils/src/system_program.rs +++ b/program-tests/utils/src/system_program.rs @@ -337,9 +337,9 @@ pub async fn compressed_transaction_test< Some(state_input_merkle_trees) }; let mut root_indices = Vec::new(); - let mut proof = None; let mut input_merkle_tree_snapshots = Vec::new(); let mut address_params = Vec::new(); + let mut proof = None; if !inputs.input_compressed_accounts.is_empty() || !inputs.new_address_params.is_empty() { let address_merkle_tree_pubkeys = if inputs.new_address_params.is_empty() { None @@ -364,9 +364,11 @@ pub async fn compressed_transaction_test< .await; root_indices = proof_rpc_res.root_indices; - proof = Some(sdk_to_program_compressed_proof( - proof_rpc_res.proof.unwrap_or_default(), - )); + + if let Some(proof_rpc_res) = proof_rpc_res.proof { + proof = Some(sdk_to_program_compressed_proof(proof_rpc_res)); + } + let input_merkle_tree_accounts = inputs .test_indexer .get_state_merkle_tree_accounts(state_input_merkle_trees.unwrap_or(vec![]).as_slice()); From dfe6f93ed6598addef5be990f422704e813032bc Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Thu, 9 Jan 2025 18:34:13 +0000 Subject: [PATCH 18/27] refactor dependencies in compressed-token-test --- program-tests/compressed-token-test/Cargo.toml | 6 +++--- program-tests/compressed-token-test/tests/test.rs | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git 
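The system_program test fix above replaces an unconditional `unwrap_or_default()` with an explicit check: when the prover returns no proof, no zeroed placeholder must be converted and sent. Per the comment on `BatchedTreeProofRpcResult` ("If none -> proof by index"), a missing proof is the legitimate proof-by-index case for batched trees. The resulting pattern, with names from the hunk:

// Leave `proof` as None unless the RPC result actually contains one;
// converting a defaulted (all-zero) proof would produce an invalid argument.
let mut proof = None;
if let Some(p) = proof_rpc_res.proof {
    proof = Some(sdk_to_program_compressed_proof(p));
}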
a/program-tests/compressed-token-test/Cargo.toml b/program-tests/compressed-token-test/Cargo.toml index 1775c35a1..cd05bd812 100644 --- a/program-tests/compressed-token-test/Cargo.toml +++ b/program-tests/compressed-token-test/Cargo.toml @@ -22,14 +22,14 @@ anchor-lang = { workspace = true } light-compressed-token = { workspace = true } light-system-program = { workspace = true } account-compression = { workspace = true } -light-client = { workspace = true } -light-sdk = { workspace = true } -light-verifier = {workspace = true} [target.'cfg(not(target_os = "solana"))'.dependencies] solana-sdk = { workspace = true } [dev-dependencies] +light-client = { workspace = true } +light-sdk = { workspace = true } +light-verifier = {workspace = true} light-test-utils = { workspace = true, features=["devenv"] } light-program-test = { workspace = true, features = ["devenv"] } tokio = { workspace = true } diff --git a/program-tests/compressed-token-test/tests/test.rs b/program-tests/compressed-token-test/tests/test.rs index f954719e0..5a361c169 100644 --- a/program-tests/compressed-token-test/tests/test.rs +++ b/program-tests/compressed-token-test/tests/test.rs @@ -5113,7 +5113,7 @@ async fn test_invalid_inputs() { let mut input_compressed_account_token_data = test_indexer.token_compressed_accounts[0].token_data.clone(); input_compressed_account_token_data.delegate = Some(Pubkey::new_unique()); - let mut input_compressed_accounts = vec![test_indexer.token_compressed_accounts[0] + let input_compressed_accounts = vec![test_indexer.token_compressed_accounts[0] .compressed_account .clone()]; let mut input_compressed_accounts = input_compressed_accounts From d49d687dda35a3e3e34f2b10c078071f636ebc63 Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Thu, 9 Jan 2025 19:21:47 +0000 Subject: [PATCH 19/27] remove old indexer files --- Cargo.lock | 262 +-- forester-utils/src/indexer/mod.rs | 357 ---- forester-utils/src/lib.rs | 1 - program-tests/utils/src/indexer/mod.rs | 2 - .../utils/src/indexer/test_indexer.rs | 1779 ----------------- program-tests/utils/src/lib.rs | 1 - sdk-libs/sdk/Cargo.toml | 25 +- 7 files changed, 28 insertions(+), 2399 deletions(-) delete mode 100644 forester-utils/src/indexer/mod.rs delete mode 100644 program-tests/utils/src/indexer/mod.rs delete mode 100644 program-tests/utils/src/indexer/test_indexer.rs diff --git a/Cargo.lock b/Cargo.lock index f85893d61..08e83d793 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -674,12 +674,6 @@ dependencies = [ "syn 2.0.85", ] -[[package]] -name = "atomic-waker" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" - [[package]] name = "atty" version = "0.2.14" @@ -1682,7 +1676,7 @@ dependencies = [ "num-bigint 0.4.6", "num-traits", "rand 0.8.5", - "reqwest 0.11.27", + "reqwest", "solana-client", "solana-program-test", "solana-sdk", @@ -1956,7 +1950,7 @@ dependencies = [ "light-test-utils", "photon-api", "prometheus", - "reqwest 0.11.27", + "reqwest", "scopeguard", "serde", "serde_json", @@ -1998,7 +1992,7 @@ dependencies = [ "num-bigint 0.4.6", "num-traits", "photon-api", - "reqwest 0.11.27", + "reqwest", "solana-client", "solana-program-test", "solana-sdk", @@ -2215,25 +2209,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "h2" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" -dependencies = [ - 
"atomic-waker", - "bytes", - "fnv", - "futures-core", - "futures-sink", - "http 1.1.0", - "indexmap 2.5.0", - "slab", - "tokio", - "tokio-util 0.7.13", - "tracing", -] - [[package]] name = "hash32" version = "0.2.1" @@ -2411,29 +2386,6 @@ dependencies = [ "pin-project-lite", ] -[[package]] -name = "http-body" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" -dependencies = [ - "bytes", - "http 1.1.0", -] - -[[package]] -name = "http-body-util" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" -dependencies = [ - "bytes", - "futures-util", - "http 1.1.0", - "http-body 1.0.1", - "pin-project-lite", -] - [[package]] name = "httparse" version = "1.9.4" @@ -2462,9 +2414,9 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2 0.3.26", + "h2", "http 0.2.12", - "http-body 0.4.6", + "http-body", "httparse", "httpdate", "itoa", @@ -2476,26 +2428,6 @@ dependencies = [ "want", ] -[[package]] -name = "hyper" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" -dependencies = [ - "bytes", - "futures-channel", - "futures-util", - "h2 0.4.6", - "http 1.1.0", - "http-body 1.0.1", - "httparse", - "itoa", - "pin-project-lite", - "smallvec", - "tokio", - "want", -] - [[package]] name = "hyper-rustls" version = "0.24.2" @@ -2504,7 +2436,7 @@ checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http 0.2.12", - "hyper 0.14.30", + "hyper", "rustls", "tokio", "tokio-rustls", @@ -2517,48 +2449,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", - "hyper 0.14.30", + "hyper", "native-tls", "tokio", "tokio-native-tls", ] -[[package]] -name = "hyper-tls" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" -dependencies = [ - "bytes", - "http-body-util", - "hyper 1.4.1", - "hyper-util", - "native-tls", - "tokio", - "tokio-native-tls", - "tower-service", -] - -[[package]] -name = "hyper-util" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9" -dependencies = [ - "bytes", - "futures-channel", - "futures-util", - "http 1.1.0", - "http-body 1.0.1", - "hyper 1.4.1", - "pin-project-lite", - "socket2", - "tokio", - "tower", - "tower-service", - "tracing", -] - [[package]] name = "iana-time-zone" version = "0.1.60" @@ -2897,7 +2793,7 @@ dependencies = [ "num-traits", "photon-api", "rand 0.8.5", - "reqwest 0.11.27", + "reqwest", "solana-banks-client", "solana-client", "solana-program", @@ -3079,7 +2975,7 @@ dependencies = [ "log", "num-bigint 0.4.6", "num-traits", - "reqwest 0.11.27", + "reqwest", "solana-banks-client", "solana-program-test", "solana-sdk", @@ -3106,7 +3002,7 @@ dependencies = [ "num-bigint 0.4.6", "num-traits", "once_cell", - "reqwest 0.11.27", + "reqwest", "serde", "serde_json", "serial_test", @@ -3136,35 +3032,18 @@ name = "light-sdk" version = "0.11.0" dependencies = [ "account-compression", - 
"aligned-sized", "anchor-lang", "borsh 0.10.3", - "bytemuck", - "groth16-solana", - "lazy_static", - "light-concurrent-merkle-tree", - "light-hash-set", "light-hasher", "light-heap", "light-indexed-merkle-tree", "light-macros", - "light-merkle-tree-reference", - "light-prover-client", "light-sdk-macros", "light-system-program", "light-utils 1.1.0", - "light-verifier", "num-bigint 0.4.6", - "num-traits", - "rand 0.8.5", - "reqwest 0.12.4", - "serde_json", - "solana-banks-interface", - "solana-cli-output", "solana-program", - "solana-program-test", "solana-sdk", - "tokio", ] [[package]] @@ -3236,7 +3115,7 @@ dependencies = [ "num-traits", "photon-api", "rand 0.8.5", - "reqwest 0.11.27", + "reqwest", "serde", "solana-client", "solana-program-test", @@ -3291,7 +3170,7 @@ dependencies = [ "groth16-solana", "light-prover-client", "light-utils 1.1.0", - "reqwest 0.11.27", + "reqwest", "serial_test", "solana-program", "thiserror", @@ -3984,7 +3863,7 @@ dependencies = [ name = "photon-api" version = "0.45.0" dependencies = [ - "reqwest 0.11.27", + "reqwest", "serde", "serde_derive", "serde_json", @@ -4522,12 +4401,12 @@ dependencies = [ "encoding_rs", "futures-core", "futures-util", - "h2 0.3.26", + "h2", "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.30", + "http-body", + "hyper", "hyper-rustls", - "hyper-tls 0.5.0", + "hyper-tls", "ipnet", "js-sys", "log", @@ -4538,7 +4417,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "rustls", - "rustls-pemfile 1.0.4", + "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", @@ -4554,49 +4433,7 @@ dependencies = [ "wasm-bindgen-futures", "web-sys", "webpki-roots 0.25.4", - "winreg 0.50.0", -] - -[[package]] -name = "reqwest" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "566cafdd92868e0939d3fb961bd0dc25fcfaaed179291093b3d43e6b3150ea10" -dependencies = [ - "base64 0.22.1", - "bytes", - "encoding_rs", - "futures-core", - "futures-util", - "h2 0.4.6", - "http 1.1.0", - "http-body 1.0.1", - "http-body-util", - "hyper 1.4.1", - "hyper-tls 0.6.0", - "hyper-util", - "ipnet", - "js-sys", - "log", - "mime", - "native-tls", - "once_cell", - "percent-encoding", - "pin-project-lite", - "rustls-pemfile 2.1.3", - "serde", - "serde_json", - "serde_urlencoded", - "sync_wrapper", - "system-configuration", - "tokio", - "tokio-native-tls", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "winreg 0.52.0", + "winreg", ] [[package]] @@ -4712,7 +4549,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" dependencies = [ "openssl-probe", - "rustls-pemfile 1.0.4", + "rustls-pemfile", "schannel", "security-framework", ] @@ -4726,22 +4563,6 @@ dependencies = [ "base64 0.21.7", ] -[[package]] -name = "rustls-pemfile" -version = "2.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" -dependencies = [ - "base64 0.22.1", - "rustls-pki-types", -] - -[[package]] -name = "rustls-pki-types" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" - [[package]] name = "rustls-webpki" version = "0.101.7" @@ -5588,7 +5409,7 @@ dependencies = [ "gethostname", "lazy_static", "log", - "reqwest 0.11.27", + "reqwest", "solana-sdk", "thiserror", ] @@ -5766,7 +5587,7 
@@ dependencies = [ "crossbeam-channel", "futures-util", "log", - "reqwest 0.11.27", + "reqwest", "semver", "serde", "serde_derive", @@ -5846,7 +5667,7 @@ dependencies = [ "bs58 0.4.0", "indicatif", "log", - "reqwest 0.11.27", + "reqwest", "semver", "serde", "serde_derive", @@ -5868,7 +5689,7 @@ dependencies = [ "base64 0.21.7", "bs58 0.4.0", "jsonrpc-core", - "reqwest 0.11.27", + "reqwest", "semver", "serde", "serde_derive", @@ -6961,7 +6782,7 @@ dependencies = [ "light-verifier", "num-bigint 0.4.6", "num-traits", - "reqwest 0.11.27", + "reqwest", "serial_test", "solana-program-test", "solana-sdk", @@ -7429,27 +7250,6 @@ dependencies = [ "winnow 0.6.18", ] -[[package]] -name = "tower" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" -dependencies = [ - "futures-core", - "futures-util", - "pin-project", - "pin-project-lite", - "tokio", - "tower-layer", - "tower-service", -] - -[[package]] -name = "tower-layer" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" - [[package]] name = "tower-service" version = "0.3.3" @@ -7802,7 +7602,7 @@ dependencies = [ "futures-util", "headers", "http 0.2.12", - "hyper 0.14.30", + "hyper", "log", "mime", "mime_guess", @@ -8193,16 +7993,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "winreg" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" -dependencies = [ - "cfg-if", - "windows-sys 0.48.0", -] - [[package]] name = "wyz" version = "0.5.1" diff --git a/forester-utils/src/indexer/mod.rs b/forester-utils/src/indexer/mod.rs deleted file mode 100644 index 2cbf037b2..000000000 --- a/forester-utils/src/indexer/mod.rs +++ /dev/null @@ -1,357 +0,0 @@ -// use std::fmt::Debug; -// -// use account_compression::initialize_address_merkle_tree::{ -// Error as AccountCompressionError, Pubkey, -// }; -// use async_trait::async_trait; -// use light_client::rpc::RpcConnection; -// use light_compressed_token::TokenData; -// use light_hash_set::HashSetError; -// use light_hasher::Poseidon; -// use light_indexed_merkle_tree::{ -// array::{IndexedArray, IndexedElement}, -// reference::IndexedMerkleTree, -// }; -// use light_merkle_tree_reference::MerkleTree; -// use light_system_program::{ -// invoke::processor::CompressedProof, -// sdk::{compressed_account::CompressedAccountWithMerkleContext, event::PublicTransactionEvent}, -// }; -// use num_bigint::BigUint; -// use photon_api::apis::{default_api::GetCompressedAccountProofPostError, Error as PhotonApiError}; -// use solana_sdk::signature::Keypair; -// use thiserror::Error; -// -// #[derive(Debug, Clone)] -// pub struct TokenDataWithContext { -// pub token_data: TokenData, -// pub compressed_account: CompressedAccountWithMerkleContext, -// } -// -// #[derive(Debug, Default)] -// pub struct BatchedTreeProofRpcResult { -// pub proof: Option, -// // If none -> proof by index, else included in zkp -// pub root_indices: Vec>, -// pub address_root_indices: Vec, -// } -// -// #[derive(Debug, Default)] -// pub struct ProofRpcResult { -// pub proof: CompressedProof, -// pub root_indices: Vec>, -// pub address_root_indices: Vec, -// } -// -// #[derive(Debug, Clone, Copy, Ord, PartialOrd, Eq, PartialEq)] -// pub struct StateMerkleTreeAccounts { -// pub 
merkle_tree: Pubkey, -// pub nullifier_queue: Pubkey, -// pub cpi_context: Pubkey, -// } -// -// #[derive(Debug, Clone, Copy)] -// pub struct AddressMerkleTreeAccounts { -// pub merkle_tree: Pubkey, -// pub queue: Pubkey, -// } -// -// #[derive(Debug, Clone)] -// pub struct StateMerkleTreeBundle { -// pub rollover_fee: i64, -// pub merkle_tree: Box>, -// pub accounts: StateMerkleTreeAccounts, -// pub version: u64, -// pub output_queue_elements: Vec<[u8; 32]>, -// /// leaf index, leaf, tx hash -// pub input_leaf_indices: Vec<(u32, [u8; 32], [u8; 32])>, -// } -// -// #[derive(Debug, Clone)] -// pub struct AddressMerkleTreeBundle { -// pub rollover_fee: i64, -// pub merkle_tree: Box>, -// pub indexed_array: Box>, -// pub accounts: AddressMerkleTreeAccounts, -// pub queue_elements: Vec<[u8; 32]>, -// } -// -// pub struct ProofOfLeaf { -// pub leaf: [u8; 32], -// pub proof: Vec<[u8; 32]>, -// } -// -// #[async_trait] -// pub trait Indexer: Sync + Send + Debug + 'static { -// /// Returns queue elements from the queue with the given pubkey. For input -// /// queues account compression program does not store queue elements in the -// /// account data but only emits these in the public transaction event. The -// /// indexer needs the queue elements to create batch update proofs. -// -// // i -// async fn get_queue_elements( -// &self, -// pubkey: [u8; 32], -// batch: u64, -// start_offset: u64, -// end_offset: u64, -// ) -> Result, IndexerError>; -// -// // e -// fn get_proof_by_index(&mut self, _merkle_tree_pubkey: Pubkey, _index: u64) -> ProofOfLeaf { -// unimplemented!("get_proof_by_index not implemented") -// } -// -// // e -// fn get_proofs_by_indices( -// &mut self, -// _merkle_tree_pubkey: Pubkey, -// _indices: &[u64], -// ) -> Vec { -// unimplemented!("get_proof_by_index not implemented") -// } -// -// // e -// fn get_leaf_indices_tx_hashes( -// &mut self, -// _merkle_tree_pubkey: Pubkey, -// _zkp_batch_size: usize, -// ) -> Vec<(u32, [u8; 32], [u8; 32])> { -// unimplemented!(); -// } -// -// // i -// async fn get_subtrees( -// &self, -// merkle_tree_pubkey: [u8; 32], -// ) -> Result, IndexerError>; -// -// // i -// async fn get_multiple_compressed_account_proofs( -// &self, -// hashes: Vec, -// ) -> Result, IndexerError>; -// -// async fn get_rpc_compressed_accounts_by_owner( -// &self, -// owner: &Pubkey, -// ) -> Result, IndexerError>; -// -// // i -// async fn get_multiple_new_address_proofs( -// &self, -// merkle_tree_pubkey: [u8; 32], -// addresses: Vec<[u8; 32]>, -// ) -> Result>, IndexerError>; -// -// // i -// async fn get_multiple_new_address_proofs_full( -// &self, -// merkle_tree_pubkey: [u8; 32], -// addresses: Vec<[u8; 32]>, -// ) -> Result>, IndexerError>; -// -// // e -// fn account_nullified(&mut self, _merkle_tree_pubkey: Pubkey, _account_hash: &str) {} -// -// // e -// fn address_tree_updated( -// &mut self, -// _merkle_tree_pubkey: Pubkey, -// _context: &NewAddressProofWithContext<16>, -// ) { -// } -// -// // e -// fn get_state_merkle_tree_accounts(&self, _pubkeys: &[Pubkey]) -> Vec { -// unimplemented!() -// } -// -// // e -// fn add_event_and_compressed_accounts( -// &mut self, -// _slot: u64, -// _event: &PublicTransactionEvent, -// ) -> ( -// Vec, -// Vec, -// ) { -// unimplemented!() -// } -// -// // e -// fn get_state_merkle_trees(&self) -> &Vec { -// unimplemented!() -// } -// -// // e -// fn get_state_merkle_trees_mut(&mut self) -> &mut Vec { -// unimplemented!() -// } -// -// // e -// fn get_address_merkle_trees(&self) -> &Vec { -// unimplemented!() -// } -// 
-// // e -// fn get_address_merkle_trees_mut(&mut self) -> &mut Vec { -// unimplemented!() -// } -// -// // e -// fn get_token_compressed_accounts(&self) -> &Vec { -// unimplemented!() -// } -// -// // e -// fn get_payer(&self) -> &Keypair { -// unimplemented!() -// } -// -// // e -// fn get_group_pda(&self) -> &Pubkey { -// unimplemented!() -// } -// -// // i + e -// async fn create_proof_for_compressed_accounts( -// &mut self, -// _compressed_accounts: Option>, -// _state_merkle_tree_pubkeys: Option>, -// _new_addresses: Option<&[[u8; 32]]>, -// _address_merkle_tree_pubkeys: Option>, -// _rpc: &mut R, -// ) -> ProofRpcResult { -// unimplemented!() -// } -// -// // e -// async fn create_proof_for_compressed_accounts2( -// &mut self, -// _compressed_accounts: Option>, -// _state_merkle_tree_pubkeys: Option>, -// _new_addresses: Option<&[[u8; 32]]>, -// _address_merkle_tree_pubkeys: Option>, -// _rpc: &mut R, -// ) -> BatchedTreeProofRpcResult { -// unimplemented!() -// } -// -// // e -// fn add_address_merkle_tree_accounts( -// &mut self, -// _merkle_tree_keypair: &Keypair, -// _queue_keypair: &Keypair, -// _owning_program_id: Option, -// ) -> AddressMerkleTreeAccounts { -// unimplemented!() -// } -// -// // i -// fn get_compressed_accounts_by_owner( -// &self, -// _owner: &Pubkey, -// ) -> Vec { -// unimplemented!() -// } -// -// // e -// fn get_compressed_token_accounts_by_owner(&self, _owner: &Pubkey) -> Vec { -// unimplemented!() -// } -// -// // e -// fn add_state_bundle(&mut self, _state_bundle: StateMerkleTreeBundle) { -// unimplemented!() -// } -// -// // e -// async fn update_test_indexer_after_append( -// &mut self, -// _rpc: &mut R, -// _merkle_tree_pubkey: Pubkey, -// _output_queue_pubkey: Pubkey, -// _num_inserted_zkps: u64, -// ) { -// unimplemented!() -// } -// -// // e -// async fn update_test_indexer_after_nullification( -// &mut self, -// _rpc: &mut R, -// _merkle_tree_pubkey: Pubkey, -// _batch_index: usize, -// ) { -// unimplemented!() -// } -// -// // e -// async fn finalize_batched_address_tree_update( -// &mut self, -// _rpc: &mut R, -// _merkle_tree_pubkey: Pubkey, -// ) { -// unimplemented!() -// } -// } -// -// #[derive(Debug, Clone)] -// pub struct MerkleProof { -// pub hash: String, -// pub leaf_index: u64, -// pub merkle_tree: String, -// pub proof: Vec<[u8; 32]>, -// pub root_seq: u64, -// } -// -// // For consistency with the Photon API. 
-// #[derive(Clone, Debug, PartialEq)] -// pub struct NewAddressProofWithContext { -// pub merkle_tree: [u8; 32], -// pub root: [u8; 32], -// pub root_seq: u64, -// pub low_address_index: u64, -// pub low_address_value: [u8; 32], -// pub low_address_next_index: u64, -// pub low_address_next_value: [u8; 32], -// pub low_address_proof: [[u8; 32]; NET_HEIGHT], -// pub new_low_element: Option>, -// pub new_element: Option>, -// pub new_element_next_value: Option, -// } -// -// #[derive(Error, Debug)] -// pub enum IndexerError { -// #[error("RPC Error: {0}")] -// RpcError(#[from] solana_client::client_error::ClientError), -// #[error("failed to deserialize account data")] -// DeserializeError(#[from] solana_sdk::program_error::ProgramError), -// #[error("failed to copy merkle tree")] -// CopyMerkleTreeError(#[from] std::io::Error), -// #[error(transparent)] -// AccountCompressionError(#[from] AccountCompressionError), -// #[error(transparent)] -// HashSetError(#[from] HashSetError), -// #[error(transparent)] -// PhotonApiError(PhotonApiErrorWrapper), -// #[error("error: {0:?}")] -// Custom(String), -// #[error("unknown error")] -// Unknown, -// } -// -// #[derive(Error, Debug)] -// pub enum PhotonApiErrorWrapper { -// #[error(transparent)] -// GetCompressedAccountProofPostError(#[from] PhotonApiError), -// } -// -// impl From> for IndexerError { -// fn from(err: PhotonApiError) -> Self { -// IndexerError::PhotonApiError(PhotonApiErrorWrapper::GetCompressedAccountProofPostError( -// err, -// )) -// } -// } diff --git a/forester-utils/src/lib.rs b/forester-utils/src/lib.rs index d8beb6159..c83a36c21 100644 --- a/forester-utils/src/lib.rs +++ b/forester-utils/src/lib.rs @@ -16,7 +16,6 @@ use solana_sdk::{ pub mod address_merkle_tree_config; pub mod forester_epoch; -pub mod indexer; pub mod instructions; pub mod registry; diff --git a/program-tests/utils/src/indexer/mod.rs b/program-tests/utils/src/indexer/mod.rs deleted file mode 100644 index 6a14bdebe..000000000 --- a/program-tests/utils/src/indexer/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -// pub mod test_indexer; -// pub use test_indexer::TestIndexer; diff --git a/program-tests/utils/src/indexer/test_indexer.rs b/program-tests/utils/src/indexer/test_indexer.rs deleted file mode 100644 index d0ed4242e..000000000 --- a/program-tests/utils/src/indexer/test_indexer.rs +++ /dev/null @@ -1,1779 +0,0 @@ -// use std::{ -// future::Future, -// marker::PhantomData, -// sync::{Arc, Mutex}, -// time::Duration, -// }; -// -// use account_compression::{ -// utils::constants::{STATE_MERKLE_TREE_CANOPY_DEPTH, STATE_MERKLE_TREE_HEIGHT}, -// AddressMerkleTreeAccount, AddressMerkleTreeConfig, AddressQueueConfig, NullifierQueueConfig, -// StateMerkleTreeAccount, StateMerkleTreeConfig, -// }; -// use anchor_lang::AnchorDeserialize; -// use async_trait::async_trait; -// use forester_utils::{ -// get_concurrent_merkle_tree, get_indexed_merkle_tree, -// indexer::{ -// AddressMerkleTreeAccounts, AddressMerkleTreeBundle, BatchedTreeProofRpcResult, Indexer, -// IndexerError, MerkleProof, NewAddressProofWithContext, ProofOfLeaf, ProofRpcResult, -// StateMerkleTreeAccounts, StateMerkleTreeBundle, TokenDataWithContext, -// }, -// AccountZeroCopy, -// }; -// use light_batched_merkle_tree::{ -// batch::BatchState, -// constants::{DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, DEFAULT_BATCH_STATE_TREE_HEIGHT}, -// initialize_address_tree::InitAddressTreeAccountsInstructionData, -// initialize_state_tree::InitStateTreeAccountsInstructionData, -// merkle_tree::BatchedMerkleTreeAccount, -// 
queue::{BatchedQueueAccount, BatchedQueueMetadata}, -// }; -// use light_client::{ -// rpc::{RpcConnection, RpcError}, -// transaction_params::FeeConfig, -// }; -// use light_compressed_token::{ -// constants::TOKEN_COMPRESSED_ACCOUNT_DISCRIMINATOR, get_token_pool_pda, -// mint_sdk::create_create_token_pool_instruction, TokenData, -// }; -// use light_hasher::{Hasher, Poseidon}; -// use light_indexed_merkle_tree::{array::IndexedArray, reference::IndexedMerkleTree}; -// use light_macros::pubkey; -// use light_merkle_tree_reference::MerkleTree; -// use light_program_test::{ -// test_batch_forester::{create_batch_address_merkle_tree, create_batched_state_merkle_tree}, -// test_env::{ -// create_address_merkle_tree_and_queue_account, create_state_merkle_tree_and_queue_account, -// EnvAccounts, BATCHED_OUTPUT_QUEUE_TEST_KEYPAIR, -// }, -// }; -// use light_prover_client::{ -// gnark::{ -// batch_append_with_proofs_json_formatter::BatchAppendWithProofsInputsJson, -// combined_json_formatter::CombinedJsonStruct, -// combined_json_formatter_legacy::CombinedJsonStruct as CombinedJsonStructLegacy, -// constants::{PROVE_PATH, SERVER_ADDRESS}, -// helpers::{ -// big_int_to_string, spawn_prover, string_to_big_int, ProofType, ProverConfig, ProverMode, -// }, -// inclusion_json_formatter::BatchInclusionJsonStruct, -// inclusion_json_formatter_legacy::BatchInclusionJsonStruct as BatchInclusionJsonStructLegacy, -// non_inclusion_json_formatter::BatchNonInclusionJsonStruct, -// non_inclusion_json_formatter_legacy::BatchNonInclusionJsonStruct as BatchNonInclusionJsonStructLegacy, -// proof_helpers::{compress_proof, deserialize_gnark_proof_json, proof_from_json_struct}, -// }, -// helpers::bigint_to_u8_32, -// inclusion::merkle_inclusion_proof_inputs::{InclusionMerkleProofInputs, InclusionProofInputs}, -// inclusion_legacy::merkle_inclusion_proof_inputs::InclusionProofInputs as InclusionProofInputsLegacy, -// non_inclusion::merkle_non_inclusion_proof_inputs::{ -// get_non_inclusion_proof_inputs, NonInclusionProofInputs, -// }, -// non_inclusion_legacy::merkle_non_inclusion_proof_inputs::NonInclusionProofInputs as NonInclusionProofInputsLegacy, -// }; -// use light_system_program::{ -// invoke::processor::CompressedProof, -// sdk::{ -// compressed_account::{CompressedAccountWithMerkleContext, MerkleContext, QueueIndex}, -// event::PublicTransactionEvent, -// }, -// }; -// use light_utils::{ -// bigint::bigint_to_be_bytes_array, -// hashchain::{create_hash_chain_from_slice, create_tx_hash}, -// }; -// use log::{debug, info, warn}; -// use num_bigint::{BigInt, BigUint}; -// use num_traits::ops::bytes::FromBytes; -// use reqwest::Client; -// use solana_sdk::{ -// bs58, instruction::Instruction, program_pack::Pack, pubkey::Pubkey, signature::Keypair, -// signer::Signer, -// }; -// use spl_token::instruction::initialize_mint; -// -// use crate::{ -// create_address_merkle_tree_and_queue_account_with_assert, e2e_test_env::KeypairActionConfig, -// spl::create_initialize_mint_instructions, -// }; -// -// #[derive(Debug)] -// pub struct TestIndexer { -// pub state_merkle_trees: Vec, -// pub address_merkle_trees: Vec, -// pub payer: Keypair, -// pub group_pda: Pubkey, -// pub compressed_accounts: Vec, -// pub nullified_compressed_accounts: Vec, -// pub token_compressed_accounts: Vec, -// pub token_nullified_compressed_accounts: Vec, -// pub events: Vec, -// pub prover_config: Option, -// phantom: PhantomData, -// } -// -// #[async_trait] -// impl Indexer for TestIndexer { -// async fn get_queue_elements( -// &self, -// 
pubkey: [u8; 32], -// _batch: u64, -// start_offset: u64, -// end_offset: u64, -// ) -> Result, IndexerError> { -// let pubkey = Pubkey::new_from_array(pubkey); -// let address_tree_bundle = self -// .address_merkle_trees -// .iter() -// .find(|x| x.accounts.merkle_tree == pubkey); -// if let Some(address_tree_bundle) = address_tree_bundle { -// return Ok(address_tree_bundle.queue_elements -// [start_offset as usize..end_offset as usize] -// .to_vec()); -// } -// let state_tree_bundle = self -// .state_merkle_trees -// .iter() -// .find(|x| x.accounts.merkle_tree == pubkey); -// if let Some(state_tree_bundle) = state_tree_bundle { -// return Ok(state_tree_bundle.output_queue_elements -// [start_offset as usize..end_offset as usize] -// .to_vec()); -// } -// Err(IndexerError::Custom("Merkle tree not found".to_string())) -// } -// -// fn get_proof_by_index(&mut self, merkle_tree_pubkey: Pubkey, index: u64) -> ProofOfLeaf { -// let mut bundle = self -// .state_merkle_trees -// .iter_mut() -// .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) -// .unwrap(); -// -// while bundle.merkle_tree.leaves().len() <= index as usize { -// bundle.merkle_tree.append(&[0u8; 32]).unwrap(); -// } -// -// let leaf = match bundle.merkle_tree.get_leaf(index as usize) { -// Ok(leaf) => leaf, -// Err(_) => { -// bundle.merkle_tree.append(&[0u8; 32]).unwrap(); -// bundle.merkle_tree.get_leaf(index as usize).unwrap() -// } -// }; -// -// let proof = bundle -// .merkle_tree -// .get_proof_of_leaf(index as usize, true) -// .unwrap() -// .to_vec(); -// -// ProofOfLeaf { leaf, proof } -// } -// -// fn get_proofs_by_indices( -// &mut self, -// merkle_tree_pubkey: Pubkey, -// indices: &[u64], -// ) -> Vec { -// indices -// .iter() -// .map(|&index| self.get_proof_by_index(merkle_tree_pubkey, index)) -// .collect() -// } -// -// /// leaf index, leaf, tx hash -// fn get_leaf_indices_tx_hashes( -// &mut self, -// merkle_tree_pubkey: Pubkey, -// zkp_batch_size: usize, -// ) -> Vec<(u32, [u8; 32], [u8; 32])> { -// let mut state_merkle_tree_bundle = self -// .state_merkle_trees -// .iter_mut() -// .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) -// .unwrap(); -// -// state_merkle_tree_bundle.input_leaf_indices[..zkp_batch_size].to_vec() -// } -// -// async fn get_subtrees( -// &self, -// merkle_tree_pubkey: [u8; 32], -// ) -> Result, IndexerError> { -// let merkle_tree_pubkey = Pubkey::new_from_array(merkle_tree_pubkey); -// let address_tree_bundle = self -// .address_merkle_trees -// .iter() -// .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey); -// if let Some(address_tree_bundle) = address_tree_bundle { -// Ok(address_tree_bundle.merkle_tree.merkle_tree.get_subtrees()) -// } else { -// let state_tree_bundle = self -// .state_merkle_trees -// .iter() -// .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey); -// if let Some(state_tree_bundle) = state_tree_bundle { -// Ok(state_tree_bundle.merkle_tree.get_subtrees()) -// } else { -// Err(IndexerError::Custom("Merkle tree not found".to_string())) -// } -// } -// } -// -// async fn get_multiple_compressed_account_proofs( -// &self, -// hashes: Vec, -// ) -> Result, IndexerError> { -// info!("Getting proofs for {:?}", hashes); -// let mut proofs: Vec = Vec::new(); -// hashes.iter().for_each(|hash| { -// let hash_array: [u8; 32] = bs58::decode(hash) -// .into_vec() -// .unwrap() -// .as_slice() -// .try_into() -// .unwrap(); -// -// self.state_merkle_trees.iter().for_each(|tree| { -// if let Some(leaf_index) = tree.merkle_tree.get_leaf_index(&hash_array) { 
-// let proof = tree -// .merkle_tree -// .get_proof_of_leaf(leaf_index, false) -// .unwrap(); -// proofs.push(MerkleProof { -// hash: hash.clone(), -// leaf_index: leaf_index as u64, -// merkle_tree: tree.accounts.merkle_tree.to_string(), -// proof: proof.to_vec(), -// root_seq: tree.merkle_tree.sequence_number as u64, -// }); -// } -// }) -// }); -// Ok(proofs) -// } -// -// async fn get_rpc_compressed_accounts_by_owner( -// &self, -// owner: &Pubkey, -// ) -> Result, IndexerError> { -// let result = self.get_compressed_accounts_by_owner(owner); -// let mut hashes: Vec = Vec::new(); -// for account in result.iter() { -// let hash = account.hash()?; -// let bs58_hash = bs58::encode(hash).into_string(); -// hashes.push(bs58_hash); -// } -// Ok(hashes) -// } -// -// async fn get_multiple_new_address_proofs( -// &self, -// merkle_tree_pubkey: [u8; 32], -// addresses: Vec<[u8; 32]>, -// ) -> Result>, IndexerError> { -// self._get_multiple_new_address_proofs(merkle_tree_pubkey, addresses, false) -// .await -// } -// -// async fn get_multiple_new_address_proofs_full( -// &self, -// merkle_tree_pubkey: [u8; 32], -// addresses: Vec<[u8; 32]>, -// ) -> Result>, IndexerError> { -// self._get_multiple_new_address_proofs(merkle_tree_pubkey, addresses, true) -// .await -// } -// -// fn account_nullified(&mut self, merkle_tree_pubkey: Pubkey, account_hash: &str) { -// let decoded_hash: [u8; 32] = bs58::decode(account_hash) -// .into_vec() -// .unwrap() -// .as_slice() -// .try_into() -// .unwrap(); -// -// if let Some(state_tree_bundle) = self -// .state_merkle_trees -// .iter_mut() -// .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) -// { -// if let Some(leaf_index) = state_tree_bundle.merkle_tree.get_leaf_index(&decoded_hash) { -// state_tree_bundle -// .merkle_tree -// .update(&[0u8; 32], leaf_index) -// .unwrap(); -// } -// } -// } -// -// fn address_tree_updated( -// &mut self, -// merkle_tree_pubkey: Pubkey, -// context: &NewAddressProofWithContext<16>, -// ) { -// info!("Updating address tree..."); -// let mut address_tree_bundle: &mut AddressMerkleTreeBundle = self -// .address_merkle_trees -// .iter_mut() -// .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) -// .unwrap(); -// -// let new_low_element = context.new_low_element.clone().unwrap(); -// let new_element = context.new_element.clone().unwrap(); -// let new_element_next_value = context.new_element_next_value.clone().unwrap(); -// address_tree_bundle -// .merkle_tree -// .update(&new_low_element, &new_element, &new_element_next_value) -// .unwrap(); -// address_tree_bundle -// .indexed_array -// .append_with_low_element_index(new_low_element.index, &new_element.value) -// .unwrap(); -// info!("Address tree updated"); -// } -// -// fn get_state_merkle_tree_accounts(&self, pubkeys: &[Pubkey]) -> Vec { -// pubkeys -// .iter() -// .map(|x| { -// self.state_merkle_trees -// .iter() -// .find(|y| y.accounts.merkle_tree == *x || y.accounts.nullifier_queue == *x) -// .unwrap() -// .accounts -// }) -// .collect::>() -// } -// -// fn add_event_and_compressed_accounts( -// &mut self, -// slot: u64, -// event: &PublicTransactionEvent, -// ) -> ( -// Vec, -// Vec, -// ) { -// let mut compressed_accounts = Vec::new(); -// let mut token_compressed_accounts = Vec::new(); -// let event_inputs_len = event.input_compressed_account_hashes.len(); -// let event_outputs_len = event.output_compressed_account_hashes.len(); -// for i in 0..std::cmp::max(event_inputs_len, event_outputs_len) { -// self.process_v1_compressed_account( -// slot, -// 
event, -// i, -// &mut token_compressed_accounts, -// &mut compressed_accounts, -// ); -// } -// -// self.events.push(event.clone()); -// (compressed_accounts, token_compressed_accounts) -// } -// -// fn get_state_merkle_trees(&self) -> &Vec { -// &self.state_merkle_trees -// } -// -// fn get_state_merkle_trees_mut(&mut self) -> &mut Vec { -// &mut self.state_merkle_trees -// } -// -// fn get_address_merkle_trees(&self) -> &Vec { -// &self.address_merkle_trees -// } -// -// fn get_address_merkle_trees_mut(&mut self) -> &mut Vec { -// &mut self.address_merkle_trees -// } -// -// fn get_token_compressed_accounts(&self) -> &Vec { -// &self.token_compressed_accounts -// } -// -// fn get_payer(&self) -> &Keypair { -// &self.payer -// } -// -// fn get_group_pda(&self) -> &Pubkey { -// &self.group_pda -// } -// -// async fn create_proof_for_compressed_accounts( -// &mut self, -// compressed_accounts: Option>, -// state_merkle_tree_pubkeys: Option>, -// new_addresses: Option<&[[u8; 32]]>, -// address_merkle_tree_pubkeys: Option>, -// rpc: &mut R, -// ) -> ProofRpcResult { -// if compressed_accounts.is_some() -// && ![1usize, 2usize, 3usize, 4usize, 8usize] -// .contains(&compressed_accounts.as_ref().unwrap().len()) -// { -// panic!( -// "compressed_accounts must be of length 1, 2, 3, 4 or 8 != {}", -// compressed_accounts.unwrap().len() -// ) -// } -// if new_addresses.is_some() && ![1usize, 2usize].contains(&new_addresses.unwrap().len()) { -// panic!("new_addresses must be of length 1, 2") -// } -// let client = Client::new(); -// let (root_indices, address_root_indices, json_payload) = -// match (compressed_accounts, new_addresses) { -// (Some(accounts), None) => { -// let (payload, payload_legacy, indices) = self -// .process_inclusion_proofs( -// &state_merkle_tree_pubkeys.unwrap(), -// &accounts, -// rpc, -// ) -// .await; -// if let Some(payload) = payload { -// (indices, Vec::new(), payload.to_string()) -// } else { -// (indices, Vec::new(), payload_legacy.unwrap().to_string()) -// } -// } -// (None, Some(addresses)) => { -// let (payload, payload_legacy, indices) = self -// .process_non_inclusion_proofs( -// address_merkle_tree_pubkeys.unwrap().as_slice(), -// addresses, -// rpc, -// ) -// .await; -// let payload_string = if let Some(payload) = payload { -// payload.to_string() -// } else { -// payload_legacy.unwrap().to_string() -// }; -// (Vec::::new(), indices, payload_string) -// } -// (Some(accounts), Some(addresses)) => { -// let (inclusion_payload, inclusion_payload_legacy, inclusion_indices) = self -// .process_inclusion_proofs( -// &state_merkle_tree_pubkeys.unwrap(), -// &accounts, -// rpc, -// ) -// .await; -// -// let ( -// non_inclusion_payload, -// non_inclusion_payload_legacy, -// non_inclusion_indices, -// ) = self -// .process_non_inclusion_proofs( -// address_merkle_tree_pubkeys.unwrap().as_slice(), -// addresses, -// rpc, -// ) -// .await; -// let json_payload = if let Some(non_inclusion_payload) = non_inclusion_payload { -// let public_input_hash = BigInt::from_bytes_be( -// num_bigint::Sign::Plus, -// &create_hash_chain_from_slice(&[ -// bigint_to_u8_32( -// &string_to_big_int( -// &inclusion_payload.as_ref().unwrap().public_input_hash, -// ) -// .unwrap(), -// ) -// .unwrap(), -// bigint_to_u8_32( -// &string_to_big_int(&non_inclusion_payload.public_input_hash) -// .unwrap(), -// ) -// .unwrap(), -// ]) -// .unwrap(), -// ); -// println!( -// "inclusion public input hash offchain {:?}", -// bigint_to_u8_32( -// &string_to_big_int( -// 
&inclusion_payload.as_ref().unwrap().public_input_hash, -// ) -// .unwrap(), -// ) -// .unwrap() -// ); -// println!( -// "non inclusion public input hash offchain {:?}", -// bigint_to_u8_32( -// &string_to_big_int(&non_inclusion_payload.public_input_hash) -// .unwrap() -// ) -// .unwrap() -// ); -// -// println!( -// "public input hash offchain {:?}", -// public_input_hash.to_bytes_be() -// ); -// -// CombinedJsonStruct { -// circuit_type: ProofType::Combined.to_string(), -// state_tree_height: DEFAULT_BATCH_STATE_TREE_HEIGHT, -// address_tree_height: DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, -// public_input_hash: big_int_to_string(&public_input_hash), -// inclusion: inclusion_payload.unwrap().inputs, -// non_inclusion: non_inclusion_payload.inputs, -// } -// .to_string() -// } else if let Some(non_inclusion_payload) = non_inclusion_payload_legacy { -// CombinedJsonStructLegacy { -// circuit_type: ProofType::Combined.to_string(), -// state_tree_height: 26, -// address_tree_height: 26, -// inclusion: inclusion_payload_legacy.unwrap().inputs, -// non_inclusion: non_inclusion_payload.inputs, -// } -// .to_string() -// } else { -// panic!("Unsupported tree height") -// }; -// (inclusion_indices, non_inclusion_indices, json_payload) -// } -// _ => { -// panic!("At least one of compressed_accounts or new_addresses must be provided") -// } -// }; -// -// println!("json_payload {:?}", json_payload); -// let mut retries = 3; -// while retries > 0 { -// let response_result = client -// .post(format!("{}{}", SERVER_ADDRESS, PROVE_PATH)) -// .header("Content-Type", "text/plain; charset=utf-8") -// .body(json_payload.clone()) -// .send() -// .await -// .expect("Failed to execute request."); -// println!("response_result {:?}", response_result); -// if response_result.status().is_success() { -// let body = response_result.text().await.unwrap(); -// println!("body {:?}", body); -// println!("root_indices {:?}", root_indices); -// println!("address_root_indices {:?}", address_root_indices); -// let proof_json = deserialize_gnark_proof_json(&body).unwrap(); -// let (proof_a, proof_b, proof_c) = proof_from_json_struct(proof_json); -// let (proof_a, proof_b, proof_c) = compress_proof(&proof_a, &proof_b, &proof_c); -// let root_indices = root_indices.iter().map(|x| Some(*x)).collect(); -// return ProofRpcResult { -// root_indices, -// address_root_indices: address_root_indices.clone(), -// proof: CompressedProof { -// a: proof_a, -// b: proof_b, -// c: proof_c, -// }, -// }; -// } else { -// warn!("Error: {}", response_result.text().await.unwrap()); -// tokio::time::sleep(Duration::from_secs(1)).await; -// retries -= 1; -// } -// } -// panic!("Failed to get proof from server"); -// } -// -// async fn create_proof_for_compressed_accounts2( -// &mut self, -// compressed_accounts: Option>, -// state_merkle_tree_pubkeys: Option>, -// new_addresses: Option<&[[u8; 32]]>, -// address_merkle_tree_pubkeys: Option>, -// rpc: &mut R, -// ) -> BatchedTreeProofRpcResult { -// let mut indices_to_remove = Vec::new(); -// -// // for all accounts in batched trees, check whether values are in tree or queue -// let (compressed_accounts, state_merkle_tree_pubkeys) = -// if let Some((compressed_accounts, state_merkle_tree_pubkeys)) = -// compressed_accounts.zip(state_merkle_tree_pubkeys) -// { -// for (i, (compressed_account, state_merkle_tree_pubkey)) in compressed_accounts -// .iter() -// .zip(state_merkle_tree_pubkeys.iter()) -// .enumerate() -// { -// let accounts = self.state_merkle_trees.iter().find(|x| { -// 
x.accounts.merkle_tree == *state_merkle_tree_pubkey && x.version == 2 -// }); -// if let Some(accounts) = accounts { -// let output_queue_pubkey = accounts.accounts.nullifier_queue; -// let mut queue = -// AccountZeroCopy::::new(rpc, output_queue_pubkey) -// .await; -// let queue_zero_copy = BatchedQueueAccount::output_queue_from_bytes_mut( -// queue.account.data.as_mut_slice(), -// ) -// .unwrap(); -// for value_array in queue_zero_copy.value_vecs.iter() { -// let index = value_array.iter().position(|x| *x == *compressed_account); -// if index.is_some() { -// indices_to_remove.push(i); -// } -// } -// } -// } -// let compress_accounts = compressed_accounts -// .iter() -// .enumerate() -// .filter(|(i, _)| !indices_to_remove.contains(i)) -// .map(|(_, x)| *x) -// .collect::>(); -// let state_merkle_tree_pubkeys = state_merkle_tree_pubkeys -// .iter() -// .enumerate() -// .filter(|(i, _)| !indices_to_remove.contains(i)) -// .map(|(_, x)| *x) -// .collect::>(); -// if compress_accounts.is_empty() { -// (None, None) -// } else { -// (Some(compress_accounts), Some(state_merkle_tree_pubkeys)) -// } -// } else { -// (None, None) -// }; -// let rpc_result = if (compressed_accounts.is_some() -// && !compressed_accounts.as_ref().unwrap().is_empty()) -// || address_merkle_tree_pubkeys.is_some() -// { -// Some( -// self.create_proof_for_compressed_accounts( -// compressed_accounts, -// state_merkle_tree_pubkeys, -// new_addresses, -// address_merkle_tree_pubkeys, -// rpc, -// ) -// .await, -// ) -// } else { -// None -// }; -// let address_root_indices = if let Some(rpc_result) = rpc_result.as_ref() { -// rpc_result.address_root_indices.clone() -// } else { -// Vec::new() -// }; -// let root_indices = { -// let mut root_indices = if let Some(rpc_result) = rpc_result.as_ref() { -// rpc_result.root_indices.clone() -// } else { -// Vec::new() -// }; -// for index in indices_to_remove { -// root_indices.insert(index, None); -// } -// root_indices -// }; -// BatchedTreeProofRpcResult { -// proof: rpc_result.map(|x| x.proof), -// root_indices, -// address_root_indices, -// } -// } -// -// fn add_address_merkle_tree_accounts( -// &mut self, -// merkle_tree_keypair: &Keypair, -// queue_keypair: &Keypair, -// _owning_program_id: Option, -// ) -> AddressMerkleTreeAccounts { -// info!("Adding address merkle tree accounts..."); -// let address_merkle_tree_accounts = AddressMerkleTreeAccounts { -// merkle_tree: merkle_tree_keypair.pubkey(), -// queue: queue_keypair.pubkey(), -// }; -// self.address_merkle_trees -// .push(Self::add_address_merkle_tree_bundle( -// address_merkle_tree_accounts, -// )); -// info!( -// "Address merkle tree accounts added. Total: {}", -// self.address_merkle_trees.len() -// ); -// address_merkle_tree_accounts -// } -// -// /// returns compressed_accounts with the owner pubkey -// /// does not return token accounts. 
-//     fn get_compressed_accounts_by_owner(
-//         &self,
-//         owner: &Pubkey,
-//     ) -> Vec<CompressedAccountWithMerkleContext> {
-//         self.compressed_accounts
-//             .iter()
-//             .filter(|x| x.compressed_account.owner == *owner)
-//             .cloned()
-//             .collect()
-//     }
-//
-//     fn get_compressed_token_accounts_by_owner(&self, owner: &Pubkey) -> Vec<TokenDataWithContext> {
-//         self.token_compressed_accounts
-//             .iter()
-//             .filter(|x| x.token_data.owner == *owner)
-//             .cloned()
-//             .collect()
-//     }
-//
-//     fn add_state_bundle(&mut self, state_bundle: StateMerkleTreeBundle) {
-//         self.get_state_merkle_trees_mut().push(state_bundle);
-//     }
-//
-//     async fn update_test_indexer_after_append(
-//         &mut self,
-//         rpc: &mut R,
-//         merkle_tree_pubkey: Pubkey,
-//         output_queue_pubkey: Pubkey,
-//         num_inserted_zkps: u64,
-//     ) {
-//         let mut state_merkle_tree_bundle = self
-//             .state_merkle_trees
-//             .iter_mut()
-//             .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey)
-//             .unwrap();
-//
-//         let (merkle_tree_next_index, root) = {
-//             let mut merkle_tree_account =
-//                 rpc.get_account(merkle_tree_pubkey).await.unwrap().unwrap();
-//             let merkle_tree = BatchedMerkleTreeAccount::state_tree_from_bytes_mut(
-//                 merkle_tree_account.data.as_mut_slice(),
-//             )
-//             .unwrap();
-//             (
-//                 merkle_tree.get_metadata().next_index as usize,
-//                 *merkle_tree.root_history.last().unwrap(),
-//             )
-//         };
-//
-//         let (max_num_zkp_updates, zkp_batch_size) = {
-//             let mut output_queue_account =
-//                 rpc.get_account(output_queue_pubkey).await.unwrap().unwrap();
-//             let output_queue = BatchedQueueAccount::output_queue_from_bytes_mut(
-//                 output_queue_account.data.as_mut_slice(),
-//             )
-//             .unwrap();
-//
-//             let output_queue_account = output_queue.get_metadata();
-//             let max_num_zkp_updates = output_queue_account.batch_metadata.get_num_zkp_batches();
-//             let zkp_batch_size = output_queue_account.batch_metadata.zkp_batch_size;
-//             (max_num_zkp_updates, zkp_batch_size)
-//         };
-//
-//         let leaves = state_merkle_tree_bundle.output_queue_elements.to_vec();
-//
-//         let start = (num_inserted_zkps as usize) * zkp_batch_size as usize;
-//         let end = start + zkp_batch_size as usize;
-//         let batch_update_leaves = leaves[start..end].to_vec();
-//
-//         for (i, _) in batch_update_leaves.iter().enumerate() {
-//             // if leaves[i] == [0u8; 32] {
-//             let index = merkle_tree_next_index + i - zkp_batch_size as usize;
-//             // This is dangerous: it should call self.get_leaf_by_index(), but it
-//             // can't because of the mutable borrow.
-//             // TODO: call a get_leaf_by_index equivalent; we could move the method to the reference merkle tree
-//             let leaf = state_merkle_tree_bundle
-//                 .merkle_tree
-//                 .get_leaf(index)
-//                 .unwrap();
-//             if leaf == [0u8; 32] {
-//                 state_merkle_tree_bundle
-//                     .merkle_tree
-//                     .update(&batch_update_leaves[i], index)
-//                     .unwrap();
-//             }
-//         }
-//         assert_eq!(
-//             root,
-//             state_merkle_tree_bundle.merkle_tree.root(),
-//             "update indexer after append root invalid"
-//         );
-//
-//         let num_inserted_zkps = num_inserted_zkps + 1;
-//         // check whether we can get rid of this and use the data from the merkle tree
-//         if num_inserted_zkps == max_num_zkp_updates {
-//             for _ in 0..zkp_batch_size * max_num_zkp_updates {
-//                 state_merkle_tree_bundle.output_queue_elements.remove(0);
-//             }
-//         }
-//     }
-//
-//     async fn update_test_indexer_after_nullification(
-//         &mut self,
-//         rpc: &mut R,
-//         merkle_tree_pubkey: Pubkey,
-//         batch_index: usize,
-//     ) {
-//         let state_merkle_tree_bundle = self
-//             .state_merkle_trees
-//             .iter_mut()
-//             .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey)
-//             .unwrap();
-//
-//         let mut merkle_tree_account =
rpc.get_account(merkle_tree_pubkey).await.unwrap().unwrap(); -// let merkle_tree = BatchedMerkleTreeAccount::state_tree_from_bytes_mut( -// merkle_tree_account.data.as_mut_slice(), -// ) -// .unwrap(); -// -// let batch = &merkle_tree.batches[batch_index]; -// if batch.get_state() == BatchState::Inserted || batch.get_state() == BatchState::Full { -// let batch_size = batch.zkp_batch_size; -// let leaf_indices_tx_hashes = -// state_merkle_tree_bundle.input_leaf_indices[..batch_size as usize].to_vec(); -// for (index, leaf, tx_hash) in leaf_indices_tx_hashes.iter() { -// let index = *index as usize; -// let leaf = *leaf; -// let index_bytes = index.to_be_bytes(); -// -// let nullifier = Poseidon::hashv(&[&leaf, &index_bytes, tx_hash]).unwrap(); -// -// state_merkle_tree_bundle.input_leaf_indices.remove(0); -// state_merkle_tree_bundle -// .merkle_tree -// .update(&nullifier, index) -// .unwrap(); -// } -// } -// } -// -// async fn finalize_batched_address_tree_update( -// &mut self, -// rpc: &mut R, -// merkle_tree_pubkey: Pubkey, -// ) { -// let mut account = rpc.get_account(merkle_tree_pubkey).await.unwrap().unwrap(); -// let onchain_account = -// BatchedMerkleTreeAccount::address_tree_from_bytes_mut(account.data.as_mut_slice()) -// .unwrap(); -// let address_tree = self -// .address_merkle_trees -// .iter_mut() -// .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) -// .unwrap(); -// let address_tree_index = address_tree.merkle_tree.merkle_tree.rightmost_index; -// let onchain_next_index = onchain_account.get_metadata().next_index; -// let diff_onchain_indexer = onchain_next_index - address_tree_index as u64; -// let addresses = address_tree.queue_elements[0..diff_onchain_indexer as usize].to_vec(); -// -// for _ in 0..diff_onchain_indexer { -// address_tree.queue_elements.remove(0); -// } -// for new_element_value in &addresses { -// address_tree -// .merkle_tree -// .append( -// &BigUint::from_bytes_be(new_element_value), -// &mut address_tree.indexed_array, -// ) -// .unwrap(); -// } -// -// let onchain_root = onchain_account.root_history.last().unwrap(); -// let new_root = address_tree.merkle_tree.root(); -// assert_eq!(*onchain_root, new_root); -// println!("finalized batched address tree update"); -// } -// } -// -// impl TestIndexer { -// async fn _get_multiple_new_address_proofs( -// &self, -// merkle_tree_pubkey: [u8; 32], -// addresses: Vec<[u8; 32]>, -// full: bool, -// ) -> Result>, IndexerError> { -// let mut proofs: Vec> = Vec::new(); -// -// for address in addresses.iter() { -// info!("Getting new address proof for {:?}", address); -// let pubkey = Pubkey::from(merkle_tree_pubkey); -// let address_tree_bundle = self -// .address_merkle_trees -// .iter() -// .find(|x| x.accounts.merkle_tree == pubkey) -// .unwrap(); -// -// let address_biguint = BigUint::from_bytes_be(address.as_slice()); -// let (old_low_address, _old_low_address_next_value) = address_tree_bundle -// .indexed_array -// .find_low_element_for_nonexistent(&address_biguint) -// .unwrap(); -// let address_bundle = address_tree_bundle -// .indexed_array -// .new_element_with_low_element_index(old_low_address.index, &address_biguint) -// .unwrap(); -// -// let (old_low_address, old_low_address_next_value) = address_tree_bundle -// .indexed_array -// .find_low_element_for_nonexistent(&address_biguint) -// .unwrap(); -// -// // Get the Merkle proof for updating low element. 
-// let low_address_proof = address_tree_bundle -// .merkle_tree -// .get_proof_of_leaf(old_low_address.index, full) -// .unwrap(); -// -// let low_address_index: u64 = old_low_address.index as u64; -// let low_address_value: [u8; 32] = -// bigint_to_be_bytes_array(&old_low_address.value).unwrap(); -// let low_address_next_index: u64 = old_low_address.next_index as u64; -// let low_address_next_value: [u8; 32] = -// bigint_to_be_bytes_array(&old_low_address_next_value).unwrap(); -// let low_address_proof: [[u8; 32]; NET_HEIGHT] = low_address_proof.to_array().unwrap(); -// let proof = NewAddressProofWithContext:: { -// merkle_tree: merkle_tree_pubkey, -// low_address_index, -// low_address_value, -// low_address_next_index, -// low_address_next_value, -// low_address_proof, -// root: address_tree_bundle.merkle_tree.root(), -// root_seq: address_tree_bundle.merkle_tree.merkle_tree.sequence_number as u64, -// new_low_element: Some(address_bundle.new_low_element), -// new_element: Some(address_bundle.new_element), -// new_element_next_value: Some(address_bundle.new_element_next_value), -// }; -// proofs.push(proof); -// } -// Ok(proofs) -// } -// -// fn count_matching_hashes(&self, query_hashes: &[String]) -> usize { -// self.nullified_compressed_accounts -// .iter() -// .map(|account| self.compute_hash(account)) -// .filter(|bs58_hash| query_hashes.contains(bs58_hash)) -// .count() -// } -// -// fn compute_hash(&self, account: &CompressedAccountWithMerkleContext) -> String { -// // replace AccountType with actual type -// let hash = account -// .compressed_account -// .hash::( -// &account.merkle_context.merkle_tree_pubkey, -// &account.merkle_context.leaf_index, -// ) -// .unwrap(); -// bs58::encode(hash).into_string() -// } -// -// pub async fn init_from_env( -// payer: &Keypair, -// env: &EnvAccounts, -// prover_config: Option, -// ) -> Self { -// Self::new( -// vec![ -// StateMerkleTreeAccounts { -// merkle_tree: env.merkle_tree_pubkey, -// nullifier_queue: env.nullifier_queue_pubkey, -// cpi_context: env.cpi_context_account_pubkey, -// }, -// StateMerkleTreeAccounts { -// merkle_tree: env.batched_state_merkle_tree, -// nullifier_queue: env.batched_output_queue, -// cpi_context: env.batched_cpi_context, -// }, -// ], -// vec![ -// AddressMerkleTreeAccounts { -// merkle_tree: env.address_merkle_tree_pubkey, -// queue: env.address_merkle_tree_queue_pubkey, -// }, -// AddressMerkleTreeAccounts { -// merkle_tree: env.batch_address_merkle_tree, -// queue: env.batch_address_merkle_tree, -// }, -// ], -// payer.insecure_clone(), -// env.group_pda, -// prover_config, -// ) -// .await -// } -// -// pub async fn new( -// state_merkle_tree_accounts: Vec, -// address_merkle_tree_accounts: Vec, -// payer: Keypair, -// group_pda: Pubkey, -// prover_config: Option, -// ) -> Self { -// if let Some(ref prover_config) = prover_config { -// // TODO: remove restart input and check whether prover is already -// // running with correct config -// spawn_prover(true, prover_config.clone()).await; -// } -// let mut state_merkle_trees = Vec::new(); -// for state_merkle_tree_account in state_merkle_tree_accounts.iter() { -// let test_batched_output_queue = -// Keypair::from_bytes(&BATCHED_OUTPUT_QUEUE_TEST_KEYPAIR).unwrap(); -// let (version, merkle_tree) = if state_merkle_tree_account.nullifier_queue -// == test_batched_output_queue.pubkey() -// { -// let merkle_tree = Box::new(MerkleTree::::new( -// DEFAULT_BATCH_STATE_TREE_HEIGHT as usize, -// 0, -// )); -// (2, merkle_tree) -// } else { -// let merkle_tree = 
Box::new(MerkleTree::::new( -// STATE_MERKLE_TREE_HEIGHT as usize, -// STATE_MERKLE_TREE_CANOPY_DEPTH as usize, -// )); -// (1, merkle_tree) -// }; -// -// state_merkle_trees.push(StateMerkleTreeBundle { -// accounts: *state_merkle_tree_account, -// merkle_tree, -// rollover_fee: FeeConfig::default().state_merkle_tree_rollover as i64, -// version, -// output_queue_elements: vec![], -// input_leaf_indices: vec![], -// }); -// } -// -// let mut address_merkle_trees = Vec::new(); -// for address_merkle_tree_account in address_merkle_tree_accounts { -// address_merkle_trees.push(Self::add_address_merkle_tree_bundle( -// address_merkle_tree_account, -// )); -// } -// -// Self { -// state_merkle_trees, -// address_merkle_trees, -// payer, -// compressed_accounts: vec![], -// nullified_compressed_accounts: vec![], -// events: vec![], -// token_compressed_accounts: vec![], -// token_nullified_compressed_accounts: vec![], -// prover_config, -// phantom: Default::default(), -// group_pda, -// } -// } -// -// pub fn add_address_merkle_tree_bundle( -// address_merkle_tree_accounts: AddressMerkleTreeAccounts, -// // TODO: add config here -// ) -> AddressMerkleTreeBundle { -// let (height, canopy) = -// if address_merkle_tree_accounts.merkle_tree == address_merkle_tree_accounts.queue { -// (40, 0) -// } else { -// (26, STATE_MERKLE_TREE_CANOPY_DEPTH as usize) -// }; -// let mut merkle_tree = -// Box::new(IndexedMerkleTree::::new(height, canopy).unwrap()); -// merkle_tree.init().unwrap(); -// let mut indexed_array = Box::>::default(); -// indexed_array.init().unwrap(); -// AddressMerkleTreeBundle { -// merkle_tree, -// indexed_array, -// accounts: address_merkle_tree_accounts, -// rollover_fee: FeeConfig::default().address_queue_rollover as i64, -// queue_elements: vec![], -// } -// } -// -// async fn add_address_merkle_tree_v1( -// &mut self, -// rpc: &mut R, -// merkle_tree_keypair: &Keypair, -// queue_keypair: &Keypair, -// owning_program_id: Option, -// ) -> AddressMerkleTreeAccounts { -// create_address_merkle_tree_and_queue_account_with_assert( -// &self.payer, -// true, -// rpc, -// merkle_tree_keypair, -// queue_keypair, -// owning_program_id, -// None, -// &AddressMerkleTreeConfig::default(), -// &AddressQueueConfig::default(), -// 0, -// ) -// .await -// .unwrap(); -// self.add_address_merkle_tree_accounts(merkle_tree_keypair, queue_keypair, owning_program_id) -// } -// -// async fn add_address_merkle_tree_v2( -// &mut self, -// rpc: &mut R, -// merkle_tree_keypair: &Keypair, -// queue_keypair: &Keypair, -// owning_program_id: Option, -// ) -> AddressMerkleTreeAccounts { -// info!( -// "Adding address merkle tree accounts v2 {:?}", -// merkle_tree_keypair.pubkey() -// ); -// -// let params = InitAddressTreeAccountsInstructionData::test_default(); -// -// info!( -// "Creating batched address merkle tree {:?}", -// merkle_tree_keypair.pubkey() -// ); -// create_batch_address_merkle_tree(rpc, &self.payer, merkle_tree_keypair, params) -// .await -// .unwrap(); -// info!( -// "Batched address merkle tree created {:?}", -// merkle_tree_keypair.pubkey() -// ); -// -// self.add_address_merkle_tree_accounts(merkle_tree_keypair, queue_keypair, owning_program_id) -// } -// -// pub async fn add_address_merkle_tree( -// &mut self, -// rpc: &mut R, -// merkle_tree_keypair: &Keypair, -// queue_keypair: &Keypair, -// owning_program_id: Option, -// version: u64, -// ) -> AddressMerkleTreeAccounts { -// if version == 1 { -// self.add_address_merkle_tree_v1( -// rpc, -// merkle_tree_keypair, -// queue_keypair, 
-// owning_program_id, -// ) -// .await -// } else if version == 2 { -// self.add_address_merkle_tree_v2( -// rpc, -// merkle_tree_keypair, -// queue_keypair, -// owning_program_id, -// ) -// .await -// } else { -// panic!( -// "add_address_merkle_tree: Version not supported, {}. Versions: 1, 2", -// version -// ) -// } -// } -// -// #[allow(clippy::too_many_arguments)] -// pub async fn add_state_merkle_tree( -// &mut self, -// rpc: &mut R, -// merkle_tree_keypair: &Keypair, -// queue_keypair: &Keypair, -// cpi_context_keypair: &Keypair, -// owning_program_id: Option, -// forester: Option, -// version: u64, -// ) { -// let (rollover_fee, merkle_tree) = match version { -// 1 => { -// create_state_merkle_tree_and_queue_account( -// &self.payer, -// true, -// rpc, -// merkle_tree_keypair, -// queue_keypair, -// Some(cpi_context_keypair), -// owning_program_id, -// forester, -// self.state_merkle_trees.len() as u64, -// &StateMerkleTreeConfig::default(), -// &NullifierQueueConfig::default(), -// ) -// .await -// .unwrap(); -// let merkle_tree = Box::new(MerkleTree::::new( -// STATE_MERKLE_TREE_HEIGHT as usize, -// STATE_MERKLE_TREE_CANOPY_DEPTH as usize, -// )); -// (FeeConfig::default().state_merkle_tree_rollover as i64,merkle_tree) -// } -// 2 => { -// let params = InitStateTreeAccountsInstructionData::test_default(); -// -// create_batched_state_merkle_tree( -// &self.payer, -// true, -// rpc, -// merkle_tree_keypair, -// queue_keypair, -// cpi_context_keypair, -// params, -// ).await; -// let merkle_tree = Box::new(MerkleTree::::new( -// DEFAULT_BATCH_STATE_TREE_HEIGHT as usize, -// 0 -// )); -// (FeeConfig::test_batched().state_merkle_tree_rollover as i64,merkle_tree) -// } -// _ => panic!( -// "add_state_merkle_tree: Version not supported, {}. Versions: 1 concurrent, 2 batched", -// version -// ), -// }; -// let state_merkle_tree_account = StateMerkleTreeAccounts { -// merkle_tree: merkle_tree_keypair.pubkey(), -// nullifier_queue: queue_keypair.pubkey(), -// cpi_context: cpi_context_keypair.pubkey(), -// }; -// -// self.state_merkle_trees.push(StateMerkleTreeBundle { -// merkle_tree, -// accounts: state_merkle_tree_account, -// rollover_fee, -// version, -// output_queue_elements: vec![], -// input_leaf_indices: vec![], -// }); -// } -// -// async fn process_inclusion_proofs( -// &self, -// merkle_tree_pubkeys: &[Pubkey], -// accounts: &[[u8; 32]], -// rpc: &mut R, -// ) -> ( -// Option, -// Option, -// Vec, -// ) { -// let mut inclusion_proofs = Vec::new(); -// let mut root_indices = Vec::new(); -// let mut height = 0; -// -// // Collect all proofs first before any await points -// let proof_data: Vec<_> = accounts -// .iter() -// .zip(merkle_tree_pubkeys.iter()) -// .map(|(account, &pubkey)| { -// let bundle = &self -// .state_merkle_trees -// .iter() -// .find(|x| x.accounts.merkle_tree == pubkey) -// .unwrap(); -// let merkle_tree = &bundle.merkle_tree; -// let leaf_index = merkle_tree.get_leaf_index(account).unwrap(); -// let proof = merkle_tree.get_proof_of_leaf(leaf_index, true).unwrap(); -// -// // Convert proof to owned data that implements Send -// let proof: Vec = proof.iter().map(|x| BigInt::from_be_bytes(x)).collect(); -// -// if height == 0 { -// height = merkle_tree.height; -// } else { -// assert_eq!(height, merkle_tree.height); -// } -// -// ( -// bundle.version, -// pubkey, -// leaf_index, -// proof, -// merkle_tree.root(), -// ) -// }) -// .collect(); -// -// // Now handle the async operations with the collected data -// for (i, (version, pubkey, leaf_index, proof, 
merkle_root)) in -// proof_data.into_iter().enumerate() -// { -// inclusion_proofs.push(InclusionMerkleProofInputs { -// root: BigInt::from_be_bytes(merkle_root.as_slice()), -// leaf: BigInt::from_be_bytes(&accounts[i]), -// path_index: BigInt::from_be_bytes(leaf_index.to_be_bytes().as_slice()), -// path_elements: proof, -// }); -// -// let (root_index, root) = if version == 1 { -// let fetched_merkle_tree = unsafe { -// get_concurrent_merkle_tree::( -// rpc, pubkey, -// ) -// .await -// }; -// ( -// fetched_merkle_tree.root_index() as u32, -// fetched_merkle_tree.root(), -// ) -// } else { -// let mut merkle_tree_account = rpc.get_account(pubkey).await.unwrap().unwrap(); -// let merkle_tree = BatchedMerkleTreeAccount::state_tree_from_bytes_mut( -// merkle_tree_account.data.as_mut_slice(), -// ) -// .unwrap(); -// ( -// merkle_tree.get_root_index(), -// merkle_tree.get_root().unwrap(), -// ) -// }; -// -// assert_eq!(merkle_root, root, "Merkle tree root mismatch"); -// root_indices.push(root_index as u16); -// } -// -// let (batch_inclusion_proof_inputs, legacy) = if height -// == DEFAULT_BATCH_STATE_TREE_HEIGHT as usize -// { -// let inclusion_proof_inputs = -// InclusionProofInputs::new(inclusion_proofs.as_slice()).unwrap(); -// ( -// Some(BatchInclusionJsonStruct::from_inclusion_proof_inputs( -// &inclusion_proof_inputs, -// )), -// None, -// ) -// } else if height == STATE_MERKLE_TREE_HEIGHT as usize { -// let inclusion_proof_inputs = InclusionProofInputsLegacy(inclusion_proofs.as_slice()); -// ( -// None, -// Some(BatchInclusionJsonStructLegacy::from_inclusion_proof_inputs( -// &inclusion_proof_inputs, -// )), -// ) -// } else { -// panic!("Unsupported tree height") -// }; -// -// (batch_inclusion_proof_inputs, legacy, root_indices) -// } -// -// pub async fn process_non_inclusion_proofs( -// &self, -// address_merkle_tree_pubkeys: &[Pubkey], -// addresses: &[[u8; 32]], -// rpc: &mut R, -// ) -> ( -// Option, -// Option, -// Vec, -// ) { -// let mut non_inclusion_proofs = Vec::new(); -// let mut address_root_indices = Vec::new(); -// let mut tree_heights = Vec::new(); -// for tree in self.address_merkle_trees.iter() { -// println!("height {:?}", tree.merkle_tree.merkle_tree.height); -// println!("accounts {:?}", tree.accounts); -// } -// println!("process_non_inclusion_proofs: addresses {:?}", addresses); -// for (i, address) in addresses.iter().enumerate() { -// let address_tree = &self -// .address_merkle_trees -// .iter() -// .find(|x| x.accounts.merkle_tree == address_merkle_tree_pubkeys[i]) -// .unwrap(); -// tree_heights.push(address_tree.merkle_tree.merkle_tree.height); -// -// let proof_inputs = get_non_inclusion_proof_inputs( -// address, -// &address_tree.merkle_tree, -// &address_tree.indexed_array, -// ); -// non_inclusion_proofs.push(proof_inputs); -// -// // We don't have address queues in v2 (batch) address Merkle trees -// // hence both accounts in this struct are the same. 
-//             let is_v2 = address_tree.accounts.merkle_tree == address_tree.accounts.queue;
-//             println!("is v2 {:?}", is_v2);
-//             println!(
-//                 "address_merkle_tree_pubkeys[i] {:?}",
-//                 address_merkle_tree_pubkeys[i]
-//             );
-//             println!("address_tree.accounts {:?}", address_tree.accounts);
-//             if is_v2 {
-//                 let account = rpc
-//                     .get_account(address_merkle_tree_pubkeys[i])
-//                     .await
-//                     .unwrap();
-//                 if let Some(mut account) = account {
-//                     let account = BatchedMerkleTreeAccount::address_tree_from_bytes_mut(
-//                         account.data.as_mut_slice(),
-//                     )
-//                     .unwrap();
-//                     address_root_indices.push(account.get_root_index() as u16);
-//                 } else {
-//                     panic!(
-//                         "TestIndexer.process_non_inclusion_proofs(): Address tree account not found."
-//                     );
-//                 }
-//             } else {
-//                 let fetched_address_merkle_tree = unsafe {
-//                     get_indexed_merkle_tree::(
-//                         rpc,
-//                         address_merkle_tree_pubkeys[i],
-//                     )
-//                     .await
-//                 };
-//                 address_root_indices.push(fetched_address_merkle_tree.root_index() as u16);
-//             }
-//         }
-//         // if tree heights are not the same, panic
-//         if tree_heights.iter().any(|&x| x != tree_heights[0]) {
-//             panic!(
-//                 "All address merkle trees must have the same height {:?}",
-//                 tree_heights
-//             );
-//         }
-//         let (batch_non_inclusion_proof_inputs, batch_non_inclusion_proof_inputs_legacy) =
-//             if tree_heights[0] == 26 {
-//                 let non_inclusion_proof_inputs =
-//                     NonInclusionProofInputsLegacy::new(non_inclusion_proofs.as_slice());
-//                 (
-//                     None,
-//                     Some(
-//                         BatchNonInclusionJsonStructLegacy::from_non_inclusion_proof_inputs(
-//                             &non_inclusion_proof_inputs,
-//                         ),
-//                     ),
-//                 )
-//             } else if tree_heights[0] == 40 {
-//                 let non_inclusion_proof_inputs =
-//                     NonInclusionProofInputs::new(non_inclusion_proofs.as_slice()).unwrap();
-//                 (
-//                     Some(
-//                         BatchNonInclusionJsonStruct::from_non_inclusion_proof_inputs(
-//                             &non_inclusion_proof_inputs,
-//                         ),
-//                     ),
-//                     None,
-//                 )
-//             } else {
-//                 panic!("Unsupported tree height")
-//             };
-//         (
-//             batch_non_inclusion_proof_inputs,
-//             batch_non_inclusion_proof_inputs_legacy,
-//             address_root_indices,
-//         )
-//     }
-//
-//     /// deserializes an event
-//     /// adds the output_compressed_accounts to the compressed_accounts
-//     /// removes the input_compressed_accounts from the compressed_accounts
-//     /// adds the input_compressed_accounts to the nullified_compressed_accounts
-//     pub fn add_lamport_compressed_accounts(&mut self, slot: u64, event_bytes: Vec<u8>) {
-//         let event_bytes = event_bytes.clone();
-//         let event = PublicTransactionEvent::deserialize(&mut event_bytes.as_slice()).unwrap();
-//         self.add_event_and_compressed_accounts(slot, &event);
-//     }
-//
-//     /// deserializes an event
-//     /// adds the output_compressed_accounts to the compressed_accounts
-//     /// removes the input_compressed_accounts from the compressed_accounts
-//     /// adds the input_compressed_accounts to the nullified_compressed_accounts
-//     /// deserializes token data from the output_compressed_accounts
-//     /// adds the token_compressed_accounts to the token_compressed_accounts
-//     pub fn add_compressed_accounts_with_token_data(
-//         &mut self,
-//         slot: u64,
-//         event: &PublicTransactionEvent,
-//     ) {
-//         self.add_event_and_compressed_accounts(slot, event);
-//     }
-//
-//     /// returns the compressed sol balance of the owner pubkey
-//     pub fn get_compressed_balance(&self, owner: &Pubkey) -> u64 {
-//         self.compressed_accounts
-//             .iter()
-//             .filter(|x| x.compressed_account.owner == *owner)
-//             .map(|x| x.compressed_account.lamports)
-//             .sum()
-//     }
-//
-//     /// returns the compressed token balance of the
owner pubkey for a token by mint -// pub fn get_compressed_token_balance(&self, owner: &Pubkey, mint: &Pubkey) -> u64 { -// self.token_compressed_accounts -// .iter() -// .filter(|x| { -// x.compressed_account.compressed_account.owner == *owner -// && x.token_data.mint == *mint -// }) -// .map(|x| x.token_data.amount) -// .sum() -// } -// -// fn process_v1_compressed_account( -// &mut self, -// slot: u64, -// event: &PublicTransactionEvent, -// i: usize, -// token_compressed_accounts: &mut Vec, -// compressed_accounts: &mut Vec, -// ) { -// let mut input_addresses = vec![]; -// if event.input_compressed_account_hashes.len() > i { -// let tx_hash: [u8; 32] = create_tx_hash( -// &event.input_compressed_account_hashes, -// &event.output_compressed_account_hashes, -// slot, -// ) -// .unwrap(); -// println!("tx_hash {:?}", tx_hash); -// println!("slot {:?}", slot); -// let hash = event.input_compressed_account_hashes[i]; -// let index = self.compressed_accounts.iter().position(|x| { -// x.compressed_account -// .hash::( -// &x.merkle_context.merkle_tree_pubkey, -// &x.merkle_context.leaf_index, -// ) -// .unwrap() -// == hash -// }); -// let (leaf_index, merkle_tree_pubkey) = if let Some(index) = index { -// self.nullified_compressed_accounts -// .push(self.compressed_accounts[index].clone()); -// let leaf_index = self.compressed_accounts[index].merkle_context.leaf_index; -// let merkle_tree_pubkey = self.compressed_accounts[index] -// .merkle_context -// .merkle_tree_pubkey; -// if let Some(address) = self.compressed_accounts[index].compressed_account.address { -// input_addresses.push(address); -// } -// self.compressed_accounts.remove(index); -// (leaf_index, merkle_tree_pubkey) -// } else { -// let index = self -// .token_compressed_accounts -// .iter() -// .position(|x| { -// x.compressed_account -// .compressed_account -// .hash::( -// &x.compressed_account.merkle_context.merkle_tree_pubkey, -// &x.compressed_account.merkle_context.leaf_index, -// ) -// .unwrap() -// == hash -// }) -// .expect("input compressed account not found"); -// self.token_nullified_compressed_accounts -// .push(self.token_compressed_accounts[index].clone()); -// let leaf_index = self.token_compressed_accounts[index] -// .compressed_account -// .merkle_context -// .leaf_index; -// let merkle_tree_pubkey = self.token_compressed_accounts[index] -// .compressed_account -// .merkle_context -// .merkle_tree_pubkey; -// self.token_compressed_accounts.remove(index); -// (leaf_index, merkle_tree_pubkey) -// }; -// let bundle = &mut self -// .get_state_merkle_trees_mut() -// .iter_mut() -// .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) -// .unwrap(); -// // Store leaf indices of input accounts for batched trees -// if bundle.version == 2 { -// let leaf_hash = event.input_compressed_account_hashes[i]; -// bundle -// .input_leaf_indices -// .push((leaf_index, leaf_hash, tx_hash)); -// } -// } -// let mut new_addresses = vec![]; -// if event.output_compressed_accounts.len() > i { -// let compressed_account = &event.output_compressed_accounts[i]; -// println!("output compressed account {:?}", compressed_account); -// if let Some(address) = compressed_account.compressed_account.address { -// if !input_addresses.iter().any(|x| x == &address) { -// new_addresses.push(address); -// } -// } -// -// let merkle_tree = self.state_merkle_trees.iter().find(|x| { -// x.accounts.merkle_tree -// == event.pubkey_array -// [event.output_compressed_accounts[i].merkle_tree_index as usize] -// }); -// // Check for output queue -// let 
merkle_tree = if let Some(merkle_tree) = merkle_tree { -// merkle_tree -// } else { -// self.state_merkle_trees -// .iter() -// .find(|x| { -// x.accounts.nullifier_queue -// == event.pubkey_array -// [event.output_compressed_accounts[i].merkle_tree_index as usize] -// }) -// .unwrap() -// }; -// println!("found merkle tree {:?}", merkle_tree.accounts.merkle_tree); -// let nullifier_queue_pubkey = merkle_tree.accounts.nullifier_queue; -// let merkle_tree_pubkey = merkle_tree.accounts.merkle_tree; -// // if data is some, try to deserialize token data, if it fails, add to compressed_accounts -// // if data is none add to compressed_accounts -// // new accounts are inserted in front so that the newest accounts are found first -// match compressed_account.compressed_account.data.as_ref() { -// Some(data) => { -// if compressed_account.compressed_account.owner == light_compressed_token::ID -// && data.discriminator == TOKEN_COMPRESSED_ACCOUNT_DISCRIMINATOR -// { -// if let Ok(token_data) = TokenData::deserialize(&mut data.data.as_slice()) { -// let token_account = TokenDataWithContext { -// token_data, -// compressed_account: CompressedAccountWithMerkleContext { -// compressed_account: compressed_account -// .compressed_account -// .clone(), -// merkle_context: MerkleContext { -// leaf_index: event.output_leaf_indices[i], -// merkle_tree_pubkey, -// nullifier_queue_pubkey, -// queue_index: None, -// }, -// }, -// }; -// token_compressed_accounts.push(token_account.clone()); -// self.token_compressed_accounts.insert(0, token_account); -// } -// } else { -// let compressed_account = CompressedAccountWithMerkleContext { -// compressed_account: compressed_account.compressed_account.clone(), -// merkle_context: MerkleContext { -// leaf_index: event.output_leaf_indices[i], -// merkle_tree_pubkey, -// nullifier_queue_pubkey, -// queue_index: None, -// }, -// }; -// compressed_accounts.push(compressed_account.clone()); -// self.compressed_accounts.insert(0, compressed_account); -// } -// } -// None => { -// let compressed_account = CompressedAccountWithMerkleContext { -// compressed_account: compressed_account.compressed_account.clone(), -// merkle_context: MerkleContext { -// leaf_index: event.output_leaf_indices[i], -// merkle_tree_pubkey, -// nullifier_queue_pubkey, -// queue_index: None, -// }, -// }; -// compressed_accounts.push(compressed_account.clone()); -// self.compressed_accounts.insert(0, compressed_account); -// } -// }; -// let seq = event -// .sequence_numbers -// .iter() -// .find(|x| x.pubkey == merkle_tree_pubkey); -// let seq = if let Some(seq) = seq { -// seq -// } else { -// event -// .sequence_numbers -// .iter() -// .find(|x| x.pubkey == nullifier_queue_pubkey) -// .unwrap() -// }; -// let is_batched = seq.seq == u64::MAX; -// -// println!("Output is batched {:?}", is_batched); -// if !is_batched { -// let merkle_tree = &mut self -// .state_merkle_trees -// .iter_mut() -// .find(|x| { -// x.accounts.merkle_tree -// == event.pubkey_array -// [event.output_compressed_accounts[i].merkle_tree_index as usize] -// }) -// .unwrap(); -// merkle_tree -// .merkle_tree -// .append( -// &compressed_account -// .compressed_account -// .hash::( -// &event.pubkey_array[event.output_compressed_accounts[i] -// .merkle_tree_index -// as usize], -// &event.output_leaf_indices[i], -// ) -// .unwrap(), -// ) -// .expect("insert failed"); -// } else { -// let merkle_tree = &mut self -// .state_merkle_trees -// .iter_mut() -// .find(|x| { -// x.accounts.nullifier_queue -// == event.pubkey_array -// 
[event.output_compressed_accounts[i].merkle_tree_index as usize] -// }) -// .unwrap(); -// -// merkle_tree -// .output_queue_elements -// .push(event.output_compressed_account_hashes[i]); -// } -// } -// println!("new addresses {:?}", new_addresses); -// println!("event.pubkey_array {:?}", event.pubkey_array); -// println!( -// "address merkle trees {:?}", -// self.address_merkle_trees -// .iter() -// .map(|x| x.accounts.merkle_tree) -// .collect::>() -// ); -// // checks whether there are addresses in outputs which don't exist in inputs. -// // if so check pubkey_array for the first address Merkle tree and append to the bundles queue elements. -// // Note: -// // - creating addresses in multiple address Merkle trees in one tx is not supported -// // TODO: reimplement this is not a good solution -// // - take addresses and address Merkle tree pubkeys from cpi to account compression program -// if !new_addresses.is_empty() { -// for pubkey in event.pubkey_array.iter() { -// if let Some((_, address_merkle_tree)) = self -// .address_merkle_trees -// .iter_mut() -// .enumerate() -// .find(|(i, x)| x.accounts.merkle_tree == *pubkey) -// { -// address_merkle_tree -// .queue_elements -// .append(&mut new_addresses); -// } -// } -// } -// } -// -// pub(crate) fn get_address_merkle_tree( -// &self, -// merkle_tree_pubkey: Pubkey, -// ) -> Option<&AddressMerkleTreeBundle> { -// self.address_merkle_trees -// .iter() -// .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) -// } -// } diff --git a/program-tests/utils/src/lib.rs b/program-tests/utils/src/lib.rs index efe4f6028..6cfc74829 100644 --- a/program-tests/utils/src/lib.rs +++ b/program-tests/utils/src/lib.rs @@ -24,7 +24,6 @@ pub mod conversions; pub mod create_address_test_program_sdk; pub mod e2e_test_env; #[allow(unused)] -pub mod indexer; pub mod spl; pub mod state_tree_rollover; pub mod system_program; diff --git a/sdk-libs/sdk/Cargo.toml b/sdk-libs/sdk/Cargo.toml index 93a48b9fa..70ae8d830 100644 --- a/sdk-libs/sdk/Cargo.toml +++ b/sdk-libs/sdk/Cargo.toml @@ -24,45 +24,24 @@ idl-build = ["anchor-lang/idl-build"] legacy = ["account-compression", "light-system-program"] [dependencies] -# Solana -solana-program = { workspace = true } -# Anchor +solana-program = { workspace = true } anchor-lang = { workspace = true } - -# Math and crypto num-bigint = { workspace = true } +borsh = "0.10.0" -aligned-sized = { workspace = true } light-macros = { workspace = true } light-sdk-macros = { workspace = true } -bytemuck = "1.17" light-hasher = { workspace = true, features=["solana"] } light-heap = { workspace = true, optional = true } light-indexed-merkle-tree = { workspace = true } account-compression = { workspace = true , optional = true } light-system-program = { workspace = true , optional = true } -light-concurrent-merkle-tree = { workspace = true } light-utils = { workspace = true } -groth16-solana = "0.0.3" -light-verifier = { workspace = true, features = ["solana"] } -borsh = "0.10.0" [target.'cfg(not(target_os = "solana"))'.dependencies] solana-sdk = { workspace = true } [dev-dependencies] -solana-banks-interface = { workspace = true } -solana-cli-output = { workspace = true } -solana-program-test = { workspace = true } -serde_json = "1.0.133" -reqwest = "0.12" -tokio = { workspace = true } -light-prover-client = { workspace = true } -light-merkle-tree-reference = { workspace = true } light-indexed-merkle-tree = { workspace = true } num-bigint = "0.4.6" -num-traits = "0.2.19" -lazy_static = "1.4.0" -light-hash-set = { workspace = true, 
features = ["solana"] } -rand = "0.8.5" From 36aad15623a5f3b9039c2fd10fc8f2be3f145754 Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Thu, 9 Jan 2025 19:29:13 +0000 Subject: [PATCH 20/27] format --- .../programs/token-escrow/tests/test.rs | 57 +++++++--- .../token-escrow/tests/test_compressed_pda.rs | 100 ++++++++++-------- forester/src/batch_processor/common.rs | 6 +- forester/tests/batched_address_test.rs | 14 ++- forester/tests/batched_state_test.rs | 8 +- forester/tests/e2e_test.rs | 9 +- forester/tests/test_utils.rs | 33 +++--- sdk-libs/client/src/rpc/solana_rpc.rs | 5 +- 8 files changed, 136 insertions(+), 96 deletions(-) diff --git a/examples/token-escrow/programs/token-escrow/tests/test.rs b/examples/token-escrow/programs/token-escrow/tests/test.rs index b6caecc03..7ff502fa2 100644 --- a/examples/token-escrow/programs/token-escrow/tests/test.rs +++ b/examples/token-escrow/programs/token-escrow/tests/test.rs @@ -10,12 +10,20 @@ // - create escrow pda and just prove that utxo exists -> read utxo from compressed token account // release compressed tokens +use light_client::indexer::Indexer; use light_hasher::Poseidon; -use light_program_test::test_env::{setup_test_programs_with_accounts, EnvAccounts}; +use light_program_test::{ + indexer::{TestIndexer, TestIndexerExtensions}, + test_env::{setup_test_programs_with_accounts, EnvAccounts}, +}; use light_prover_client::gnark::helpers::{ProofType, ProverConfig}; use light_system_program::sdk::{compressed_account::MerkleContext, event::PublicTransactionEvent}; use light_test_utils::{ airdrop_lamports, assert_rpc_error, + conversions::{ + program_to_sdk_public_transaction_event, sdk_to_program_compressed_account, + sdk_to_program_compressed_proof, sdk_to_program_token_data, + }, spl::{create_mint_helper, mint_tokens_helper}, FeeConfig, RpcConnection, RpcError, TransactionParams, }; @@ -24,9 +32,6 @@ use solana_sdk::{ instruction::Instruction, pubkey::Pubkey, signature::Keypair, signer::Signer, transaction::Transaction, }; -use light_client::indexer::Indexer; -use light_program_test::indexer::{TestIndexer, TestIndexerExtensions}; -use light_test_utils::conversions::{program_to_sdk_public_transaction_event, sdk_to_program_compressed_account, sdk_to_program_compressed_proof, sdk_to_program_token_data}; use token_escrow::{ escrow_with_compressed_pda::sdk::get_token_owner_pda, escrow_with_pda::sdk::{ @@ -283,7 +288,10 @@ pub async fn perform_escrow + TestIndexerExtensi create_escrow_instruction(create_ix_inputs, *escrow_amount) } -pub async fn perform_escrow_with_event + TestIndexerExtensions>( +pub async fn perform_escrow_with_event< + R: RpcConnection, + I: Indexer + TestIndexerExtensions, +>( rpc: &mut R, test_indexer: &mut I, env: &EnvAccounts, @@ -313,7 +321,10 @@ pub async fn perform_escrow_with_event + TestInd .await? 
.unwrap(); let slot = rpc.get_slot().await.unwrap(); - test_indexer.add_compressed_accounts_with_token_data(slot, &program_to_sdk_public_transaction_event(event.0)); + test_indexer.add_compressed_accounts_with_token_data( + slot, + &program_to_sdk_public_transaction_event(event.0), + ); Ok(()) } @@ -336,7 +347,7 @@ pub async fn perform_escrow_failing + TestIndexe rpc.process_transaction(transaction).await } -pub async fn assert_escrow + TestIndexerExtensions> ( +pub async fn assert_escrow + TestIndexerExtensions>( rpc: &mut R, test_indexer: &I, payer_pubkey: &Pubkey, @@ -355,8 +366,10 @@ pub async fn assert_escrow + TestIndexerExtensio assert_eq!(token_data_escrow.amount, escrow_amount); assert_eq!(token_data_escrow.owner, token_owner_pda); - let token_data_change_compressed_token_account = - test_indexer.get_token_compressed_accounts()[0].token_data.clone(); + let token_data_change_compressed_token_account = test_indexer.get_token_compressed_accounts() + [0] + .token_data + .clone(); assert_eq!( token_data_change_compressed_token_account.amount, amount - escrow_amount @@ -449,7 +462,10 @@ pub async fn perform_withdrawal + TestIndexerExt create_withdrawal_escrow_instruction(create_ix_inputs, *withdrawal_amount) } -pub async fn perform_withdrawal_with_event + TestIndexerExtensions>( +pub async fn perform_withdrawal_with_event< + R: RpcConnection, + I: Indexer + TestIndexerExtensions, +>( rpc: &mut R, test_indexer: &mut I, env: &EnvAccounts, @@ -476,11 +492,17 @@ pub async fn perform_withdrawal_with_event + Tes .await? .unwrap(); let slot = rpc.get_slot().await.unwrap(); - test_indexer.add_compressed_accounts_with_token_data(slot, &program_to_sdk_public_transaction_event(event.0)); + test_indexer.add_compressed_accounts_with_token_data( + slot, + &program_to_sdk_public_transaction_event(event.0), + ); Ok(()) } -pub async fn perform_withdrawal_failing + TestIndexerExtensions>( +pub async fn perform_withdrawal_failing< + R: RpcConnection, + I: Indexer + TestIndexerExtensions, +>( rpc: &mut R, test_indexer: &mut I, env: &EnvAccounts, @@ -522,10 +544,13 @@ pub fn assert_withdrawal + TestIndexerExtensions "Withdrawal compressed account doesn't exist or has incorrect amount {} expected amount", withdrawal_amount ); - let token_data_escrow_change = test_indexer.get_token_compressed_accounts().iter().any(|x| { - x.token_data.owner == token_owner_pda - && x.token_data.amount == escrow_amount - withdrawal_amount - }); + let token_data_escrow_change = test_indexer + .get_token_compressed_accounts() + .iter() + .any(|x| { + x.token_data.owner == token_owner_pda + && x.token_data.amount == escrow_amount - withdrawal_amount + }); assert!( token_data_escrow_change, "Escrow change compressed account doesn't exist or has incorrect amount {} expected amount", diff --git a/examples/token-escrow/programs/token-escrow/tests/test_compressed_pda.rs b/examples/token-escrow/programs/token-escrow/tests/test_compressed_pda.rs index 49b30267c..24cc35e96 100644 --- a/examples/token-escrow/programs/token-escrow/tests/test_compressed_pda.rs +++ b/examples/token-escrow/programs/token-escrow/tests/test_compressed_pda.rs @@ -14,30 +14,42 @@ // release compressed tokens use anchor_lang::AnchorDeserialize; +use light_client::{indexer::Indexer, rpc::merkle_tree::MerkleTreeExt}; use light_hasher::{Hasher, Poseidon}; -use light_program_test::indexer::{TestIndexer, TestIndexerExtensions}; -use light_program_test::test_env::{setup_test_programs_with_accounts, EnvAccounts}; +use light_program_test::{ + indexer::{TestIndexer, 
TestIndexerExtensions}, + test_env::{setup_test_programs_with_accounts, EnvAccounts}, +}; use light_prover_client::gnark::helpers::{ProverConfig, ProverMode}; -use light_system_program::sdk::address::{derive_address, derive_address_legacy}; -use light_system_program::sdk::compressed_account::MerkleContext; -use light_system_program::sdk::event::PublicTransactionEvent; -use light_system_program::NewAddressParams; -use light_test_utils::conversions::{ - program_to_sdk_public_transaction_event, sdk_to_program_compressed_account, - sdk_to_program_compressed_proof, sdk_to_program_token_data, +use light_system_program::{ + sdk::{ + address::{derive_address, derive_address_legacy}, + compressed_account::MerkleContext, + event::PublicTransactionEvent, + }, + NewAddressParams, +}; +use light_test_utils::{ + conversions::{ + program_to_sdk_public_transaction_event, sdk_to_program_compressed_account, + sdk_to_program_compressed_proof, sdk_to_program_token_data, + }, + spl::{create_mint_helper, mint_tokens_helper}, + FeeConfig, RpcConnection, RpcError, TransactionParams, +}; +use solana_sdk::{ + instruction::{Instruction, InstructionError}, + signature::Keypair, + signer::Signer, + transaction::Transaction, }; -use light_test_utils::spl::{create_mint_helper, mint_tokens_helper}; -use light_test_utils::{FeeConfig, RpcConnection, RpcError, TransactionParams}; -use solana_sdk::instruction::{Instruction, InstructionError}; -use solana_sdk::signature::Keypair; -use solana_sdk::{signer::Signer, transaction::Transaction}; -use light_client::indexer::Indexer; -use light_client::rpc::merkle_tree::MerkleTreeExt; -use token_escrow::escrow_with_compressed_pda::sdk::{ - create_escrow_instruction, create_withdrawal_instruction, get_token_owner_pda, - CreateCompressedPdaEscrowInstructionInputs, CreateCompressedPdaWithdrawalInstructionInputs, +use token_escrow::{ + escrow_with_compressed_pda::sdk::{ + create_escrow_instruction, create_withdrawal_instruction, get_token_owner_pda, + CreateCompressedPdaEscrowInstructionInputs, CreateCompressedPdaWithdrawalInstructionInputs, + }, + EscrowError, EscrowTimeLock, }; -use token_escrow::{EscrowError, EscrowTimeLock}; #[tokio::test] async fn test_escrow_with_compressed_pda() { @@ -45,7 +57,7 @@ async fn test_escrow_with_compressed_pda() { String::from("token_escrow"), token_escrow::ID, )])) - .await; + .await; let payer = rpc.get_payer().insecure_clone(); let test_indexer = TestIndexer::init_from_env( @@ -69,7 +81,7 @@ async fn test_escrow_with_compressed_pda() { vec![amount], vec![payer.pubkey()], ) - .await; + .await; let seed = [1u8; 32]; let escrow_amount = 100u64; @@ -84,8 +96,8 @@ async fn test_escrow_with_compressed_pda() { escrow_amount, seed, ) - .await - .unwrap(); + .await + .unwrap(); let current_slot = rpc.get_slot().await.unwrap(); let lockup_end = lock_up_time + current_slot; @@ -98,7 +110,7 @@ async fn test_escrow_with_compressed_pda() { &seed, &lockup_end, ) - .await; + .await; println!("withdrawal _----------------------------------------------------------------"); let withdrawal_amount = escrow_amount; @@ -112,7 +124,7 @@ async fn test_escrow_with_compressed_pda() { new_lock_up_time, withdrawal_amount, ) - .await; + .await; let instruction_error = InstructionError::Custom(EscrowError::EscrowLocked.into()); let transaction_error = @@ -130,8 +142,8 @@ async fn test_escrow_with_compressed_pda() { new_lock_up_time, withdrawal_amount, ) - .await - .unwrap(); + .await + .unwrap(); assert_withdrawal( &mut rpc, @@ -143,7 +155,7 @@ async fn 
test_escrow_with_compressed_pda() { &seed, new_lock_up_time, ) - .await; + .await; } pub async fn perform_escrow_failing( @@ -164,7 +176,7 @@ pub async fn perform_escrow_failing( lock_up_time, escrow_amount, ) - .await; + .await; let latest_blockhash = rpc.get_latest_blockhash().await.unwrap(); let transaction = Transaction::new_signed_with_payer( &[instruction], @@ -194,7 +206,7 @@ pub async fn perform_escrow_with_event( lock_up_time, escrow_amount, ) - .await; + .await; let event = rpc .create_and_send_transaction_with_event::( &[instruction], @@ -210,9 +222,10 @@ pub async fn perform_escrow_with_event( ) .await?; let slot = rpc.get_slot().await.unwrap(); - test_indexer.add_compressed_accounts_with_token_data(slot, &program_to_sdk_public_transaction_event( - event.unwrap().0, - )); + test_indexer.add_compressed_accounts_with_token_data( + slot, + &program_to_sdk_public_transaction_event(event.unwrap().0), + ); Ok(()) } @@ -246,9 +259,11 @@ async fn create_escrow_ix( let rpc_result = test_indexer .create_proof_for_compressed_accounts( Some(vec![input_compressed_account_hash]), - Some(vec![compressed_input_account_with_context - .merkle_context - .merkle_tree_pubkey]), + Some(vec![ + compressed_input_account_with_context + .merkle_context + .merkle_tree_pubkey, + ]), Some(&[address]), Some(vec![env.address_merkle_tree_pubkey]), context, @@ -376,7 +391,7 @@ pub async fn perform_withdrawal_with_event( new_lock_up_time, escrow_amount, ) - .await; + .await; let event = rpc .create_and_send_transaction_with_event::( &[instruction], @@ -386,9 +401,10 @@ pub async fn perform_withdrawal_with_event( ) .await?; let slot = rpc.get_slot().await.unwrap(); - test_indexer.add_compressed_accounts_with_token_data(slot, &program_to_sdk_public_transaction_event( - event.unwrap().0, - )); + test_indexer.add_compressed_accounts_with_token_data( + slot, + &program_to_sdk_public_transaction_event(event.unwrap().0), + ); Ok(()) } @@ -410,7 +426,7 @@ pub async fn perform_withdrawal_failing( new_lock_up_time, escrow_amount, ) - .await; + .await; let latest_blockhash = rpc.get_latest_blockhash().await.unwrap(); let transaction = Transaction::new_signed_with_payer( &[instruction], diff --git a/forester/src/batch_processor/common.rs b/forester/src/batch_processor/common.rs index 8cf9b9a00..3fe4e4b81 100644 --- a/forester/src/batch_processor/common.rs +++ b/forester/src/batch_processor/common.rs @@ -108,7 +108,7 @@ impl + IndexerType> BatchProcessor { }; Self::calculate_completion_from_tree(account.data.as_mut_slice()) - } + } async fn get_output_queue_completion(&self, rpc: &mut R) -> f64 { let mut account = match rpc.get_account(self.context.output_queue).await { @@ -117,7 +117,7 @@ impl + IndexerType> BatchProcessor { }; Self::calculate_completion_from_queue(account.data.as_mut_slice()) - } + } fn calculate_completion_from_tree(data: &mut [u8]) -> f64 { let tree = match BatchedMerkleTreeAccount::state_tree_from_bytes_mut(data) { @@ -153,7 +153,7 @@ impl + IndexerType> BatchProcessor { let remaining = total - batch.get_num_inserted_zkps(); remaining as f64 / total as f64 - } + } async fn process_state_append(&self) -> Result { let mut rpc = self.context.rpc_pool.get_connection().await?; diff --git a/forester/tests/batched_address_test.rs b/forester/tests/batched_address_test.rs index 3ca0ec266..279db9c64 100644 --- a/forester/tests/batched_address_test.rs +++ b/forester/tests/batched_address_test.rs @@ -1,18 +1,17 @@ use std::{sync::Arc, time::Duration}; use forester::run_pipeline; -use forester_utils::{ - 
registry::{register_test_forester, update_test_forester}, -}; +use forester_utils::registry::{register_test_forester, update_test_forester}; use light_batched_merkle_tree::{ batch::BatchState, initialize_address_tree::InitAddressTreeAccountsInstructionData, merkle_tree::BatchedMerkleTreeAccount, }; use light_client::{ + indexer::AddressMerkleTreeAccounts, rpc::{solana_rpc::SolanaRpcUrl, RpcConnection, SolanaRpcConnection}, rpc_pool::SolanaRpcPool, }; -use light_program_test::test_env::EnvAccounts; +use light_program_test::{indexer::TestIndexer, test_env::EnvAccounts}; use light_prover_client::gnark::helpers::{LightValidatorConfig, ProverConfig, ProverMode}; use light_test_utils::{ create_address_test_program_sdk::perform_create_pda_with_event_rnd, e2e_test_env::E2ETestEnv, @@ -25,8 +24,7 @@ use tokio::{ time::{sleep, timeout}, }; use tracing::log::info; -use light_client::indexer::AddressMerkleTreeAccounts; -use light_program_test::indexer::TestIndexer; + use crate::test_utils::{forester_config, general_action_config, init, keypair_action_config}; mod test_utils; @@ -142,7 +140,7 @@ async fn test_address_batched() { println!("Creating new address batch tree..."); let merkle_tree_keypair = Keypair::new(); - env.indexer + env.indexer .add_address_merkle_tree( &mut env.rpc, &merkle_tree_keypair, @@ -150,7 +148,7 @@ async fn test_address_batched() { None, 2, ) - .await; + .await; env_accounts.batch_address_merkle_tree = merkle_tree_keypair.pubkey(); let address_trees: Vec = env diff --git a/forester/tests/batched_state_test.rs b/forester/tests/batched_state_test.rs index 74cdb57d6..f0da3c225 100644 --- a/forester/tests/batched_state_test.rs +++ b/forester/tests/batched_state_test.rs @@ -10,11 +10,9 @@ use light_client::{ rpc::{solana_rpc::SolanaRpcUrl, RpcConnection, SolanaRpcConnection}, rpc_pool::SolanaRpcPool, }; -use light_program_test::test_env::EnvAccounts; +use light_program_test::{indexer::TestIndexer, test_env::EnvAccounts}; use light_prover_client::gnark::helpers::LightValidatorConfig; -use light_test_utils::{ - e2e_test_env::{init_program_test_env, E2ETestEnv}, -}; +use light_test_utils::e2e_test_env::{init_program_test_env, E2ETestEnv}; use serial_test::serial; use solana_program::native_token::LAMPORTS_PER_SOL; use solana_sdk::{ @@ -25,7 +23,7 @@ use tokio::{ time::timeout, }; use tracing::log::info; -use light_program_test::indexer::TestIndexer; + use crate::test_utils::{forester_config, init}; mod test_utils; diff --git a/forester/tests/e2e_test.rs b/forester/tests/e2e_test.rs index 9fd58c40a..3c7e81c41 100644 --- a/forester/tests/e2e_test.rs +++ b/forester/tests/e2e_test.rs @@ -5,14 +5,13 @@ use account_compression::{ AddressMerkleTreeAccount, }; use forester::{queue_helpers::fetch_queue_item_data, run_pipeline, utils::get_protocol_config}; -use forester_utils::{ - registry::register_test_forester, -}; +use forester_utils::registry::register_test_forester; use light_client::{ + indexer::{AddressMerkleTreeAccounts, StateMerkleTreeAccounts}, rpc::{solana_rpc::SolanaRpcUrl, RpcConnection, RpcError, SolanaRpcConnection}, rpc_pool::SolanaRpcPool, }; -use light_program_test::test_env::EnvAccounts; +use light_program_test::{indexer::TestIndexer, test_env::EnvAccounts}; use light_prover_client::gnark::helpers::{ spawn_prover, LightValidatorConfig, ProverConfig, ProverMode, }; @@ -30,8 +29,6 @@ use tokio::{ sync::{mpsc, oneshot, Mutex}, time::{sleep, timeout}, }; -use light_client::indexer::{AddressMerkleTreeAccounts, StateMerkleTreeAccounts}; -use 
light_program_test::indexer::TestIndexer; mod test_utils; use test_utils::*; diff --git a/forester/tests/test_utils.rs b/forester/tests/test_utils.rs index b6f11106c..4cfbbc6ca 100644 --- a/forester/tests/test_utils.rs +++ b/forester/tests/test_utils.rs @@ -6,16 +6,15 @@ use forester::{ telemetry::setup_telemetry, ForesterConfig, }; -use light_client::rpc::RpcConnection; -use light_program_test::test_env::get_test_env_accounts; -use light_prover_client::gnark::helpers::{spawn_validator, LightValidatorConfig}; -use light_test_utils::{ - e2e_test_env::{GeneralActionConfig, KeypairActionConfig, User}, +use light_client::{ + indexer::{Indexer, IndexerError, NewAddressProofWithContext}, + rpc::RpcConnection, }; +use light_program_test::{indexer::TestIndexerExtensions, test_env::get_test_env_accounts}; +use light_prover_client::gnark::helpers::{spawn_validator, LightValidatorConfig}; +use light_test_utils::e2e_test_env::{GeneralActionConfig, KeypairActionConfig, User}; use solana_sdk::signature::{Keypair, Signer}; use tracing::debug; -use light_client::indexer::{Indexer, IndexerError, NewAddressProofWithContext}; -use light_program_test::indexer::TestIndexerExtensions; #[allow(dead_code)] pub async fn init(config: Option) { @@ -108,7 +107,10 @@ pub fn generate_pubkey_254() -> Pubkey { } #[allow(dead_code)] -pub async fn assert_new_address_proofs_for_photon_and_test_indexer + TestIndexerExtensions>( +pub async fn assert_new_address_proofs_for_photon_and_test_indexer< + R: RpcConnection, + I: Indexer + TestIndexerExtensions, +>( indexer: &mut I, trees: &[Pubkey], addresses: &[Pubkey], @@ -176,7 +178,10 @@ pub async fn assert_new_address_proofs_for_photon_and_test_indexer + TestIndexerExtensions>( +pub async fn assert_accounts_by_owner< + R: RpcConnection, + I: Indexer + TestIndexerExtensions, +>( indexer: &mut I, user: &User, photon_indexer: &PhotonIndexer, @@ -210,14 +215,16 @@ pub async fn assert_accounts_by_owner + TestInde } #[allow(dead_code)] -pub async fn assert_account_proofs_for_photon_and_test_indexer + TestIndexerExtensions>( +pub async fn assert_account_proofs_for_photon_and_test_indexer< + R: RpcConnection, + I: Indexer + TestIndexerExtensions, +>( indexer: &mut I, user_pubkey: &Pubkey, photon_indexer: &PhotonIndexer, ) { - let accs: Result, IndexerError> = indexer - .get_compressed_accounts_by_owner(user_pubkey) - .await; + let accs: Result, IndexerError> = + indexer.get_compressed_accounts_by_owner(user_pubkey).await; for account_hash in accs.unwrap() { let photon_result = photon_indexer .get_multiple_compressed_account_proofs(vec![account_hash.clone()]) diff --git a/sdk-libs/client/src/rpc/solana_rpc.rs b/sdk-libs/client/src/rpc/solana_rpc.rs index e2ec99a23..a98091a0b 100644 --- a/sdk-libs/client/src/rpc/solana_rpc.rs +++ b/sdk-libs/client/src/rpc/solana_rpc.rs @@ -27,10 +27,9 @@ use solana_transaction_status::{ use tokio::time::{sleep, Instant}; use crate::{ - rpc::{errors::RpcError, rpc_connection::RpcConnection}, + rpc::{errors::RpcError, merkle_tree::MerkleTreeExt, rpc_connection::RpcConnection}, transaction_params::TransactionParams, }; -use crate::rpc::merkle_tree::MerkleTreeExt; pub enum SolanaRpcUrl { Testnet, @@ -463,4 +462,4 @@ impl RpcConnection for SolanaRpcConnection { } } -impl MerkleTreeExt for SolanaRpcConnection {} \ No newline at end of file +impl MerkleTreeExt for SolanaRpcConnection {} From 3b16fc13be4b6f2aa5ea69eb54b0d02cdbac37c6 Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Thu, 9 Jan 2025 20:18:36 +0000 Subject: [PATCH 21/27] use `LeafIndexInfo` --- 
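Note (annotation, not part of the commit): this patch replaces the anonymous
(u32, [u8; 32], [u8; 32]) triples that carried (leaf index, leaf, tx hash)
through the indexer APIs with the named LeafIndexInfo struct. A condensed
sketch of the pattern, using only names that appear in the diff below:

    // Before: positional tuple; the meaning of each element lived in a
    // doc comment and callers destructured by position.
    //   fn get_leaf_indices_tx_hashes(..) -> Vec<(u32, [u8; 32], [u8; 32])>

    // After: same data, self-documenting call sites.
    pub struct LeafIndexInfo {
        pub leaf_index: u32,
        pub leaf: [u8; 32],
        pub tx_hash: [u8; 32],
    }

    for leaf_info in leaf_indices_tx_hashes.iter() {
        let index_bytes = leaf_info.leaf_index.to_be_bytes();
        let nullifier =
            Poseidon::hashv(&[&leaf_info.leaf, &index_bytes, &leaf_info.tx_hash])
                .unwrap();
        nullifiers.push(nullifier);
    }
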
forester-utils/src/instructions.rs | 14 +- forester/src/photon_indexer.rs | 4 +- sdk-libs/client/src/indexer/mod.rs | 141 ++---------------- .../program-test/src/indexer/test_indexer.rs | 18 ++- .../program-test/src/test_batch_forester.rs | 12 +- 5 files changed, 36 insertions(+), 153 deletions(-) diff --git a/forester-utils/src/instructions.rs b/forester-utils/src/instructions.rs index 2828dfecf..73f64bfc9 100644 --- a/forester-utils/src/instructions.rs +++ b/forester-utils/src/instructions.rs @@ -378,18 +378,18 @@ pub async fn create_nullify_batch_ix_data>( merkle_tree_pubkey, &leaf_indices_tx_hashes .iter() - .map(|(index, _, _)| *index as u64) + .map(|leaf_info| leaf_info.leaf_index as u64) .collect::>(), ); - for ((index, leaf, tx_hash), proof) in leaf_indices_tx_hashes.iter().zip(proofs.iter()) { - path_indices.push(*index); - leaves.push(*leaf); + for (leaf_info, proof) in leaf_indices_tx_hashes.iter().zip(proofs.iter()) { + path_indices.push(leaf_info.leaf_index); + leaves.push(leaf_info.leaf); old_leaves.push(proof.leaf); merkle_proofs.push(proof.proof.clone()); - tx_hashes.push(*tx_hash); - let index_bytes = index.to_be_bytes(); - let nullifier = Poseidon::hashv(&[leaf, &index_bytes, tx_hash]).unwrap(); + tx_hashes.push(leaf_info.tx_hash); + let index_bytes = leaf_info.leaf_index.to_be_bytes(); + let nullifier = Poseidon::hashv(&[&leaf_info.leaf, &index_bytes, &leaf_info.tx_hash]).unwrap(); nullifiers.push(nullifier); } diff --git a/forester/src/photon_indexer.rs b/forester/src/photon_indexer.rs index 78e0fa2ad..ae144b3f2 100644 --- a/forester/src/photon_indexer.rs +++ b/forester/src/photon_indexer.rs @@ -16,7 +16,7 @@ use photon_api::{ }; use solana_sdk::bs58; use tracing::debug; - +use light_client::indexer::LeafIndexInfo; use crate::utils::decode_hash; pub struct PhotonIndexer { @@ -244,7 +244,7 @@ impl Indexer for PhotonIndexer { &mut self, _merkle_tree_pubkey: Pubkey, _zkp_batch_size: usize, - ) -> Vec<(u32, [u8; 32], [u8; 32])> { + ) -> Vec { todo!() } diff --git a/sdk-libs/client/src/indexer/mod.rs b/sdk-libs/client/src/indexer/mod.rs index 9866167e8..29f7970c1 100644 --- a/sdk-libs/client/src/indexer/mod.rs +++ b/sdk-libs/client/src/indexer/mod.rs @@ -17,7 +17,7 @@ use crate::rpc::RpcConnection; #[derive(Error, Debug)] pub enum IndexerError { #[error("RPC Error: {0}")] - RpcError(#[from] solana_client::client_error::ClientError), + RpcError(#[from] Box), #[error("failed to deserialize account data")] DeserializeError(#[from] solana_sdk::program_error::ProgramError), #[error("failed to copy merkle tree")] @@ -49,15 +49,6 @@ pub trait Indexer: Sync + Send + Debug + 'static { fn get_subtrees(&self, merkle_tree_pubkey: [u8; 32]) -> Result, IndexerError>; - // fn add_event_and_compressed_accounts( - // &mut self, - // slot: u64, - // event: &PublicTransactionEvent, - // ) -> ( - // Vec, - // Vec, - // ); - async fn create_proof_for_compressed_accounts( &mut self, compressed_accounts: Option>, @@ -89,30 +80,21 @@ pub trait Indexer: Sync + Send + Debug + 'static { addresses: Vec<[u8; 32]>, ) -> Result>, IndexerError>; - // TODO: remove? fn get_proofs_by_indices( &mut self, merkle_tree_pubkey: Pubkey, indices: &[u64], ) -> Vec; - // TODO: remove? fn get_leaf_indices_tx_hashes( &mut self, merkle_tree_pubkey: Pubkey, zkp_batch_size: usize, - ) -> Vec<(u32, [u8; 32], [u8; 32])>; + ) -> Vec; - // TODO: remove? 
fn get_address_merkle_trees(&self) -> &Vec; } -// #[derive(Debug, Clone)] -// pub struct TokenDataWithMerkleContext { -// pub token_data: TokenData, -// pub compressed_account: CompressedAccountWithMerkleContext, -// } - #[derive(Debug, Clone)] pub struct MerkleProof { pub hash: String, @@ -151,6 +133,13 @@ pub struct AddressMerkleTreeAccounts { pub queue: Pubkey, } +#[derive(Debug, Clone)] +pub struct LeafIndexInfo { + pub leaf_index: u32, + pub leaf: [u8; 32], + pub tx_hash: [u8; 32], +} + #[derive(Debug, Clone)] pub struct StateMerkleTreeBundle { pub rollover_fee: i64, @@ -158,8 +147,7 @@ pub struct StateMerkleTreeBundle { pub accounts: StateMerkleTreeAccounts, pub version: u64, pub output_queue_elements: Vec<[u8; 32]>, - /// leaf index, leaf, tx hash - pub input_leaf_indices: Vec<(u32, [u8; 32], [u8; 32])>, + pub input_leaf_indices: Vec, } #[derive(Debug, Clone)] @@ -170,112 +158,3 @@ pub struct AddressMerkleTreeBundle { pub accounts: AddressMerkleTreeAccounts, pub queue_elements: Vec<[u8; 32]>, } - -// use std::{fmt::Debug, future::Future}; -// -// use light_concurrent_merkle_tree::light_hasher::Poseidon; -// use light_indexed_merkle_tree::{ -// array::{IndexedArray, IndexedElement}, -// reference::IndexedMerkleTree, -// }; -// use light_merkle_tree_reference::MerkleTree; -// use light_sdk::{ -// compressed_account::CompressedAccountWithMerkleContext, event::PublicTransactionEvent, -// proof::ProofRpcResult, token::TokenDataWithMerkleContext, -// }; -// use num_bigint::BigUint; -// use solana_sdk::pubkey::Pubkey; -// use thiserror::Error; -// -// use crate::rpc::RpcConnection; -// -// #[derive(Error, Debug)] -// pub enum IndexerError { -// #[error("RPC Error: {0}")] -// RpcError(#[from] solana_client::client_error::ClientError), -// #[error("failed to deserialize account data")] -// DeserializeError(#[from] solana_sdk::program_error::ProgramError), -// #[error("failed to copy merkle tree")] -// CopyMerkleTreeError(#[from] std::io::Error), -// #[error("error: {0:?}")] -// Custom(String), -// #[error("unknown error")] -// Unknown, -// } -// -// pub trait Indexer: Sync + Send + Debug + 'static { -// fn add_event_and_compressed_accounts( -// &mut self, -// event: &PublicTransactionEvent, -// ) -> ( -// Vec, -// Vec, -// ); -// -// fn create_proof_for_compressed_accounts( -// &mut self, -// compressed_accounts: Option<&[[u8; 32]]>, -// state_merkle_tree_pubkeys: Option<&[Pubkey]>, -// new_addresses: Option<&[[u8; 32]]>, -// address_merkle_tree_pubkeys: Option>, -// rpc: &mut R, -// ) -> impl Future; -// -// fn get_compressed_accounts_by_owner( -// &self, -// owner: &Pubkey, -// ) -> Vec; -// } -// -// #[derive(Debug, Clone)] -// pub struct MerkleProof { -// pub hash: String, -// pub leaf_index: u64, -// pub merkle_tree: String, -// pub proof: Vec<[u8; 32]>, -// pub root_seq: u64, -// } -// -// // For consistency with the Photon API. 
-// #[derive(Clone, Default, Debug, PartialEq)] -// pub struct NewAddressProofWithContext { -// pub merkle_tree: [u8; 32], -// pub root: [u8; 32], -// pub root_seq: u64, -// pub low_address_index: u64, -// pub low_address_value: [u8; 32], -// pub low_address_next_index: u64, -// pub low_address_next_value: [u8; 32], -// pub low_address_proof: [[u8; 32]; 16], -// pub new_low_element: Option>, -// pub new_element: Option>, -// pub new_element_next_value: Option, -// } -// -// #[derive(Debug, Clone, Copy, Ord, PartialOrd, Eq, PartialEq)] -// pub struct StateMerkleTreeAccounts { -// pub merkle_tree: Pubkey, -// pub nullifier_queue: Pubkey, -// pub cpi_context: Pubkey, -// } -// -// #[derive(Debug, Clone, Copy)] -// pub struct AddressMerkleTreeAccounts { -// pub merkle_tree: Pubkey, -// pub queue: Pubkey, -// } -// -// #[derive(Debug, Clone)] -// pub struct StateMerkleTreeBundle { -// pub rollover_fee: u64, -// pub merkle_tree: Box>, -// pub accounts: StateMerkleTreeAccounts, -// } -// -// #[derive(Debug, Clone)] -// pub struct AddressMerkleTreeBundle { -// pub rollover_fee: u64, -// pub merkle_tree: Box>, -// pub indexed_array: Box>, -// pub accounts: AddressMerkleTreeAccounts, -// } diff --git a/sdk-libs/program-test/src/indexer/test_indexer.rs b/sdk-libs/program-test/src/indexer/test_indexer.rs index 3e4704c04..2568c072f 100644 --- a/sdk-libs/program-test/src/indexer/test_indexer.rs +++ b/sdk-libs/program-test/src/indexer/test_indexer.rs @@ -67,7 +67,7 @@ use solana_sdk::{ pubkey::Pubkey, signature::{Keypair, Signer}, }; - +use light_client::indexer::LeafIndexInfo; use crate::{ indexer::{ utils::create_address_merkle_tree_and_queue_account_with_assert, TestIndexerExtensions, @@ -570,7 +570,7 @@ where &mut self, merkle_tree_pubkey: Pubkey, zkp_batch_size: usize, - ) -> Vec<(u32, [u8; 32], [u8; 32])> { + ) -> Vec { let state_merkle_tree_bundle = self .state_merkle_trees .iter_mut() @@ -1001,12 +1001,12 @@ where let batch_size = batch.zkp_batch_size; let leaf_indices_tx_hashes = state_merkle_tree_bundle.input_leaf_indices[..batch_size as usize].to_vec(); - for (index, leaf, tx_hash) in leaf_indices_tx_hashes.iter() { - let index = *index as usize; - let leaf = *leaf; + for leaf_info in leaf_indices_tx_hashes.iter() { + let index = leaf_info.leaf_index as usize; + let leaf = leaf_info.leaf; let index_bytes = index.to_be_bytes(); - let nullifier = Poseidon::hashv(&[&leaf, &index_bytes, tx_hash]).unwrap(); + let nullifier = Poseidon::hashv(&[&leaf, &index_bytes, &leaf_info.tx_hash]).unwrap(); state_merkle_tree_bundle.input_leaf_indices.remove(0); state_merkle_tree_bundle @@ -1675,7 +1675,11 @@ where let leaf_hash = event.input_compressed_account_hashes[i]; bundle .input_leaf_indices - .push((leaf_index, leaf_hash, tx_hash)); + .push(LeafIndexInfo { + leaf_index, + leaf: leaf_hash, + tx_hash, + }); } } let mut new_addresses = vec![]; diff --git a/sdk-libs/program-test/src/test_batch_forester.rs b/sdk-libs/program-test/src/test_batch_forester.rs index bb08cce3b..f733ca11d 100644 --- a/sdk-libs/program-test/src/test_batch_forester.rs +++ b/sdk-libs/program-test/src/test_batch_forester.rs @@ -299,10 +299,10 @@ pub async fn get_batched_nullify_ix_data( let mut tx_hashes = Vec::new(); let mut old_leaves = Vec::new(); let mut path_indices = Vec::new(); - for (index, leaf, tx_hash) in leaf_indices_tx_hashes.iter() { - path_indices.push(*index); - let index = *index as usize; - let leaf = *leaf; + for leaf_info in leaf_indices_tx_hashes.iter() { + path_indices.push(leaf_info.leaf_index); + let index = 
leaf_info.leaf_index as usize; + let leaf = leaf_info.leaf; leaves.push(leaf); // + 2 because next index is + 1 and we need to init the leaf in @@ -324,8 +324,8 @@ pub async fn get_batched_nullify_ix_data( bundle.input_leaf_indices.remove(0); let index_bytes = index.to_be_bytes(); use light_hasher::Hasher; - let nullifier = Poseidon::hashv(&[&leaf, &index_bytes, tx_hash]).unwrap(); - tx_hashes.push(*tx_hash); + let nullifier = Poseidon::hashv(&[&leaf, &index_bytes, &leaf_info.tx_hash]).unwrap(); + tx_hashes.push(leaf_info.tx_hash); nullifiers.push(nullifier); bundle.merkle_tree.update(&nullifier, index).unwrap(); } From 897a69475c83185cf6fc3537e5e2d2498c949a64 Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Fri, 10 Jan 2025 00:29:18 +0000 Subject: [PATCH 22/27] token-escrow & sdk-test-program cargo.toml deps fixes --- Cargo.lock | 6 ------ examples/token-escrow/programs/token-escrow/Cargo.toml | 4 ++-- program-tests/sdk-test-program/programs/sdk-test/Cargo.toml | 6 ------ 3 files changed, 2 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 08e83d793..2d781dfb4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4665,17 +4665,11 @@ name = "sdk-test" version = "0.7.0" dependencies = [ "anchor-lang", - "borsh 0.10.3", "light-client", "light-hasher", - "light-macros", "light-program-test", "light-sdk", - "light-sdk-macros", "light-test-utils", - "light-utils 1.1.0", - "light-verifier", - "solana-program-test", "solana-sdk", "tokio", ] diff --git a/examples/token-escrow/programs/token-escrow/Cargo.toml b/examples/token-escrow/programs/token-escrow/Cargo.toml index 3243b78bb..812b0e538 100644 --- a/examples/token-escrow/programs/token-escrow/Cargo.toml +++ b/examples/token-escrow/programs/token-escrow/Cargo.toml @@ -25,14 +25,14 @@ light-compressed-token = { workspace = true } light-system-program = { workspace = true } account-compression = { workspace = true } light-hasher = { workspace = true } -light-verifier = { workspace = true } light-sdk = { workspace = true, features = ["legacy"] } -light-client = { workspace = true } [target.'cfg(not(target_os = "solana"))'.dependencies] solana-sdk = { workspace = true } [dev-dependencies] +light-verifier = { workspace = true } +light-client = { workspace = true } light-test-utils = { workspace = true, features = ["devenv"] } light-program-test = { workspace = true, features = ["devenv"] } tokio = { workspace = true } diff --git a/program-tests/sdk-test-program/programs/sdk-test/Cargo.toml b/program-tests/sdk-test-program/programs/sdk-test/Cargo.toml index 7366f0391..36aae9c92 100644 --- a/program-tests/sdk-test-program/programs/sdk-test/Cargo.toml +++ b/program-tests/sdk-test-program/programs/sdk-test/Cargo.toml @@ -22,13 +22,8 @@ idl-build = ["anchor-lang/idl-build", "light-sdk/idl-build"] [dependencies] anchor-lang = { workspace=true} -borsh = { workspace = true } light-hasher = { workspace = true, features = ["solana"] } -light-macros = { workspace = true } light-sdk = { workspace = true } -light-sdk-macros = { workspace = true } -light-utils = { workspace = true } -light-verifier = { workspace = true } [target.'cfg(not(target_os = "solana"))'.dependencies] solana-sdk = { workspace = true } @@ -37,5 +32,4 @@ solana-sdk = { workspace = true } light-client = { workspace = true , features = ["devenv"]} light-program-test = { workspace = true, features = ["devenv"] } light-test-utils = { workspace = true, features = ["devenv"] } -solana-program-test = { workspace = true } tokio = { workspace = true } From 
2219a457f3acd9de1034167f5d2ed84d4da27403 Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Fri, 10 Jan 2025 07:45:47 +0000 Subject: [PATCH 23/27] remove memory limit option from test workflow --- .github/workflows/rust.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 1e005aa3b..83f77ab33 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -33,8 +33,6 @@ jobs: test: if: github.event.pull_request.draft == false runs-on: ubuntu-latest - container: - options: --memory=4g steps: - name: Checkout sources uses: actions/checkout@v4 From 65b640fd8340d42420fd60317b2d409f2a31e061 Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Fri, 10 Jan 2025 07:51:00 +0000 Subject: [PATCH 24/27] format --- forester-utils/src/instructions.rs | 3 ++- forester/src/photon_indexer.rs | 6 ++--- js/compressed-token/rollup.config.js | 6 +---- js/stateless.js/rollup.config.js | 6 +---- .../program-test/src/indexer/test_indexer.rs | 22 +++++++++---------- 5 files changed, 18 insertions(+), 25 deletions(-) diff --git a/forester-utils/src/instructions.rs b/forester-utils/src/instructions.rs index 73f64bfc9..29dae8589 100644 --- a/forester-utils/src/instructions.rs +++ b/forester-utils/src/instructions.rs @@ -389,7 +389,8 @@ pub async fn create_nullify_batch_ix_data>( merkle_proofs.push(proof.proof.clone()); tx_hashes.push(leaf_info.tx_hash); let index_bytes = leaf_info.leaf_index.to_be_bytes(); - let nullifier = Poseidon::hashv(&[&leaf_info.leaf, &index_bytes, &leaf_info.tx_hash]).unwrap(); + let nullifier = + Poseidon::hashv(&[&leaf_info.leaf, &index_bytes, &leaf_info.tx_hash]).unwrap(); nullifiers.push(nullifier); } diff --git a/forester/src/photon_indexer.rs b/forester/src/photon_indexer.rs index ae144b3f2..513841323 100644 --- a/forester/src/photon_indexer.rs +++ b/forester/src/photon_indexer.rs @@ -4,8 +4,8 @@ use account_compression::initialize_address_merkle_tree::Pubkey; use async_trait::async_trait; use light_client::{ indexer::{ - AddressMerkleTreeBundle, Indexer, IndexerError, MerkleProof, NewAddressProofWithContext, - ProofOfLeaf, + AddressMerkleTreeBundle, Indexer, IndexerError, LeafIndexInfo, MerkleProof, + NewAddressProofWithContext, ProofOfLeaf, }, rpc::RpcConnection, }; @@ -16,7 +16,7 @@ use photon_api::{ }; use solana_sdk::bs58; use tracing::debug; -use light_client::indexer::LeafIndexInfo; + use crate::utils::decode_hash; pub struct PhotonIndexer { diff --git a/js/compressed-token/rollup.config.js b/js/compressed-token/rollup.config.js index 6f02c4084..5e1d6f531 100644 --- a/js/compressed-token/rollup.config.js +++ b/js/compressed-token/rollup.config.js @@ -79,8 +79,4 @@ const typesConfig = { plugins: [dts()], }; -export default [ - rolls('cjs', 'browser'), - rolls('cjs', 'node'), - typesConfig, -]; +export default [rolls('cjs', 'browser'), rolls('cjs', 'node'), typesConfig]; diff --git a/js/stateless.js/rollup.config.js b/js/stateless.js/rollup.config.js index 72140f365..6285eb30b 100644 --- a/js/stateless.js/rollup.config.js +++ b/js/stateless.js/rollup.config.js @@ -62,8 +62,4 @@ const typesConfig = { plugins: [dts()], }; -export default [ - rolls('cjs', 'browser'), - rolls('cjs', 'node'), - typesConfig, -]; +export default [rolls('cjs', 'browser'), rolls('cjs', 'node'), typesConfig]; diff --git a/sdk-libs/program-test/src/indexer/test_indexer.rs b/sdk-libs/program-test/src/indexer/test_indexer.rs index 2568c072f..b966b2611 100644 --- a/sdk-libs/program-test/src/indexer/test_indexer.rs +++ 
b/sdk-libs/program-test/src/indexer/test_indexer.rs @@ -17,8 +17,9 @@ use light_batched_merkle_tree::{ }; use light_client::{ indexer::{ - AddressMerkleTreeAccounts, AddressMerkleTreeBundle, Indexer, IndexerError, MerkleProof, - NewAddressProofWithContext, ProofOfLeaf, StateMerkleTreeAccounts, StateMerkleTreeBundle, + AddressMerkleTreeAccounts, AddressMerkleTreeBundle, Indexer, IndexerError, LeafIndexInfo, + MerkleProof, NewAddressProofWithContext, ProofOfLeaf, StateMerkleTreeAccounts, + StateMerkleTreeBundle, }, rpc::{merkle_tree::MerkleTreeExt, RpcConnection}, transaction_params::FeeConfig, @@ -67,7 +68,7 @@ use solana_sdk::{ pubkey::Pubkey, signature::{Keypair, Signer}, }; -use light_client::indexer::LeafIndexInfo; + use crate::{ indexer::{ utils::create_address_merkle_tree_and_queue_account_with_assert, TestIndexerExtensions, @@ -1006,7 +1007,8 @@ where let leaf = leaf_info.leaf; let index_bytes = index.to_be_bytes(); - let nullifier = Poseidon::hashv(&[&leaf, &index_bytes, &leaf_info.tx_hash]).unwrap(); + let nullifier = + Poseidon::hashv(&[&leaf, &index_bytes, &leaf_info.tx_hash]).unwrap(); state_merkle_tree_bundle.input_leaf_indices.remove(0); state_merkle_tree_bundle @@ -1673,13 +1675,11 @@ where // Store leaf indices of input accounts for batched trees if bundle.version == 2 { let leaf_hash = event.input_compressed_account_hashes[i]; - bundle - .input_leaf_indices - .push(LeafIndexInfo { - leaf_index, - leaf: leaf_hash, - tx_hash, - }); + bundle.input_leaf_indices.push(LeafIndexInfo { + leaf_index, + leaf: leaf_hash, + tx_hash, + }); } } let mut new_addresses = vec![]; From ad40da329c736886ad4d6c41b700c64bcad14e33 Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Fri, 10 Jan 2025 10:07:13 +0000 Subject: [PATCH 25/27] clippy configuration more strict & simplified --- Cargo.toml | 18 ++-- forester/src/epoch_manager.rs | 2 +- forester/src/send_transaction.rs | 2 +- .../batched-merkle-tree/src/merkle_tree.rs | 8 +- .../create-address-test-program/Cargo.toml | 7 ++ .../programs/sdk-test/Cargo.toml | 8 ++ program-tests/utils/src/assert_token_tx.rs | 6 +- program-tests/utils/src/lib.rs | 5 +- program-tests/utils/src/test_forester.rs | 2 +- .../registry/src/protocol_config/state.rs | 10 +- programs/system/src/invoke/processor.rs | 2 +- .../system/src/invoke/verify_state_proof.rs | 6 +- prover/client/src/indexed_changelog.rs | 2 +- scripts/lint.sh | 10 +- sdk-libs/client/src/rpc/errors.rs | 93 +++++++++++++------ sdk-libs/client/src/rpc/solana_rpc.rs | 2 +- .../photon-api/src/models/account_state.rs | 11 ++- sdk-libs/program-test/src/test_rpc.rs | 8 +- 18 files changed, 127 insertions(+), 75 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 7a58ce542..a6e60d843 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,14 +23,14 @@ members = [ "sdk-libs/program-test", "xtask", "examples/token-escrow/programs/*", - "program-tests/account-compression-test/", - "program-tests/compressed-token-test/", - "program-tests/e2e-test/", - "program-tests/registry-test/", - "program-tests/system-cpi-test/", - "program-tests/system-test/", - "program-tests/sdk-test-program/programs/sdk-test/", - "program-tests/create-address-test-program/", + "program-tests/account-compression-test", + "program-tests/compressed-token-test", + "program-tests/e2e-test", + "program-tests/registry-test", + "program-tests/system-cpi-test", + "program-tests/system-test", + "program-tests/sdk-test-program/programs/sdk-test", + "program-tests/create-address-test-program", "program-tests/utils", "program-tests/merkle-tree", 
"forester-utils", @@ -149,5 +149,5 @@ serial_test = "3.1.1" level = "allow" check-cfg = [ 'cfg(target_os, values("solana"))', - 'cfg(feature, values("frozen-abi", "no-entrypoint"))', + 'cfg(feature, values("frozen-abi", "no-entrypoint", "anchor-debug"))', ] diff --git a/forester/src/epoch_manager.rs b/forester/src/epoch_manager.rs index 067a01f04..4a3e5b640 100644 --- a/forester/src/epoch_manager.rs +++ b/forester/src/epoch_manager.rs @@ -296,7 +296,7 @@ impl + IndexerType> EpochManager { "last_epoch: {:?}, current_epoch: {:?}, slot: {:?}", last_epoch, current_epoch, slot ); - if last_epoch.map_or(true, |last| current_epoch > last) { + if last_epoch.is_none_or(|last| current_epoch > last) { debug!("New epoch detected: {}", current_epoch); let phases = get_epoch_phases(&self.protocol_config, current_epoch); if slot < phases.registration.end { diff --git a/forester/src/send_transaction.rs b/forester/src/send_transaction.rs index 63ac5e437..308c7d49d 100644 --- a/forester/src/send_transaction.rs +++ b/forester/src/send_transaction.rs @@ -66,7 +66,7 @@ const TIMEOUT_CHECK_ENABLED: bool = false; /// /// Strategy: /// 1. Execute transaction batches until max number of batches is -/// reached or light slot ended (global timeout). +/// reached or light slot ended (global timeout). /// 2. Fetch queue items. /// 3. If work items is empty, await minimum batch time. /// 4. Fetch recent blockhash. diff --git a/program-libs/batched-merkle-tree/src/merkle_tree.rs b/program-libs/batched-merkle-tree/src/merkle_tree.rs index 66f3f277f..5ebfbf78e 100644 --- a/program-libs/batched-merkle-tree/src/merkle_tree.rs +++ b/program-libs/batched-merkle-tree/src/merkle_tree.rs @@ -638,7 +638,7 @@ impl BatchedMerkleTreeAccount { /// - value is committed to bloom_filter for non-inclusion proof /// - nullifier is Hash(value, tx_hash), committed to leaves hashchain /// - tx_hash is hash of all inputs and outputs - /// -> we can access the history of how commitments are spent in zkps for example fraud proofs + /// -> we can access the history of how commitments are spent in zkps for example fraud proofs pub fn insert_nullifier_into_current_batch( &mut self, compressed_account_hash: &[u8; 32], @@ -753,9 +753,9 @@ impl BatchedMerkleTreeAccount { /// 1. Previous batch must be inserted and bloom filter must not be wiped. /// 2. Current batch must be 50% full /// 3. 
if yes - /// 3.1 zero out bloom filter - /// 3.2 mark bloom filter as wiped - /// 3.3 zero out roots if needed + /// 3.1 zero out bloom filter + /// 3.2 mark bloom filter as wiped + /// 3.3 zero out roots if needed pub fn wipe_previous_batch_bloom_filter(&mut self) -> Result<(), BatchedMerkleTreeError> { let current_batch = self .get_metadata() diff --git a/program-tests/create-address-test-program/Cargo.toml b/program-tests/create-address-test-program/Cargo.toml index 49ce7d676..c64cb8c41 100644 --- a/program-tests/create-address-test-program/Cargo.toml +++ b/program-tests/create-address-test-program/Cargo.toml @@ -25,3 +25,10 @@ light-system-program = { workspace = true, features = ["cpi"] } account-compression = { workspace = true , features = ["cpi"]} light-hasher = { workspace = true, features = ["solana"] } light-utils = { workspace = true } + +[lints.rust.unexpected_cfgs] +level = "allow" +check-cfg = [ + 'cfg(target_os, values("solana"))', + 'cfg(feature, values("frozen-abi", "no-entrypoint", "anchor-debug"))', +] diff --git a/program-tests/sdk-test-program/programs/sdk-test/Cargo.toml b/program-tests/sdk-test-program/programs/sdk-test/Cargo.toml index 36aae9c92..0845e5c14 100644 --- a/program-tests/sdk-test-program/programs/sdk-test/Cargo.toml +++ b/program-tests/sdk-test-program/programs/sdk-test/Cargo.toml @@ -33,3 +33,11 @@ light-client = { workspace = true , features = ["devenv"]} light-program-test = { workspace = true, features = ["devenv"] } light-test-utils = { workspace = true, features = ["devenv"] } tokio = { workspace = true } + + +[lints.rust.unexpected_cfgs] +level = "allow" +check-cfg = [ + 'cfg(target_os, values("solana"))', + 'cfg(feature, values("frozen-abi", "no-entrypoint", "anchor-debug"))', +] diff --git a/program-tests/utils/src/assert_token_tx.rs b/program-tests/utils/src/assert_token_tx.rs index 69c6596ff..f7f4e527b 100644 --- a/program-tests/utils/src/assert_token_tx.rs +++ b/program-tests/utils/src/assert_token_tx.rs @@ -20,7 +20,7 @@ use crate::assert_compressed_tx::{ /// 4. Merkle tree was updated correctly /// 5. TODO: Fees have been paid (after fee refactor) /// 6. 
Check compression amount was transferred (outside of this function) -/// No addresses in token transactions +/// No addresses in token transactions #[allow(clippy::too_many_arguments)] pub async fn assert_transfer + TestIndexerExtensions>( context: &mut R, @@ -192,9 +192,9 @@ pub fn assert_compressed_token_accounts< } #[allow(clippy::too_many_arguments)] -pub async fn assert_mint_to<'a, R: RpcConnection, I: Indexer + TestIndexerExtensions>( +pub async fn assert_mint_to + TestIndexerExtensions>( rpc: &mut R, - test_indexer: &'a mut I, + test_indexer: &mut I, recipients: &[Pubkey], mint: Pubkey, amounts: &[u64], diff --git a/program-tests/utils/src/lib.rs b/program-tests/utils/src/lib.rs index 6cfc74829..9cc84797d 100644 --- a/program-tests/utils/src/lib.rs +++ b/program-tests/utils/src/lib.rs @@ -176,7 +176,10 @@ pub fn assert_custom_error_or_program_error( ]; let is_accepted = accepted_errors.iter().any(|(index, error)| { - matches!(result, Err(RpcError::TransactionError(transaction::TransactionError::InstructionError(i, ref e))) if i == (*index as u8) && e == error) + matches!(&result, Err(RpcError::TransactionError(box_err)) if matches!( + **box_err, + transaction::TransactionError::InstructionError(i, ref e) if i == (*index as u8) && e == error + )) }); if !is_accepted { diff --git a/program-tests/utils/src/test_forester.rs b/program-tests/utils/src/test_forester.rs index 900dee482..1d62002ec 100644 --- a/program-tests/utils/src/test_forester.rs +++ b/program-tests/utils/src/test_forester.rs @@ -216,7 +216,7 @@ pub async fn nullify_compressed_accounts( Ok(()) } -async fn assert_value_is_marked_in_queue<'a, R: RpcConnection>( +async fn assert_value_is_marked_in_queue( rpc: &mut R, state_tree_bundle: &mut StateMerkleTreeBundle, index_in_nullifier_queue: &usize, diff --git a/programs/registry/src/protocol_config/state.rs b/programs/registry/src/protocol_config/state.rs index b4c441aa1..24a6eca46 100644 --- a/programs/registry/src/protocol_config/state.rs +++ b/programs/registry/src/protocol_config/state.rs @@ -96,16 +96,16 @@ pub enum EpochState { /// /// To get the latest registry epoch: /// - slot = 0; -/// let current_registry_epoch = (slot - genesis) / active_phase_length; -/// current_registry_epoch = (0 - 0) / 1000 = 0; -/// first active phase starts at genesis + registration_phase_length +/// let current_registry_epoch = (slot - genesis) / active_phase_length; +/// current_registry_epoch = (0 - 0) / 1000 = 0; +/// first active phase starts at genesis + registration_phase_length /// = 0 + 100 = 100; /// /// To get the current active epoch: /// - slot = 100; -/// let current_active_epoch = +/// let current_active_epoch = /// (slot - genesis - registration_phase_length) / active_phase_length; -/// current_active_epoch = (100 - 0 - 100) / 1000 = 0; +/// current_active_epoch = (100 - 0 - 100) / 1000 = 0; /// /// Epoch 0: /// - Registration 0: 0 - 100 diff --git a/programs/system/src/invoke/processor.rs b/programs/system/src/invoke/processor.rs index 775229bb4..5ae5866a0 100644 --- a/programs/system/src/invoke/processor.rs +++ b/programs/system/src/invoke/processor.rs @@ -372,7 +372,7 @@ pub fn process< /// Network fee distribution: /// - if any account is created or modified -> transfer network fee (5000 lamports) -/// (Previously we didn't charge for appends now we have to since values go into a queue.) +/// (Previously we didn't charge for appends now we have to since values go into a queue.) 
/// - if an address is created -> transfer an additional network fee (5000 lamports) /// /// Examples: diff --git a/programs/system/src/invoke/verify_state_proof.rs b/programs/system/src/invoke/verify_state_proof.rs index ce27fac16..6766b21ba 100644 --- a/programs/system/src/invoke/verify_state_proof.rs +++ b/programs/system/src/invoke/verify_state_proof.rs @@ -233,10 +233,10 @@ fn fetch_root( /// For each read-only account /// 1. prove inclusion by index in the output queue if leaf index should exist in the output queue. -/// 1.1. if proved inclusion by index, return Ok. +/// 1.1. if proved inclusion by index, return Ok. /// 2. prove non-inclusion in the bloom filters -/// 2.1. skip wiped batches. -/// 2.2. prove non-inclusion in the bloom filters for each batch. +/// 2.1. skip wiped batches. +/// 2.2. prove non-inclusion in the bloom filters for each batch. #[inline(always)] pub fn verify_read_only_account_inclusion<'a>( remaining_accounts: &'a [AccountInfo<'_>], diff --git a/prover/client/src/indexed_changelog.rs b/prover/client/src/indexed_changelog.rs index 2fdff8abb..04c8236f9 100644 --- a/prover/client/src/indexed_changelog.rs +++ b/prover/client/src/indexed_changelog.rs @@ -7,7 +7,7 @@ use num_bigint::BigUint; /// Patch the indexed changelogs. /// 1. find changelog entries of the same index /// 2. iterate over entries -/// 2.1 if next_value < new_element.value patch element +/// 2.1 if next_value < new_element.value patch element /// 3. #[inline(never)] pub fn patch_indexed_changelogs( diff --git a/scripts/lint.sh b/scripts/lint.sh index f0d2223b1..8c7ec4582 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -5,12 +5,4 @@ npx nx run-many --target=format:check --all npx nx run-many --target=lint --all cargo +nightly fmt --all -- --check -cargo clippy \ - --workspace \ - --exclude photon-api \ - --exclude name-service \ - -- -A clippy::result_large_err \ - -A clippy::empty-docs \ - -A clippy::to-string-trait-impl \ - -A clippy::doc_lazy_continuation \ - -D warnings +cargo clippy --workspace -- -D warnings diff --git a/sdk-libs/client/src/rpc/errors.rs b/sdk-libs/client/src/rpc/errors.rs index 6ca9524ec..568efc451 100644 --- a/sdk-libs/client/src/rpc/errors.rs +++ b/sdk-libs/client/src/rpc/errors.rs @@ -1,3 +1,4 @@ +use std::fmt::Debug; use std::io; use solana_banks_client::BanksClientError; @@ -9,16 +10,16 @@ use thiserror::Error; #[derive(Error, Debug)] pub enum RpcError { #[error("BanksError: {0}")] - BanksError(#[from] BanksClientError), + BanksError(#[from] Box), #[error("TransactionError: {0}")] - TransactionError(#[from] TransactionError), + TransactionError(#[from] Box), #[error("ClientError: {0}")] - ClientError(#[from] ClientError), + ClientError(#[from] Box), #[error("IoError: {0}")] - IoError(#[from] io::Error), + IoError(#[from] Box), #[error("Error: `{0}`")] CustomError(String), @@ -31,33 +32,73 @@ pub enum RpcError { InvalidWarpSlot, } -pub fn assert_rpc_error( +impl From for RpcError { + fn from(err: BanksClientError) -> Self { + RpcError::BanksError(Box::new(err)) + } +} + +impl From for RpcError { + fn from(err: TransactionError) -> Self { + RpcError::TransactionError(Box::new(err)) + } +} + +impl From for RpcError { + fn from(err: ClientError) -> Self { + RpcError::ClientError(Box::new(err)) + } +} + +impl From for RpcError { + fn from(err: io::Error) -> Self { + RpcError::IoError(Box::new(err)) + } +} + + +pub fn assert_rpc_error( result: Result, i: u8, expected_error_code: u32, ) -> Result<(), RpcError> { match result { - 
Err(RpcError::TransactionError(TransactionError::InstructionError( - index, - InstructionError::Custom(error_code), - ))) if index != i => Err(RpcError::AssertRpcError( - format!( - "Expected error code: {}, got: {} error: {}", - expected_error_code, - error_code, - unsafe { result.unwrap_err_unchecked() } + Err(RpcError::TransactionError(ref box_err)) if matches!( + **box_err, + TransactionError::InstructionError( + index, + InstructionError::Custom(_) + ) if index != i + ) => { + let TransactionError::InstructionError(_, InstructionError::Custom(actual_error_code)) = **box_err else { + unreachable!() + }; + Err(RpcError::AssertRpcError( + format!( + "Expected error code: {}, got: {} error: {:?}", + expected_error_code, + actual_error_code, + result + ) + )) + }, + + Err(RpcError::TransactionError(ref box_err)) if matches!( + **box_err, + TransactionError::InstructionError( + index, + InstructionError::Custom(error_code) + ) if index == i && error_code == expected_error_code + ) => Ok(()), + + Err(RpcError::TransactionError(ref box_err)) if matches!( + **box_err, + TransactionError::InstructionError( + 0, + InstructionError::ProgramFailedToComplete ) - .to_string(), - )), - Err(RpcError::TransactionError(TransactionError::InstructionError( - index, - InstructionError::Custom(error_code), - ))) if index == i && error_code == expected_error_code => Ok(()), - - Err(RpcError::TransactionError(TransactionError::InstructionError( - 0, - InstructionError::ProgramFailedToComplete, - ))) => Ok(()), + ) => Ok(()), + Err(e) => Err(RpcError::AssertRpcError(format!( "Unexpected error type: {:?}", e @@ -66,4 +107,4 @@ pub fn assert_rpc_error( "Unexpected error type", ))), } -} +} \ No newline at end of file diff --git a/sdk-libs/client/src/rpc/solana_rpc.rs b/sdk-libs/client/src/rpc/solana_rpc.rs index a98091a0b..797d5c099 100644 --- a/sdk-libs/client/src/rpc/solana_rpc.rs +++ b/sdk-libs/client/src/rpc/solana_rpc.rs @@ -387,7 +387,7 @@ impl RpcConnection for SolanaRpcConnection { let signature = self .client .request_airdrop(to, lamports) - .map_err(RpcError::ClientError)?; + .map_err(|e| RpcError::ClientError(Box::new(e)))?; self.retry(|| async { if self .client diff --git a/sdk-libs/photon-api/src/models/account_state.rs b/sdk-libs/photon-api/src/models/account_state.rs index 3dae0c673..6645e061e 100644 --- a/sdk-libs/photon-api/src/models/account_state.rs +++ b/sdk-libs/photon-api/src/models/account_state.rs @@ -7,10 +7,10 @@ * * Generated by: https://openapi-generator.tech */ +use std::fmt::Display; use crate::models; -/// #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum AccountState { #[serde(rename = "initialized")] @@ -19,12 +19,13 @@ pub enum AccountState { Frozen, } -impl ToString for AccountState { - fn to_string(&self) -> String { - match self { +impl Display for AccountState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let str = match self { Self::Initialized => String::from("initialized"), Self::Frozen => String::from("frozen"), - } + }; + write!(f, "{}", str) } } diff --git a/sdk-libs/program-test/src/test_rpc.rs b/sdk-libs/program-test/src/test_rpc.rs index 8a9bc2d6a..e293174fd 100644 --- a/sdk-libs/program-test/src/test_rpc.rs +++ b/sdk-libs/program-test/src/test_rpc.rs @@ -78,7 +78,7 @@ impl RpcConnection for ProgramTestRpcConnection { .process_transaction_with_metadata(transaction) .await .map_err(RpcError::from)?; - result.result.map_err(RpcError::TransactionError)?; + result.result.map_err(|e| 
RpcError::TransactionError(Box::new(e)))?; Ok(sig) } @@ -93,7 +93,7 @@ impl RpcConnection for ProgramTestRpcConnection { .process_transaction_with_metadata(transaction) .await .map_err(RpcError::from)?; - result.result.map_err(RpcError::TransactionError)?; + result.result.map_err(|e| RpcError::TransactionError(Box::new(e)))?; let slot = self.context.banks_client.get_root_slot().await?; Ok((sig, slot)) } @@ -135,7 +135,7 @@ impl RpcConnection for ProgramTestRpcConnection { // Handle an error nested in the simulation result. if let Some(Err(e)) = simulation_result.result { let error = match e { - TransactionError::InstructionError(_, _) => RpcError::TransactionError(e), + TransactionError::InstructionError(_, _) => RpcError::TransactionError(Box::new(e)), _ => RpcError::from(BanksClientError::TransactionError(e)), }; return Err(error); @@ -317,7 +317,7 @@ impl RpcConnection for ProgramTestRpcConnection { .and_then(|status| { status .ok_or(RpcError::TransactionError( - TransactionError::SignatureFailure, + Box::new(TransactionError::SignatureFailure), )) .map(|status| status.slot) }) From 449809637412a2106f59c9877862bc064437c44f Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Fri, 10 Jan 2025 10:07:47 +0000 Subject: [PATCH 26/27] format --- sdk-libs/client/src/rpc/errors.rs | 77 ++++++++++++++------------- sdk-libs/program-test/src/test_rpc.rs | 14 +++-- 2 files changed, 49 insertions(+), 42 deletions(-) diff --git a/sdk-libs/client/src/rpc/errors.rs b/sdk-libs/client/src/rpc/errors.rs index 568efc451..e5427feb0 100644 --- a/sdk-libs/client/src/rpc/errors.rs +++ b/sdk-libs/client/src/rpc/errors.rs @@ -1,5 +1,4 @@ -use std::fmt::Debug; -use std::io; +use std::{fmt::Debug, io}; use solana_banks_client::BanksClientError; use solana_client::client_error::ClientError; @@ -56,48 +55,52 @@ impl From for RpcError { } } - pub fn assert_rpc_error( result: Result, i: u8, expected_error_code: u32, ) -> Result<(), RpcError> { match result { - Err(RpcError::TransactionError(ref box_err)) if matches!( - **box_err, - TransactionError::InstructionError( - index, - InstructionError::Custom(_) - ) if index != i - ) => { - let TransactionError::InstructionError(_, InstructionError::Custom(actual_error_code)) = **box_err else { + Err(RpcError::TransactionError(ref box_err)) + if matches!( + **box_err, + TransactionError::InstructionError( + index, + InstructionError::Custom(_) + ) if index != i + ) => + { + let TransactionError::InstructionError(_, InstructionError::Custom(actual_error_code)) = + **box_err + else { unreachable!() }; - Err(RpcError::AssertRpcError( - format!( - "Expected error code: {}, got: {} error: {:?}", - expected_error_code, - actual_error_code, - result - ) - )) - }, - - Err(RpcError::TransactionError(ref box_err)) if matches!( - **box_err, - TransactionError::InstructionError( - index, - InstructionError::Custom(error_code) - ) if index == i && error_code == expected_error_code - ) => Ok(()), - - Err(RpcError::TransactionError(ref box_err)) if matches!( - **box_err, - TransactionError::InstructionError( - 0, - InstructionError::ProgramFailedToComplete - ) - ) => Ok(()), + Err(RpcError::AssertRpcError(format!( + "Expected error code: {}, got: {} error: {:?}", + expected_error_code, actual_error_code, result + ))) + } + + Err(RpcError::TransactionError(ref box_err)) + if matches!( + **box_err, + TransactionError::InstructionError( + index, + InstructionError::Custom(error_code) + ) if index == i && error_code == expected_error_code + ) => + { + Ok(()) + } + + 
Err(RpcError::TransactionError(ref box_err)) + if matches!( + **box_err, + TransactionError::InstructionError(0, InstructionError::ProgramFailedToComplete) + ) => + { + Ok(()) + } Err(e) => Err(RpcError::AssertRpcError(format!( "Unexpected error type: {:?}", @@ -107,4 +110,4 @@ pub fn assert_rpc_error( "Unexpected error type", ))), } -} \ No newline at end of file +} diff --git a/sdk-libs/program-test/src/test_rpc.rs b/sdk-libs/program-test/src/test_rpc.rs index e293174fd..a273a3e54 100644 --- a/sdk-libs/program-test/src/test_rpc.rs +++ b/sdk-libs/program-test/src/test_rpc.rs @@ -78,7 +78,9 @@ impl RpcConnection for ProgramTestRpcConnection { .process_transaction_with_metadata(transaction) .await .map_err(RpcError::from)?; - result.result.map_err(|e| RpcError::TransactionError(Box::new(e)))?; + result + .result + .map_err(|e| RpcError::TransactionError(Box::new(e)))?; Ok(sig) } @@ -93,7 +95,9 @@ impl RpcConnection for ProgramTestRpcConnection { .process_transaction_with_metadata(transaction) .await .map_err(RpcError::from)?; - result.result.map_err(|e| RpcError::TransactionError(Box::new(e)))?; + result + .result + .map_err(|e| RpcError::TransactionError(Box::new(e)))?; let slot = self.context.banks_client.get_root_slot().await?; Ok((sig, slot)) } @@ -316,9 +320,9 @@ impl RpcConnection for ProgramTestRpcConnection { .map_err(RpcError::from) .and_then(|status| { status - .ok_or(RpcError::TransactionError( - Box::new(TransactionError::SignatureFailure), - )) + .ok_or(RpcError::TransactionError(Box::new( + TransactionError::SignatureFailure, + ))) .map(|status| status.slot) }) } From f6211cbc6d27c937f2275036bc0faba6e3c0643b Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Fri, 10 Jan 2025 10:18:19 +0000 Subject: [PATCH 27/27] enable --all-targets for clippy --- Cargo.toml | 2 +- program-libs/zero-copy/src/wrapped_pointer.rs | 22 +++++++++---------- .../zero-copy/tests/slice_mut_test.rs | 2 +- program-libs/zero-copy/tests/vec_tests.rs | 8 +++---- scripts/lint.sh | 2 +- 5 files changed, 18 insertions(+), 18 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index a6e60d843..5611a4b65 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -22,7 +22,7 @@ members = [ "sdk-libs/photon-api", "sdk-libs/program-test", "xtask", - "examples/token-escrow/programs/*", + "examples/token-escrow/programs/token-escrow", "program-tests/account-compression-test", "program-tests/compressed-token-test", "program-tests/e2e-test", diff --git a/program-libs/zero-copy/src/wrapped_pointer.rs b/program-libs/zero-copy/src/wrapped_pointer.rs index c61be5ca4..29973a83f 100644 --- a/program-libs/zero-copy/src/wrapped_pointer.rs +++ b/program-libs/zero-copy/src/wrapped_pointer.rs @@ -199,22 +199,22 @@ mod test { #[test] fn test_rawpointer_from_bytes_success() { - let mut buffer = [0u8; 4]; + let buffer = [0u8; 4]; let value = 42u32; // Write value to buffer unsafe { *(buffer.as_ptr() as *mut u32) = value }; - let pointer: WrappedPointer = WrappedPointer::from_bytes(&mut buffer).unwrap(); + let pointer: WrappedPointer = WrappedPointer::from_bytes(&buffer).unwrap(); assert_eq!(*pointer.get(), value); } #[test] fn test_rawpointer_from_bytes_insufficient_memory() { let value = 42u32; - let mut buffer = value.to_le_bytes(); + let buffer = value.to_le_bytes(); - let result = WrappedPointer::::from_bytes(&mut buffer[0..2]); + let result = WrappedPointer::::from_bytes(&buffer[0..2]); assert_eq!( result, Err(ZeroCopyError::InsufficientMemoryAllocated(2, 4)) @@ -223,14 +223,14 @@ mod test { #[test] fn test_rawpointer_from_bytes_at_success() 
{ - let mut buffer = [0u8; 8]; + let buffer = [0u8; 8]; let value = 42u32; let mut offset = 4; // Write value to buffer unsafe { *(buffer[offset..].as_ptr() as *mut u32) = value }; let pointer: WrappedPointer = - WrappedPointer::from_bytes_at(&mut buffer, &mut offset).unwrap(); + WrappedPointer::from_bytes_at(&buffer, &mut offset).unwrap(); assert_eq!(*pointer.get(), value); assert_eq!(offset, 8); } @@ -244,15 +244,15 @@ mod test { buffer[..8].copy_from_slice(&1u64.to_le_bytes()); // Fake discriminator unsafe { *(buffer[8..].as_ptr() as *mut u32) = value }; - let pointer = WrappedPointer::::from_bytes_with_discriminator(&mut buffer).unwrap(); + let pointer = WrappedPointer::::from_bytes_with_discriminator(&buffer).unwrap(); assert_eq!(*pointer.get(), value); } #[test] #[should_panic(expected = "out of range for slice of length")] fn test_rawpointer_from_bytes_with_discriminator_fail() { - let mut buffer = [0u8; 7]; // Not enough space for discriminator - let result = WrappedPointer::::from_bytes_with_discriminator(&mut buffer); + let buffer = [0u8; 7]; // Not enough space for discriminator + let result = WrappedPointer::::from_bytes_with_discriminator(&buffer); assert_eq!( result, Err(ZeroCopyError::InsufficientMemoryAllocated(7, 8)) @@ -261,8 +261,8 @@ mod test { #[test] fn test_rawpointer_from_bytes_with_discriminator_insufficient_memory() { - let mut buffer = [0u8; 9]; - let result = WrappedPointer::::from_bytes_with_discriminator(&mut buffer); + let buffer = [0u8; 9]; + let result = WrappedPointer::::from_bytes_with_discriminator(&buffer); assert_eq!( result, Err(ZeroCopyError::InsufficientMemoryAllocated(1, 4)) diff --git a/program-libs/zero-copy/tests/slice_mut_test.rs b/program-libs/zero-copy/tests/slice_mut_test.rs index 5ed5c8d78..5f63d5fec 100644 --- a/program-libs/zero-copy/tests/slice_mut_test.rs +++ b/program-libs/zero-copy/tests/slice_mut_test.rs @@ -402,7 +402,7 @@ fn test_new_at_multiple() { let mut account_data = vec![0u8; 128]; let mut offset = 0; let capacity = 4; - let mut reference_vecs = vec![vec![], vec![]]; + let mut reference_vecs = [vec![], vec![]]; { let mut initialized_vecs = ZeroCopySliceMutUsize::::new_at_multiple( diff --git a/program-libs/zero-copy/tests/vec_tests.rs b/program-libs/zero-copy/tests/vec_tests.rs index aa4ad3830..8150f0f6a 100644 --- a/program-libs/zero-copy/tests/vec_tests.rs +++ b/program-libs/zero-copy/tests/vec_tests.rs @@ -40,7 +40,7 @@ fn test_zero_copy_vec() { println!("test_zero_copy_vec_with_capacity::()"); test_zero_copy_vec_new::(u8::MAX as u64); println!("test_zero_copy_vec_with_capacity::()"); - test_zero_copy_vec_new::(10000 as usize); + test_zero_copy_vec_new::(10000_usize); } #[test] @@ -171,7 +171,7 @@ where let ref_length: CAPACITY = CAPACITY::from_usize(i + 1).unwrap(); assert_eq!(length, ref_length.to_ne_bytes().as_ref().to_vec()); - let padding_start = metadata_size.clone(); + let padding_start = metadata_size; add_padding::(&mut metadata_size); let padding_end = metadata_size; let data = data[padding_start..padding_end].to_vec(); @@ -250,7 +250,7 @@ where let ref_length: CAPACITY = CAPACITY::zero(); //;(0).to_usize().unwrap(); assert_eq!(length, ref_length.to_ne_bytes().as_ref().to_vec()); - let padding_start = metadata_size.clone(); + let padding_start = metadata_size; add_padding::(&mut metadata_size); let padding_end = metadata_size; let data = data[padding_start..padding_end].to_vec(); @@ -443,7 +443,7 @@ fn test_init_multiple_pass() { assert_eq!(initialized_vecs[1].capacity(), capacity); 
assert_eq!(initialized_vecs[0].len(), 0); assert_eq!(initialized_vecs[1].len(), 0); - let mut reference_vecs = vec![vec![], vec![]]; + let mut reference_vecs = [vec![], vec![]]; for i in 0..capacity { for (j, vec) in initialized_vecs.iter_mut().enumerate() { assert!(vec.get(i).is_none()); diff --git a/scripts/lint.sh b/scripts/lint.sh index 8c7ec4582..708f0af93 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -5,4 +5,4 @@ npx nx run-many --target=format:check --all npx nx run-many --target=lint --all cargo +nightly fmt --all -- --check -cargo clippy --workspace -- -D warnings +cargo clippy --workspace --all-targets -- -D warnings
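
Series note (annotation, not part of any commit): patches 25-27 exist so that
scripts/lint.sh can drop its per-lint allowances (result_large_err, empty-docs,
to-string-trait-impl, doc_lazy_continuation) and run a plain
`cargo clippy --workspace --all-targets -- -D warnings`. The RpcError rework is
the result_large_err fix: clippy warns when a Result's error type is so large
that every call returning it pays for the worst case, and boxing the bulky
payloads keeps the Err variants small. The derived #[from] on
Box<TransactionError> only yields From<Box<TransactionError>>, so the
hand-written impls restore `?` on the bare source errors by boxing at the
conversion point. A condensed sketch of the shape, with names as in the patch:

    #[derive(Error, Debug)]
    pub enum RpcError {
        #[error("TransactionError: {0}")]
        TransactionError(#[from] Box<TransactionError>),
        // ... remaining variants unchanged
    }

    impl From<TransactionError> for RpcError {
        fn from(err: TransactionError) -> Self {
            // Box once at the edge; call sites keep using `?`.
            RpcError::TransactionError(Box::new(err))
        }
    }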
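
Two smaller recurring fixes in patch 25, shown as minimal sketches (the
AccountState variants are as in the patch; the Rust 1.82 stabilization of
Option::is_none_or is stated from memory, not from the diff):

    use std::fmt::Display;

    impl Display for AccountState {
        // Implementing Display instead of ToString satisfies
        // clippy::to-string-trait-impl; to_string() now comes for free
        // via the blanket impl<T: Display> ToString for T.
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            let str = match self {
                Self::Initialized => String::from("initialized"),
                Self::Frozen => String::from("frozen"),
            };
            write!(f, "{}", str)
        }
    }

    // Option::map_or(true, ..) is spelled as the predicate it encodes:
    // "no epoch seen yet, or the current one is newer".
    if last_epoch.is_none_or(|last| current_epoch > last) { /* ... */ }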
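
The doc-comment re-indents across batched-merkle-tree, the system and registry
programs, and prover/client are the doc_lazy_continuation fixes: rustdoc treats
an unindented follow-up line as a lazy continuation of the previous list item,
so clippy asks for the continuation to be indented (or the list renumbered).
Sketch of the pattern from merkle_tree.rs:

    /// 3. if yes
    ///    3.1 zero out bloom filter
    ///    3.2 mark bloom filter as wiped
    ///    3.3 zero out roots if needed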