From 4f5ccbc913577fd06f434dfe700ef7601a43f88b Mon Sep 17 00:00:00 2001
From: Sergey Timoshin
Date: Sun, 5 Jan 2025 20:12:29 +0000
Subject: [PATCH] remove unused commented-out code

---
 forester/src/batch_operations.rs    | 319 ---------------------
 forester/src/batched_address_ops.rs | 313 --------------------
 forester/src/batched_ops.rs         | 427 ----------------------------
 3 files changed, 1059 deletions(-)
 delete mode 100644 forester/src/batch_operations.rs
 delete mode 100644 forester/src/batched_address_ops.rs
 delete mode 100644 forester/src/batched_ops.rs

diff --git a/forester/src/batch_operations.rs b/forester/src/batch_operations.rs
deleted file mode 100644
index 3536574dc..000000000
--- a/forester/src/batch_operations.rs
+++ /dev/null
@@ -1,319 +0,0 @@
-// use crate::{errors::ForesterError, Result};
-// use borsh::BorshSerialize;
-// use forester_utils::forester_epoch::TreeType;
-// use forester_utils::indexer::Indexer;
-// use forester_utils::instructions::{
-//     create_append_batch_ix_data, create_batch_update_address_tree_instruction_data,
-//     create_nullify_batch_ix_data,
-// };
-// use light_batched_merkle_tree::batch::BatchState;
-// use light_batched_merkle_tree::event::{BatchAppendEvent, BatchNullifyEvent};
-// use light_batched_merkle_tree::merkle_tree::{
-//     BatchedMerkleTreeAccount, InstructionDataBatchAppendInputs, InstructionDataBatchNullifyInputs,
-// };
-// use light_batched_merkle_tree::queue::BatchedQueueAccount;
-// use light_client::rpc::RpcConnection;
-// use light_client::rpc_pool::SolanaRpcPool;
-// use light_registry::account_compression_cpi::sdk::{
-//     create_batch_append_instruction, create_batch_nullify_instruction,
-//     create_batch_update_address_tree_instruction,
-// };
-// use solana_program::pubkey::Pubkey;
-// use solana_sdk::signature::Keypair;
-// use solana_sdk::signer::Signer;
-// use std::sync::Arc;
-// use tokio::sync::Mutex;
-// use tracing::{debug, info, instrument};
-//
-// #[derive(Debug)]
-// pub struct BatchContext<R: RpcConnection, I: Indexer<R>> {
-//     pub rpc_pool: Arc<SolanaRpcPool<R>>,
-//     pub indexer: Arc<Mutex<I>>,
-//     pub authority: Keypair,
-//     pub derivation: Pubkey,
-//     pub epoch: u64,
-//     pub merkle_tree: Pubkey,
-//     pub output_queue: Pubkey,
-// }
-//
-// #[derive(Debug)]
-// pub struct BatchProcessor<R: RpcConnection, I: Indexer<R>> {
-//     context: BatchContext<R, I>,
-//     tree_type: TreeType,
-// }
-//
-// impl<R: RpcConnection, I: Indexer<R>> BatchProcessor<R, I> {
-//     pub fn new(context: BatchContext<R, I>, tree_type: TreeType) -> Self {
-//         Self { context, tree_type }
-//     }
-//
-//     #[instrument(level = "debug", skip(self))]
-//     pub async fn process(&self) -> Result<usize> {
-//         if !self.verify_batch_ready().await {
-//             debug!("Batch is not ready for processing");
-//             return Ok(0);
-//         }
-//
-//         match self.tree_type {
-//             TreeType::BatchedAddress => {
-//                 info!("Processing address batch");
-//                 self.process_address_batch().await
-//             }
-//             TreeType::BatchedState => {
-//                 info!("Processing state batch");
-//                 self.process_state_batch().await
-//             }
-//             _ => Err(ForesterError::Custom(format!(
-//                 "Unsupported tree type: {:?}",
-//                 self.tree_type
-//             ))),
-//         }
-//     }
-//
-//     #[instrument(
-//         level = "debug",
-//         skip(self),
-//         fields(
-//             epoch = self.context.epoch,
-//             tree = %self.context.merkle_tree
-//         )
-//     )]
-//     async fn verify_batch_ready(&self) -> bool {
-//         let mut rpc = match self.context.rpc_pool.get_connection().await {
-//             Ok(rpc) => rpc,
-//             Err(_) => return false,
-//         };
-//
-//         let mut account = match rpc.get_account(self.context.merkle_tree).await {
-//             Ok(Some(account)) => account,
-//             _ => return false,
-//         };
-//
-//         let is_ready = {
-//             let merkle_tree = match self.tree_type {
-//                 TreeType::BatchedAddress => BatchedMerkleTreeAccount::address_tree_from_bytes_mut(
-//                     account.data.as_mut_slice(),
-//                 ),
-//                 TreeType::BatchedState => {
-//                     BatchedMerkleTreeAccount::state_tree_from_bytes_mut(account.data.as_mut_slice())
-//                 }
-//                 _ => return false,
-//             };
-//
-//             if let Ok(tree) = merkle_tree {
-//                 let batch_index = tree.get_metadata().queue_metadata.next_full_batch_index;
-//                 let full_batch = tree.batches.get(batch_index as usize).unwrap();
-//
-//                 full_batch.get_state() != BatchState::Inserted
-//                     && full_batch.get_current_zkp_batch_index() > full_batch.get_num_inserted_zkps()
-//             } else {
-//                 false
-//             }
-//         };
-//         is_ready
-//     }
-//
-//     async fn process_address_batch(&self) -> Result<usize> {
-//         info!("process_address_batch");
-//         let (instruction_data, batch_size) = self
-//             .create_batch_update_address_tree_instruction_data()
-//             .await?;
-//
-//         let instruction = create_batch_update_address_tree_instruction(
-//             self.context.authority.pubkey(),
-//             self.context.derivation,
-//             self.context.merkle_tree,
-//             self.context.epoch,
-//             instruction_data.try_to_vec()?,
-//         );
-//
-//         let mut rpc = self.context.rpc_pool.get_connection().await?;
-//
-//         // TODO: should we do instead rpc.create_and_send_transaction_with_event::<BatchNullifyEvent>(
-//         rpc.create_and_send_transaction(
-//             &[instruction],
-//             &self.context.authority.pubkey(),
-//             &[&self.context.authority],
-//         )
-//         .await?;
-//
-//         let mut indexer = self.context.indexer.lock().await;
-//         indexer
-//             .finalize_batched_address_tree_update(&mut *rpc, self.context.merkle_tree)
-//             .await;
-//
-//         Ok(batch_size)
-//     }
-//
-//     async fn process_state_batch(&self) -> Result<usize> {
-//         info!("Performing state batch append operation");
-//         let mut rpc = self.context.rpc_pool.get_connection().await?;
-//
-//         let (num_inserted_zkps, zkp_batch_size) = {
-//             let mut output_queue_account =
-//                 rpc.get_account(self.context.output_queue).await?.unwrap();
-//             let output_queue = BatchedQueueAccount::output_queue_from_bytes_mut(
-//                 output_queue_account.data.as_mut_slice(),
-//             )
-//             .map_err(|e| ForesterError::Custom(e.to_string()))?;
-//
-//             let batch_index = output_queue
-//                 .get_metadata()
-//                 .batch_metadata
-//                 .next_full_batch_index;
-//             let zkp_batch_size = output_queue.get_metadata().batch_metadata.zkp_batch_size;
-//
-//             (
-//                 output_queue.batches[batch_index as usize].get_num_inserted_zkps(),
-//                 zkp_batch_size as usize,
-//             )
-//         };
-//
-//         let instruction_data = self.create_append_batch_ix_data().await?;
-//         let instruction = create_batch_append_instruction(
-//             self.context.authority.pubkey(),
-//             self.context.derivation,
-//             self.context.merkle_tree,
-//             self.context.output_queue,
-//             self.context.epoch,
-//             instruction_data.try_to_vec()?,
-//         );
-//
-//         rpc.create_and_send_transaction_with_event::<BatchAppendEvent>(
-//             &[instruction],
-//             &self.context.authority.pubkey(),
-//             &[&self.context.authority],
-//             None,
-//         )
-//         .await?;
-//
-//         info!("Updating indexer after append");
-//         self.update_indexer_after_append(num_inserted_zkps).await?;
-//         info!("Indexer updated after append");
-//
-//         info!("Performing state batch nullify operation");
-//         let instruction_data = self.create_nullify_batch_ix_data().await?;
-//         let instruction = create_batch_nullify_instruction(
-//             self.context.authority.pubkey(),
-//             self.context.derivation,
-//             self.context.merkle_tree,
-//             self.context.epoch,
-//             instruction_data.try_to_vec()?,
-//         );
-//         rpc.create_and_send_transaction_with_event::<BatchNullifyEvent>(
-//             &[instruction],
-//             &self.context.authority.pubkey(),
-//             &[&self.context.authority],
-//             None,
-//         )
-//         .await?;
-//
-//         info!("Updating indexer after nullify");
-//         self.update_indexer_after_nullify().await?;
-//         info!("Indexer updated after nullify");
-//
-//         Ok(zkp_batch_size * 2)
-//     }
-//
-//     async fn update_indexer_after_append(&self, num_inserted_zkps: u64) -> Result<()> {
-//         let mut rpc = self.context.rpc_pool.get_connection().await?;
-//         let mut indexer = self.context.indexer.lock().await;
-//
-//         indexer
-//             .update_test_indexer_after_append(
-//                 &mut *rpc,
-//                 self.context.merkle_tree,
-//                 self.context.output_queue,
-//                 num_inserted_zkps,
-//             )
-//             .await;
-//
-//         Ok(())
-//     }
-//
-//     async fn update_indexer_after_nullify(&self) -> Result<()> {
-//         let mut rpc = self.context.rpc_pool.get_connection().await?;
-//         let mut indexer = self.context.indexer.lock().await;
-//
-//         let batch_index = {
-//             let mut account = rpc.get_account(self.context.merkle_tree).await?.unwrap();
-//             let merkle_tree =
-//                 BatchedMerkleTreeAccount::state_tree_from_bytes_mut(account.data.as_mut_slice())
-//                     .map_err(|e| {
-//                         ForesterError::Custom(format!("Failed to parse merkle tree account: {}", e))
-//                     })?;
-//             merkle_tree
-//                 .get_metadata()
-//                 .queue_metadata
-//                 .next_full_batch_index
-//         };
-//
-//         indexer
-//             .update_test_indexer_after_nullification(
-//                 &mut *rpc,
-//                 self.context.merkle_tree,
-//                 batch_index as usize,
-//             )
-//             .await;
-//
-//         Ok(())
-//     }
-//
-//     async fn create_batch_update_address_tree_instruction_data(
-//         &self,
-//     ) -> Result<(InstructionDataBatchNullifyInputs, usize)> {
-//         let mut rpc = self.context.rpc_pool.get_connection().await?;
-//
-//         let mut indexer = self.context.indexer.lock().await;
-//
-//         create_batch_update_address_tree_instruction_data(
-//             &mut *rpc,
-//             &mut *indexer,
-//             self.context.merkle_tree,
-//         )
-//         .await
-//         .map_err(|e| ForesterError::Custom(e.to_string()))
-//     }
-//
-//     async fn create_append_batch_ix_data(&self) -> Result<InstructionDataBatchAppendInputs> {
-//         let mut rpc = self.context.rpc_pool.get_connection().await?;
-//
-//         let mut indexer = self.context.indexer.lock().await;
-//
-//         create_append_batch_ix_data(
-//             &mut *rpc,
-//             &mut *indexer,
-//             self.context.merkle_tree,
-//             self.context.output_queue,
-//         )
-//         .await
-//         .map_err(|e| ForesterError::Custom(e.to_string()))
-//     }
-//
-//     async fn create_nullify_batch_ix_data(&self) -> Result<InstructionDataBatchNullifyInputs> {
-//         let mut rpc = self.context.rpc_pool.get_connection().await?;
-//
-//         let mut indexer = self.context.indexer.lock().await;
-//
-//         create_nullify_batch_ix_data(&mut *rpc, &mut *indexer, self.context.merkle_tree)
-//             .await
-//             .map_err(|e| ForesterError::Custom(e.to_string()))
-//     }
-// }
-//
-// #[instrument(
-//     level = "debug",
-//     fields(
-//         epoch = context.epoch,
-//         tree = %context.merkle_tree,
-//         tree_type = ?tree_type
-//     )
-// )]
-// pub async fn process_batched_operations<R: RpcConnection, I: Indexer<R>>(
-//     context: BatchContext<R, I>,
-//     tree_type: TreeType,
-// ) -> Result<usize> {
-//     info!("process_batched_operations");
-//     let processor = BatchProcessor::new(context, tree_type);
-//     processor.process().await
-// }
diff --git a/forester/src/batched_address_ops.rs b/forester/src/batched_address_ops.rs
deleted file mode 100644
index d2bf3e55c..000000000
--- a/forester/src/batched_address_ops.rs
+++ /dev/null
@@ -1,313 +0,0 @@
-// use std::sync::Arc;
-//
-// use borsh::BorshSerialize;
-// use forester_utils::indexer::Indexer;
-// use light_batched_merkle_tree::{
-//     batch::BatchState,
-//     constants::DEFAULT_BATCH_ADDRESS_TREE_HEIGHT,
-//     merkle_tree::{
-//         BatchProofInputsIx, BatchedMerkleTreeAccount, InstructionDataBatchNullifyInputs,
-//     },
-// };
-// use light_client::{
-//     rpc::{RpcConnection, RpcError},
-//     rpc_pool::SolanaRpcPool,
-// };
-// use light_prover_client::{
-//     batch_address_append::get_batch_address_append_circuit_inputs,
-//     gnark::{
-//         batch_address_append_json_formatter::to_json,
-//         constants::{PROVE_PATH, SERVER_ADDRESS},
-//         proof_helpers::{compress_proof, deserialize_gnark_proof_json, proof_from_json_struct},
-//     },
-// };
-// use light_registry::account_compression_cpi::sdk::create_batch_update_address_tree_instruction;
-// use light_utils::bigint::bigint_to_be_bytes_array;
-// use light_verifier::CompressedProof;
-// use reqwest::Client;
-// use solana_program::pubkey::Pubkey;
-// use solana_sdk::{signature::Keypair, signer::Signer};
-// use tokio::sync::Mutex;
-// use tracing::info;
-//
-// use crate::{errors::ForesterError, Result};
-//
-// pub struct BatchedAddressOperations<R: RpcConnection, I: Indexer<R>> {
-//     pub rpc_pool: Arc<SolanaRpcPool<R>>,
-//     pub indexer: Arc<Mutex<I>>,
-//     pub authority: Keypair,
-//     pub derivation: Pubkey,
-//     pub epoch: u64,
-//     pub merkle_tree: Pubkey,
-//     pub output_queue: Pubkey,
-// }
-// impl<R: RpcConnection, I: Indexer<R>> BatchedAddressOperations<R, I> {
-//     async fn is_batch_ready(&self) -> bool {
-//         let mut rpc = self.rpc_pool.get_connection().await.unwrap();
-//         let is_batch_ready = {
-//             let mut account = rpc.get_account(self.merkle_tree).await.unwrap().unwrap();
-//             let merkle_tree =
-//                 BatchedMerkleTreeAccount::address_tree_from_bytes_mut(account.data.as_mut_slice())
-//                     .unwrap();
-//             let batch_index = merkle_tree
-//                 .get_metadata()
-//                 .queue_metadata
-//                 .next_full_batch_index;
-//             let full_batch = merkle_tree.batches.get(batch_index as usize).unwrap();
-//
-//             info!("Batch state: {:?}", full_batch.get_state());
-//             info!(
-//                 "Current zkp batch index: {:?}",
-//                 full_batch.get_current_zkp_batch_index()
-//             );
-//             info!(
-//                 "Num inserted zkps: {:?}",
-//                 full_batch.get_num_inserted_zkps()
-//             );
-//
-//             full_batch.get_state() != BatchState::Inserted
-//                 && full_batch.get_current_zkp_batch_index() > full_batch.get_num_inserted_zkps()
-//         };
-//         is_batch_ready
-//     }
-//
-//     pub async fn perform_batch_address_merkle_tree_update(&self) -> Result<usize> {
-//         info!("Performing batch address merkle tree update");
-//         let mut rpc = self.rpc_pool.get_connection().await?;
-//         let (instruction_data, batch_size) = self
-//             .create_batch_update_address_tree_instruction_data_with_proof()
-//             .await?;
-//
-//         let instruction = create_batch_update_address_tree_instruction(
-//             self.authority.pubkey(),
-//             self.derivation,
-//             self.merkle_tree,
-//             self.epoch,
-//             instruction_data.try_to_vec()?,
-//         );
-//         let result = rpc
-//             .create_and_send_transaction(
-//                 &[instruction],
-//                 &self.authority.pubkey(),
-//                 &[&self.authority],
-//             )
-//             .await;
-//         match result {
-//             Ok(sig) => {
-//                 info!("Batch address update sent with signature: {:?}", sig);
-//                 self.finalize_batch_address_merkle_tree_update().await?;
-//                 Ok(batch_size)
-//             }
-//             Err(e) => {
-//                 info!("Failed to send batch address update: {:?}", e);
-//                 Err(ForesterError::from(e))
-//             }
-//         }
-//     }
-//
-//     async fn finalize_batch_address_merkle_tree_update(&self) -> Result<()> {
-//         info!("Finalizing batch address merkle tree update");
-//         let mut rpc = self.rpc_pool.get_connection().await?;
-//         self.indexer
-//             .lock()
-//             .await
-//             .finalize_batched_address_tree_update(&mut *rpc, self.merkle_tree)
-//             .await;
-//
-//         Ok(())
-//     }
-//
-//     async fn create_batch_update_address_tree_instruction_data_with_proof(
-//         &self,
-//     ) -> Result<(InstructionDataBatchNullifyInputs, usize)> {
-//         let mut rpc = self.rpc_pool.get_connection().await?;
-//
-//         let mut merkle_tree_account = rpc.get_account(self.merkle_tree).await?.unwrap();
-//
-//         let (
-//             old_root_index,
-//             leaves_hashchain,
-//             start_index,
-//             current_root,
-//             batch_size,
-//             full_batch_index,
-//         ) = {
-//             let merkle_tree = BatchedMerkleTreeAccount::address_tree_from_bytes_mut(
-//                 merkle_tree_account.data.as_mut_slice(),
-//             )
-//             .unwrap();
-//
-//             let old_root_index = merkle_tree.root_history.last_index();
-//             let full_batch_index = merkle_tree
-//                 .get_metadata()
-//                 .queue_metadata
-//                 .next_full_batch_index;
-//             let batch = &merkle_tree.batches[full_batch_index as usize];
-//             let zkp_batch_index = batch.get_num_inserted_zkps();
-//             let leaves_hashchain =
-//                 merkle_tree.hashchain_store[full_batch_index as usize][zkp_batch_index as usize];
-//             let start_index = merkle_tree.get_metadata().next_index;
-//             let current_root = *merkle_tree.root_history.last().unwrap();
-//             let batch_size = batch.zkp_batch_size as usize;
-//
-//             (
-//                 old_root_index,
-//                 leaves_hashchain,
-//                 start_index,
-//                 current_root,
-//                 batch_size,
-//                 full_batch_index,
-//             )
-//         };
-//
-//         let batch_start_index = self
-//             .indexer
-//             .lock()
-//             .await
-//             .get_address_merkle_trees()
-//             .iter()
-//             .find(|x| x.accounts.merkle_tree == self.merkle_tree)
-//             .unwrap()
-//             .merkle_tree
-//             .merkle_tree
-//             .rightmost_index;
-//
-//         let addresses = self
-//             .indexer
-//             .lock()
-//             .await
-//             .get_queue_elements(
-//                 self.merkle_tree.to_bytes(),
-//                 full_batch_index,
-//                 0,
-//                 batch_size as u64,
-//             )
-//             .await?;
-//
-//         let batch_size = addresses.len();
-//
-//         // // local_leaves_hashchain is only used for a test assertion.
-//         // let local_nullifier_hashchain = create_hash_chain_from_array(&addresses);
-//         // assert_eq!(leaves_hashchain, local_nullifier_hashchain);
-//
-//         // Get proof info after addresses are retrieved
-//         let non_inclusion_proofs = self
-//             .indexer
-//             .lock()
-//             .await
-//             .get_multiple_new_address_proofs_full(self.merkle_tree.to_bytes(), addresses.clone())
-//             .await?;
-//
-//         let mut low_element_values = Vec::new();
-//         let mut low_element_indices = Vec::new();
-//         let mut low_element_next_indices = Vec::new();
-//         let mut low_element_next_values = Vec::new();
-//         let mut low_element_proofs: Vec<Vec<[u8; 32]>> = Vec::new();
-//
-//         for non_inclusion_proof in &non_inclusion_proofs {
-//             low_element_values.push(non_inclusion_proof.low_address_value);
-//             low_element_indices.push(non_inclusion_proof.low_address_index as usize);
-//             low_element_next_indices.push(non_inclusion_proof.low_address_next_index as usize);
-//             low_element_next_values.push(non_inclusion_proof.low_address_next_value);
-//             low_element_proofs.push(non_inclusion_proof.low_address_proof.to_vec());
-//         }
-//
-//         let subtrees = self
-//             .indexer
-//             .lock()
-//             .await
-//             .get_subtrees(self.merkle_tree.to_bytes())
-//             .await?
-//             .try_into()
-//             .unwrap();
-//
-//         let inputs = get_batch_address_append_circuit_inputs::<
-//             { DEFAULT_BATCH_ADDRESS_TREE_HEIGHT as usize },
-//         >(
-//             start_index as usize,
-//             current_root,
-//             low_element_values,
-//             low_element_next_values,
-//             low_element_indices,
-//             low_element_next_indices,
-//             low_element_proofs,
-//             addresses,
-//             subtrees,
-//             leaves_hashchain,
-//             batch_start_index,
-//             batch_size,
-//         )
-//         .map_err(|e| {
-//             ForesterError::Custom(format!(
-//                 "Can't create batch address append circuit inputs: {:?}",
-//                 e.to_string()
-//             ))
-//         })?;
-//
-//         let client = Client::new();
-//         let circuit_inputs_new_root = bigint_to_be_bytes_array::<32>(&inputs.new_root).unwrap();
-//         let inputs = to_json(&inputs);
-//
-//         let response_result = client
-//             .post(format!("{}{}", SERVER_ADDRESS, PROVE_PATH))
-//             .header("Content-Type", "text/plain; charset=utf-8")
-//             .body(inputs)
-//             .send()
-//             .await
-//             .expect("Failed to execute request.");
-//
-//         if response_result.status().is_success() {
-//             let body = response_result.text().await.unwrap();
-//             let proof_json = deserialize_gnark_proof_json(&body).unwrap();
-//             let (proof_a, proof_b, proof_c) = proof_from_json_struct(proof_json);
-//             let (proof_a, proof_b, proof_c) = compress_proof(&proof_a, &proof_b, &proof_c);
-//             let instruction_data = InstructionDataBatchNullifyInputs {
-//                 public_inputs: BatchProofInputsIx {
-//                     new_root: circuit_inputs_new_root,
-//                     old_root_index: old_root_index as u16,
-//                 },
-//                 compressed_proof: CompressedProof {
-//                     a: proof_a,
-//                     b: proof_b,
-//                     c: proof_c,
-//                 },
-//             };
-//             Ok((instruction_data, batch_size))
-//         } else {
-//             Err(ForesterError::from(RpcError::CustomError(
-//                 "Prover failed to generate proof".to_string(),
-//             )))
-//         }
-//     }
-// }
-//
-// pub async fn process_batched_address_operations<R: RpcConnection, I: Indexer<R>>(
-//     rpc_pool: Arc<SolanaRpcPool<R>>,
-//     indexer: Arc<Mutex<I>>,
-//     authority: Keypair,
-//     derivation: Pubkey,
-//     epoch: u64,
-//     merkle_tree: Pubkey,
-//     output_queue: Pubkey,
-// ) -> Result<usize> {
-//     let ops = BatchedAddressOperations {
-//         rpc_pool,
-//         indexer,
-//         authority,
-//         derivation,
-//         epoch,
-//         merkle_tree,
-//         output_queue,
-//     };
-//
-//     info!("Processing batched address operations");
-//
-//     if ops.is_batch_ready().await {
-//         info!("Batch is ready");
-//         let processed_count = ops.perform_batch_address_merkle_tree_update().await?;
-//         Ok(processed_count)
-//     } else {
-//         info!("Batch is not ready");
-//         Ok(0)
-//     }
-// }
diff --git a/forester/src/batched_ops.rs b/forester/src/batched_ops.rs
deleted file mode 100644
index 66bf89198..000000000
--- a/forester/src/batched_ops.rs
+++ /dev/null
@@ -1,427 +0,0 @@
-// use std::sync::Arc;
-//
-// use borsh::BorshSerialize;
-// use forester_utils::indexer::Indexer;
-// use light_batched_merkle_tree::{
-//     batch::BatchState,
-//     constants::DEFAULT_BATCH_STATE_TREE_HEIGHT,
-//     event::{BatchAppendEvent, BatchNullifyEvent},
-//     merkle_tree::{
-//         AppendBatchProofInputsIx, BatchProofInputsIx, BatchedMerkleTreeAccount,
-//         InstructionDataBatchAppendInputs, InstructionDataBatchNullifyInputs,
-//     },
-//     queue::BatchedQueueAccount,
-// };
-// use light_client::{rpc::RpcConnection, rpc_pool::SolanaRpcPool};
-// use light_hasher::{Hasher, Poseidon};
-// use light_prover_client::{
-//     batch_append_with_proofs::get_batch_append_with_proofs_inputs,
-//     batch_update::get_batch_update_inputs,
-//     gnark::{
-//         batch_append_with_proofs_json_formatter::BatchAppendWithProofsInputsJson,
-//         batch_update_json_formatter::update_inputs_string,
-//         constants::{PROVE_PATH, SERVER_ADDRESS},
-//         proof_helpers::{compress_proof, deserialize_gnark_proof_json, proof_from_json_struct},
-//     },
-// };
-// use light_registry::account_compression_cpi::sdk::{
-//     create_batch_append_instruction, create_batch_nullify_instruction,
-// };
-// use light_utils::bigint::bigint_to_be_bytes_array;
-// use light_verifier::CompressedProof;
-// use reqwest::Client;
-// use solana_program::pubkey::Pubkey;
-// use solana_sdk::{signature::Keypair, signer::Signer};
-// use tokio::sync::Mutex;
-// use tracing::error;
-//
-// use crate::{errors::ForesterError, Result};
-//
-// pub struct BatchedOperations<R: RpcConnection, I: Indexer<R>> {
-//     pub rpc_pool: Arc<SolanaRpcPool<R>>,
-//     pub indexer: Arc<Mutex<I>>,
-//     pub authority: Keypair,
-//     pub derivation: Pubkey,
-//     pub epoch: u64,
-//     pub merkle_tree: Pubkey,
-//     pub output_queue: Pubkey,
-// }
-// impl<R: RpcConnection, I: Indexer<R>> BatchedOperations<R, I> {
-//     async fn is_batch_ready(&self) -> bool {
-//         let mut rpc = self.rpc_pool.get_connection().await.unwrap();
-//         let is_batch_ready = {
-//             let mut account = rpc.get_account(self.merkle_tree).await.unwrap().unwrap();
-//             let merkle_tree =
-//                 BatchedMerkleTreeAccount::state_tree_from_bytes_mut(account.data.as_mut_slice())
-//                     .unwrap();
-//             let batch_index = merkle_tree
-//                 .get_metadata()
-//                 .queue_metadata
-//                 .next_full_batch_index;
-//             let full_batch = merkle_tree.batches.get(batch_index as usize).unwrap();
-//
-//             full_batch.get_state() != BatchState::Inserted
-//                 && full_batch.get_current_zkp_batch_index() > full_batch.get_num_inserted_zkps()
-//         };
-//         is_batch_ready
-//     }
-//
-//     pub async fn perform_batch_append(&self) -> Result<usize> {
-//         let mut rpc = self.rpc_pool.get_connection().await?;
-//
-//         let (num_inserted_zkps, batch_size) = {
-//             let mut output_queue_account =
-//                 rpc.get_account(self.output_queue).await.unwrap().unwrap();
-//             let output_queue = BatchedQueueAccount::output_queue_from_bytes_mut(
-//                 output_queue_account.data.as_mut_slice(),
-//             )
-//             .unwrap();
-//             let queue_metadata = output_queue.get_metadata();
-//             let batch_index = queue_metadata.batch_metadata.next_full_batch_index;
-//             let num_inserted_zkps =
-//                 output_queue.batches[batch_index as usize].get_num_inserted_zkps();
-//             let zkp_batch_size = queue_metadata.batch_metadata.zkp_batch_size;
-//
-//             (num_inserted_zkps, zkp_batch_size)
-//         };
-//
-//         let instruction_data = self.create_append_batch_ix_data().await;
-//         let instruction = create_batch_append_instruction(
-//             self.authority.pubkey(),
-//             self.derivation,
-//             self.merkle_tree,
-//             self.output_queue,
-//             self.epoch,
-//             instruction_data?.try_to_vec()?,
-//         );
-//
-//         rpc.create_and_send_transaction_with_event::<BatchAppendEvent>(
-//             &[instruction],
-//             &self.authority.pubkey(),
-//             &[&self.authority],
-//             None,
-//         )
-//         .await?;
-//
-//         self.indexer
-//             .lock()
-//             .await
-//             .update_test_indexer_after_append(
-//                 &mut rpc,
-//                 self.merkle_tree,
-//                 self.output_queue,
-//                 num_inserted_zkps,
-//             )
-//             .await;
-//         Ok(batch_size as usize)
-//     }
-//
-//     pub async fn perform_batch_nullify(&self) -> Result<usize> {
-//         let mut rpc = self.rpc_pool.get_connection().await?;
-//
-//         let instruction_data = self.get_batched_nullify_ix_data().await?;
-//
-//         let instruction = create_batch_nullify_instruction(
-//             self.authority.pubkey(),
-//             self.derivation,
-//             self.merkle_tree,
-//             self.epoch,
-//             instruction_data.try_to_vec()?,
-//         );
-//
-//         rpc.create_and_send_transaction_with_event::<BatchNullifyEvent>(
-//             &[instruction],
-//             &self.authority.pubkey(),
-//             &[&self.authority],
-//             None,
-//         )
-//         .await?;
-//
-//         let (batch_index, batch_size) = {
-//             let mut account = rpc.get_account(self.merkle_tree).await.unwrap().unwrap();
-//             let merkle_tree =
-//                 BatchedMerkleTreeAccount::state_tree_from_bytes_mut(account.data.as_mut_slice())
-//                     .unwrap();
-//             (
-//                 merkle_tree
-//                     .get_metadata()
-//                     .queue_metadata
-//                     .next_full_batch_index,
-//                 merkle_tree.get_metadata().queue_metadata.zkp_batch_size,
-//             )
-//         };
-//
-//         self.indexer
-//             .lock()
-//             .await
-//             .update_test_indexer_after_nullification(
-//                 &mut rpc,
-//                 self.merkle_tree,
-//                 batch_index as usize,
-//             )
-//             .await;
-//         Ok(batch_size as usize)
-//     }
-//
-//     async fn create_append_batch_ix_data(&self) -> Result<InstructionDataBatchAppendInputs> {
-//         let mut rpc = self.rpc_pool.get_connection().await.unwrap();
-//
-//         let (merkle_tree_next_index, current_root) = {
-//             let mut merkle_tree_account = rpc.get_account(self.merkle_tree).await.unwrap().unwrap();
-//             let merkle_tree = BatchedMerkleTreeAccount::state_tree_from_bytes_mut(
-//                 merkle_tree_account.data.as_mut_slice(),
-//             )
-//             .unwrap();
-//             (
-//                 merkle_tree.get_metadata().next_index,
-//                 *merkle_tree.root_history.last().unwrap(),
-//             )
-//         };
-//
-//         let (zkp_batch_size, full_batch_index, num_inserted_zkps, leaves_hashchain) = {
-//             let mut output_queue_account =
-//                 rpc.get_account(self.output_queue).await.unwrap().unwrap();
-//             let output_queue = BatchedQueueAccount::output_queue_from_bytes_mut(
-//                 output_queue_account.data.as_mut_slice(),
-//             )
-//             .unwrap();
-//
-//             let queue_metadata = output_queue.get_metadata();
-//             let full_batch_index = queue_metadata.batch_metadata.next_full_batch_index;
-//             let zkp_batch_size = queue_metadata.batch_metadata.zkp_batch_size;
-//
-//             let num_inserted_zkps =
-//                 output_queue.batches[full_batch_index as usize].get_num_inserted_zkps();
-//
-//             let leaves_hashchain =
-//                 output_queue.hashchain_store[full_batch_index as usize][num_inserted_zkps as usize];
-//
-//             (
-//                 zkp_batch_size,
-//                 full_batch_index,
-//                 num_inserted_zkps,
-//                 leaves_hashchain,
-//             )
-//         };
-//         let start = num_inserted_zkps as usize * zkp_batch_size as usize;
-//         let end = start + zkp_batch_size as usize;
-//
-//         let leaves = self
-//             .indexer
-//             .lock()
-//             .await
-//             .get_queue_elements(
-//                 self.merkle_tree.to_bytes(),
-//                 full_batch_index,
-//                 start as u64,
-//                 end as u64,
-//             )
-//             .await
-//             .unwrap();
-//
-//         let (old_leaves, merkle_proofs) = {
-//             let mut old_leaves = vec![];
-//             let mut merkle_proofs = vec![];
-//             let indices = (merkle_tree_next_index..merkle_tree_next_index + zkp_batch_size)
-//                 .collect::<Vec<_>>();
-//             let proofs = self
-//                 .indexer
-//                 .lock()
-//                 .await
-//                 .get_proofs_by_indices(self.merkle_tree, &indices);
-//             proofs.iter().for_each(|proof| {
-//                 old_leaves.push(proof.leaf);
-//                 merkle_proofs.push(proof.proof.clone());
-//             });
-//
-//             (old_leaves, merkle_proofs)
-//         };
-//
-//         let (proof, new_root) = {
-//             let circuit_inputs = get_batch_append_with_proofs_inputs::<
-//                 { DEFAULT_BATCH_STATE_TREE_HEIGHT as usize },
-//             >(
-//                 current_root,
-//                 merkle_tree_next_index as u32,
-//                 leaves,
-//                 leaves_hashchain,
-//                 old_leaves,
-//                 merkle_proofs,
-//                 zkp_batch_size as u32,
-//             )
-//             .unwrap();
-//
-//             let client = Client::new();
-//             let inputs_json =
-//                 BatchAppendWithProofsInputsJson::from_inputs(&circuit_inputs).to_string();
-//
-//             let response = client
-//                 .post(format!("{}{}", SERVER_ADDRESS, PROVE_PATH))
-//                 .header("Content-Type", "text/plain; charset=utf-8")
-//                 .body(inputs_json)
-//                 .send()
-//                 .await
-//                 .expect("Failed to execute request.");
-//
-//             if response.status().is_success() {
-//                 let body = response.text().await.unwrap();
-//                 let proof_json = deserialize_gnark_proof_json(&body).unwrap();
-//                 let (proof_a, proof_b, proof_c) = proof_from_json_struct(proof_json);
-//                 let (proof_a, proof_b, proof_c) = compress_proof(&proof_a, &proof_b, &proof_c);
-//                 (
-//                     CompressedProof {
-//                         a: proof_a,
-//                         b: proof_b,
-//                         c: proof_c,
-//                     },
-//                     bigint_to_be_bytes_array::<32>(&circuit_inputs.new_root.to_biguint().unwrap())
-//                         .unwrap(),
-//                 )
-//             } else {
-//                 error!(
-//                     "create_append_batch_ix_data: failed to get proof from server: {:?}",
-//                     response.text().await
-//                 );
-//                 return Err(ForesterError::Custom(
-//                     "Failed to get proof from server".into(),
-//                 ));
-//             }
-//         };
-//
-//         Ok(InstructionDataBatchAppendInputs {
-//             public_inputs: AppendBatchProofInputsIx { new_root },
-//             compressed_proof: proof,
-//         })
-//     }
-//
-//     async fn get_batched_nullify_ix_data(&self) -> Result<InstructionDataBatchNullifyInputs> {
-//         let mut rpc = self.rpc_pool.get_connection().await.unwrap();
-//
-//         let (zkp_batch_size, old_root, old_root_index, leaves_hashchain) = {
-//             let mut account = rpc.get_account(self.merkle_tree).await.unwrap().unwrap();
-//             let merkle_tree =
-//                 BatchedMerkleTreeAccount::state_tree_from_bytes_mut(account.data.as_mut_slice())
-//                     .unwrap();
-//             let metadata = merkle_tree.get_metadata();
-//             let batch_idx = metadata.queue_metadata.next_full_batch_index as usize;
-//             let zkp_size = metadata.queue_metadata.zkp_batch_size;
-//             let batch = &merkle_tree.batches[batch_idx];
-//             let zkp_idx = batch.get_num_inserted_zkps();
-//             let hashchain = merkle_tree.hashchain_store[batch_idx][zkp_idx as usize];
-//             let root_idx = merkle_tree.root_history.last_index();
-//             let root = *merkle_tree.root_history.last().unwrap();
-//             (zkp_size, root, root_idx, hashchain)
-//         };
-//
-//         let leaf_indices_tx_hashes = self
-//             .indexer
-//             .lock()
-//             .await
-//             .get_leaf_indices_tx_hashes(self.merkle_tree, zkp_batch_size as usize);
-//
-//         let mut leaves = Vec::new();
-//         let mut tx_hashes = Vec::new();
-//         let mut old_leaves = Vec::new();
-//         let mut path_indices = Vec::new();
-//         let mut merkle_proofs = Vec::new();
-//         let mut nullifiers = Vec::new();
-//
-//         let proofs = self.indexer.lock().await.get_proofs_by_indices(
-//             self.merkle_tree,
-//             &leaf_indices_tx_hashes
-//                 .iter()
-//                 .map(|(index, _, _)| *index as u64)
-//                 .collect::<Vec<_>>(),
-//         );
-//
-//         for ((index, leaf, tx_hash), proof) in leaf_indices_tx_hashes.iter().zip(proofs.iter()) {
-//             path_indices.push(*index);
-//             leaves.push(*leaf);
-//             old_leaves.push(proof.leaf);
-//             merkle_proofs.push(proof.proof.clone());
-//             tx_hashes.push(*tx_hash);
-//             let index_bytes = index.to_be_bytes();
-//             let nullifier = Poseidon::hashv(&[leaf, &index_bytes, tx_hash]).unwrap();
-//             nullifiers.push(nullifier);
-//         }
-//
-//         let inputs = get_batch_update_inputs::<{ DEFAULT_BATCH_STATE_TREE_HEIGHT as usize }>(
-//             old_root,
-//             tx_hashes,
-//             leaves.to_vec(),
-//             leaves_hashchain,
-//             old_leaves,
-//             merkle_proofs,
-//             path_indices,
-//             zkp_batch_size as u32,
-//         )
-//         .unwrap();
-//
-//         let new_root =
-//             bigint_to_be_bytes_array::<32>(&inputs.new_root.to_biguint().unwrap()).unwrap();
-//
-//         let client = Client::new();
-//         let response = client
-//             .post(format!("{}{}", SERVER_ADDRESS, PROVE_PATH))
-//             .header("Content-Type", "text/plain; charset=utf-8")
-//             .body(update_inputs_string(&inputs))
-//             .send()
-//             .await?;
-//
-//         let proof = if response.status().is_success() {
-//             let body = response.text().await.unwrap();
-//             let proof_json = deserialize_gnark_proof_json(&body).unwrap();
-//             let (proof_a, proof_b, proof_c) = proof_from_json_struct(proof_json);
-//             let (proof_a, proof_b, proof_c) = compress_proof(&proof_a, &proof_b, &proof_c);
-//             CompressedProof {
-//                 a: proof_a,
-//                 b: proof_b,
-//                 c: proof_c,
-//             }
-//         } else {
-//             error!(
-//                 "get_batched_nullify_ix_data: failed to get proof from server: {:?}",
-//                 response.text().await
-//             );
-//             return Err(ForesterError::Custom(
-//                 "Failed to get proof from server".into(),
-//             ));
-//         };
-//
-//         Ok(InstructionDataBatchNullifyInputs {
-//             public_inputs: BatchProofInputsIx {
-//                 new_root,
-//                 old_root_index: old_root_index as u16,
-//             },
-//             compressed_proof: proof,
-//         })
-//     }
-// }
-//
-// pub async fn process_batched_operations<R: RpcConnection, I: Indexer<R>>(
-//     rpc_pool: Arc<SolanaRpcPool<R>>,
-//     indexer: Arc<Mutex<I>>,
-//     authority: Keypair,
-//     derivation: Pubkey,
-//     epoch: u64,
-//     merkle_tree: Pubkey,
-//     output_queue: Pubkey,
-// ) -> Result<usize> {
-//     let ops = BatchedOperations {
-//         rpc_pool,
-//         indexer,
-//         authority,
-//         derivation,
-//         epoch,
-//         merkle_tree,
-//         output_queue,
-//     };
-//
-//     if ops.is_batch_ready().await {
-//         let processed_appends_count = ops.perform_batch_append().await?;
-//         let processed_nullifications_count = ops.perform_batch_nullify().await?;
-//         Ok(processed_appends_count + processed_nullifications_count)
-//     } else {
-//         Ok(0)
-//     }
-// }