
wip
sergeytimoshin committed Nov 28, 2024
1 parent b870ace commit 18f38d8
Showing 11 changed files with 231 additions and 191 deletions.
5 changes: 2 additions & 3 deletions client/src/rpc/solana_rpc.rs
@@ -303,7 +303,6 @@ impl RpcConnection for SolanaRpcConnection {
parsed_event = self.parse_inner_instructions::<T>(signature).ok();
}


if let Some(transaction_params) = transaction_params {
let mut deduped_signers = signers.to_vec();
deduped_signers.dedup();
@@ -321,9 +320,9 @@ impl RpcConnection for SolanaRpcConnection {
}
let expected_post_balance = pre_balance as i64
- i64::from(transaction_params.num_new_addresses)
                    * transaction_params.fee_config.address_queue_rollover as i64
- i64::from(transaction_params.num_output_compressed_accounts)
                    * transaction_params.fee_config.state_merkle_tree_rollover as i64
- transaction_params.compress
- transaction_params.fee_config.solana_network_fee * deduped_signers.len() as i64
- network_fee;
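The assertion above reduces the fee accounting to one signed expression. A minimal self-contained sketch of the same arithmetic, assuming a hypothetical FeeConfig stand-in with the field names used in the hunk (not the crate's actual types):

// Sketch of the post-balance check in the hunk above. FeeConfig is a
// hypothetical stand-in; all values are lamports as i64.
struct FeeConfig {
    address_queue_rollover: i64,
    state_merkle_tree_rollover: i64,
    solana_network_fee: i64,
}

fn expected_post_balance(
    pre_balance: i64,
    num_new_addresses: i64,
    num_output_compressed_accounts: i64,
    compress: i64,
    num_signers: i64,
    network_fee: i64,
    fee: &FeeConfig,
) -> i64 {
    // Mirrors the diff: subtract rollover fees per new address and per
    // output account, the compressed amount, and per-signer network fees.
    pre_balance
        - num_new_addresses * fee.address_queue_rollover
        - num_output_compressed_accounts * fee.state_merkle_tree_rollover
        - compress
        - fee.solana_network_fee * num_signers
        - network_fee
}

For example, with pre_balance = 1_000_000, one new address, one output account, rollover fees of 100 lamports each, a 5_000-lamport per-signer fee, one signer, and compress and network_fee both zero, the expected post balance is 1_000_000 - 100 - 100 - 5_000 = 994_800.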
8 changes: 6 additions & 2 deletions forester-utils/src/indexer/mod.rs
@@ -174,10 +174,14 @@ pub trait Indexer<R: RpcConnection>: Sync + Send + Debug {
unimplemented!()
}

async fn update_test_indexer_in_nullification(
&mut self,
_merkle_tree_pubkey: Pubkey,
_nullifier: &[u8; 32],
_index: usize,
) {
unimplemented!()
}

}

#[derive(Debug, Clone)]
34 changes: 19 additions & 15 deletions forester/src/batched_ops.rs
@@ -1,5 +1,6 @@
use crate::errors::ForesterError;
use crate::Result;
use account_compression::batch::BatchState;
use account_compression::batched_merkle_tree::{
AppendBatchProofInputsIx, BatchAppendEvent, BatchNullifyEvent, BatchProofInputsIx,
InstructionDataBatchAppendInputs, InstructionDataBatchNullifyInputs,
@@ -33,7 +34,6 @@ use solana_sdk::signer::Signer;
use std::sync::Arc;
use tokio::sync::Mutex;
use tracing::{debug, error, info};

pub struct BatchedOperations<R: RpcConnection, I: Indexer<R>> {
pub rpc_pool: Arc<SolanaRpcPool<R>>,
@@ -93,20 +93,15 @@ impl<R: RpcConnection, I: Indexer<R>> BatchedOperations<R, I> {
self.indexer
.lock()
.await
.update_test_indexer_after_append(&mut rpc, self.merkle_tree, self.output_queue)
.await;

info!("=== perform_batch_append end ===");
Ok(batch_size as usize)

}

pub async fn perform_batch_nullify(&self) -> Result<usize> {
        info!("=== perform_batch_nullify begin ===");
let mut rpc = self.rpc_pool.get_connection().await?;

let instruction_data = self.get_batched_nullify_ix_data().await?;
@@ -330,7 +325,7 @@ impl<R: RpcConnection, I: Indexer<R>> BatchedOperations<R, I> {
let merkle_tree = ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(
merkle_tree_account.data.as_mut_slice(),
)
            .unwrap();
merkle_tree.batches[batch_index].clone()
};

@@ -349,12 +344,21 @@ impl<R: RpcConnection, I: Indexer<R>> BatchedOperations<R, I> {
tx_hashes.push(*tx_hash);

if batch.get_state() == BatchState::Inserted
|| batch.get_state() == BatchState::ReadyToUpdateTree
{
let index_bytes = index.to_be_bytes();
use light_hasher::Hasher;
let leaf = *leaf;
let nullifier = Poseidon::hashv(&[&leaf, &index_bytes, tx_hash]).unwrap();
self.indexer
.lock()
.await
.update_test_indexer_in_nullification(
self.merkle_tree,
&nullifier,
*index as usize,
)
.await;
}
}

@@ -392,9 +396,9 @@ impl<R: RpcConnection, I: Indexer<R>> BatchedOperations<R, I> {
}
} else {
                error!(
                    "get_batched_nullify_ix_data: failed to get proof from server: {:?}",
                    response.text().await
                );
return Err(ForesterError::Custom(
"Failed to get proof from server".into(),
));
@@ -434,5 +438,5 @@ pub async fn process_batched_operations<R: RpcConnection, I: Indexer<R>>(
let processed_appends_count = ops.perform_batch_append().await?;
// let processed_nullifications_count = ops.perform_batch_nullify().await?;

Ok(processed_appends_count)
}
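For context on the nullification path above: each queued leaf's nullifier is the Poseidon hash of the leaf, its big-endian leaf index, and the transaction hash. A minimal sketch, assuming light_hasher's Hasher trait exposes hashv over byte slices as it is used in the diff:

// Nullifier derivation as in get_batched_nullify_ix_data above:
// nullifier = Poseidon(leaf, leaf_index_be, tx_hash).
use light_hasher::{Hasher, Poseidon};

fn derive_nullifier(leaf: &[u8; 32], index: u64, tx_hash: &[u8; 32]) -> [u8; 32] {
    let index_bytes = index.to_be_bytes();
    // hashv takes a slice of byte slices; the unwrap mirrors the diff's handling.
    Poseidon::hashv(&[leaf, &index_bytes, tx_hash]).unwrap()
}

Hashing the index and tx hash into the nullifier ties each spent leaf to the transaction that consumed it; update_test_indexer_in_nullification is then called with the same nullifier to keep the test indexer in sync.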
4 changes: 2 additions & 2 deletions forester/src/lib.rs
@@ -1,5 +1,6 @@
pub type Result<T> = std::result::Result<T, ForesterError>;

pub mod batched_ops;
pub mod cli;
pub mod config;
pub mod epoch_manager;
@@ -16,7 +17,6 @@ pub mod telemetry;
pub mod tree_data_sync;
pub mod tree_finder;
pub mod utils;

use crate::epoch_manager::{run_service, WorkReport};
use crate::errors::ForesterError;
@@ -49,7 +49,7 @@ pub async fn run_queue_info(
.collect();

for tree_data in trees {
        if tree_data.tree_type == TreeType::BatchedState {
continue;
}

1 change: 0 additions & 1 deletion forester/src/photon_indexer.rs
@@ -40,7 +40,6 @@ impl<R: RpcConnection> Debug for PhotonIndexer<R> {

#[async_trait]
impl<R: RpcConnection> Indexer<R> for PhotonIndexer<R> {

async fn get_multiple_compressed_account_proofs(
&self,
hashes: Vec<String>,
63 changes: 41 additions & 22 deletions forester/src/rollover/operations.rs
@@ -16,14 +16,14 @@ use tracing::{debug, info};

use crate::errors::ForesterError;
use crate::ForesterConfig;
use account_compression::batched_merkle_tree::ZeroCopyBatchedMerkleTreeAccount;
use account_compression::utils::constants::{
STATE_MERKLE_TREE_CANOPY_DEPTH, STATE_MERKLE_TREE_HEIGHT,
};
use account_compression::{
AddressMerkleTreeAccount, AddressMerkleTreeConfig, AddressQueueConfig, NullifierQueueConfig,
QueueAccount, StateMerkleTreeAccount, StateMerkleTreeConfig,
};
use forester_utils::address_merkle_tree_config::{
get_address_bundle_config, get_state_bundle_config,
};
@@ -114,28 +114,47 @@ pub async fn get_tree_fullness<R: RpcConnection>(
})
}
TreeType::BatchedState => {
let mut account = rpc.get_account(tree_pubkey).await?.unwrap();
let merkle_tree =
ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut account.data).unwrap();
println!(
"merkle_tree.get_account().queue.batch_size: {:?}",
merkle_tree.get_account().queue.batch_size
);

println!(
"queue currently_processing_batch_index: {:?}",
merkle_tree
.get_account()
.queue
.currently_processing_batch_index as usize
);

println!(
"queue batch_size: {:?}",
merkle_tree.get_account().queue.batch_size
);
println!(
"queue zkp_batch_size: {:?}",
merkle_tree.get_account().queue.zkp_batch_size
);
println!(
"queue next_full_batch_index: {:?}",
merkle_tree.get_account().queue.next_full_batch_index
);
println!(
"queue bloom_filter_capacity: {:?}",
merkle_tree.get_account().queue.bloom_filter_capacity
);
println!(
"queue num_batches: {:?}",
merkle_tree.get_account().queue.num_batches
);

println!(
"tree next_index: {:?}",
merkle_tree.get_account().next_index
);
println!("tree height: {:?}", merkle_tree.get_account().height);

// TODO: implement
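The TODO above leaves fullness for batched state trees unimplemented. Given the fields already printed (next_index and height), one plausible estimate — assuming next_index counts appended leaves, which this diff does not confirm — is:

// Hypothetical fullness estimate for a batched tree: appended leaves over
// the 2^height leaf capacity. Both field semantics are assumptions here.
fn estimated_fullness(next_index: u64, height: u32) -> f64 {
    let capacity = 1u64 << height; // total leaf slots in a tree of this height
    next_index as f64 / capacity as f64
}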
5 changes: 3 additions & 2 deletions forester/src/tree_data_sync.rs
@@ -1,4 +1,7 @@
use crate::Result;
use account_compression::batched_merkle_tree::{
BatchedMerkleTreeAccount, ZeroCopyBatchedMerkleTreeAccount,
};
use account_compression::utils::check_discrimininator::check_discriminator;
use account_compression::{AddressMerkleTreeAccount, MerkleTreeMetadata, StateMerkleTreeAccount};
use borsh::BorshDeserialize;
@@ -7,7 +10,6 @@ use light_client::rpc::RpcConnection;
use solana_sdk::account::Account;
use solana_sdk::pubkey::Pubkey;
use tracing::debug;

pub async fn fetch_trees<R: RpcConnection>(rpc: &R) -> Result<Vec<TreeAccounts>> {
let program_id = account_compression::id();
@@ -47,7 +49,6 @@ fn process_batch_state_account(account: &mut Account, pubkey: Pubkey) -> Result<
))
}


fn process_address_account(account: &Account, pubkey: Pubkey) -> Result<TreeAccounts> {
check_discriminator::<AddressMerkleTreeAccount>(&account.data)?;
let tree_account = AddressMerkleTreeAccount::deserialize(&mut &account.data[8..])?;
6 changes: 1 addition & 5 deletions programs/account-compression/src/state/batched_queue.rs
@@ -258,11 +258,7 @@ impl ZeroCopyBatchedQueueAccount {

/// Prove inclusion of a leaf by index in the queue's value vec. If
/// checked, fail if the leaf is not found.
pub fn prove_inclusion_by_index(&mut self, leaf_index: u64, value: &[u8; 32]) -> Result<()> {
for (batch_index, batch) in self.batches.iter().enumerate() {
if batch.value_is_inserted_in_batch(leaf_index)? {
let index = batch.get_value_index_in_batch(leaf_index)?;
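A hypothetical call site for the reformatted method, assuming the caller already holds a mutable ZeroCopyBatchedQueueAccount (deserialization elided; types and Result alias as in this module):

// Hypothetical usage sketch. Per the doc comment, this errors when `leaf`
// is not the value recorded at `leaf_index` in any batch's value vec.
fn assert_leaf_in_queue(
    queue: &mut ZeroCopyBatchedQueueAccount,
    leaf_index: u64,
    leaf: &[u8; 32],
) -> Result<()> {
    queue.prove_inclusion_by_index(leaf_index, leaf)
}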
56 changes: 29 additions & 27 deletions test-programs/registry-test/tests/tests.rs
@@ -30,7 +30,8 @@ use light_test_utils::assert_epoch::{
use light_test_utils::e2e_test_env::{init_program_test_env, init_program_test_env_forester};
use light_test_utils::rpc::ProgramTestRpcConnection;
use light_test_utils::test_batch_forester::{
create_append_batch_ix_data, perform_batch_append, perform_batch_append_with_indexer,
perform_batch_nullify, perform_batch_nullify_with_indexer,
};
use light_test_utils::test_env::{
create_address_merkle_tree_and_queue_account, create_state_merkle_tree_and_queue_account,
@@ -720,28 +721,30 @@ async fn test_custom_forester_batched() {

#[serial]
#[tokio::test]
-async fn test_custom_forester_batched_v2() {
+async fn test_forester_batched() {
let devnet = false;
let tree_params = if devnet {
InitStateTreeAccountsInstructionData::default()
} else {
InitStateTreeAccountsInstructionData::test_default()
};

let (mut rpc, env) =
setup_test_programs_with_accounts_with_protocol_config_and_batched_tree_params(
None,
ProtocolConfig::default(),
true,
tree_params,
)
.await;
let unregistered_forester_keypair = Keypair::new();
rpc.airdrop_lamports(&unregistered_forester_keypair.pubkey(), 1_000_000_000)
.await
.unwrap();
let merkle_tree_keypair = Keypair::new();
let nullifier_queue_keypair = Keypair::new();
let cpi_context_keypair = Keypair::new();

let (mut state_merkle_tree_bundle, _, mut rpc, mut indexer) = {
let mut e2e_env = if devnet {
let mut e2e_env = init_program_test_env_forester(rpc, &env).await;
@@ -764,8 +767,7 @@ async fn test_custom_forester_batched_v2() {
2,
)
.await;
let state_merkle_tree_pubkey = e2e_env.indexer.state_merkle_trees[0].accounts.merkle_tree;
let output_queue_pubkey = e2e_env.indexer.state_merkle_trees[0]
.accounts
.nullifier_queue;
@@ -814,28 +816,28 @@ async fn test_custom_forester_batched_v2() {
tree_params.input_queue_batch_size / tree_params.output_queue_zkp_batch_size;
for i in 0..num_output_zkp_batches {
let result = perform_batch_append_with_indexer(
&mut rpc,
&mut indexer,
&env.forester,
0,
merkle_tree_keypair.pubkey(),
nullifier_queue_keypair.pubkey(),
).await;
            println!("perform_batch_append_with_indexer result: {:?}", result);

-            let result = perform_batch_nullify_with_indexer(
-                &mut rpc,
-                &mut indexer,
-                &env.forester,
-                0,
-                merkle_tree_keypair.pubkey(),
-            )
-            .await;
-            println!("perform_batch_nullify_with_indexer result: {:?}", result);
+            // let result = perform_batch_nullify_with_indexer(
+            //     &mut rpc,
+            //     &mut indexer,
+            //     &env.forester,
+            //     0,
+            //     merkle_tree_keypair.pubkey(),
+            // )
+            // .await;
+            // println!("perform_batch_nullify_with_indexer result: {:?}", result);
}
}


/// Test:
/// 1. SUCCESS: Register a forester
/// 2. SUCCESS: Update forester authority
