diff --git a/rust/processor/src/models/token_v2_models/v2_token_ownerships.rs b/rust/processor/src/models/token_v2_models/v2_token_ownerships.rs index 38c7cb5d8..e7059ac0c 100644 --- a/rust/processor/src/models/token_v2_models/v2_token_ownerships.rs +++ b/rust/processor/src/models/token_v2_models/v2_token_ownerships.rs @@ -19,6 +19,7 @@ use crate::{ token_utils::TokenWriteSet, tokens::TableHandleToOwner, }, + token_v2_models::v2_token_utils::DEFAULT_OWNER_ADDRESS, }, schema::{current_token_ownerships_v2, token_ownerships_v2}, utils::{ @@ -304,9 +305,9 @@ impl TokenOwnershipV2 { burn_event.get_previous_owner_address() } else { // 2. If it doesn't exist in burn event mapping, then it must be an old burn event that doesn't contain previous_owner. - // Do a lookup to get preivous owner. - let latest_nft_ownership = match prior_nft_ownership.get(&token_address) { - Some(inner) => inner.clone(), + // Do a lookup to get previous owner. This is necessary because previous owner is part of current token ownerships primary key. + match prior_nft_ownership.get(&token_address) { + Some(inner) => inner.owner_address.clone(), None => { match CurrentTokenOwnershipV2Query::get_latest_owned_nft_by_token_data_id( conn, @@ -314,19 +315,18 @@ ) .await { - Ok(nft) => nft, + Ok(nft) => nft.owner_address.clone(), Err(_) => { tracing::error!( transaction_version = txn_version, lookup_key = &token_address, - "Failed to find NFT for burned token. You probably should backfill db." + "Failed to find current_token_ownership_v2 for burned token. You probably should backfill db." 
); - return Ok(None); + DEFAULT_OWNER_ADDRESS.to_string() }, } }, - }; - latest_nft_ownership.owner_address.clone() + } }; let token_data_id = token_address.clone(); diff --git a/rust/processor/src/models/token_v2_models/v2_token_utils.rs b/rust/processor/src/models/token_v2_models/v2_token_utils.rs index 7c8093028..e39be436a 100644 --- a/rust/processor/src/models/token_v2_models/v2_token_utils.rs +++ b/rust/processor/src/models/token_v2_models/v2_token_utils.rs @@ -27,6 +27,8 @@ use std::fmt::{self, Formatter}; pub const TOKEN_V2_ADDR: &str = "0x0000000000000000000000000000000000000000000000000000000000000004"; +pub const DEFAULT_OWNER_ADDRESS: &str = "unknown"; + /// Tracks all token related data in a hashmap for quick access (keyed on address of the object core) /// Maps address to burn event (new). The event is None if it's an old burn event. pub type TokenV2Burned = AHashMap>; diff --git a/rust/processor/src/processors/token_v2_processor.rs b/rust/processor/src/processors/token_v2_processor.rs index f2a77984f..34931bdb7 100644 --- a/rust/processor/src/processors/token_v2_processor.rs +++ b/rust/processor/src/processors/token_v2_processor.rs @@ -82,7 +82,7 @@ async fn insert_to_db( current_collections_v2: &[CurrentCollectionV2], current_token_datas_v2: &[CurrentTokenDataV2], current_token_ownerships_v2: &[CurrentTokenOwnershipV2], - current_burned_token_ownerships_v2: &[CurrentTokenOwnershipV2], + current_deleted_token_ownerships_v2: &[CurrentTokenOwnershipV2], token_activities_v2: &[TokenActivityV2], current_token_v2_metadata: &[CurrentTokenV2Metadata], per_table_chunk_sizes: &AHashMap, @@ -142,10 +142,10 @@ async fn insert_to_db( per_table_chunk_sizes, ), ); - let cbto_v2 = execute_in_chunks( + let cdto_v2 = execute_in_chunks( conn.clone(), - insert_current_burned_token_ownerships_v2_query, - current_burned_token_ownerships_v2, + insert_current_deleted_token_ownerships_v2_query, + current_deleted_token_ownerships_v2, get_config_table_chunk_size::( 
"current_token_ownerships_v2", per_table_chunk_sizes, @@ -177,10 +177,10 @@ async fn insert_to_db( cc_v2_res, ctd_v2_res, cto_v2_res, - cbto_v2_res, + cdto_v2_res, ta_v2_res, ct_v2_res, - ) = tokio::join!(coll_v2, td_v2, to_v2, cc_v2, ctd_v2, cto_v2, cbto_v2, ta_v2, ct_v2,); + ) = tokio::join!(coll_v2, td_v2, to_v2, cc_v2, ctd_v2, cto_v2, cdto_v2, ta_v2, ct_v2,); for res in [ coll_v2_res, @@ -189,7 +189,7 @@ async fn insert_to_db( cc_v2_res, ctd_v2_res, cto_v2_res, - cbto_v2_res, + cdto_v2_res, ta_v2_res, ct_v2_res, ] { @@ -344,7 +344,7 @@ fn insert_current_token_ownerships_v2_query( ) } -fn insert_current_burned_token_ownerships_v2_query( +fn insert_current_deleted_token_ownerships_v2_query( items_to_insert: Vec, ) -> ( impl QueryFragment + diesel::query_builder::QueryId + Send, @@ -440,9 +440,9 @@ impl ProcessorTrait for TokenV2Processor { token_datas_v2, token_ownerships_v2, current_collections_v2, - current_token_ownerships_v2, - current_burned_token_ownerships_v2, current_token_datas_v2, + current_token_ownerships_v2, + current_deleted_token_ownerships_v2, token_activities_v2, current_token_v2_metadata, ) = parse_v2_token(&transactions, &table_handle_to_owner, &mut conn).await; @@ -459,9 +459,9 @@ impl ProcessorTrait for TokenV2Processor { &token_datas_v2, &token_ownerships_v2, ¤t_collections_v2, - ¤t_token_ownerships_v2, - ¤t_burned_token_ownerships_v2, ¤t_token_datas_v2, + ¤t_token_ownerships_v2, + ¤t_deleted_token_ownerships_v2, &token_activities_v2, ¤t_token_v2_metadata, &self.per_table_chunk_sizes, @@ -506,7 +506,7 @@ async fn parse_v2_token( Vec, Vec, Vec, - Vec, // burned token ownerships + Vec, // deleted token ownerships Vec, Vec, ) { @@ -523,7 +523,7 @@ async fn parse_v2_token( CurrentTokenOwnershipV2PK, CurrentTokenOwnershipV2, > = AHashMap::new(); - let mut current_burned_token_ownerships_v2 = vec![]; + let mut current_deleted_token_ownerships_v2 = AHashMap::new(); // Tracks prior ownership in case a token gets burned let mut prior_nft_ownership: 
AHashMap = AHashMap::new(); // Get Metadata for token v2 by object @@ -816,7 +816,15 @@ async fn parse_v2_token( is_soulbound: cto.is_soulbound_v2, }, ); - current_burned_token_ownerships_v2.push(cto); + current_deleted_token_ownerships_v2.insert( + ( + cto.token_data_id.clone(), + cto.property_version_v1.clone(), + cto.owner_address.clone(), + cto.storage_id.clone(), + ), + cto, + ); } } }, @@ -900,7 +908,15 @@ async fn parse_v2_token( is_soulbound: current_nft_ownership.is_soulbound_v2, }, ); - current_burned_token_ownerships_v2.push(current_nft_ownership); + current_deleted_token_ownerships_v2.insert( + ( + current_nft_ownership.token_data_id.clone(), + current_nft_ownership.property_version_v1.clone(), + current_nft_ownership.owner_address.clone(), + current_nft_ownership.storage_id.clone(), + ), + current_nft_ownership, + ); } // Add fungible token handling @@ -969,7 +985,15 @@ async fn parse_v2_token( is_soulbound: current_nft_ownership.is_soulbound_v2, }, ); - current_burned_token_ownerships_v2.push(current_nft_ownership); + current_deleted_token_ownerships_v2.insert( + ( + current_nft_ownership.token_data_id.clone(), + current_nft_ownership.property_version_v1.clone(), + current_nft_ownership.owner_address.clone(), + current_nft_ownership.storage_id.clone(), + ), + current_nft_ownership, + ); } }, _ => {}, @@ -991,6 +1015,9 @@ async fn parse_v2_token( let mut current_token_v2_metadata = current_token_v2_metadata .into_values() .collect::>(); + let mut current_deleted_token_ownerships_v2 = current_deleted_token_ownerships_v2 + .into_values() + .collect::>(); // Sort by PK current_collections_v2.sort_by(|a, b| a.collection_id.cmp(&b.collection_id)); @@ -1012,6 +1039,20 @@ async fn parse_v2_token( current_token_v2_metadata.sort_by(|a, b| { (&a.object_address, &a.resource_type).cmp(&(&b.object_address, &b.resource_type)) }); + current_deleted_token_ownerships_v2.sort_by(|a, b| { + ( + &a.token_data_id, + &a.property_version_v1, + &a.owner_address, + 
&a.storage_id, + ) + .cmp(&( + &b.token_data_id, + &b.property_version_v1, + &b.owner_address, + &b.storage_id, + )) + }); ( collections_v2, @@ -1020,7 +1061,7 @@ async fn parse_v2_token( current_collections_v2, current_token_datas_v2, current_token_ownerships_v2, - current_burned_token_ownerships_v2, + current_deleted_token_ownerships_v2, token_activities_v2, current_token_v2_metadata, )