Dedupe token burns (#305)
rtso committed Mar 8, 2024
1 parent 31a3f92 commit f8c2f29
Showing 3 changed files with 66 additions and 23 deletions.
16 changes: 8 additions & 8 deletions rust/processor/src/models/token_v2_models/v2_token_ownerships.rs
@@ -19,6 +19,7 @@ use crate::{
token_utils::TokenWriteSet,
tokens::TableHandleToOwner,
},
token_v2_models::v2_token_utils::DEFAULT_OWNER_ADDRESS,
},
schema::{current_token_ownerships_v2, token_ownerships_v2},
utils::{
@@ -304,29 +305,28 @@ impl TokenOwnershipV2 {
burn_event.get_previous_owner_address()
} else {
// 2. If it doesn't exist in burn event mapping, then it must be an old burn event that doesn't contain previous_owner.
// Do a lookup to get preivous owner.
let latest_nft_ownership = match prior_nft_ownership.get(&token_address) {
Some(inner) => inner.clone(),
// Do a lookup to get previous owner. This is necessary because the previous owner is part of the current token ownerships primary key.
match prior_nft_ownership.get(&token_address) {
Some(inner) => inner.owner_address.clone(),
None => {
match CurrentTokenOwnershipV2Query::get_latest_owned_nft_by_token_data_id(
conn,
&token_address,
)
.await
{
Ok(nft) => nft,
Ok(nft) => nft.owner_address.clone(),
Err(_) => {
tracing::error!(
transaction_version = txn_version,
lookup_key = &token_address,
"Failed to find NFT for burned token. You probably should backfill db."
"Failed to find current_token_ownership_v2 for burned token. You probably should backfill db."
);
return Ok(None);
DEFAULT_OWNER_ADDRESS.to_string()
},
}
},
};
latest_nft_ownership.owner_address.clone()
}
};

let token_data_id = token_address.clone();
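To summarize the control flow introduced by this hunk, here is a minimal, self-contained sketch of the new fallback order for resolving a burned token's previous owner. Only the ordering comes from the diff (in-batch `prior_nft_ownership` map, then a database lookup, then `DEFAULT_OWNER_ADDRESS`); the helper names and the synchronous stand-in for the async `CurrentTokenOwnershipV2Query` call are assumptions for illustration.

```rust
use std::collections::HashMap;

const DEFAULT_OWNER_ADDRESS: &str = "unknown";

// Stand-in for CurrentTokenOwnershipV2Query::get_latest_owned_nft_by_token_data_id,
// which is async and hits the database in the real processor (assumption for the sketch).
fn db_lookup_latest_owner(token_address: &str) -> Option<String> {
    let _ = token_address;
    None
}

// Fallback order mirrored from the hunk above:
// 1. the in-batch prior_nft_ownership map, 2. the database, 3. the sentinel owner.
fn resolve_previous_owner(
    prior_nft_ownership: &HashMap<String, String>, // token_address -> owner_address
    token_address: &str,
) -> String {
    match prior_nft_ownership.get(token_address) {
        Some(owner) => owner.clone(),
        None => match db_lookup_latest_owner(token_address) {
            Some(owner) => owner,
            None => {
                eprintln!(
                    "Failed to find current_token_ownership_v2 for burned token {token_address}; consider backfilling the db"
                );
                DEFAULT_OWNER_ADDRESS.to_string()
            },
        },
    }
}

fn main() {
    let prior = HashMap::from([("0xaaa".to_string(), "0xowner".to_string())]);
    assert_eq!(resolve_previous_owner(&prior, "0xaaa"), "0xowner");
    assert_eq!(resolve_previous_owner(&prior, "0xbbb"), DEFAULT_OWNER_ADDRESS);
}
```

The important behavioral change is the last arm: where the old code returned `Ok(None)` and dropped the row, the new code keeps the row with `DEFAULT_OWNER_ADDRESS` as the owner.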
2 changes: 2 additions & 0 deletions rust/processor/src/models/token_v2_models/v2_token_utils.rs
@@ -27,6 +27,8 @@ use std::fmt::{self, Formatter};
pub const TOKEN_V2_ADDR: &str =
"0x0000000000000000000000000000000000000000000000000000000000000004";

pub const DEFAULT_OWNER_ADDRESS: &str = "unknown";

/// Tracks all token related data in a hashmap for quick access (keyed on address of the object core)
/// Maps address to burn event (new). The event is None if it's an old burn event.
pub type TokenV2Burned = AHashMap<CurrentObjectPK, Option<Burn>>;
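The `TokenV2Burned` alias above encodes the old-vs-new burn event distinction that drives the lookup logic in the previous file: a `Some(Burn)` value carries the previous owner, a `None` value means an old-style event with no owner attached. A small self-contained sketch under those assumptions (the placeholder `Burn` struct and the addresses here are illustrative, not the repo's actual types):

```rust
use ahash::AHashMap;

// Placeholder stand-ins for the repo's CurrentObjectPK and Burn types (assumptions).
type CurrentObjectPK = String;

#[derive(Clone, Debug)]
struct Burn {
    previous_owner: String,
}

// Mirrors the alias above: address of the burned object -> optional burn event.
type TokenV2Burned = AHashMap<CurrentObjectPK, Option<Burn>>;

const DEFAULT_OWNER_ADDRESS: &str = "unknown";

fn main() {
    let mut tokens_burned: TokenV2Burned = AHashMap::new();
    // New-style burn event: the previous owner travels with the event.
    tokens_burned.insert(
        "0xaaa".into(),
        Some(Burn { previous_owner: "0xowner".into() }),
    );
    // Old-style burn event: no previous owner on the event itself.
    tokens_burned.insert("0xbbb".into(), None);

    for (addr, burn) in &tokens_burned {
        // For this sketch, unknown owners just get the sentinel; the real processor
        // first consults prior_nft_ownership and the database before falling back.
        let owner = burn
            .as_ref()
            .map(|b| b.previous_owner.clone())
            .unwrap_or_else(|| DEFAULT_OWNER_ADDRESS.to_string());
        println!("burned object {addr} previously owned by {owner}");
    }
}
```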
71 changes: 56 additions & 15 deletions rust/processor/src/processors/token_v2_processor.rs
@@ -79,7 +79,7 @@ async fn insert_to_db(
current_collections_v2: Vec<CurrentCollectionV2>,
current_token_datas_v2: Vec<CurrentTokenDataV2>,
current_token_ownerships_v2: Vec<CurrentTokenOwnershipV2>,
current_burned_token_ownerships_v2: Vec<CurrentTokenOwnershipV2>,
current_deleted_token_ownerships_v2: Vec<CurrentTokenOwnershipV2>,
token_activities_v2: Vec<TokenActivityV2>,
current_token_v2_metadata: Vec<CurrentTokenV2Metadata>,
) -> Result<(), diesel::result::Error> {
@@ -134,8 +134,8 @@ async fn insert_to_db(
.await?;
execute_in_chunks(
conn.clone(),
insert_current_burned_token_ownerships_v2_query,
current_burned_token_ownerships_v2,
insert_current_deleted_token_ownerships_v2_query,
current_deleted_token_ownerships_v2,
CurrentTokenOwnershipV2::field_count(),
)
.await?;
@@ -301,7 +301,7 @@ fn insert_current_token_ownerships_v2_query(
)
}

fn insert_current_burned_token_ownerships_v2_query(
fn insert_current_deleted_token_ownerships_v2_query(
items_to_insert: Vec<CurrentTokenOwnershipV2>,
) -> (
impl QueryFragment<Pg> + diesel::query_builder::QueryId + Send,
@@ -395,9 +395,9 @@ impl ProcessorTrait for TokenV2Processor {
token_datas_v2,
token_ownerships_v2,
current_collections_v2,
current_token_ownerships_v2,
current_burned_token_ownerships_v2,
current_token_datas_v2,
current_token_ownerships_v2,
current_deleted_token_ownerships_v2,
token_activities_v2,
current_token_v2_metadata,
) = parse_v2_token(&transactions, &table_handle_to_owner, &mut conn).await;
@@ -414,9 +414,9 @@ impl ProcessorTrait for TokenV2Processor {
token_datas_v2,
token_ownerships_v2,
current_collections_v2,
current_token_ownerships_v2,
current_burned_token_ownerships_v2,
current_token_datas_v2,
current_token_ownerships_v2,
current_deleted_token_ownerships_v2,
token_activities_v2,
current_token_v2_metadata,
)
@@ -429,7 +429,7 @@ impl ProcessorTrait for TokenV2Processor {
end_version,
processing_duration_in_secs,
db_insertion_duration_in_secs,
last_transaction_timstamp: transactions.last().unwrap().timestamp.clone(),
last_transaction_timestamp: transactions.last().unwrap().timestamp.clone(),
}),
Err(e) => {
error!(
@@ -460,7 +460,7 @@ async fn parse_v2_token(
Vec<CurrentCollectionV2>,
Vec<CurrentTokenDataV2>,
Vec<CurrentTokenOwnershipV2>,
Vec<CurrentTokenOwnershipV2>, // burned token ownerships
Vec<CurrentTokenOwnershipV2>, // deleted token ownerships
Vec<TokenActivityV2>,
Vec<CurrentTokenV2Metadata>,
) {
@@ -477,7 +477,7 @@ async fn parse_v2_token(
CurrentTokenOwnershipV2PK,
CurrentTokenOwnershipV2,
> = AHashMap::new();
let mut current_burned_token_ownerships_v2 = vec![];
let mut current_deleted_token_ownerships_v2 = AHashMap::new();
// Tracks prior ownership in case a token gets burned
let mut prior_nft_ownership: AHashMap<String, NFTOwnershipV2> = AHashMap::new();
// Get Metadata for token v2 by object
@@ -770,7 +770,15 @@ async fn parse_v2_token(
is_soulbound: cto.is_soulbound_v2,
},
);
current_burned_token_ownerships_v2.push(cto);
current_deleted_token_ownerships_v2.insert(
(
cto.token_data_id.clone(),
cto.property_version_v1.clone(),
cto.owner_address.clone(),
cto.storage_id.clone(),
),
cto,
);
}
}
},
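This block is the heart of the dedupe: instead of pushing every burned ownership onto a `Vec`, rows are now inserted into an `AHashMap` keyed by the fields that make up the `current_token_ownerships_v2` primary key (`token_data_id`, `property_version_v1`, `owner_address`, `storage_id`), so two burn rows for the same key in one batch collapse into a single entry. A self-contained sketch of the pattern with a simplified stand-in struct (the real model uses `BigDecimal` for `property_version_v1` and carries many more fields):

```rust
use ahash::AHashMap;

// Simplified stand-in for CurrentTokenOwnershipV2; only the primary-key fields are shown.
#[derive(Clone, Debug)]
struct DeletedOwnership {
    token_data_id: String,
    property_version_v1: u64, // BigDecimal in the real model; u64 keeps the sketch short
    owner_address: String,
    storage_id: String,
}

fn main() {
    let mut deleted: AHashMap<(String, u64, String, String), DeletedOwnership> = AHashMap::new();

    let cto = DeletedOwnership {
        token_data_id: "0xtoken".into(),
        property_version_v1: 0,
        owner_address: "0xprev_owner".into(),
        storage_id: "0xstorage".into(),
    };

    // Inserting the same primary key twice overwrites the earlier entry instead of
    // queuing a duplicate row, which is what "dedupe token burns" refers to.
    for _ in 0..2 {
        deleted.insert(
            (
                cto.token_data_id.clone(),
                cto.property_version_v1,
                cto.owner_address.clone(),
                cto.storage_id.clone(),
            ),
            cto.clone(),
        );
    }
    assert_eq!(deleted.len(), 1);
}
```

The resulting map is later converted back to a `Vec` and sorted before insertion, as the final hunks below show.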
@@ -854,7 +862,15 @@ async fn parse_v2_token(
is_soulbound: current_nft_ownership.is_soulbound_v2,
},
);
current_burned_token_ownerships_v2.push(current_nft_ownership);
current_deleted_token_ownerships_v2.insert(
(
current_nft_ownership.token_data_id.clone(),
current_nft_ownership.property_version_v1.clone(),
current_nft_ownership.owner_address.clone(),
current_nft_ownership.storage_id.clone(),
),
current_nft_ownership,
);
}

// Add fungible token handling
@@ -923,7 +939,15 @@ async fn parse_v2_token(
is_soulbound: current_nft_ownership.is_soulbound_v2,
},
);
current_burned_token_ownerships_v2.push(current_nft_ownership);
current_deleted_token_ownerships_v2.insert(
(
current_nft_ownership.token_data_id.clone(),
current_nft_ownership.property_version_v1.clone(),
current_nft_ownership.owner_address.clone(),
current_nft_ownership.storage_id.clone(),
),
current_nft_ownership,
);
}
},
_ => {},
@@ -945,6 +969,9 @@ async fn parse_v2_token(
let mut current_token_v2_metadata = current_token_v2_metadata
.into_values()
.collect::<Vec<CurrentTokenV2Metadata>>();
let mut current_deleted_token_ownerships_v2 = current_deleted_token_ownerships_v2
.into_values()
.collect::<Vec<CurrentTokenOwnershipV2>>();

// Sort by PK
current_collections_v2.sort_by(|a, b| a.collection_id.cmp(&b.collection_id));
@@ -966,6 +993,20 @@ async fn parse_v2_token(
current_token_v2_metadata.sort_by(|a, b| {
(&a.object_address, &a.resource_type).cmp(&(&b.object_address, &b.resource_type))
});
current_deleted_token_ownerships_v2.sort_by(|a, b| {
(
&a.token_data_id,
&a.property_version_v1,
&a.owner_address,
&a.storage_id,
)
.cmp(&(
&b.token_data_id,
&b.property_version_v1,
&b.owner_address,
&b.storage_id,
))
});

(
collections_v2,
@@ -974,7 +1015,7 @@ async fn parse_v2_token(
current_collections_v2,
current_token_datas_v2,
current_token_ownerships_v2,
current_burned_token_ownerships_v2,
current_deleted_token_ownerships_v2,
token_activities_v2,
current_token_v2_metadata,
)
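Like the other `current_*` collections, the deduped rows are ordered by the same composite key in the final `sort_by` hunk above before the chunked upserts; keeping batch order deterministic on the primary key is a common way to keep concurrent chunked writes well-behaved, though the diff itself doesn't state the motivation. A tiny self-contained sketch of the tuple-comparison sort used there (struct simplified):

```rust
// Sort rows by the composite primary key via tuple comparison, mirroring the sort_by above.
#[derive(Debug)]
struct Row {
    token_data_id: String,
    property_version_v1: u64,
    owner_address: String,
    storage_id: String,
}

fn sort_by_pk(rows: &mut [Row]) {
    rows.sort_by(|a, b| {
        (
            &a.token_data_id,
            &a.property_version_v1,
            &a.owner_address,
            &a.storage_id,
        )
            .cmp(&(
                &b.token_data_id,
                &b.property_version_v1,
                &b.owner_address,
                &b.storage_id,
            ))
    });
}

fn main() {
    let mut rows = vec![
        Row {
            token_data_id: "0xbbb".into(),
            property_version_v1: 0,
            owner_address: "0x2".into(),
            storage_id: "0xs".into(),
        },
        Row {
            token_data_id: "0xaaa".into(),
            property_version_v1: 0,
            owner_address: "0x1".into(),
            storage_id: "0xs".into(),
        },
    ];
    sort_by_pk(&mut rows);
    assert_eq!(rows[0].token_data_id, "0xaaa");
}
```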
