diff --git a/rust/processor/migrations/2024-05-21-221101_add_royalty_v1/down.sql b/rust/processor/migrations/2024-05-21-221101_add_royalty_v1/down.sql
new file mode 100644
index 000000000..dadb0f6d6
--- /dev/null
+++ b/rust/processor/migrations/2024-05-21-221101_add_royalty_v1/down.sql
@@ -0,0 +1,2 @@
+-- This file should undo anything in `up.sql`
+DROP TABLE IF EXISTS current_token_royalty_v1;
\ No newline at end of file
diff --git a/rust/processor/migrations/2024-05-21-221101_add_royalty_v1/up.sql b/rust/processor/migrations/2024-05-21-221101_add_royalty_v1/up.sql
new file mode 100644
index 000000000..d7b57adc9
--- /dev/null
+++ b/rust/processor/migrations/2024-05-21-221101_add_royalty_v1/up.sql
@@ -0,0 +1,11 @@
+-- Your SQL goes here
+-- This'll only work with royalty v1 because royalty_v2 requires collection id
+-- NOTE: PRIMARY KEY already implies UNIQUE NOT NULL; declaring UNIQUE as well
+-- would create a second, redundant index, so only PRIMARY KEY is used here.
+CREATE TABLE IF NOT EXISTS current_token_royalty_v1 (
+    token_data_id VARCHAR(66) PRIMARY KEY NOT NULL,
+    payee_address VARCHAR(66) NOT NULL,
+    royalty_points_numerator NUMERIC NOT NULL,
+    royalty_points_denominator NUMERIC NOT NULL,
+    last_transaction_version BIGINT NOT NULL,
+    last_transaction_timestamp TIMESTAMP NOT NULL,
+    inserted_at TIMESTAMP NOT NULL DEFAULT NOW()
+);
\ No newline at end of file
diff --git a/rust/processor/src/models/token_v2_models/mod.rs b/rust/processor/src/models/token_v2_models/mod.rs
index ab7c4616c..49bd71da5 100644
--- a/rust/processor/src/models/token_v2_models/mod.rs
+++ b/rust/processor/src/models/token_v2_models/mod.rs
@@ -1,6 +1,7 @@
 // Copyright © Aptos Foundation
 // SPDX-License-Identifier: Apache-2.0
 
+pub mod v1_token_royalty;
 pub mod v2_collections;
 pub mod v2_token_activities;
 pub mod v2_token_datas;
diff --git a/rust/processor/src/models/token_v2_models/v1_token_royalty.rs b/rust/processor/src/models/token_v2_models/v1_token_royalty.rs
new file mode 100644
index 000000000..b45d7c6a9
--- /dev/null
+++ b/rust/processor/src/models/token_v2_models/v1_token_royalty.rs
@@ -0,0 +1,98 @@
+// Copyright © Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+// This is required because a diesel macro makes clippy sad
+#![allow(clippy::extra_unused_lifetimes)]
+#![allow(clippy::unused_unit)]
+
+use crate::{models::token_models::token_utils::TokenWriteSet, schema::current_token_royalty_v1};
+use aptos_protos::transaction::v1::WriteTableItem;
+use bigdecimal::BigDecimal;
+use field_count::FieldCount;
+use serde::{Deserialize, Serialize};
+
+#[derive(
+    Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize, PartialEq, Eq,
+)]
+#[diesel(primary_key(token_data_id))]
+#[diesel(table_name = current_token_royalty_v1)]
+pub struct CurrentTokenRoyaltyV1 {
+    pub token_data_id: String,
+    pub payee_address: String,
+    pub royalty_points_numerator: BigDecimal,
+    pub royalty_points_denominator: BigDecimal,
+    pub last_transaction_version: i64,
+    pub last_transaction_timestamp: chrono::NaiveDateTime,
+}
+
+// Order rows by primary key so batches are written deterministically.
+impl Ord for CurrentTokenRoyaltyV1 {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        self.token_data_id.cmp(&other.token_data_id)
+    }
+}
+impl PartialOrd for CurrentTokenRoyaltyV1 {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl CurrentTokenRoyaltyV1 {
+    pub fn pk(&self) -> String {
+        self.token_data_id.clone()
+    }
+
+    // Royalty for v2 token is more complicated and not supported yet. For token v2,
+    // royalty can be on the collection (default) or on the token (override).
+    pub fn get_v1_from_write_table_item(
+        write_table_item: &WriteTableItem,
+        transaction_version: i64,
+        transaction_timestamp: chrono::NaiveDateTime,
+    ) -> anyhow::Result<Option<Self>> {
+        let table_item_data = write_table_item.data.as_ref().unwrap();
+
+        let maybe_token_data = match TokenWriteSet::from_table_item_type(
+            table_item_data.value_type.as_str(),
+            &table_item_data.value,
+            transaction_version,
+        )? {
+            Some(TokenWriteSet::TokenData(inner)) => Some(inner),
+            _ => None,
+        };
+
+        if let Some(token_data) = maybe_token_data {
+            let maybe_token_data_id = match TokenWriteSet::from_table_item_type(
+                table_item_data.key_type.as_str(),
+                &table_item_data.key,
+                transaction_version,
+            )? {
+                Some(TokenWriteSet::TokenDataId(inner)) => Some(inner),
+                _ => None,
+            };
+            if let Some(token_data_id_struct) = maybe_token_data_id {
+                // token data id is the 0x{hash} version of the creator, collection name, and token name
+                let token_data_id = token_data_id_struct.to_id();
+                let payee_address = token_data.royalty.get_payee_address();
+                let royalty_points_numerator = token_data.royalty.royalty_points_numerator.clone();
+                let royalty_points_denominator =
+                    token_data.royalty.royalty_points_denominator.clone();
+
+                return Ok(Some(Self {
+                    token_data_id,
+                    payee_address,
+                    royalty_points_numerator,
+                    royalty_points_denominator,
+                    last_transaction_version: transaction_version,
+                    last_transaction_timestamp: transaction_timestamp,
+                }));
+            } else {
+                tracing::warn!(
+                    transaction_version,
+                    key_type = table_item_data.key_type,
+                    key = table_item_data.key,
+                    "Expecting token_data_id as key for value = token_data"
+                );
+            }
+        }
+        Ok(None)
+    }
+}
diff --git a/rust/processor/src/processors/token_v2_processor.rs b/rust/processor/src/processors/token_v2_processor.rs
index 005333e13..8b337341a 100644
--- a/rust/processor/src/processors/token_v2_processor.rs
+++ b/rust/processor/src/processors/token_v2_processor.rs
@@ -10,6 +10,7 @@ use crate::{
         },
         token_models::tokens::{TableHandleToOwner, TableMetadataForToken},
         token_v2_models::{
+            v1_token_royalty::CurrentTokenRoyaltyV1,
            v2_collections::{CollectionV2, CurrentCollectionV2, CurrentCollectionV2PK},
            v2_token_activities::TokenActivityV2,
            v2_token_datas::{CurrentTokenDataV2, CurrentTokenDataV2PK, TokenDataV2},
@@ -105,6 +106,7 @@ async fn insert_to_db(
     ),
     token_activities_v2: &[TokenActivityV2],
     current_token_v2_metadata: &[CurrentTokenV2Metadata],
+    current_token_royalties_v1: &[CurrentTokenRoyaltyV1],
     per_table_chunk_sizes: &AHashMap<String, usize>,
 ) -> Result<(), diesel::result::Error> {
     tracing::trace!(
@@ -190,7 +192,7 @@ async fn insert_to_db(
         ),
     );
     let ct_v2 = execute_in_chunks(
-        conn,
+        conn.clone(),
         insert_current_token_v2_metadatas_query,
         current_token_v2_metadata,
         get_config_table_chunk_size::<CurrentTokenV2Metadata>(
             "current_token_v2_metadata",
             per_table_chunk_sizes,
         ),
     );
+    let ctr_v1 = execute_in_chunks(
+        conn,
+        insert_current_token_royalties_v1_query,
+        current_token_royalties_v1,
+        get_config_table_chunk_size::<CurrentTokenRoyaltyV1>(
+            "current_token_royalty_v1",
+            per_table_chunk_sizes,
+        ),
+    );
 
     let (
         coll_v2_res,
@@ -210,7 +221,10 @@ async fn insert_to_db(
         cdto_v2_res,
         ta_v2_res,
         ct_v2_res,
-    ) = tokio::join!(coll_v2, td_v2, to_v2, cc_v2, ctd_v2, cdtd_v2, cto_v2, cdto_v2, ta_v2, ct_v2,);
+        ctr_v1_res,
+    ) = tokio::join!(
+        coll_v2, td_v2, to_v2, cc_v2, ctd_v2, cdtd_v2, cto_v2, cdto_v2, ta_v2, ct_v2, ctr_v1
+    );
 
     for res in [
         coll_v2_res,
@@ -223,6 +237,7 @@ async fn insert_to_db(
         cdto_v2_res,
         ta_v2_res,
         ct_v2_res,
+        ctr_v1_res,
     ] {
         res?;
     }
@@ -479,6 +494,30 @@ fn insert_current_token_v2_metadatas_query(
     )
 }
 
+fn insert_current_token_royalties_v1_query(
+    items_to_insert: Vec<CurrentTokenRoyaltyV1>,
+) -> (
+    impl QueryFragment<Pg> + diesel::query_builder::QueryId + Send,
+    Option<&'static str>,
+) {
+    use schema::current_token_royalty_v1::dsl::*;
+
+    (
+        diesel::insert_into(schema::current_token_royalty_v1::table)
+            .values(items_to_insert)
+            .on_conflict(token_data_id)
+            .do_update()
+            .set((
+                payee_address.eq(excluded(payee_address)),
+                royalty_points_numerator.eq(excluded(royalty_points_numerator)),
+                royalty_points_denominator.eq(excluded(royalty_points_denominator)),
+                last_transaction_version.eq(excluded(last_transaction_version)),
+                last_transaction_timestamp.eq(excluded(last_transaction_timestamp)),
+            )),
+        Some(" WHERE current_token_royalty_v1.last_transaction_version <= excluded.last_transaction_version "),
+    )
+}
+
#[async_trait] impl ProcessorTrait for TokenV2Processor { fn name(&self) -> &'static str { @@ -516,6 +555,7 @@ impl ProcessorTrait for TokenV2Processor { current_deleted_token_ownerships_v2, token_activities_v2, current_token_v2_metadata, + current_token_royalties_v1, ) = parse_v2_token( &transactions, &table_handle_to_owner, @@ -544,6 +584,7 @@ impl ProcessorTrait for TokenV2Processor { ), &token_activities_v2, ¤t_token_v2_metadata, + ¤t_token_royalties_v1, &self.per_table_chunk_sizes, ) .await; @@ -592,12 +633,14 @@ async fn parse_v2_token( Vec, // deleted token ownerships Vec, Vec, + Vec, ) { // Token V2 and V1 combined let mut collections_v2 = vec![]; let mut token_datas_v2 = vec![]; let mut token_ownerships_v2 = vec![]; let mut token_activities_v2 = vec![]; + let mut current_collections_v2: AHashMap = AHashMap::new(); let mut current_token_datas_v2: AHashMap = @@ -618,6 +661,8 @@ async fn parse_v2_token( // Basically token properties let mut current_token_v2_metadata: AHashMap = AHashMap::new(); + let mut current_token_royalties_v1: AHashMap = + AHashMap::new(); // Code above is inefficient (multiple passthroughs) so I'm approaching TokenV2 with a cleaner code structure for txn in transactions { @@ -820,6 +865,19 @@ async fn parse_v2_token( current_token_data, ); } + if let Some(current_token_royalty) = + CurrentTokenRoyaltyV1::get_v1_from_write_table_item( + table_item, + txn_version, + txn_timestamp, + ) + .unwrap() + { + current_token_royalties_v1.insert( + current_token_royalty.token_data_id.clone(), + current_token_royalty, + ); + } if let Some((token_ownership, current_token_ownership)) = TokenOwnershipV2::get_v1_from_write_table_item( table_item, @@ -1091,6 +1149,9 @@ async fn parse_v2_token( let mut current_deleted_token_ownerships_v2 = current_deleted_token_ownerships_v2 .into_values() .collect::>(); + let mut current_token_royalties_v1 = current_token_royalties_v1 + .into_values() + .collect::>(); // Sort by PK current_collections_v2.sort_by(|a, b| 
a.collection_id.cmp(&b.collection_id)); @@ -1127,6 +1188,7 @@ async fn parse_v2_token( &b.storage_id, )) }); + current_token_royalties_v1.sort(); ( collections_v2, @@ -1139,5 +1201,6 @@ async fn parse_v2_token( current_deleted_token_ownerships_v2, token_activities_v2, current_token_v2_metadata, + current_token_royalties_v1, ) } diff --git a/rust/processor/src/schema.rs b/rust/processor/src/schema.rs index b50808303..9d1edac85 100644 --- a/rust/processor/src/schema.rs +++ b/rust/processor/src/schema.rs @@ -620,6 +620,20 @@ diesel::table! { } } +diesel::table! { + current_token_royalty_v1 (token_data_id) { + #[max_length = 66] + token_data_id -> Varchar, + #[max_length = 66] + payee_address -> Varchar, + royalty_points_numerator -> Numeric, + royalty_points_denominator -> Numeric, + last_transaction_version -> Int8, + last_transaction_timestamp -> Timestamp, + inserted_at -> Timestamp, + } +} + diesel::table! { current_token_v2_metadata (object_address, resource_type) { #[max_length = 66] @@ -1290,6 +1304,7 @@ diesel::allow_tables_to_appear_in_same_query!( current_token_ownerships, current_token_ownerships_v2, current_token_pending_claims, + current_token_royalty_v1, current_token_v2_metadata, current_unified_fungible_asset_balances, delegated_staking_activities,