[EI-461] Index royalty in royalty tables #381

Merged: 5 commits merged on Jun 10, 2024
2 changes: 2 additions & 0 deletions rust/processor/migrations/2024-05-21-221101_add_royalty_v1/down.sql
@@ -0,0 +1,2 @@
-- This file should undo anything in `up.sql`
DROP TABLE IF EXISTS current_token_royalty_v1;
11 changes: 11 additions & 0 deletions rust/processor/migrations/2024-05-21-221101_add_royalty_v1/up.sql
@@ -0,0 +1,11 @@
-- Your SQL goes here
-- This will only work with royalty v1 because royalty_v2 requires a collection id
CREATE TABLE IF NOT EXISTS current_token_royalty_v1 (
token_data_id VARCHAR(66) UNIQUE PRIMARY KEY NOT NULL,
payee_address VARCHAR(66) NOT NULL,
royalty_points_numerator NUMERIC NOT NULL,
royalty_points_denominator NUMERIC NOT NULL,
last_transaction_version BIGINT NOT NULL,
last_transaction_timestamp TIMESTAMP NOT NULL,
inserted_at TIMESTAMP NOT NULL DEFAULT NOW()
);
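
As a rough sketch of how this table is meant to be consumed, a lookup of the current royalty for a single token could look like the query below; the token_data_id literal is made up, and the fraction is simply numerator over denominator:

-- Illustrative read of current_token_royalty_v1 (the token_data_id value is a placeholder)
SELECT
    payee_address,
    royalty_points_numerator / NULLIF(royalty_points_denominator, 0) AS royalty_fraction,
    last_transaction_version
FROM current_token_royalty_v1
WHERE token_data_id = '0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef';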
1 change: 1 addition & 0 deletions rust/processor/src/models/token_v2_models/mod.rs
@@ -1,6 +1,7 @@
// Copyright © Aptos Foundation
// SPDX-License-Identifier: Apache-2.0

pub mod v1_token_royalty;
pub mod v2_collections;
pub mod v2_token_activities;
pub mod v2_token_datas;
98 changes: 98 additions & 0 deletions rust/processor/src/models/token_v2_models/v1_token_royalty.rs
@@ -0,0 +1,98 @@
// Copyright © Aptos Foundation
// SPDX-License-Identifier: Apache-2.0

// This is required because a diesel macro makes clippy sad
#![allow(clippy::extra_unused_lifetimes)]
#![allow(clippy::unused_unit)]

use crate::{models::token_models::token_utils::TokenWriteSet, schema::current_token_royalty_v1};
use aptos_protos::transaction::v1::WriteTableItem;
use bigdecimal::BigDecimal;
use field_count::FieldCount;
use serde::{Deserialize, Serialize};

#[derive(
Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize, PartialEq, Eq,
)]
#[diesel(primary_key(token_data_id))]
#[diesel(table_name = current_token_royalty_v1)]
pub struct CurrentTokenRoyaltyV1 {
pub token_data_id: String,
pub payee_address: String,
pub royalty_points_numerator: BigDecimal,
pub royalty_points_denominator: BigDecimal,
pub last_transaction_version: i64,
pub last_transaction_timestamp: chrono::NaiveDateTime,
}

impl Ord for CurrentTokenRoyaltyV1 {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.token_data_id.cmp(&other.token_data_id)
}
}
impl PartialOrd for CurrentTokenRoyaltyV1 {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}

impl CurrentTokenRoyaltyV1 {
pub fn pk(&self) -> String {
self.token_data_id.clone()
}

// Royalty for v2 tokens is more complicated and not supported yet. For token v2, royalty can be set
// on the collection (default) or overridden on the token.
pub fn get_v1_from_write_table_item(
write_table_item: &WriteTableItem,
transaction_version: i64,
transaction_timestamp: chrono::NaiveDateTime,
) -> anyhow::Result<Option<Self>> {
let table_item_data = write_table_item.data.as_ref().unwrap();

let maybe_token_data = match TokenWriteSet::from_table_item_type(
table_item_data.value_type.as_str(),
&table_item_data.value,
transaction_version,
)? {
Some(TokenWriteSet::TokenData(inner)) => Some(inner),
_ => None,
};

if let Some(token_data) = maybe_token_data {
let maybe_token_data_id = match TokenWriteSet::from_table_item_type(
table_item_data.key_type.as_str(),
&table_item_data.key,
transaction_version,
)? {
Some(TokenWriteSet::TokenDataId(inner)) => Some(inner),
_ => None,
};
if let Some(token_data_id_struct) = maybe_token_data_id {
// token data id is the 0x{hash} version of the creator, collection name, and token name
let token_data_id = token_data_id_struct.to_id();
let payee_address = token_data.royalty.get_payee_address();
let royalty_points_numerator = token_data.royalty.royalty_points_numerator.clone();
let royalty_points_denominator =
token_data.royalty.royalty_points_denominator.clone();

return Ok(Some(Self {
token_data_id,
payee_address,
royalty_points_numerator,
royalty_points_denominator,
last_transaction_version: transaction_version,
last_transaction_timestamp: transaction_timestamp,
}));
} else {
tracing::warn!(
transaction_version,
key_type = table_item_data.key_type,
key = table_item_data.key,
"Expecting token_data_id as key for value = token_data"
);
}
}
Ok(None)
}
}
67 changes: 65 additions & 2 deletions rust/processor/src/processors/token_v2_processor.rs
@@ -10,6 +10,7 @@ use crate::{
},
token_models::tokens::{TableHandleToOwner, TableMetadataForToken},
token_v2_models::{
v1_token_royalty::CurrentTokenRoyaltyV1,
v2_collections::{CollectionV2, CurrentCollectionV2, CurrentCollectionV2PK},
v2_token_activities::TokenActivityV2,
v2_token_datas::{CurrentTokenDataV2, CurrentTokenDataV2PK, TokenDataV2},
@@ -105,6 +106,7 @@ async fn insert_to_db(
),
token_activities_v2: &[TokenActivityV2],
current_token_v2_metadata: &[CurrentTokenV2Metadata],
current_token_royalties_v1: &[CurrentTokenRoyaltyV1],
per_table_chunk_sizes: &AHashMap<String, usize>,
) -> Result<(), diesel::result::Error> {
tracing::trace!(
@@ -190,14 +192,23 @@
),
);
let ct_v2 = execute_in_chunks(
conn,
conn.clone(),
insert_current_token_v2_metadatas_query,
current_token_v2_metadata,
get_config_table_chunk_size::<CurrentTokenV2Metadata>(
"current_token_v2_metadata",
per_table_chunk_sizes,
),
);
let ctr_v1 = execute_in_chunks(
conn,
insert_current_token_royalties_v1_query,
current_token_royalties_v1,
get_config_table_chunk_size::<CurrentTokenRoyaltyV1>(
"current_token_royalty_v1",
per_table_chunk_sizes,
),
);

let (
coll_v2_res,
@@ -210,7 +221,10 @@
cdto_v2_res,
ta_v2_res,
ct_v2_res,
) = tokio::join!(coll_v2, td_v2, to_v2, cc_v2, ctd_v2, cdtd_v2, cto_v2, cdto_v2, ta_v2, ct_v2,);
ctr_v1_res,
) = tokio::join!(
coll_v2, td_v2, to_v2, cc_v2, ctd_v2, cdtd_v2, cto_v2, cdto_v2, ta_v2, ct_v2, ctr_v1
);

for res in [
coll_v2_res,
@@ -223,6 +237,7 @@
cdto_v2_res,
ta_v2_res,
ct_v2_res,
ctr_v1_res,
] {
res?;
}
@@ -479,6 +494,30 @@ fn insert_current_token_v2_metadatas_query(
)
}

fn insert_current_token_royalties_v1_query(
items_to_insert: Vec<CurrentTokenRoyaltyV1>,
) -> (
impl QueryFragment<Pg> + diesel::query_builder::QueryId + Send,
Option<&'static str>,
) {
use schema::current_token_royalty_v1::dsl::*;

(
diesel::insert_into(schema::current_token_royalty_v1::table)
.values(items_to_insert)
.on_conflict(token_data_id)
.do_update()
.set((
payee_address.eq(excluded(payee_address)),
royalty_points_numerator.eq(excluded(royalty_points_numerator)),
royalty_points_denominator.eq(excluded(royalty_points_denominator)),
last_transaction_version.eq(excluded(last_transaction_version)),
last_transaction_timestamp.eq(excluded(last_transaction_timestamp)),
)),
Some(" WHERE current_token_royalty_v1.last_transaction_version <= excluded.last_transaction_version "),
)
}
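
A sketch of the upsert this builder produces, assuming the usual Diesel SQL generation plus the appended WHERE filter; the VALUES row is illustrative, and the real rows are bound from items_to_insert:

INSERT INTO current_token_royalty_v1
    (token_data_id, payee_address, royalty_points_numerator, royalty_points_denominator,
     last_transaction_version, last_transaction_timestamp)
VALUES
    -- illustrative values only
    ('0xfeed', '0xbeef', 5, 100, 123456789, NOW())
ON CONFLICT (token_data_id) DO UPDATE SET
    payee_address = excluded.payee_address,
    royalty_points_numerator = excluded.royalty_points_numerator,
    royalty_points_denominator = excluded.royalty_points_denominator,
    last_transaction_version = excluded.last_transaction_version,
    last_transaction_timestamp = excluded.last_transaction_timestamp
WHERE current_token_royalty_v1.last_transaction_version <= excluded.last_transaction_version;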

#[async_trait]
impl ProcessorTrait for TokenV2Processor {
fn name(&self) -> &'static str {
@@ -516,6 +555,7 @@ impl ProcessorTrait for TokenV2Processor {
current_deleted_token_ownerships_v2,
token_activities_v2,
current_token_v2_metadata,
current_token_royalties_v1,
) = parse_v2_token(
&transactions,
&table_handle_to_owner,
@@ -544,6 +584,7 @@ impl ProcessorTrait for TokenV2Processor {
),
&token_activities_v2,
&current_token_v2_metadata,
&current_token_royalties_v1,
&self.per_table_chunk_sizes,
)
.await;
@@ -592,12 +633,14 @@ async fn parse_v2_token(
Vec<CurrentTokenOwnershipV2>, // deleted token ownerships
Vec<TokenActivityV2>,
Vec<CurrentTokenV2Metadata>,
Vec<CurrentTokenRoyaltyV1>,
) {
// Token V2 and V1 combined
let mut collections_v2 = vec![];
let mut token_datas_v2 = vec![];
let mut token_ownerships_v2 = vec![];
let mut token_activities_v2 = vec![];

let mut current_collections_v2: AHashMap<CurrentCollectionV2PK, CurrentCollectionV2> =
AHashMap::new();
let mut current_token_datas_v2: AHashMap<CurrentTokenDataV2PK, CurrentTokenDataV2> =
@@ -618,6 +661,8 @@ async fn parse_v2_token(
// Basically token properties
let mut current_token_v2_metadata: AHashMap<CurrentTokenV2MetadataPK, CurrentTokenV2Metadata> =
AHashMap::new();
let mut current_token_royalties_v1: AHashMap<CurrentTokenDataV2PK, CurrentTokenRoyaltyV1> =
AHashMap::new();

// Code above is inefficient (multiple passthroughs) so I'm approaching TokenV2 with a cleaner code structure
for txn in transactions {
@@ -820,6 +865,19 @@ async fn parse_v2_token(
current_token_data,
);
}
if let Some(current_token_royalty) =
CurrentTokenRoyaltyV1::get_v1_from_write_table_item(
table_item,
txn_version,
txn_timestamp,
)
.unwrap()
{
current_token_royalties_v1.insert(
current_token_royalty.token_data_id.clone(),
current_token_royalty,
);
}
if let Some((token_ownership, current_token_ownership)) =
TokenOwnershipV2::get_v1_from_write_table_item(
table_item,
@@ -1091,6 +1149,9 @@ async fn parse_v2_token(
let mut current_deleted_token_ownerships_v2 = current_deleted_token_ownerships_v2
.into_values()
.collect::<Vec<CurrentTokenOwnershipV2>>();
let mut current_token_royalties_v1 = current_token_royalties_v1
.into_values()
.collect::<Vec<CurrentTokenRoyaltyV1>>();

// Sort by PK
current_collections_v2.sort_by(|a, b| a.collection_id.cmp(&b.collection_id));
@@ -1127,6 +1188,7 @@ async fn parse_v2_token(
&b.storage_id,
))
});
current_token_royalties_v1.sort();

(
collections_v2,
@@ -1139,5 +1201,6 @@
current_deleted_token_ownerships_v2,
token_activities_v2,
current_token_v2_metadata,
current_token_royalties_v1,
)
}
15 changes: 15 additions & 0 deletions rust/processor/src/schema.rs
@@ -620,6 +620,20 @@ diesel::table! {
}
}

diesel::table! {
current_token_royalty_v1 (token_data_id) {
#[max_length = 66]
token_data_id -> Varchar,
#[max_length = 66]
payee_address -> Varchar,
royalty_points_numerator -> Numeric,
royalty_points_denominator -> Numeric,
last_transaction_version -> Int8,
last_transaction_timestamp -> Timestamp,
inserted_at -> Timestamp,
}
}

diesel::table! {
current_token_v2_metadata (object_address, resource_type) {
#[max_length = 66]
@@ -1290,6 +1304,7 @@ diesel::allow_tables_to_appear_in_same_query!(
current_token_ownerships,
current_token_ownerships_v2,
current_token_pending_claims,
current_token_royalty_v1,
current_token_v2_metadata,
current_unified_fungible_asset_balances,
delegated_staking_activities,