diff --git a/.github/actions/protobuf-compatibility-check/action.yaml b/.github/actions/protobuf-compatibility-check/action.yaml
new file mode 100644
index 000000000..10c439145
--- /dev/null
+++ b/.github/actions/protobuf-compatibility-check/action.yaml
@@ -0,0 +1,59 @@
+name: "Protobuf compatibility check"
+description: |
+  "Runs a compatibility check for protobuf files."
+inputs:
+  upstream_commit_version:
+    description: "Commit version of aptos-core that upstream is using"
+    required: true
+    default: "main"
+
+runs:
+  using: composite
+  steps:
+    - name: Install the buf CLI
+      shell: bash
+      run: |
+        BIN="/usr/local/bin" && \
+        VERSION="1.30.0" && \
+        curl -sSL \
+          "https://github.com/bufbuild/buf/releases/download/v${VERSION}/buf-$(uname -s)-$(uname -m)" \
+          -o "${BIN}/buf" && \
+        chmod +x "${BIN}/buf"
+
+    # Checkout current repo with current commit
+    - name: Checkout current repo
+      uses: actions/checkout@v4
+      with:
+        path: "aptos-indexer-processors"
+
+    - name: Parse the toml in this repo
+      id: get_tag_output
+      shell: bash
+      run: |
+        set -ex
+        curl -sSLf "$(curl -sSLf https://api.github.com/repos/tomwright/dasel/releases/latest | grep browser_download_url | grep linux_amd64 | grep -v .gz | cut -d\" -f 4)" -L -o dasel && chmod +x dasel
+        mv ./dasel /usr/local/bin/dasel
+        cd aptos-indexer-processors
+        tag_output=$(dasel -r toml -f rust/Cargo.toml workspace.dependencies.aptos-protos.rev -w - )
+        echo "::set-output name=tag_output::$tag_output"
+
+    - name: Checkout aptos-core
+      uses: actions/checkout@v4
+      with:
+        repository: "aptos-labs/aptos-core"
+        path: "aptos-core"
+        ref: ${{ steps.get_tag_output.outputs.tag_output }}
+
+    - name: Check compatibility
+      shell: bash
+      run: |
+        set -ex
+        cd aptos-core/protos/proto
+        repo_url="https://github.com/aptos-labs/aptos-core.git#tag=${{ inputs.upstream_commit_version }},subdir=protos/proto"
+        if buf breaking --against "$repo_url" --verbose; then
+          echo "No breaking changes found"
+        else
+          echo "Breaking changes found"
+          echo "Did new oneof/enum fields get added?"
+          exit 1
+        fi
diff --git a/.github/workflows/build-images.yaml b/.github/workflows/build-images.yaml
index 53086669e..c9cbe6a67 100644
--- a/.github/workflows/build-images.yaml
+++ b/.github/workflows/build-images.yaml
@@ -29,7 +29,7 @@ jobs:
   Build:
     strategy:
       matrix:
-        example: [python, rust]
+        example: [rust]
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
diff --git a/.github/workflows/check-protobuf-compatibility.yaml b/.github/workflows/check-protobuf-compatibility.yaml
new file mode 100644
index 000000000..ba7955f62
--- /dev/null
+++ b/.github/workflows/check-protobuf-compatibility.yaml
@@ -0,0 +1,27 @@
+name: "Check Protobuf Compatibility"
+on:
+  workflow_dispatch:
+    inputs:
+      upstream_commit_version:
+        description: 'The commit version to check compatibility against'
+        required: false
+        default: 'main'
+  pull_request:
+
+# cancel redundant builds
+concurrency:
+  # for push and workflow_dispatch events we use `github.sha` in the concurrency group and don't really cancel each other out/limit concurrency
+  # for pull_request events newer jobs cancel earlier jobs to save on CI etc.
+  group: ${{ github.workflow }}-${{ github.event_name }}-${{ (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && github.sha || github.head_ref || github.ref }}
+  cancel-in-progress: true
+
+
+jobs:
+  CheckProtobufCompatibilityAgainstCurrentPR:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: ./.github/actions/protobuf-compatibility-check/
+        with:
+          # Either current PR or the latest commit on the branch
+          upstream_commit_version: ${{ github.event.inputs.upstream_commit_version || 'main' }}
\ No newline at end of file
diff --git a/.github/workflows/copy-processor-images-to-dockerhub-release.yaml b/.github/workflows/copy-processor-images-to-dockerhub-release.yaml
index c1996d72b..f115baba7 100644
--- a/.github/workflows/copy-processor-images-to-dockerhub-release.yaml
+++ b/.github/workflows/copy-processor-images-to-dockerhub-release.yaml
@@ -17,7 +17,7 @@ jobs:
   copy-images-to-docker-hub:
     strategy:
       matrix:
-        language: ["rust", "python"]
+        language: ["rust"]
     uses: ./.github/workflows/copy-processor-images-to-dockerhub.yaml
     with:
      processor_language: ${{ matrix.language }}
diff --git a/.github/workflows/copy-processor-images-to-dockerhub.yaml b/.github/workflows/copy-processor-images-to-dockerhub.yaml
index 7f9e9c756..f563f43d0 100644
--- a/.github/workflows/copy-processor-images-to-dockerhub.yaml
+++ b/.github/workflows/copy-processor-images-to-dockerhub.yaml
@@ -69,6 +69,6 @@ jobs:
         env:
           FORCE_COLOR: 3 # Force color output as per https://github.com/google/zx#using-github-actions
           GIT_SHA: ${{ inputs.GIT_SHA }}
-          GCP_DOCKER_ARTIFACT_PROCESSOR_REPO_US: ${{ secrets.GCP_DOCKER_ARTIFACT_REPO_US }}
+          GCP_DOCKER_ARTIFACT_REPO: ${{ vars.GCP_DOCKER_ARTIFACT_REPO }}
        run: pnpm release-processor-images --language=${{ inputs.processor_language }} --version-tag=${{ inputs.version_tag }} --wait-for-image-seconds=3600
        working-directory: scripts
diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml
index 63880eef0..c184c1c7c 100644
--- a/.github/workflows/lint.yaml
+++ b/.github/workflows/lint.yaml
@@ -31,6 +31,7 @@ jobs:
       - uses: actions/checkout@v3
       - name: Install deps and run linter
        run: |
+          sudo apt update && sudo apt install libdw-dev
          cargo install cargo-sort
          rustup update
          rustup toolchain install nightly
@@ -38,3 +39,11 @@
          rustup component add rustfmt --toolchain nightly
          scripts/rust_lint.sh --check
        working-directory: rust
+      - run: bash scripts/check_banned_deps.sh
+        working-directory: rust
+      - name: Ensure the --no-default-features build passes too
+        run: cargo build --no-default-features
+        working-directory: rust
+      - name: Ensure tests pass
+        run: cargo test
+        working-directory: rust
diff --git a/.github/workflows/nightly-check-protobuf-compatibility.yaml b/.github/workflows/nightly-check-protobuf-compatibility.yaml
new file mode 100644
index 000000000..b0dfc4b7e
--- /dev/null
+++ b/.github/workflows/nightly-check-protobuf-compatibility.yaml
@@ -0,0 +1,39 @@
+name: "Nightly Check Protobuf Compatibility"
+on:
+  workflow_dispatch:
+  schedule:
+    - cron: "0 9 * * *"
+
+# cancel redundant builds
+concurrency:
+  # for push and workflow_dispatch events we use `github.sha` in the concurrency group and don't really cancel each other out/limit concurrency
+  # for pull_request events newer jobs cancel earlier jobs to save on CI etc.
+  group: ${{ github.workflow }}-${{ github.event_name }}-${{ (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && github.sha || github.head_ref || github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  CheckProtobufCompatibilityAgainstTestnet:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Get current version
+        id: get_upstream_commit_version
+        run: |
+          echo "::set-output name=upstream_commit_version::$(curl -s https://api.testnet.aptoslabs.com/v1 | jq -r .git_hash)"
+
+      - uses: actions/checkout@v4
+      - uses: ./.github/actions/protobuf-compatibility-check/
+        with:
+          upstream_commit_version: ${{ steps.get_upstream_commit_version.outputs.upstream_commit_version }}
+
+  CheckProtobufCompatibilityAgainstMainnet:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Get current version
+        id: get_upstream_commit_version
+        run: |
+          echo "::set-output name=upstream_commit_version::$(curl -s https://api.mainnet.aptoslabs.com/v1 | jq -r .git_hash)"
+
+      - uses: actions/checkout@v4
+      - uses: ./.github/actions/protobuf-compatibility-check/
+        with:
+          upstream_commit_version: ${{ steps.get_upstream_commit_version.outputs.upstream_commit_version }}
\ No newline at end of file
diff --git a/README.md b/README.md
index 76ae8a34b..0d2ffbfbf 100644
--- a/README.md
+++ b/README.md
@@ -24,3 +24,6 @@ This guide will get you started with creating an Aptos indexer with custom parsi
 
 ## [Aptos Indexer GRPC Release Notes](https://github.com/aptos-labs/aptos-core/blob/main/ecosystem/indexer-grpc/release_notes.md)
 
+
+> [!WARNING]
+> The TypeScript implementation is known to get stuck when there is a lot of data to process. The issue is with the gRPC client, and we haven't had a chance to optimize it yet. Please proceed with caution.
\ No newline at end of file
diff --git a/hasura-api/metadata-json/unified.json b/hasura-api/metadata-json/unified.json
index ea4562d57..40741fbc9 100644
--- a/hasura-api/metadata-json/unified.json
+++ b/hasura-api/metadata-json/unified.json
@@ -1,5 +1,5 @@
 {
-  "resource_version": 385,
+  "resource_version": 333,
   "metadata": {
     "version": 3,
     "sources": [
@@ -9,148 +9,22 @@
       "tables": [
         {
           "table": {
-            "name": "current_nft_marketplace_auctions",
-            "schema": "nft_marketplace_v2"
+            "name": "move_resources",
+            "schema": "legacy_migration_v1"
           },
-          "object_relationships": [
-            {
-              "name": "current_token_data",
-              "using": {
-                "manual_configuration": {
-                  "column_mapping": {
-                    "token_data_id": "token_data_id"
-                  },
-                  "insertion_order": null,
-                  "remote_table": {
-                    "name": "current_token_datas_v2",
-                    "schema": "public"
-                  }
-                }
-              }
-            }
-          ],
-          "select_permissions": [
-            {
-              "role": "anonymous",
-              "permission": {
-                "columns": [
-                  "buy_it_now_price",
-                  "coin_type",
-                  "collection_id",
-                  "contract_address",
-                  "current_bid_price",
-                  "current_bidder",
-                  "entry_function_id_str",
-                  "expiration_time",
-                  "fee_schedule_id",
-                  "is_deleted",
-                  "last_transaction_timestamp",
-                  "last_transaction_version",
-                  "listing_id",
-                  "marketplace",
-                  "seller",
-                  "starting_bid_price",
-                  "token_amount",
-                  "token_data_id",
-                  "token_standard"
-                ],
-                "filter": {},
-                "limit": 100
-              }
-            }
-          ]
-        },
-        {
-          "table": {
-            "name": "current_nft_marketplace_collection_offers",
-            "schema": "nft_marketplace_v2"
+          "configuration": {
+            "column_config": {},
+            "custom_column_names": {},
+            "custom_name": "move_resources",
+            "custom_root_fields": {}
           },
-          "object_relationships": [
-            {
-              "name": "current_collection_v2",
-              "using": {
-                "manual_configuration": {
-                  "column_mapping": {
-                    "collection_id": "collection_id"
-                  },
-                  "insertion_order": null,
"remote_table": { - "name": "current_collections_v2", - "schema": "public" - } - } - } - } - ], "select_permissions": [ { "role": "anonymous", "permission": { "columns": [ - "buyer", - "coin_type", - "collection_id", - "collection_offer_id", - "contract_address", - "entry_function_id_str", - "expiration_time", - "fee_schedule_id", - "is_deleted", - "item_price", - "last_transaction_timestamp", - "last_transaction_version", - "marketplace", - "remaining_token_amount", - "token_standard" - ], - "filter": {}, - "limit": 100 - } - } - ] - }, - { - "table": { - "name": "current_nft_marketplace_listings", - "schema": "nft_marketplace_v2" - }, - "object_relationships": [ - { - "name": "current_token_data", - "using": { - "manual_configuration": { - "column_mapping": { - "token_data_id": "token_data_id" - }, - "insertion_order": null, - "remote_table": { - "name": "current_token_datas_v2", - "schema": "public" - } - } - } - } - ], - "select_permissions": [ - { - "role": "anonymous", - "permission": { - "columns": [ - "coin_type", - "collection_id", - "contract_address", - "entry_function_id_str", - "fee_schedule_id", - "is_deleted", - "last_transaction_timestamp", - "last_transaction_version", - "listing_id", - "marketplace", - "price", - "seller", - "token_amount", - "token_data_id", - "token_standard" + "address", + "transaction_version" ], "filter": {}, "limit": 100, @@ -161,47 +35,23 @@ }, { "table": { - "name": "current_nft_marketplace_token_offers", - "schema": "nft_marketplace_v2" + "name": "parsed_asset_uris", + "schema": "nft_metadata_crawler" }, - "object_relationships": [ - { - "name": "current_token_data", - "using": { - "manual_configuration": { - "column_mapping": { - "token_data_id": "token_data_id" - }, - "insertion_order": null, - "remote_table": { - "name": "current_token_datas_v2", - "schema": "public" - } - } - } - } - ], "select_permissions": [ { "role": "anonymous", "permission": { "columns": [ - "buyer", - "coin_type", - "collection_id", - "contract_address", - "entry_function_id_str", - "expiration_time", - "fee_schedule_id", - "is_deleted", - "last_transaction_timestamp", - "last_transaction_version", - "marketplace", - "offer_id", - "price", - "token_amount", - "token_data_id", - "token_standard" + "animation_optimizer_retry_count", + "asset_uri", + "cdn_animation_uri", + "cdn_image_uri", + "cdn_json_uri", + "image_optimizer_retry_count", + "json_parser_retry_count", + "raw_animation_uri", + "raw_image_uri" ], "filter": {}, "limit": 100 @@ -211,90 +61,26 @@ }, { "table": { - "name": "nft_marketplace_activities", - "schema": "nft_marketplace_v2" + "name": "account_transactions", + "schema": "public" }, "object_relationships": [ { - "name": "current_token_data", + "name": "user_transaction", "using": { "manual_configuration": { "column_mapping": { - "token_data_id": "token_data_id" + "transaction_version": "version" }, "insertion_order": null, "remote_table": { - "name": "current_token_datas_v2", + "name": "user_transactions", "schema": "public" } } } } ], - "select_permissions": [ - { - "role": "anonymous", - "permission": { - "columns": [ - "buyer", - "coin_type", - "collection_id", - "collection_name", - "contract_address", - "creator_address", - "entry_function_id_str", - "event_index", - "event_type", - "fee_schedule_id", - "marketplace", - "offer_or_listing_id", - "price", - "property_version", - "seller", - "token_amount", - "token_data_id", - "token_name", - "token_standard", - "transaction_timestamp", - "transaction_version" - ], - "filter": {}, - "limit": 100 - } 
- } - ] - }, - { - "table": { - "name": "parsed_asset_uris", - "schema": "nft_metadata_crawler" - }, - "select_permissions": [ - { - "role": "anonymous", - "permission": { - "columns": [ - "animation_optimizer_retry_count", - "asset_uri", - "cdn_animation_uri", - "cdn_image_uri", - "cdn_json_uri", - "image_optimizer_retry_count", - "json_parser_retry_count", - "raw_animation_uri", - "raw_image_uri" - ], - "filter": {}, - "limit": 100 - } - } - ] - }, - { - "table": { - "name": "account_transactions", - "schema": "public" - }, "array_relationships": [ { "name": "coin_activities", @@ -306,7 +92,7 @@ "insertion_order": null, "remote_table": { "name": "coin_activities", - "schema": "public" + "schema": "legacy_migration_v1" } } } @@ -351,7 +137,7 @@ "insertion_order": null, "remote_table": { "name": "token_activities", - "schema": "public" + "schema": "legacy_migration_v1" } } } @@ -440,7 +226,7 @@ "insertion_order": null, "remote_table": { "name": "coin_activities", - "schema": "public" + "schema": "legacy_migration_v1" } } } @@ -470,7 +256,7 @@ "insertion_order": null, "remote_table": { "name": "token_activities", - "schema": "public" + "schema": "legacy_migration_v1" } } } @@ -509,7 +295,13 @@ { "table": { "name": "address_version_from_move_resources", - "schema": "public" + "schema": "legacy_migration_v1" + }, + "configuration": { + "column_config": {}, + "custom_column_names": {}, + "custom_name": "address_version_from_move_resources", + "custom_root_fields": {} }, "array_relationships": [ { @@ -522,7 +314,7 @@ "insertion_order": null, "remote_table": { "name": "coin_activities", - "schema": "public" + "schema": "legacy_migration_v1" } } } @@ -552,7 +344,7 @@ "insertion_order": null, "remote_table": { "name": "token_activities", - "schema": "public" + "schema": "legacy_migration_v1" } } } @@ -617,7 +409,13 @@ { "table": { "name": "coin_activities", - "schema": "public" + "schema": "legacy_migration_v1" + }, + "configuration": { + "column_config": {}, + "custom_column_names": {}, + "custom_name": "coin_activities", + "custom_root_fields": {} }, "object_relationships": [ { @@ -630,7 +428,7 @@ "insertion_order": null, "remote_table": { "name": "coin_infos", - "schema": "public" + "schema": "legacy_migration_v1" } } } @@ -684,7 +482,13 @@ { "table": { "name": "coin_balances", - "schema": "public" + "schema": "legacy_migration_v1" + }, + "configuration": { + "column_config": {}, + "custom_column_names": {}, + "custom_name": "coin_balances", + "custom_root_fields": {} }, "select_permissions": [ { @@ -707,7 +511,13 @@ { "table": { "name": "coin_infos", - "schema": "public" + "schema": "legacy_migration_v1" + }, + "configuration": { + "column_config": {}, + "custom_column_names": {}, + "custom_name": "coin_infos", + "custom_root_fields": {} }, "select_permissions": [ { @@ -757,7 +567,13 @@ { "table": { "name": "collection_datas", - "schema": "public" + "schema": "legacy_migration_v1" + }, + "configuration": { + "column_config": {}, + "custom_column_names": {}, + "custom_name": "collection_datas", + "custom_root_fields": {} }, "select_permissions": [ { @@ -787,7 +603,13 @@ { "table": { "name": "current_ans_lookup", - "schema": "public" + "schema": "legacy_migration_v1" + }, + "configuration": { + "column_config": {}, + "custom_column_names": {}, + "custom_name": "current_ans_lookup", + "custom_root_fields": {} }, "array_relationships": [ { @@ -800,7 +622,7 @@ "insertion_order": null, "remote_table": { "name": "current_token_ownerships", - "schema": "public" + "schema": "legacy_migration_v1" } } } 
@@ -879,17 +701,19 @@ "role": "anonymous", "permission": { "columns": [ - "last_transaction_version", + "domain", + "domain_expiration_timestamp", + "domain_with_suffix", + "expiration_timestamp", "is_active", "is_primary", - "domain", + "last_transaction_version", "owner_address", "registered_address", "subdomain", + "subdomain_expiration_policy", "token_name", - "token_standard", - "domain_with_suffix", - "expiration_timestamp" + "token_standard" ], "filter": {}, "limit": 100, @@ -902,7 +726,13 @@ { "table": { "name": "current_coin_balances", - "schema": "public" + "schema": "legacy_migration_v1" + }, + "configuration": { + "column_config": {}, + "custom_column_names": {}, + "custom_name": "current_coin_balances", + "custom_root_fields": {} }, "object_relationships": [ { @@ -915,7 +745,7 @@ "insertion_order": null, "remote_table": { "name": "coin_infos", - "schema": "public" + "schema": "legacy_migration_v1" } } } @@ -1298,7 +1128,13 @@ { "table": { "name": "current_token_datas", - "schema": "public" + "schema": "legacy_migration_v1" + }, + "configuration": { + "column_config": {}, + "custom_column_names": {}, + "custom_name": "current_token_datas", + "custom_root_fields": {} }, "object_relationships": [ { @@ -1400,9 +1236,11 @@ } } } - }, + } + ], + "array_relationships": [ { - "name": "current_token_ownership", + "name": "current_token_ownerships", "using": { "manual_configuration": { "column_mapping": { @@ -1423,7 +1261,9 @@ "permission": { "columns": [ "collection_id", + "decimals", "description", + "is_deleted_v2", "is_fungible_v2", "largest_property_version_v1", "last_transaction_timestamp", @@ -1445,7 +1285,13 @@ { "table": { "name": "current_token_ownerships", - "schema": "public" + "schema": "legacy_migration_v1" + }, + "configuration": { + "column_config": {}, + "custom_column_names": {}, + "custom_name": "current_token_ownerships", + "custom_root_fields": {} }, "object_relationships": [ { @@ -1488,7 +1334,7 @@ "insertion_order": null, "remote_table": { "name": "current_token_datas", - "schema": "public" + "schema": "legacy_migration_v1" } } } @@ -1568,6 +1414,7 @@ "is_soulbound_v2", "last_transaction_timestamp", "last_transaction_version", + "non_transferrable_by_owner", "owner_address", "property_version_v1", "storage_id", @@ -1629,7 +1476,7 @@ "insertion_order": null, "remote_table": { "name": "current_token_datas", - "schema": "public" + "schema": "legacy_migration_v1" } } } @@ -1661,7 +1508,7 @@ "insertion_order": null, "remote_table": { "name": "tokens", - "schema": "public" + "schema": "legacy_migration_v1" } } } @@ -1693,6 +1540,50 @@ } ] }, + { + "table": { + "name": "current_unified_fungible_asset_balances_to_be_renamed", + "schema": "public" + }, + "object_relationships": [ + { + "name": "metadata", + "using": { + "manual_configuration": { + "column_mapping": { + "asset_type": "asset_type" + }, + "insertion_order": null, + "remote_table": { + "name": "fungible_asset_metadata", + "schema": "public" + } + } + } + } + ], + "select_permissions": [ + { + "role": "anonymous", + "permission": { + "columns": [ + "amount", + "asset_type", + "is_frozen", + "is_primary", + "last_transaction_timestamp", + "last_transaction_version", + "owner_address", + "storage_id" + ], + "filter": {}, + "limit": 100, + "allow_aggregations": true + }, + "comment": "" + } + ] + }, { "table": { "name": "delegated_staking_activities", @@ -1939,10 +1830,12 @@ "icon_uri", "last_transaction_timestamp", "last_transaction_version", + "maximum_v2", "name", "project_uri", 
"supply_aggregator_table_handle_v1", "supply_aggregator_table_key_v1", + "supply_v2", "symbol", "token_standard" ], @@ -1988,26 +1881,6 @@ } ] }, - { - "table": { - "name": "move_resources", - "schema": "public" - }, - "select_permissions": [ - { - "role": "anonymous", - "permission": { - "columns": [ - "address", - "transaction_version" - ], - "filter": {}, - "limit": 100, - "allow_aggregations": true - } - } - ] - }, { "table": { "name": "num_active_delegator_per_pool", @@ -2073,6 +1946,35 @@ } ] }, + { + "table": { + "name": "signatures", + "schema": "public" + }, + "select_permissions": [ + { + "role": "anonymous", + "permission": { + "columns": [ + "is_sender_primary", + "multi_agent_index", + "multi_sig_index", + "public_key", + "public_key_indices", + "signature", + "signer", + "threshold", + "transaction_block_height", + "transaction_version", + "type" + ], + "filter": {}, + "limit": 100 + }, + "comment": "" + } + ] + }, { "table": { "name": "table_items", @@ -2119,7 +2021,13 @@ { "table": { "name": "token_activities", - "schema": "public" + "schema": "legacy_migration_v1" + }, + "configuration": { + "column_config": {}, + "custom_column_names": {}, + "custom_name": "token_activities", + "custom_root_fields": {} }, "object_relationships": [ { @@ -2132,7 +2040,7 @@ "insertion_order": null, "remote_table": { "name": "current_token_datas", - "schema": "public" + "schema": "legacy_migration_v1" } } } @@ -2286,7 +2194,13 @@ { "table": { "name": "token_datas", - "schema": "public" + "schema": "legacy_migration_v1" + }, + "configuration": { + "column_config": {}, + "custom_column_names": {}, + "custom_name": "token_datas", + "custom_root_fields": {} }, "select_permissions": [ { @@ -2324,7 +2238,13 @@ { "table": { "name": "token_ownerships", - "schema": "public" + "schema": "legacy_migration_v1" + }, + "configuration": { + "column_config": {}, + "custom_column_names": {}, + "custom_name": "token_ownerships", + "custom_root_fields": {} }, "select_permissions": [ { @@ -2353,7 +2273,13 @@ { "table": { "name": "tokens", - "schema": "public" + "schema": "legacy_migration_v1" + }, + "configuration": { + "column_config": {}, + "custom_column_names": {}, + "custom_name": "tokens", + "custom_root_fields": {} }, "select_permissions": [ { @@ -2450,7 +2376,7 @@ "GET" ], "name": "Latest Processor Status", - "url": "get_lastest_processor_status" + "url": "get_latest_processor_status" } ], "api_limits": { @@ -2469,4 +2395,4 @@ "analyze_response_body": true } } -} +} \ No newline at end of file diff --git a/python/pyproject.toml b/python/pyproject.toml index e64e4a2ad..612e69e83 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -25,7 +25,7 @@ google-cloud-bigquery-storage = "^2.19.1" google-cloud-bigquery = "^3.10.0" prometheus-client = "^0.17.1" twisted = "^22.10.0" -grpclib = "^0.4.5" +grpclib = "^0.4.6" alembic = "^1.11.1" aptos-protos = { git = "https://github.com/aptos-labs/aptos-core.git", rev = "aee306923da1fae533a91b4015e0a58443742d45", subdirectory = "protos/python" } python-json-logger = "^2.0.7" diff --git a/python/utils/worker.py b/python/utils/worker.py index 426ee1fe8..3d11b3cae 100644 --- a/python/utils/worker.py +++ b/python/utils/worker.py @@ -201,8 +201,24 @@ def producer( extra={ "processor_name": processor_name, "stream_address": indexer_grpc_data_service_address, + "error": str(e), + "next_version_to_fetch": next_version_to_fetch, + "ending_version": ending_version, }, ) + # Datastream error can happen when we fail to deserialize deeply nested types. 
+ # Skip the batch, log the error, and continue processing. + is_success = True + next_version_to_fetch += 1 + response_stream = get_grpc_stream( + indexer_grpc_data_service_address, + indexer_grpc_data_stream_api_key, + indexer_grpc_http2_ping_interval, + indexer_grpc_http2_ping_timeout, + next_version_to_fetch, + ending_version, + processor_name, + ) # Check if we're at the end of the stream reached_ending_version = ( @@ -369,7 +385,8 @@ async def consumer_impl( "service_type": PROCESSOR_SERVICE_TYPE, }, ) - os._exit(1) + # Gaps are possible because we skipped versions + # os._exit(1) last_fetched_version = transactions[-1].version transaction_batches.append(transactions) @@ -415,7 +432,8 @@ async def consumer_impl( "service_type": PROCESSOR_SERVICE_TYPE, }, ) - os._exit(1) + # Gaps are possible because we skip versions + # os._exit(1) prev_start = result.start_version prev_end = result.end_version diff --git a/rust/Cargo.lock b/rust/Cargo.lock index d7063a5ae..e2a1d4346 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -24,6 +24,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" dependencies = [ "cfg-if", + "const-random", "getrandom", "once_cell", "serde", @@ -40,6 +41,27 @@ dependencies = [ "memchr", ] +[[package]] +name = "allocative" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "082af274fd02beef17b7f0725a49ecafe6c075ef56cac9d6363eb3916a9817ae" +dependencies = [ + "allocative_derive", + "ctor", +] + +[[package]] +name = "allocative_derive" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe233a377643e0fc1a56421d7c90acdec45c291b30345eb9f08e8d0ddce5a4ab" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + [[package]] name = "android-tzdata" version = "0.1.1" @@ -91,7 +113,7 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" dependencies = [ - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -101,15 +123,29 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "180abfa45703aebe0093f79badacc01b8fd4ea2e35118747e5811127f926e188" dependencies = [ "anstyle", - "windows-sys", + "windows-sys 0.48.0", ] +[[package]] +name = "antidote" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34fde25430d87a9388dadbe6e34d7f72a462c8b43ac8d309b42b0a8505d7e2a5" + [[package]] name = "anyhow" version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" +[[package]] +name = "aptos-in-memory-cache" +version = "0.1.0" +source = "git+https://github.com/aptos-labs/aptos-core.git?rev=07952ba261dd8301581e449e26ce17bbbc1adc46#07952ba261dd8301581e449e26ce17bbbc1adc46" +dependencies = [ + "parking_lot", +] + [[package]] name = "aptos-moving-average" version = "0.1.0" @@ -117,19 +153,57 @@ dependencies = [ "chrono", ] +[[package]] +name = "aptos-profiler" +version = "0.1.0" +source = "git+https://github.com/aptos-labs/aptos-core.git?rev=4541add3fd29826ec57f22658ca286d2d6134b93#4541add3fd29826ec57f22658ca286d2d6134b93" +dependencies = [ + "anyhow", + "backtrace", + "jemalloc-sys", + "jemallocator", + "pprof", + "regex", +] + [[package]] name = 
"aptos-protos" version = "1.3.0" -source = "git+https://github.com/aptos-labs/aptos-core.git?tag=aptos-node-v1.10.0#b24f6cd08f84b179e49090c7e51a501c535096ca" +source = "git+https://github.com/aptos-labs/aptos-core.git?rev=d76b5bb423b78b2b9affc72d3853f0d973d3f11f#d76b5bb423b78b2b9affc72d3853f0d973d3f11f" dependencies = [ "futures-core", "pbjson", "prost 0.12.3", - "prost-types 0.12.3", "serde", - "tonic 0.10.2", + "tonic 0.11.0", +] + +[[package]] +name = "aptos-system-utils" +version = "0.1.0" +source = "git+https://github.com/aptos-labs/aptos-core.git?rev=4541add3fd29826ec57f22658ca286d2d6134b93#4541add3fd29826ec57f22658ca286d2d6134b93" +dependencies = [ + "anyhow", + "aptos-profiler", + "async-mutex", + "http", + "hyper", + "lazy_static", + "mime", + "pprof", + "regex", + "rstack-self", + "tokio", + "tracing", + "url", ] +[[package]] +name = "arrayvec" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" + [[package]] name = "async-channel" version = "1.9.0" @@ -142,16 +216,12 @@ dependencies = [ ] [[package]] -name = "async-compression" -version = "0.4.1" +name = "async-mutex" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b74f44609f0f91493e3082d3734d98497e094777144380ea4db9f9905dd5b6" +checksum = "479db852db25d9dbf6204e6cb6253698f175c15726470f78af0d918e99d6156e" dependencies = [ - "flate2", - "futures-core", - "memchr", - "pin-project-lite", - "tokio", + "event-listener", ] [[package]] @@ -187,6 +257,34 @@ dependencies = [ "syn 2.0.48", ] +[[package]] +name = "attribute-derive" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c124f12ade4e670107b132722d0ad1a5c9790bcbc1b265336369ea05626b4498" +dependencies = [ + "attribute-derive-macro", + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "attribute-derive-macro" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b217a07446e0fb086f83401a98297e2d81492122f5874db5391bd270a185f88" +dependencies = [ + "collection_literals", + "interpolator", + "proc-macro-error", + "proc-macro-utils", + "proc-macro2", + "quote", + "quote-use", + "syn 2.0.48", +] + [[package]] name = "autocfg" version = "1.1.0" @@ -265,6 +363,18 @@ version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" +[[package]] +name = "base64" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" + +[[package]] +name = "base64ct" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" + [[package]] name = "bb8" version = "0.8.1" @@ -301,6 +411,15 @@ dependencies = [ "serde", ] +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + [[package]] name = "bitflags" version = "1.3.2" @@ -309,9 +428,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.3.3" +version = 
"2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" [[package]] name = "block-buffer" @@ -344,6 +463,12 @@ version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" +[[package]] +name = "bytemuck" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d6d68c57235a3a081186990eca2867354726650f42f7516ca50c28d6281fd15" + [[package]] name = "byteorder" version = "1.4.3" @@ -356,12 +481,26 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +[[package]] +name = "canonical_json" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f89083fd014d71c47a718d7f4ac050864dac8587668dbe90baf9e261064c5710" +dependencies = [ + "hex", + "regex", + "serde", + "serde_json", + "thiserror", +] + [[package]] name = "cc" version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" dependencies = [ + "jobserver", "libc", ] @@ -373,9 +512,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.31" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" dependencies = [ "android-tzdata", "iana-time-zone", @@ -383,7 +522,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets", + "windows-targets 0.52.5", ] [[package]] @@ -427,6 +566,12 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b" +[[package]] +name = "collection_literals" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "186dce98367766de751c42c4f03970fc60fc012296e706ccbb9d5df9b6c1e271" + [[package]] name = "colorchoice" version = "1.0.0" @@ -442,6 +587,32 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "const-oid" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" + +[[package]] +name = "const-random" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359" +dependencies = [ + "const-random-macro", +] + +[[package]] +name = "const-random-macro" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" +dependencies = [ + "getrandom", + "once_cell", + "tiny-keccak", +] + [[package]] name = "cookie" version = "0.16.2" @@ -486,6 +657,15 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" +[[package]] +name = 
"cpp_demangle" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e8227005286ec39567949b33df9896bcadfa6051bccca2488129f108ca23119" +dependencies = [ + "cfg-if", +] + [[package]] name = "cpufeatures" version = "0.2.9" @@ -510,6 +690,22 @@ version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + +[[package]] +name = "crypto-bigint" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21" +dependencies = [ + "generic-array", + "subtle", +] + [[package]] name = "crypto-common" version = "0.1.6" @@ -520,6 +716,70 @@ dependencies = [ "typenum", ] +[[package]] +name = "ctor" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "dashmap" +version = "5.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" +dependencies = [ + "cfg-if", + "hashbrown 0.14.0", + "lock_api", + "once_cell", + "parking_lot_core", +] + +[[package]] +name = "debugid" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef552e6f588e446098f6ba40d89ac146c8c7b64aade83c051ee00bb5d2bc18d" +dependencies = [ + "uuid", +] + +[[package]] +name = "der" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" +dependencies = [ + "const-oid", + "crypto-bigint", + "pem-rfc7468", +] + +[[package]] +name = "deranged" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +dependencies = [ + "powerfmt", + "serde", +] + +[[package]] +name = "derive-where" +version = "1.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62d671cc41a825ebabc75757b62d3d168c577f9149b2d49ece1dad1f72119d25" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + [[package]] name = "diesel" version = "2.1.4" @@ -527,7 +787,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62c6fcf842f17f8c78ecf7c81d75c5ce84436b41ee07e03f490fbb5f5a8731d8" dependencies = [ "bigdecimal", - "bitflags 2.3.3", + "bitflags 2.5.0", "byteorder", "chrono", "diesel_derives", @@ -542,8 +802,7 @@ dependencies = [ [[package]] name = "diesel-async" version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acada1517534c92d3f382217b485db8a8638f111b0e3f2a2a8e26165050f77be" +source = "git+https://github.com/weiznich/diesel_async.git?rev=d02798c67065d763154d7272dd0c09b39757d0f2#d02798c67065d763154d7272dd0c09b39757d0f2" dependencies = [ "async-trait", "bb8", @@ -554,18 +813,6 @@ dependencies = [ "tokio-postgres", ] -[[package]] -name = "diesel_async_migrations" -version = "0.11.0" -source = 
"git+https://github.com/niroco/diesel_async_migrations?rev=11f331b73c5cfcc894380074f748d8fda710ac12#11f331b73c5cfcc894380074f748d8fda710ac12" -dependencies = [ - "diesel", - "diesel-async", - "macros", - "scoped-futures", - "tracing", -] - [[package]] name = "diesel_derives" version = "2.1.2" @@ -618,6 +865,27 @@ dependencies = [ "subtle", ] +[[package]] +name = "dw" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef0ed82b765c2ab79fb48e4bf2c95bd583202f4078a702bc714cc6e6f3ca80c3" +dependencies = [ + "dw-sys", + "foreign-types 0.5.0", + "libc", +] + +[[package]] +name = "dw-sys" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14eb35c87ff6626cd1021bb32bc7d9a5372ea72547e1eaf0343a841d9d55a973" +dependencies = [ + "libc", + "pkg-config", +] + [[package]] name = "either" version = "1.8.1" @@ -659,7 +927,7 @@ checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" dependencies = [ "errno-dragonfly", "libc", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -712,6 +980,18 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "findshlibs" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40b9e59cd0f7e0806cca4be089683ecb6434e602038df21fe6bf6711b2f07f64" +dependencies = [ + "cc", + "lazy_static", + "libc", + "winapi", +] + [[package]] name = "finl_unicode" version = "1.2.0" @@ -740,7 +1020,28 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" dependencies = [ - "foreign-types-shared", + "foreign-types-shared 0.1.1", +] + +[[package]] +name = "foreign-types" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d737d9aa519fb7b749cbc3b962edcf310a8dd1f4b67c91c4f83975dbdd17d965" +dependencies = [ + "foreign-types-macros", + "foreign-types-shared 0.3.1", +] + +[[package]] +name = "foreign-types-macros" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", ] [[package]] @@ -749,6 +1050,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" +[[package]] +name = "foreign-types-shared" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa9a19cbb55df58761df49b23516a86d432839add4af60fc256da840f66ed35b" + [[package]] name = "form_urlencoded" version = "1.2.0" @@ -848,56 +1155,33 @@ dependencies = [ ] [[package]] -name = "gcemeta" -version = "0.2.3" +name = "generic-array" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47d460327b24cc34c86d53d60a90e9e6044817f7906ebd9baa5c3d0ee13e1ecf" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ - "bytes", - "hyper", - "serde", - "serde_json", - "thiserror", - "tokio", - "tracing", + "typenum", + "version_check", ] [[package]] -name = "gcloud-sdk" -version = "0.20.4" +name = "get-size" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4b46b7a2dbf38a2c5558ebc6b9053ca468155e95db4da6f5f376e5608dd26ee7" +checksum = "47b61e2dab7eedce93a83ab3468b919873ff16bac5a3e704011ff836d22b2120" dependencies = [ - "async-trait", - "chrono", - "futures", - "gcemeta", - "hyper", - "jsonwebtoken", - "once_cell", - "prost 0.11.9", - "prost-types 0.11.9", - "reqwest", - "secret-vault-value", - "serde", - "serde_json", - "tokio", - "tonic 0.9.2", - "tower", - "tower-layer", - "tower-util", - "tracing", - "url", + "get-size-derive", ] [[package]] -name = "generic-array" -version = "0.14.7" +name = "get-size-derive" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +checksum = "13a1bcfb855c1f340d5913ab542e36f25a1c56f57de79022928297632435dec2" dependencies = [ - "typenum", - "version_check", + "attribute-derive", + "quote", + "syn 2.0.48", ] [[package]] @@ -962,7 +1246,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a3b24a3f57be08afc02344e693afb55e48172c9c2ab86ff3fdb8efff550e4b9" dependencies = [ "prost 0.11.9", - "prost-types 0.11.9", + "prost-types", "tonic 0.9.2", ] @@ -989,13 +1273,43 @@ dependencies = [ "google-cloud-gax", "google-cloud-googleapis", "google-cloud-token", - "prost-types 0.11.9", + "prost-types", "thiserror", "tokio", "tokio-util", "tracing", ] +[[package]] +name = "google-cloud-storage" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22c57ca1d971d7c6f852c02eda4e87e88b1247b6ed8be9fa5b2768c68b0f2ca5" +dependencies = [ + "async-stream", + "base64 0.21.2", + "bytes", + "futures-util", + "google-cloud-auth", + "google-cloud-metadata", + "google-cloud-token", + "hex", + "once_cell", + "percent-encoding", + "regex", + "reqwest", + "ring 0.16.20", + "rsa", + "serde", + "serde_json", + "sha2 0.10.8", + "thiserror", + "time", + "tokio", + "tracing", + "url", +] + [[package]] name = "google-cloud-token" version = "0.1.1" @@ -1007,9 +1321,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.20" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97ec8491ebaf99c8eaa73058b045fe58073cd6be7f596ac993ced0b0a0c01049" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ "bytes", "fnv", @@ -1017,13 +1331,24 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 1.9.3", + "indexmap 2.0.0", "slab", "tokio", "tokio-util", "tracing", ] +[[package]] +name = "half" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +dependencies = [ + "cfg-if", + "crunchy", + "num-traits", +] + [[package]] name = "hashbrown" version = "0.12.3" @@ -1094,7 +1419,7 @@ version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" dependencies = [ - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -1155,20 +1480,6 @@ dependencies = [ "want", ] -[[package]] -name = "hyper-rustls" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97" -dependencies = [ - "futures-util", - "http", - "hyper", - "rustls 0.21.7", - "tokio", - "tokio-rustls 0.24.1", -] - [[package]] name = "hyper-timeout" 
version = "0.4.1" @@ -1287,6 +1598,24 @@ dependencies = [ "hashbrown 0.14.0", ] +[[package]] +name = "inferno" +version = "0.11.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "321f0f839cd44a4686e9504b0a62b4d69a50b62072144c71c68f5873c167b8d9" +dependencies = [ + "ahash", + "indexmap 2.0.0", + "is-terminal", + "itoa", + "log", + "num-format", + "once_cell", + "quick-xml", + "rgb", + "str_stack", +] + [[package]] name = "instant" version = "0.1.12" @@ -1296,6 +1625,18 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "integer-encoding" +version = "3.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" + +[[package]] +name = "interpolator" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71dd52191aae121e8611f1e8dc3e324dd0dd1dee1e6dd91d10ee07a3cfb4d9d8" + [[package]] name = "io-lifetimes" version = "1.0.11" @@ -1304,7 +1645,7 @@ checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ "hermit-abi", "libc", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -1321,7 +1662,7 @@ checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi", "rustix 0.38.4", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -1348,6 +1689,35 @@ version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62b02a5381cc465bd3041d84623d0fa3b66738b52b8e2fc3bab8ad63ab032f4a" +[[package]] +name = "jemalloc-sys" +version = "0.5.4+5.3.0-patched" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac6c1946e1cea1788cbfde01c993b52a10e2da07f4bac608228d1bed20bfebf2" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "jemallocator" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0de374a9f8e63150e6f5e8a60cc14c668226d7a347d8aee1a45766e3c4dd3bc" +dependencies = [ + "jemalloc-sys", + "libc", +] + +[[package]] +name = "jobserver" +version = "0.1.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "685a7d121ee3f65ae4fddd72b25a04bb36b6af81bc0828f7d5434c0fe60fa3a2" +dependencies = [ + "libc", +] + [[package]] name = "js-sys" version = "0.3.64" @@ -1365,7 +1735,7 @@ checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ "base64 0.21.2", "pem", - "ring", + "ring 0.16.20", "serde", "serde_json", "simple_asn1", @@ -1395,6 +1765,9 @@ name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +dependencies = [ + "spin 0.5.2", +] [[package]] name = "libc" @@ -1443,12 +1816,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" [[package]] -name = "macros" -version = "0.1.0" -source = "git+https://github.com/niroco/diesel_async_migrations?rev=11f331b73c5cfcc894380074f748d8fda710ac12#11f331b73c5cfcc894380074f748d8fda710ac12" +name = "lz4_flex" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75761162ae2b0e580d7e7c390558127e5f01b4194debd6221fd8c207fc80e3f5" dependencies = [ - "proc-macro2", - "quote", + "twox-hash", ] [[package]] @@ 
-1484,9 +1857,18 @@ dependencies = [ [[package]] name = "memchr" -version = "2.5.0" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" + +[[package]] +name = "memmap2" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83faa42c0a078c393f6b29d5db232d8be22776a891f8f56e5284faee4a20b327" +dependencies = [ + "libc", +] [[package]] name = "migrations_internals" @@ -1542,7 +1924,7 @@ checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" dependencies = [ "libc", "wasi", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -1581,6 +1963,17 @@ dependencies = [ "tempfile", ] +[[package]] +name = "nix" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" +dependencies = [ + "bitflags 1.3.2", + "cfg-if", + "libc", +] + [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -1591,34 +1984,111 @@ dependencies = [ "winapi", ] +[[package]] +name = "num" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" +dependencies = [ + "num-bigint", + "num-complex", + "num-integer", + "num-iter", + "num-rational", + "num-traits", +] + [[package]] name = "num-bigint" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" +checksum = "c165a9ab64cf766f73521c0dd2cfdff64f488b8f0b3e621face3462d3db536d7" dependencies = [ - "autocfg", "num-integer", "num-traits", ] +[[package]] +name = "num-bigint-dig" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151" +dependencies = [ + "byteorder", + "lazy_static", + "libm", + "num-integer", + "num-iter", + "num-traits", + "rand", + "smallvec", + "zeroize", +] + +[[package]] +name = "num-complex" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + +[[package]] +name = "num-format" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a652d9771a63711fd3c3deb670acfbe5c30a4072e664d7a3bf5a9e1056ac72c3" +dependencies = [ + "arrayvec", + "itoa", +] + [[package]] name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-iter" version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" dependencies = [ "autocfg", + "num-integer", + 
"num-traits", +] + +[[package]] +name = "num-rational" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" +dependencies = [ + "num-bigint", + "num-integer", "num-traits", ] [[package]] name = "num-traits" -version = "0.2.17" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", + "libm", ] [[package]] @@ -1658,9 +2128,9 @@ version = "0.10.62" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8cde4d2d9200ad5909f8dac647e29482e07c3a35de8a13fce7c9c7747ad9f671" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.5.0", "cfg-if", - "foreign-types", + "foreign-types 0.3.2", "libc", "once_cell", "openssl-macros", @@ -1696,6 +2166,15 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "ordered-float" +version = "2.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c" +dependencies = [ + "num-traits", +] + [[package]] name = "overload" version = "0.1.1" @@ -1722,9 +2201,49 @@ dependencies = [ "libc", "redox_syscall", "smallvec", - "windows-targets", + "windows-targets 0.48.1", ] +[[package]] +name = "parquet" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29c3b5322cc1bbf67f11c079c42be41a55949099b78732f7dba9e15edde40eab" +dependencies = [ + "ahash", + "bytes", + "chrono", + "futures", + "half", + "hashbrown 0.14.0", + "lz4_flex", + "num", + "num-bigint", + "paste", + "seq-macro", + "thrift", + "tokio", + "twox-hash", +] + +[[package]] +name = "parquet_derive" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05a70674ad0e9e49f583a03e477c23cc0116cc49a001c52178f00fb25eb0a882" +dependencies = [ + "parquet", + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + [[package]] name = "pbjson" version = "0.5.1" @@ -1744,6 +2263,15 @@ dependencies = [ "base64 0.13.1", ] +[[package]] +name = "pem-rfc7468" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01de5d978f34aa4b2296576379fcc416034702fd94117c56ffd8a1a767cefb30" +dependencies = [ + "base64ct", +] + [[package]] name = "percent-encoding" version = "2.3.0" @@ -1768,33 +2296,13 @@ dependencies = [ "siphasher", ] -[[package]] -name = "pin-project" -version = "0.4.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ef0f924a5ee7ea9cbcea77529dba45f8a9ba9f622419fe3386ca581a3ae9d5a" -dependencies = [ - "pin-project-internal 0.4.30", -] - [[package]] name = "pin-project" version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "030ad2bc4db10a8944cb0d837f158bdfec4d4a4873ab701a95046770d11f8842" dependencies = [ - "pin-project-internal 1.1.2", -] - -[[package]] -name = "pin-project-internal" -version = "0.4.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"851c8d0ce9bebe43790dedfc86614c23494ac9f423dd618d3a61fc693eafe61e" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", + "pin-project-internal", ] [[package]] @@ -1821,31 +2329,33 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] -name = "pkg-config" -version = "0.3.27" +name = "pkcs1" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" +checksum = "a78f66c04ccc83dd4486fd46c33896f4e17b24a7a3a6400dedc48ed0ddd72320" +dependencies = [ + "der", + "pkcs8", + "zeroize", +] [[package]] -name = "post-processor" -version = "1.0.0" +name = "pkcs8" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0" dependencies = [ - "ahash", - "anyhow", - "async-trait", - "chrono", - "clap", - "futures", - "once_cell", - "prometheus", - "reqwest", - "serde", - "serde_json", - "server-framework", - "tokio", - "tracing", + "der", + "spki", + "zeroize", ] +[[package]] +name = "pkg-config" +version = "0.3.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" + [[package]] name = "postgres-native-tls" version = "0.5.0" @@ -1888,6 +2398,35 @@ dependencies = [ "postgres-protocol", ] +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + +[[package]] +name = "pprof" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "196ded5d4be535690899a4631cc9f18cdc41b7ebf24a79400f46f48e49a11059" +dependencies = [ + "backtrace", + "cfg-if", + "findshlibs", + "inferno", + "libc", + "log", + "nix", + "once_cell", + "parking_lot", + "protobuf", + "protobuf-codegen-pure", + "smallvec", + "symbolic-demangle", + "tempfile", + "thiserror", +] + [[package]] name = "ppv-lite86" version = "0.2.17" @@ -1903,6 +2442,41 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn 1.0.109", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + +[[package]] +name = "proc-macro-utils" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f59e109e2f795a5070e69578c4dc101068139f74616778025ae1011d4cd41a8" +dependencies = [ + "proc-macro2", + "quote", + "smallvec", +] + [[package]] name = "proc-macro2" version = "1.0.76" @@ -1917,36 +2491,46 @@ name = "processor" version = "1.0.0" dependencies = [ "ahash", + "allocative", + "allocative_derive", "anyhow", + "aptos-in-memory-cache", "aptos-moving-average", "aptos-protos", "async-trait", - "base64 0.13.1", "bcs", "bigdecimal", + "bitflags 2.5.0", + "canonical_json", "chrono", "clap", + 
"dashmap", "diesel", "diesel-async", - "diesel_async_migrations", "diesel_migrations", "enum_dispatch", "field_count", "futures", "futures-util", - "gcloud-sdk", + "get-size", "google-cloud-googleapis", "google-cloud-pubsub", + "google-cloud-storage", "hex", + "hyper", "itertools 0.12.1", + "jemallocator", "kanal", + "lazy_static", "native-tls", + "num", "num_cpus", "once_cell", + "parquet", + "parquet_derive", "postgres-native-tls", "prometheus", "prost 0.12.3", - "prost-types 0.12.3", "regex", "serde", "serde_json", @@ -1954,12 +2538,16 @@ dependencies = [ "sha2 0.9.9", "sha3", "strum", + "tiny-keccak", "tokio", "tokio-postgres", - "tonic 0.10.2", + "tokio-util", + "tonic 0.11.0", "tracing", "unescape", "url", + "uuid", + "warp", ] [[package]] @@ -2032,37 +2620,85 @@ dependencies = [ ] [[package]] -name = "prost-types" -version = "0.12.3" +name = "protobuf" +version = "2.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" + +[[package]] +name = "protobuf-codegen" +version = "2.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "193898f59edcf43c26227dcd4c8427f00d99d61e95dcde58dabd49fa291d470e" +checksum = "033460afb75cf755fcfc16dfaed20b86468082a2ea24e05ac35ab4a099a017d6" dependencies = [ - "prost 0.12.3", + "protobuf", +] + +[[package]] +name = "protobuf-codegen-pure" +version = "2.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95a29399fc94bcd3eeaa951c715f7bea69409b2445356b00519740bcd6ddd865" +dependencies = [ + "protobuf", + "protobuf-codegen", ] [[package]] name = "psl-types" version = "2.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33cb294fe86a74cbcf50d4445b37da762029549ebeea341421c7c70370f86cac" +checksum = "33cb294fe86a74cbcf50d4445b37da762029549ebeea341421c7c70370f86cac" + +[[package]] +name = "publicsuffix" +version = "2.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96a8c1bda5ae1af7f99a2962e49df150414a43d62404644d98dd5c3a93d07457" +dependencies = [ + "idna 0.3.0", + "psl-types", +] + +[[package]] +name = "quick-xml" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f50b1c63b38611e7d4d7f68b82d3ad0cc71a2ad2e7f61fc10f1328d917c93cd" +dependencies = [ + "memchr", +] + +[[package]] +name = "quote" +version = "1.0.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +dependencies = [ + "proc-macro2", +] [[package]] -name = "publicsuffix" -version = "2.2.3" +name = "quote-use" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96a8c1bda5ae1af7f99a2962e49df150414a43d62404644d98dd5c3a93d07457" +checksum = "a7b5abe3fe82fdeeb93f44d66a7b444dedf2e4827defb0a8e69c437b2de2ef94" dependencies = [ - "idna 0.3.0", - "psl-types", + "quote", + "quote-use-macros", + "syn 2.0.48", ] [[package]] -name = "quote" -version = "1.0.35" +name = "quote-use-macros" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +checksum = "97ea44c7e20f16017a76a245bb42188517e13d16dcb1aa18044bc406cdc3f4af" dependencies = [ + "derive-where", "proc-macro2", + "quote", + "syn 2.0.48", ] [[package]] @@ -2106,14 +2742,14 @@ 
dependencies = [ [[package]] name = "regex" -version = "1.9.1" +version = "1.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575" +checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.3.3", - "regex-syntax 0.7.4", + "regex-automata 0.4.6", + "regex-syntax 0.8.3", ] [[package]] @@ -2127,13 +2763,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.3" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39354c10dd07468c2e73926b23bb9c2caca74c5501e38a35da70406f1d923310" +checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.7.4", + "regex-syntax 0.8.3", ] [[package]] @@ -2144,9 +2780,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.4" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" +checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" [[package]] name = "reqwest" @@ -2154,7 +2790,6 @@ version = "0.11.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e9ad3fe7488d7e34558a2033d45a0c90b72d97b4f80705666fea71472e2e6a1" dependencies = [ - "async-compression", "base64 0.21.2", "bytes", "cookie", @@ -2166,7 +2801,6 @@ dependencies = [ "http", "http-body", "hyper", - "hyper-rustls", "hyper-tls", "ipnet", "js-sys", @@ -2177,14 +2811,11 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls 0.21.7", - "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", "tokio", "tokio-native-tls", - "tokio-rustls 0.24.1", "tokio-util", "tower-service", "url", @@ -2192,10 +2823,18 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots 0.25.2", "winreg", ] +[[package]] +name = "rgb" +version = "0.8.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05aaa8004b64fd573fc9d002f4e632d51ad4f026c2b5ba95fcb6c2f32c2c47d8" +dependencies = [ + "bytemuck", +] + [[package]] name = "ring" version = "0.16.20" @@ -2206,11 +2845,74 @@ dependencies = [ "libc", "once_cell", "spin 0.5.2", - "untrusted", + "untrusted 0.7.1", "web-sys", "winapi", ] +[[package]] +name = "ring" +version = "0.17.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +dependencies = [ + "cc", + "cfg-if", + "getrandom", + "libc", + "spin 0.9.8", + "untrusted 0.9.0", + "windows-sys 0.52.0", +] + +[[package]] +name = "rsa" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cf22754c49613d2b3b119f0e5d46e34a2c628a937e3024b8762de4e7d8c710b" +dependencies = [ + "byteorder", + "digest 0.10.7", + "num-bigint-dig", + "num-integer", + "num-iter", + "num-traits", + "pkcs1", + "pkcs8", + "rand_core", + "smallvec", + "subtle", + "zeroize", +] + +[[package]] +name = "rstack" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7df9d3ebd4f17b52e6134efe2fa20021c80688cbe823d481a729a993b730493" +dependencies = [ + "cfg-if", + "dw", + "lazy_static", + "libc", + 
"log", +] + +[[package]] +name = "rstack-self" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dd5030da3aba0ec731502f74ec38e63798eea6bc8b8ba5972129afe3eababd2" +dependencies = [ + "antidote", + "backtrace", + "bincode", + "lazy_static", + "libc", + "rstack", + "serde", +] + [[package]] name = "rustc-demangle" version = "0.1.23" @@ -2228,7 +2930,7 @@ dependencies = [ "io-lifetimes", "libc", "linux-raw-sys 0.3.8", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -2237,11 +2939,11 @@ version = "0.38.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0a962918ea88d644592894bc6dc55acc6c0956488adcebbfb6e273506b7fd6e5" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.5.0", "errno", "libc", "linux-raw-sys 0.4.3", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -2251,7 +2953,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" dependencies = [ "log", - "ring", + "ring 0.16.20", "sct", "webpki", ] @@ -2263,19 +2965,34 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" dependencies = [ "log", - "ring", + "ring 0.16.20", "rustls-webpki 0.101.4", "sct", ] +[[package]] +name = "rustls" +version = "0.22.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99008d7ad0bbbea527ec27bddbc0e432c5b87d8175178cee68d2eec9c4a1813c" +dependencies = [ + "log", + "ring 0.17.8", + "rustls-pki-types", + "rustls-webpki 0.102.2", + "subtle", + "zeroize", +] + [[package]] name = "rustls-native-certs" -version = "0.6.3" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" +checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" dependencies = [ "openssl-probe", - "rustls-pemfile", + "rustls-pemfile 2.1.2", + "rustls-pki-types", "schannel", "security-framework", ] @@ -2289,14 +3006,30 @@ dependencies = [ "base64 0.21.2", ] +[[package]] +name = "rustls-pemfile" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" +dependencies = [ + "base64 0.22.0", + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecd36cc4259e3e4514335c4a138c6b43171a8d61d8f5c9348f9fc7529416f247" + [[package]] name = "rustls-webpki" version = "0.100.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b" dependencies = [ - "ring", - "untrusted", + "ring 0.16.20", + "untrusted 0.7.1", ] [[package]] @@ -2305,8 +3038,19 @@ version = "0.101.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d93931baf2d282fff8d3a532bbfd7653f734643161b87e3e01e59a04439bf0d" dependencies = [ - "ring", - "untrusted", + "ring 0.16.20", + "untrusted 0.7.1", +] + +[[package]] +name = "rustls-webpki" +version = "0.102.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faaa0a62740bedb9b2ef5afa303da42764c012f743917351dc9a237ea1663610" +dependencies = [ + "ring 0.17.8", + "rustls-pki-types", + 
"untrusted 0.9.0", ] [[package]] @@ -2317,9 +3061,9 @@ checksum = "dc31bd9b61a32c31f9650d18add92aa83a49ba979c143eefd27fe7177b05bd5f" [[package]] name = "ryu" -version = "1.0.14" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe232bdf6be8c8de797b22184ee71118d63780ea42ac85b61d1baa6d3b782ae9" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "schannel" @@ -2327,7 +3071,7 @@ version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" dependencies = [ - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -2358,21 +3102,8 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "secret-vault-value" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eddaf2631e82016a3262ce75575ec245ceef9a7115ddf8576851302efe6bdece" -dependencies = [ - "prost 0.11.9", - "prost-types 0.11.9", - "serde", - "serde_json", - "zeroize", + "ring 0.16.20", + "untrusted 0.7.1", ] [[package]] @@ -2398,6 +3129,12 @@ dependencies = [ "libc", ] +[[package]] +name = "seq-macro" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4" + [[package]] name = "serde" version = "1.0.193" @@ -2468,10 +3205,10 @@ name = "server-framework" version = "1.0.0" dependencies = [ "anyhow", + "aptos-system-utils", "async-trait", "backtrace", "clap", - "futures", "prometheus", "serde", "serde_yaml", @@ -2598,7 +3335,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" dependencies = [ "libc", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -2613,6 +3350,34 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +[[package]] +name = "spki" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" +dependencies = [ + "base64ct", + "der", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "str_stack" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9091b6114800a5f2141aee1d1b9d6ca3592ac062dc5decb3764ec5895a47b4eb" + [[package]] name = "stringprep" version = "0.1.4" @@ -2658,6 +3423,29 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" +[[package]] +name = "symbolic-common" +version = "10.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1b55cdc318ede251d0957f07afe5fed912119b8c1bc5a7804151826db999e737" +dependencies = [ + "debugid", + "memmap2", + "stable_deref_trait", + "uuid", +] + +[[package]] +name = "symbolic-demangle" +version = "10.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79be897be8a483a81fff6a3a4e195b4ac838ef73ca42d348b3f722da9902e489" +dependencies = [ + "cpp_demangle", + "rustc-demangle", + "symbolic-common", +] + [[package]] name = "syn" version = "1.0.109" @@ -2697,7 +3485,7 @@ dependencies = [ "fastrand", "redox_syscall", "rustix 0.37.23", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -2730,13 +3518,27 @@ dependencies = [ "once_cell", ] +[[package]] +name = "thrift" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e54bc85fc7faa8bc175c4bab5b92ba8d9a3ce893d0e9f42cc455c8ab16a9e09" +dependencies = [ + "byteorder", + "integer-encoding", + "ordered-float", +] + [[package]] name = "time" -version = "0.3.23" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59e399c068f43a5d116fedaf73b203fa4f9c519f17e2b34f63221d3792f81446" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ + "deranged", "itoa", + "num-conv", + "powerfmt", "serde", "time-core", "time-macros", @@ -2744,19 +3546,29 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.10" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96ba15a897f3c86766b757e5ac7221554c6750054d74d5b28844fce5fb36a6c4" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ + "num-conv", "time-core", ] +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + [[package]] name = "tinyvec" version = "1.6.0" @@ -2788,7 +3600,7 @@ dependencies = [ "signal-hook-registry", "socket2 0.5.5", "tokio-macros", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -2854,7 +3666,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f57eb36ecbe0fc510036adff84824dd3c24bb781e21bfa67b69d556aa85214f" dependencies = [ - "pin-project 1.1.2", + "pin-project", "rand", "tokio", ] @@ -2880,6 +3692,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-rustls" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +dependencies = [ + "rustls 0.22.3", + "rustls-pki-types", + "tokio", +] + [[package]] name = "tokio-stream" version = "0.1.14" @@ -2911,6 +3734,7 @@ checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" dependencies = [ "bytes", "futures-core", + "futures-io", "futures-sink", "pin-project-lite", "tokio", @@ -2971,10 +3795,9 @@ dependencies = [ "hyper", "hyper-timeout", "percent-encoding", - "pin-project 1.1.2", + "pin-project", "prost 0.11.9", - "rustls-native-certs", - "rustls-pemfile", + 
"rustls-pemfile 1.0.3", "tokio", "tokio-rustls 0.24.1", "tokio-stream", @@ -2982,14 +3805,14 @@ dependencies = [ "tower-layer", "tower-service", "tracing", - "webpki-roots 0.23.1", + "webpki-roots", ] [[package]] name = "tonic" -version = "0.10.2" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d560933a0de61cf715926b9cac824d4c883c2c43142f787595e48280c40a1d0e" +checksum = "76c4eb7a4e9ef9d4763600161f12f5070b92a578e1b634db88a6887844c91a13" dependencies = [ "async-stream", "async-trait", @@ -3003,18 +3826,19 @@ dependencies = [ "hyper", "hyper-timeout", "percent-encoding", - "pin-project 1.1.2", + "pin-project", "prost 0.12.3", - "rustls 0.21.7", "rustls-native-certs", - "rustls-pemfile", + "rustls-pemfile 2.1.2", + "rustls-pki-types", "tokio", - "tokio-rustls 0.24.1", + "tokio-rustls 0.25.0", "tokio-stream", "tower", "tower-layer", "tower-service", "tracing", + "zstd", ] [[package]] @@ -3026,7 +3850,7 @@ dependencies = [ "futures-core", "futures-util", "indexmap 1.9.3", - "pin-project 1.1.2", + "pin-project", "pin-project-lite", "rand", "slab", @@ -3049,18 +3873,6 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" -[[package]] -name = "tower-util" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1093c19826d33807c72511e68f73b4a0469a3f22c2bd5f7d5212178b4b89674" -dependencies = [ - "futures-core", - "futures-util", - "pin-project 0.4.30", - "tower-service", -] - [[package]] name = "tracing" version = "0.1.37" @@ -3162,6 +3974,16 @@ dependencies = [ "utf-8", ] +[[package]] +name = "twox-hash" +version = "1.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" +dependencies = [ + "cfg-if", + "static_assertions", +] + [[package]] name = "typenum" version = "1.16.0" @@ -3210,6 +4032,12 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + [[package]] name = "url" version = "2.4.0" @@ -3240,6 +4068,15 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" +[[package]] +name = "uuid" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" +dependencies = [ + "getrandom", +] + [[package]] name = "valuable" version = "0.1.0" @@ -3284,8 +4121,8 @@ dependencies = [ "mime_guess", "multer", "percent-encoding", - "pin-project 1.1.2", - "rustls-pemfile", + "pin-project", + "rustls-pemfile 1.0.3", "scoped-tls", "serde", "serde_json", @@ -3400,8 +4237,8 @@ version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" dependencies = [ - "ring", - "untrusted", + "ring 0.16.20", + "untrusted 0.7.1", ] [[package]] @@ -3413,12 +4250,6 @@ dependencies = [ "rustls-webpki 0.100.1", ] -[[package]] -name = "webpki-roots" -version = 
"0.25.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" - [[package]] name = "whoami" version = "1.4.1" @@ -3457,7 +4288,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" dependencies = [ - "windows-targets", + "windows-targets 0.48.1", ] [[package]] @@ -3466,7 +4297,16 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets", + "windows-targets 0.48.1", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.5", ] [[package]] @@ -3475,13 +4315,29 @@ version = "0.48.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_msvc 0.48.0", + "windows_i686_gnu 0.48.0", + "windows_i686_msvc 0.48.0", + "windows_x86_64_gnu 0.48.0", + "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_msvc 0.48.0", +] + +[[package]] +name = "windows-targets" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" +dependencies = [ + "windows_aarch64_gnullvm 0.52.5", + "windows_aarch64_msvc 0.52.5", + "windows_i686_gnu 0.52.5", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.5", + "windows_x86_64_gnu 0.52.5", + "windows_x86_64_gnullvm 0.52.5", + "windows_x86_64_msvc 0.52.5", ] [[package]] @@ -3490,42 +4346,90 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" + [[package]] name = "windows_aarch64_msvc" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" + [[package]] name = "windows_i686_gnu" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" +[[package]] +name = "windows_i686_gnu" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" + [[package]] name = "windows_i686_msvc" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" +[[package]] +name = "windows_i686_msvc" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" + [[package]] name = "windows_x86_64_gnu" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" + [[package]] name = "windows_x86_64_gnullvm" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" + [[package]] name = "windows_x86_64_msvc" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" + [[package]] name = "winnow" version = "0.5.0" @@ -3542,7 +4446,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ "cfg-if", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -3579,17 +4483,32 @@ name = "zeroize" version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" + +[[package]] +name = "zstd" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a27595e173641171fc74a1232b7b1c7a7cb6e18222c11e9dfb9888fa424c53c" dependencies = [ - "zeroize_derive", + "zstd-safe", ] [[package]] -name = "zeroize_derive" -version = "1.4.2" +name = "zstd-safe" +version = "6.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +checksum = "ee98ffd0b48ee95e6c5168188e44a54550b1564d9d530ee21d5f0eaed1069581" dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.48", + "libc", + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.9+zstd.1.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" +dependencies = [ + "cc", + "pkg-config", ] diff --git a/rust/Cargo.toml b/rust/Cargo.toml index 7003bc6cc..6dc4ce07e 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -1,13 +1,7 @@ [workspace] resolver = "2" -members = [ - "indexer-metrics", - "moving-average", - "post-processor", - "processor", - "server-framework", -] +members = ["indexer-metrics", 
"moving-average", "processor", "server-framework"] [workspace.package] authors = ["Aptos Labs "] @@ -16,7 +10,7 @@ homepage = "https://aptoslabs.com" license = "Apache-2.0" publish = false repository = "https://github.com/aptos-labs/aptos-indexer-processors" -rust-version = "1.75" +rust-version = "1.78" [workspace.dependencies] processor = { path = "processor" } @@ -25,23 +19,36 @@ aptos-moving-average = { path = "moving-average" } ahash = { version = "0.8.7", features = ["serde"] } anyhow = "1.0.62" +aptos-protos = { git = "https://github.com/aptos-labs/aptos-core.git", rev = "d76b5bb423b78b2b9affc72d3853f0d973d3f11f" } +aptos-in-memory-cache = { git = "https://github.com/aptos-labs/aptos-core.git", rev = "07952ba261dd8301581e449e26ce17bbbc1adc46" } +aptos-system-utils = { git = "https://github.com/aptos-labs/aptos-core.git", rev = "4541add3fd29826ec57f22658ca286d2d6134b93" } async-trait = "0.1.53" -aptos-protos = { git = "https://github.com/aptos-labs/aptos-core.git", tag = "aptos-node-v1.10.0" } backtrace = "0.3.58" base64 = "0.13.0" bb8 = "0.8.1" bcs = { git = "https://github.com/aptos-labs/bcs.git", rev = "d31fab9d81748e2594be5cd5cdf845786a30562d" } bigdecimal = { version = "0.4.0", features = ["serde"] } +bitflags = "2.5.0" chrono = { version = "0.4.19", features = ["clock", "serde"] } clap = { version = "4.3.5", features = ["derive", "unstable-styles"] } +dashmap = "5.2.0" +# Do NOT enable the postgres feature here, it is conditionally enabled in a feature +# block in the Cargo.toml file for the processor crate. +# https://github.com/aptos-labs/aptos-indexer-processors/pull/325 diesel = { version = "2.1", features = [ "chrono", "postgres_backend", "numeric", - "postgres", "serde_json", ] } -diesel-async = { version = "0.4", features = ["postgres", "bb8", "tokio"] } +# Use the crate version once this feature gets released on crates.io: +# https://github.com/weiznich/diesel_async/commit/e165e8c96a6c540ebde2d6d7c52df5c5620a4bf1 +diesel-async = { git = "https://github.com/weiznich/diesel_async.git", rev = "d02798c67065d763154d7272dd0c09b39757d0f2", features = [ + "async-connection-wrapper", + "postgres", + "bb8", + "tokio", +] } diesel_migrations = { version = "2.1.0", features = ["postgres"] } diesel_async_migrations = { git = "https://github.com/niroco/diesel_async_migrations", rev = "11f331b73c5cfcc894380074f748d8fda710ac12" } enum_dispatch = "0.3.12" @@ -49,14 +56,19 @@ field_count = "0.1.1" futures = "0.3.30" futures-core = "0.3.25" futures-util = "0.3.21" +get-size = { version = "0.1.4", features = ["derive"] } gcloud-sdk = { version = "0.20.4", features = [ "google-cloud-bigquery-storage-v1", ] } -cloud-storage = { version = "0.11.1", features = ["global-client"] } google-cloud-googleapis = "0.10.0" google-cloud-pubsub = "0.18.0" hex = "0.4.3" itertools = "0.12.1" +lazy_static = "1.4.0" +jemallocator = { version = "0.5.0", features = [ + "profiling", + "unprefixed_malloc_on_supported_platforms", +] } kanal = { version = "0.1.0-pre8", features = ["async"] } once_cell = "1.10.0" num_cpus = "1.16.0" @@ -64,6 +76,7 @@ pbjson = "0.5.1" prometheus = { version = "0.13.0", default-features = false } prost = { version = "0.12.3", features = ["no-recursion-limit"] } prost-types = "0.12.3" +quick_cache = "0.4.1" regex = "1.5.5" reqwest = { version = "0.11.20", features = [ "blocking", @@ -80,14 +93,17 @@ strum = { version = "0.24.1", features = ["derive"] } tempfile = "3.3.0" toml = "0.7.4" tracing-subscriber = 
{ version = "0.3.17", features = ["json", "env-filter"] } +tiny-keccak = { version = "2.0.2", features = ["keccak", "sha3"] } tokio = { version = "1.35.1", features = ["full"] } -tonic = { version = "0.10.2", features = [ +tokio-util = { version = "0.7.2", features = ["compat", "codec"] } +tonic = { version = "0.11.0", features = [ "tls", "tls-roots", "transport", "prost", "gzip", "codegen", + "zstd", ] } tracing = "0.1.34" unescape = "0.1.0" @@ -98,3 +114,14 @@ warp = { version = "0.3.5", features = ["tls"] } native-tls = "0.2.11" postgres-native-tls = "0.5.0" tokio-postgres = "0.7.10" + +# Parquet support +parquet = { version = "52.0.0", default-features = false, features = ["async", "lz4"] } +num = "0.4.0" +google-cloud-storage = "0.13.0" +hyper = { version = "0.14.18", features = ["full"] } +parquet_derive = { version = "52.0.0" } +canonical_json = "0.5.0" +allocative = "0.3.3" +allocative_derive = "0.3.3" +uuid = { version = "1.8.0", features = ["v4"] } diff --git a/rust/Dockerfile b/rust/Dockerfile index bb293e9ba..fbf528cdd 100644 --- a/rust/Dockerfile +++ b/rust/Dockerfile @@ -8,11 +8,10 @@ WORKDIR /app COPY --link . /app -RUN apt-get update && apt-get install -y cmake curl clang git pkg-config libssl-dev libpq-dev lld +RUN apt-get update && apt-get install -y cmake curl clang git pkg-config libssl-dev libdw-dev libpq-dev lld +ENV CARGO_NET_GIT_FETCH_WITH_CLI true RUN cargo build --locked --release -p processor RUN cp target/release/processor /usr/local/bin -RUN cargo build --locked --release -p post-processor -RUN cp target/release/post-processor /usr/local/bin RUN cargo build --locked --release -p indexer-metrics RUN cp target/release/indexer-metrics /usr/local/bin @@ -29,7 +28,6 @@ ENV GIT_SHA ${GIT_SHA} FROM debian:bullseye-slim COPY --from=builder /usr/local/bin/processor /usr/local/bin -COPY --from=builder /usr/local/bin/post-processor /usr/local/bin COPY --from=builder /usr/local/bin/indexer-metrics /usr/local/bin RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ @@ -41,6 +39,7 @@ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ tcpdump \ iproute2 \ netcat \ + libdw-dev \ libpq-dev \ curl diff --git a/rust/indexer-metrics/src/main.rs b/rust/indexer-metrics/src/main.rs index 32079a7a7..7c60c2c5b 100644 --- a/rust/indexer-metrics/src/main.rs +++ b/rust/indexer-metrics/src/main.rs @@ -10,7 +10,7 @@ use indexer_metrics::{ HASURA_API_LATEST_VERSION, HASURA_API_LATEST_VERSION_TIMESTAMP, PFN_LEDGER_TIMESTAMP, PFN_LEDGER_VERSION, TASK_FAILURE_COUNT, }, - util::{deserialize_from_string, fetch_url_with_timeout}, + util::{deserialize_from_string, fetch_processor_status_with_timeout, get_url_with_timeout}, }; use serde::{Deserialize, Serialize}; use server_framework::{RunnableConfig, ServerArgs}; @@ -39,14 +39,19 @@ struct ProcessorStatus { } #[derive(Debug, Deserialize, Serialize)] -struct ProcessorsResponse { +struct ProcessorsResponseInner { processor_status: Vec, } +#[derive(Debug, Deserialize, Serialize)] +struct ProcessorsResponse { + data: ProcessorsResponseInner, +} + #[derive(Clone, Debug, Deserialize, Serialize)] #[serde(deny_unknown_fields)] pub struct PostProcessorConfig { - pub hasura_rest_api_endpoint: Option, + pub hasura_graphql_endpoint: Option, pub fullnode_rest_api_endpoint: Option, pub chain_name: String, } @@ -55,13 +60,13 @@ pub struct PostProcessorConfig { impl RunnableConfig for PostProcessorConfig { async fn run(&self) -> Result<()> { let mut tasks = vec![]; - let hasura_rest_api_endpoint = self.hasura_rest_api_endpoint.clone(); + let 
hasura_graphql_endpoint = self.hasura_graphql_endpoint.clone(); let fullnode_rest_api_endpoint = self.fullnode_rest_api_endpoint.clone(); let chain_name = self.chain_name.clone(); - if let Some(hasura) = hasura_rest_api_endpoint { + if let Some(endpoint) = hasura_graphql_endpoint { tasks.push(tokio::spawn(start_processor_status_fetch( - hasura, + endpoint, chain_name.clone(), ))); } @@ -87,7 +92,7 @@ async fn main() -> Result<()> { async fn start_fn_fetch(url: String, chain_name: String) { loop { - let result = fetch_url_with_timeout(&url, QUERY_TIMEOUT_MS).await; + let result = get_url_with_timeout(&url, QUERY_TIMEOUT_MS).await; let time_now = tokio::time::Instant::now(); // Handle the result @@ -134,7 +139,7 @@ async fn start_fn_fetch(url: String, chain_name: String) { async fn start_processor_status_fetch(url: String, chain_name: String) { loop { - let result = fetch_url_with_timeout(&url, QUERY_TIMEOUT_MS).await; + let result = fetch_processor_status_with_timeout(&url, QUERY_TIMEOUT_MS).await; let time_now = tokio::time::Instant::now(); // Handle the result @@ -144,17 +149,20 @@ async fn start_processor_status_fetch(url: String, chain_name: String) { tracing::info!(url = &url, response = ?resp, "Request succeeded"); // Process the data as needed let system_time_now = chrono::Utc::now().naive_utc(); - for processor in resp.processor_status { + for processor in resp.data.processor_status { HASURA_API_LATEST_VERSION .with_label_values(&[&processor.processor, &chain_name]) .set(processor.last_success_version as i64); HASURA_API_LATEST_VERSION_TIMESTAMP .with_label_values(&[&processor.processor, &chain_name]) - .set(processor.last_updated.timestamp_micros() as f64 * 1e-6); + .set(processor.last_updated.and_utc().timestamp_micros() as f64 * 1e-6); HASURA_API_LATEST_TRANSACTION_TIMESTAMP .with_label_values(&[&processor.processor, &chain_name]) .set( - processor.last_transaction_timestamp.timestamp_micros() as f64 + processor + .last_transaction_timestamp + .and_utc() + .timestamp_micros() as f64 * 1e-6, ); let latency = system_time_now - processor.last_transaction_timestamp; diff --git a/rust/indexer-metrics/src/util.rs b/rust/indexer-metrics/src/util.rs index b3d9122d5..732270492 100644 --- a/rust/indexer-metrics/src/util.rs +++ b/rust/indexer-metrics/src/util.rs @@ -19,7 +19,40 @@ where s.parse::().map_err(D::Error::custom) } -pub async fn fetch_url_with_timeout( +pub async fn fetch_processor_status_with_timeout( + url: &str, + timeout_ms: u64, +) -> Result, Elapsed> { + let data = serde_json::json!({ + "query": r#" + { + processor_status { + processor + last_updated + last_success_version + last_transaction_timestamp + } + } + "# + }); + post_url_with_timeout(url, data, timeout_ms).await +} + +async fn post_url_with_timeout( + url: &str, + data: serde_json::Value, + timeout_ms: u64, +) -> Result, Elapsed> { + let client = Client::new(); + + // Set the timeout duration + let timeout_duration = Duration::from_millis(timeout_ms); + + // Use tokio::time::timeout to set a timeout for the request + timeout(timeout_duration, client.post(url).json(&data).send()).await +} + +pub async fn get_url_with_timeout( url: &str, timeout_ms: u64, ) -> Result, Elapsed> { diff --git a/rust/moving-average/src/lib.rs b/rust/moving-average/src/lib.rs index f7bbb9f50..826949de5 100644 --- a/rust/moving-average/src/lib.rs +++ b/rust/moving-average/src/lib.rs @@ -15,7 +15,7 @@ pub struct MovingAverage { impl MovingAverage { pub fn new(window_millis: u64) -> Self { - let now = 
chrono::Utc::now().naive_utc().timestamp_millis() as u64; + let now = chrono::Utc::now().naive_utc().and_utc().timestamp_millis() as u64; let mut queue = VecDeque::new(); queue.push_back((now, 0)); Self { @@ -26,7 +26,7 @@ impl MovingAverage { } pub fn tick_now(&mut self, value: u64) { - let now = chrono::Utc::now().naive_utc().timestamp_millis() as u64; + let now = chrono::Utc::now().naive_utc().and_utc().timestamp_millis() as u64; self.tick(now, value); } @@ -49,12 +49,13 @@ impl MovingAverage { self.avg() } + // Only be called after tick_now/tick is called. pub fn avg(&self) -> f64 { if self.values.len() < 2 { 0.0 } else { let elapsed = self.values.back().unwrap().0 - self.values.front().unwrap().0; - self.sum as f64 / elapsed as f64 + (self.sum * 1000) as f64 / elapsed as f64 } } @@ -62,3 +63,22 @@ impl MovingAverage { self.sum } } + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_moving_average() { + // 10 Second window. + let mut ma = MovingAverage::new(10_000); + // 9 seconds spent at 100 TPS. + for _ in 0..9 { + ma.tick_now(100); + std::thread::sleep(std::time::Duration::from_secs(1)); + } + // No matter what algorithm we use, the average should be 99 at least. + let avg = ma.avg(); + assert!(avg >= 99.0, "Average is too low: {}", avg); + } +} diff --git a/rust/parquet-bq-scripts/move_resources-create.sql b/rust/parquet-bq-scripts/move_resources-create.sql new file mode 100644 index 000000000..67197dd02 --- /dev/null +++ b/rust/parquet-bq-scripts/move_resources-create.sql @@ -0,0 +1,21 @@ +CREATE TABLE `{}` +( + txn_version INT64, + write_set_change_index INT64, + block_height INT64, + block_timestamp TIMESTAMP, + resource_address STRING, + resource_type STRING, + module STRING, + fun STRING, + is_deleted BOOL, + generic_type_params STRING, + data STRING, + state_key_hash STRING, + + bq_inserted_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP(), + PRIMARY KEY(txn_version, write_set_change_index) NOT ENFORCED +) +PARTITION BY TIMESTAMP_TRUNC(block_timestamp, DAY) +CLUSTER BY txn_version, resource_type, block_height, state_key_hash, resource_address +; diff --git a/rust/parquet-bq-scripts/move_resources-merge.sql b/rust/parquet-bq-scripts/move_resources-merge.sql new file mode 100644 index 000000000..0fa25b6a9 --- /dev/null +++ b/rust/parquet-bq-scripts/move_resources-merge.sql @@ -0,0 +1,49 @@ +MERGE INTO `{}` AS main +USING ( + SELECT * + FROM ( + SELECT + *, + ROW_NUMBER() OVER ( + PARTITION BY -- primary key(s) + txn_version, + write_set_change_index + ) AS row_num + FROM `{}` + ) AS foo + WHERE foo.row_num = 1 +) AS staging + ON + main.txn_version = staging.txn_version -- primary key(s) + AND main.write_set_change_index = staging.write_set_change_index +WHEN NOT MATCHED BY TARGET +THEN + INSERT ( + txn_version, + write_set_change_index, + block_height, + block_timestamp, + resource_address, + resource_type, + module, + fun, + is_deleted, + generic_type_params, + data, + state_key_hash + ) + VALUES ( + staging.txn_version, + staging.write_set_change_index, + staging.block_height, + staging.block_timestamp, + staging.resource_address, + staging.resource_type, + staging.module, + staging.fun, + staging.is_deleted, + staging.generic_type_params, + staging.data, + staging.state_key_hash + ); + \ No newline at end of file diff --git a/rust/parquet-bq-scripts/table_items_create.sql b/rust/parquet-bq-scripts/table_items_create.sql new file mode 100644 index 000000000..c6a08f3a4 --- /dev/null +++ b/rust/parquet-bq-scripts/table_items_create.sql @@ -0,0 +1,20 @@ +CREATE TABLE `{}` 
+( + txn_version INT64, + block_timestamp TIMESTAMP, + write_set_change_index INT64, + transaction_block_height INT64, + table_key STRING, + table_handle STRING, + decoded_key STRING, -- json + decoded_value STRING, -- json + is_deleted BOOL, + -- + bq_inserted_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP(), + + PRIMARY KEY(txn_version, write_set_change_index) NOT ENFORCED +) +PARTITION BY TIMESTAMP_TRUNC(block_timestamp, DAY) +CLUSTER BY table_key, txn_version +; + diff --git a/rust/parquet-bq-scripts/table_items_merge.sql b/rust/parquet-bq-scripts/table_items_merge.sql new file mode 100644 index 000000000..165cb35ec --- /dev/null +++ b/rust/parquet-bq-scripts/table_items_merge.sql @@ -0,0 +1,44 @@ +MERGE INTO `{}` AS main +USING ( + SELECT * + FROM ( + SELECT + *, + ROW_NUMBER() OVER ( + PARTITION BY -- primary key(s) + txn_version, + write_set_change_index + ) AS row_num + FROM `{}` + ) AS foo + WHERE foo.row_num = 1 +) AS staging + ON + main.txn_version = staging.txn_version -- primary key(s) + AND main.write_set_change_index = staging.write_set_change_index +WHEN NOT MATCHED BY TARGET +THEN + INSERT ( + key, + txn_version, + write_set_change_index, + transaction_block_height, + table_handle, + decoded_key, + decoded_value, + is_deleted, + block_timestamp, + ) + VALUES ( + staging.key, + staging.txn_version, + staging.write_set_change_index, + staging.transaction_block_height, + staging.table_handle, + staging.decoded_key, + staging.decoded_value, + staging.is_deleted, + staging.block_timestamp, + + CAST(FLOOR(staging.transaction_block_height / 1e6) AS INT64), + ); \ No newline at end of file diff --git a/rust/parquet-bq-scripts/transactions-create.sql b/rust/parquet-bq-scripts/transactions-create.sql new file mode 100644 index 000000000..bcf75efa1 --- /dev/null +++ b/rust/parquet-bq-scripts/transactions-create.sql @@ -0,0 +1,26 @@ +CREATE TABLE `{}` +( + txn_version INT64, + block_height INT64, + epoch INT64, + txn_type STRING, + payload STRING, + payload_type STRING, + gas_used INT64, + success BOOL, + vm_status STRING, + num_events INT64, + num_write_set_changes INT64, + txn_hash STRING, + state_change_hash STRING, + event_root_hash STRING, + state_checkpoint_hash STRING, + accumulator_root_hash STRING, + block_timestamp TIMESTAMP, + -- + bq_inserted_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP(), + PRIMARY KEY(txn_version) NOT ENFORCED +) +PARTITION BY TIMESTAMP_TRUNC(block_timestamp,DAY) +CLUSTER BY txn_version, txn_type, block_height, txn_hash +; diff --git a/rust/parquet-bq-scripts/transactions-merge.sql b/rust/parquet-bq-scripts/transactions-merge.sql new file mode 100644 index 000000000..fe5fd7cd2 --- /dev/null +++ b/rust/parquet-bq-scripts/transactions-merge.sql @@ -0,0 +1,57 @@ +MERGE INTO `{}` AS main +USING ( + SELECT * + FROM + ( + SELECT + *, + ROW_NUMBER() OVER ( + PARTITION BY -- primary key(s) + txn_version + ) AS row_num + FROM `{}` + ) AS foo + WHERE foo.row_num = 1 +) AS staging + ON main.txn_version = staging.txn_version -- primary key(s) +WHEN NOT MATCHED BY TARGET +THEN + INSERT ( + txn_version, + block_height, + epoch, + txn_type, + payload, + payload_type, + gas_used, + success, + vm_status, + num_events, + num_write_set_changes, + txn_hash, + state_change_hash, + event_root_hash, + state_checkpoint_hash, + accumulator_root_hash, + block_timestamp + ) + VALUES ( + staging.txn_version, + staging.block_height, + staging.epoch, + staging.txn_type, + staging.payload, + staging.payload_type, + staging.gas_used, + staging.success, + staging.vm_status, + staging.num_events, + 
staging.num_write_set_changes,
+ staging.txn_hash,
+ staging.state_change_hash,
+ staging.event_root_hash,
+ staging.state_checkpoint_hash,
+ staging.accumulator_root_hash,
+ staging.block_timestamp
+ )
+;
diff --git a/rust/parquet-bq-scripts/write_set_changes_create.sql b/rust/parquet-bq-scripts/write_set_changes_create.sql
new file mode 100644
index 000000000..8cd33f7e9
--- /dev/null
+++ b/rust/parquet-bq-scripts/write_set_changes_create.sql
@@ -0,0 +1,16 @@
+CREATE TABLE `{}`
+(
+ txn_version INT64,
+ write_set_change_index INT64,
+ state_key_hash STRING,
+ change_type STRING,
+ resource_address STRING,
+ block_height INT64,
+ block_timestamp TIMESTAMP,
+ --
+ bq_inserted_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP(),
+ PRIMARY KEY(txn_version, write_set_change_index) NOT ENFORCED
+)
+PARTITION BY TIMESTAMP_TRUNC(block_timestamp, DAY)
+CLUSTER BY txn_version, change_type, block_height, state_key_hash
+;
\ No newline at end of file
diff --git a/rust/parquet-bq-scripts/write_set_changes_merge.sql b/rust/parquet-bq-scripts/write_set_changes_merge.sql
new file mode 100644
index 000000000..025c38b5b
--- /dev/null
+++ b/rust/parquet-bq-scripts/write_set_changes_merge.sql
@@ -0,0 +1,40 @@
+MERGE INTO `{}` AS main
+USING (
+ SELECT *
+ FROM
+ (
+ SELECT
+ *,
+ ROW_NUMBER() OVER (
+ PARTITION BY -- primary key(s)
+ txn_version,
+ write_set_change_index
+ ORDER BY inserted_at DESC
+ ) AS row_num
+ FROM `{}`
+ ) AS foo
+ WHERE foo.row_num = 1
+) AS staging
+ ON main.txn_version = staging.txn_version -- primary key(s)
+ AND main.write_set_change_index = staging.write_set_change_index
+WHEN NOT MATCHED BY TARGET
+THEN
+ INSERT (
+ txn_version,
+ write_set_change_index,
+ state_key_hash,
+ change_type,
+ resource_address,
+ block_height,
+ block_timestamp
+ )
+ VALUES (
+ staging.txn_version,
+ staging.write_set_change_index,
+ staging.state_key_hash,
+ staging.change_type,
+ staging.resource_address,
+ staging.block_height,
+ staging.block_timestamp
+ )
+;
diff --git a/rust/post-processor/Cargo.toml b/rust/post-processor/Cargo.toml
deleted file mode 100644
index 364434a38..000000000
--- a/rust/post-processor/Cargo.toml
+++ /dev/null
@@ -1,28 +0,0 @@
-[package]
-name = "post-processor"
-version = "1.0.0"
-
-# Workspace inherited keys
-authors = { workspace = true }
-edition = { workspace = true }
-homepage = { workspace = true }
-license = { workspace = true }
-publish = { workspace = true }
-repository = { workspace = true }
-rust-version = { workspace = true }
-
-[dependencies]
-ahash = { workspace = true }
-anyhow = { workspace = true }
-async-trait = { workspace = true }
-chrono = { workspace = true }
-clap = { workspace = true }
-futures = { workspace = true }
-once_cell = { workspace = true }
-prometheus = { workspace = true }
-reqwest = { workspace = true }
-serde = { workspace = true }
-serde_json = { workspace = true }
-server-framework = { workspace = true }
-tokio = { workspace = true }
-tracing = { workspace = true }
diff --git a/rust/post-processor/config.yaml b/rust/post-processor/config.yaml
deleted file mode 100644
index 44eee77e3..000000000
--- a/rust/post-processor/config.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-health_check_port: 8088
-server_config:
- processor_status_checker_config:
- # Endpoint is one of:
- # - https://indexer.mainnet.aptoslabs.com/api/rest/get_lastest_processor_status
- # - https://indexer-testnet.staging.gcp.aptosdev.com/api/rest/get_lastest_processor_status
- # - https://indexer-devnet.staging.gcp.aptosdev.com/api/rest/get_lastest_processor_status
-
hasura_rest_api_endpoint: "https://indexer.mainnet.aptoslabs.com/api/rest/get_lastest_processor_status" - # fullnode_rest_api_endpoint is one of - # - https://fullnode.mainnet.aptoslabs.com/v1 - # - https://fullnode.testnet.aptoslabs.com/v1 - # - https://fullnode.devnet.aptoslabs.com/v1 - fullnode_rest_api_endpoint: "https://fullnode.mainnet.aptoslabs.com/v1" diff --git a/rust/post-processor/src/lib.rs b/rust/post-processor/src/lib.rs deleted file mode 100644 index e77961406..000000000 --- a/rust/post-processor/src/lib.rs +++ /dev/null @@ -1,5 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -pub mod metrics; -pub mod processor_status_checker; diff --git a/rust/post-processor/src/main.rs b/rust/post-processor/src/main.rs deleted file mode 100644 index 2b9c1b8b0..000000000 --- a/rust/post-processor/src/main.rs +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use anyhow::Result; -use clap::Parser; -use post_processor::{ - metrics::TASK_FAILURE_COUNT, processor_status_checker::ProcessorStatusChecker, -}; -use serde::{Deserialize, Serialize}; -use server_framework::{RunnableConfig, ServerArgs}; -use tracing::info; - -#[derive(Clone, Debug, Deserialize, Serialize)] -#[serde(deny_unknown_fields)] -pub struct ProcessorStatusCheckerConfig { - pub hasura_rest_api_endpoint: String, - pub fullnode_rest_api_endpoint: String, -} - -#[derive(Clone, Debug, Deserialize, Serialize)] -#[serde(deny_unknown_fields)] -pub struct PostProcessorConfig { - pub processor_status_checker_config: Option, -} - -#[async_trait::async_trait] -impl RunnableConfig for PostProcessorConfig { - async fn run(&self) -> Result<()> { - let mut tasks = vec![]; - - if let Some(config) = &self.processor_status_checker_config { - tasks.push(tokio::spawn({ - let config = config.clone(); - async move { - let checker = ProcessorStatusChecker::new( - config.hasura_rest_api_endpoint.clone(), - config.fullnode_rest_api_endpoint.clone(), - ); - info!("Starting ProcessorStatusChecker"); - if let Err(err) = checker.run().await { - tracing::error!("ProcessorStatusChecker failed: {:?}", err); - TASK_FAILURE_COUNT - .with_label_values(&["processor_status_checker"]) - .inc(); - } - } - })) - } - - let _ = futures::future::join_all(tasks).await; - unreachable!("All tasks should run forever"); - } - - fn get_server_name(&self) -> String { - "idxbg".to_string() - } -} - -#[tokio::main] -async fn main() -> Result<()> { - let args = ServerArgs::parse(); - args.run::(tokio::runtime::Handle::current()) - .await -} diff --git a/rust/post-processor/src/metrics.rs b/rust/post-processor/src/metrics.rs deleted file mode 100644 index 5afa57d78..000000000 --- a/rust/post-processor/src/metrics.rs +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use once_cell::sync::Lazy; -use prometheus::{ - register_gauge_vec, register_int_counter_vec, register_int_gauge_vec, GaugeVec, IntCounterVec, - IntGaugeVec, -}; - -/// Task failure count. -pub static TASK_FAILURE_COUNT: Lazy = Lazy::new(|| { - register_int_counter_vec!( - "indexer_processors_post_processing_task_failure_count", - "Task failure count.", - &["task_name"], - ) - .unwrap() -}); - -// API last update time latency to current time in seconds. 
-pub static HASURA_API_LAST_UPDATED_TIME_LATENCY_IN_SECS: Lazy = Lazy::new(|| { - register_gauge_vec!( - "indexer_processors_hasura_api_last_updated_time_latency_in_secs", - "Processor last update time latency to current time in seconds.", - &["processor_name"], - ) - .unwrap() -}); - -// Processor latest version latency to fullnode latest version. -pub static HASURA_API_LATEST_VERSION_LATENCY: Lazy = Lazy::new(|| { - register_int_gauge_vec!( - "indexer_processors_hasura_api_latest_version_latency", - "Processor latest version latency to fullnode latest version.", - &["processor_name"], - ) - .unwrap() -}); diff --git a/rust/post-processor/src/processor_status_checker.rs b/rust/post-processor/src/processor_status_checker.rs deleted file mode 100644 index 7a06666ad..000000000 --- a/rust/post-processor/src/processor_status_checker.rs +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use crate::metrics::{ - HASURA_API_LAST_UPDATED_TIME_LATENCY_IN_SECS, HASURA_API_LATEST_VERSION_LATENCY, -}; -use ahash::AHashMap; -use anyhow::Result; -use chrono::NaiveDateTime; -use core::panic; -use serde::{Deserialize, Serialize}; -use std::time::{Duration, SystemTime, UNIX_EPOCH}; -use tracing::info; - -const PROCESSOR_STATUS_CHECKER_WAIT_TIME_IN_SECS: u64 = 10; - -pub struct ProcessorStatusChecker { - pub hasura_rest_api_endpoint: String, - pub fullnode_rest_api_endpoint: String, -} - -#[derive(Debug, Deserialize, Serialize)] -struct ProcessorStatusResponse { - processor_status: Vec, -} - -#[derive(Debug, Deserialize, Serialize)] -struct ProcessorStatus { - pub processor: String, - pub last_updated: String, - pub last_success_version: i64, -} - -#[derive(Debug, Deserialize, Serialize)] -struct FullnodeResponse { - chain_id: u8, - epoch: String, - ledger_version: String, - oldest_ledger_version: String, - ledger_timestamp: String, - node_role: String, - oldest_block_height: String, - block_height: String, - git_hash: String, -} - -impl ProcessorStatusChecker { - pub fn new(hasura_rest_api_endpoint: String, fullnode_rest_api_endpoint: String) -> Self { - Self { - hasura_rest_api_endpoint, - fullnode_rest_api_endpoint, - } - } - - pub async fn run(&self) -> Result<()> { - loop { - let processor_latest_version_map = handle_hasura_response( - self.hasura_rest_api_endpoint.clone(), - ) - .await - .unwrap_or_else(|e| { - tracing::error!(e = ?e, "Failed to get processor status response from hasura"); - panic!(); - }); - - let fullnode_latest_version = - handle_fullnode_api_response(self.fullnode_rest_api_endpoint.clone()) - .await - .unwrap_or_else(|e| { - tracing::error!(e = ?e, "Failed to get fullnode response from fullnode"); - panic!(); - }); - for processor_latest_version in processor_latest_version_map { - let latency = fullnode_latest_version - processor_latest_version.1; - HASURA_API_LATEST_VERSION_LATENCY - .with_label_values(&[processor_latest_version.0.as_str()]) - .set(latency); - } - tokio::time::sleep(Duration::from_secs( - PROCESSOR_STATUS_CHECKER_WAIT_TIME_IN_SECS, - )) - .await; - } - } -} - -async fn handle_hasura_response(hasura_endpoint: String) -> Result> { - let endpoint = hasura_endpoint.clone(); - info!("Connecting to hasura endpoint: {}", endpoint); - let client = reqwest::Client::new(); - let result = client.get(endpoint).send().await?; - let processor_status_response_result = result.json::().await; - let processor_status_response = match processor_status_response_result { - Ok(processor_status_response) => 
processor_status_response, - Err(e) => { - anyhow::bail!("Failed to handle hasura api response: {:?}", e); - }, - }; - - let mut processor_latest_version_map = AHashMap::new(); - - for processor_status in processor_status_response.processor_status { - let last_updated_time = NaiveDateTime::parse_from_str( - processor_status.last_updated.as_str(), - "%Y-%m-%dT%H:%M:%S%.f", - ) - .unwrap(); - let current_time = SystemTime::now(); - let latency = current_time.duration_since(UNIX_EPOCH)?.as_secs_f64() - - last_updated_time - .signed_duration_since(NaiveDateTime::from_timestamp_opt(0, 0).unwrap()) - .to_std()? - .as_secs_f64(); - HASURA_API_LAST_UPDATED_TIME_LATENCY_IN_SECS - .with_label_values(&[processor_status.processor.as_str()]) - .set(latency); - processor_latest_version_map.insert( - processor_status.processor, - processor_status.last_success_version, - ); - } - Ok(processor_latest_version_map) -} - -async fn handle_fullnode_api_response(fullnode_endpoint: String) -> Result { - let endpoint = fullnode_endpoint.clone(); - info!("Connecting to fullnode endpoint: {}", endpoint); - let client = reqwest::Client::new(); - let result = client.get(endpoint).send().await?; - let fullnode_response_result = result.json::().await; - match fullnode_response_result { - Ok(fullnode_response) => Ok(fullnode_response.ledger_version.parse::().unwrap()), - Err(e) => anyhow::bail!("Failed to handle fullnode api response: {:?}", e), - } -} diff --git a/rust/processor/Cargo.toml b/rust/processor/Cargo.toml index 74fb894cf..f40725823 100644 --- a/rust/processor/Cargo.toml +++ b/rust/processor/Cargo.toml @@ -15,33 +15,34 @@ rust-version = { workspace = true } [dependencies] ahash = { workspace = true } anyhow = { workspace = true } +aptos-in-memory-cache = { workspace = true } aptos-moving-average = { workspace = true } aptos-protos = { workspace = true } async-trait = { workspace = true } -base64 = { workspace = true } bcs = { workspace = true } bigdecimal = { workspace = true } +bitflags = { workspace = true } chrono = { workspace = true } clap = { workspace = true } +dashmap = { workspace = true } diesel = { workspace = true } diesel-async = { workspace = true } -diesel_async_migrations = { workspace = true } diesel_migrations = { workspace = true } enum_dispatch = { workspace = true } field_count = { workspace = true } futures = { workspace = true } futures-util = { workspace = true } -gcloud-sdk = { workspace = true } +get-size = { workspace = true } google-cloud-googleapis = { workspace = true } google-cloud-pubsub = { workspace = true } hex = { workspace = true } itertools = { workspace = true } kanal = { workspace = true } +lazy_static = { workspace = true } num_cpus = { workspace = true } once_cell = { workspace = true } prometheus = { workspace = true } prost = { workspace = true } -prost-types = { workspace = true } regex = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } @@ -50,12 +51,37 @@ sha2 = { workspace = true } sha3 = { workspace = true } strum = { workspace = true } tokio = { workspace = true } +tokio-util = { workspace = true } tonic = { workspace = true } tracing = { workspace = true } unescape = { workspace = true } url = { workspace = true } +warp = { workspace = true } # Postgres SSL support native-tls = { workspace = true } postgres-native-tls = { workspace = true } +tiny-keccak = { workspace = true } tokio-postgres = { workspace = true } + +[target.'cfg(unix)'.dependencies] +jemallocator = { workspace = true } + +# Parquet support +parquet = { 
workspace = true } +num = { workspace = true } +google-cloud-storage = { workspace = true } +hyper = { workspace = true } +lazy_static = { workspace = true } +parquet_derive = { workspace = true } +canonical_json = { workspace = true } +allocative = { workspace = true } +allocative_derive = { workspace = true } +uuid = { workspace = true } + +[features] +libpq = ["diesel/postgres"] +# When using the default features we enable the diesel/postgres feature. We configure +# it in a feature so the CLI can opt out, since it cannot tolerate the libpq dep. +# Recall that features should always be additive. +default = ["libpq"] diff --git a/rust/processor/README.md b/rust/processor/README.md index f385e35b7..ad6338be6 100644 --- a/rust/processor/README.md +++ b/rust/processor/README.md @@ -11,31 +11,37 @@ Indexer GRPC parser is to indexer data processor that leverages the indexer grpc - A running PostgreSQL instance, with a valid database. More tutorial can be found [here](https://github.com/aptos-labs/aptos-core/tree/main/crates/indexer#postgres) -- A config YAML file - - For exmaple, `config.yaml` - - ```yaml - health_check_port: 8084 - server_config: - processor_config: - type: default_processor - postgres_connection_string: postgresql://postgres:@localhost:5432/postgres_v2 - indexer_grpc_data_service_address: 127.0.0.1:50051 - indexer_grpc_http2_ping_interval_in_secs: 60 - indexer_grpc_http2_ping_timeout_in_secs: 10 - number_concurrent_processing_tasks: 10 - auth_token: AUTH_TOKEN - starting_version: 0 # optional - ending_version: 0 # optional - transaction_filter: - # Only allow transactions from these contract addresses - #focus_contract_addresses: - #- "0x0" - # Skip transactions from these sender addresses - skip_sender_addresses: - - "0x07" - # Skip all transactions that aren't user transactions - focus_user_transactions: false - ``` + - A config YAML file + - For example, `parser.yaml` + + ```yaml + health_check_port: 8084 + server_config: + processor_config: + type: default_processor + postgres_connection_string: postgresql://postgres:@localhost:5432/postgres_v2 + indexer_grpc_data_service_address: 127.0.0.1:50051 + indexer_grpc_http2_ping_interval_in_secs: 60 + indexer_grpc_http2_ping_timeout_in_secs: 10 + number_concurrent_processing_tasks: 10 + auth_token: AUTH_TOKEN + starting_version: 0 # optional + ending_version: 0 # optional + transaction_filter: + # Only allow transactions from these contract addresses + # focus_contract_addresses: + # - "0x0" + # Skip transactions from these sender addresses + skip_sender_addresses: + - "0x07" + # Skip all transactions that aren't user transactions + focus_user_transactions: false + deprecated_tables: [ + "MOVE_RESOURCES", + "WRITE_SET_CHANGES", + "TRANSACTIONS", + ] + ``` #### Config Explanation @@ -48,6 +54,7 @@ Indexer GRPC parser is to indexer data processor that leverages the indexer grpc - `starting_version`: start processor at starting_version. - `ending_version`: stop processor after ending_version. - `number_concurrent_processing_tasks`: number of tasks to parse and insert; 1 means sequential processing, otherwise, +- `deprecated_tables`: a list of tables to skip writing to alloyDB. transactions are splitted into tasks and inserted with random order. 
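The new `deprecated_tables` option is only a list of names in the config; the actual skipping happens inside the processor's write path. As a rough illustration (not the processor's actual code), a writer could consult the configured set before issuing inserts. The helper name `should_write_table` and the upper-casing of names are assumptions based on the example config above, which lists tables such as `MOVE_RESOURCES`.

```rust
use std::collections::HashSet;

/// Hypothetical helper: returns true if a table is not marked as deprecated.
/// The example config lists upper-case table names, so normalize before comparing.
fn should_write_table(deprecated_tables: &HashSet<String>, table_name: &str) -> bool {
    !deprecated_tables.contains(&table_name.to_uppercase())
}

fn main() {
    let deprecated: HashSet<String> = ["MOVE_RESOURCES", "WRITE_SET_CHANGES", "TRANSACTIONS"]
        .iter()
        .map(|s| s.to_string())
        .collect();

    // Writes to a deprecated table are skipped; everything else proceeds as before.
    assert!(!should_write_table(&deprecated, "move_resources"));
    assert!(should_write_table(&deprecated, "events"));
}
```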
### Use docker image for existing parsers(Only for **Unix/Linux**) @@ -64,3 +71,6 @@ Indexer GRPC parser is to indexer data processor that leverages the indexer grpc ### Use a custom parser - Check our [indexer processors](https://github.com/aptos-labs/aptos-indexer-processors)! + +### Manually running diesel-cli +- `cd` into the database folder you use under `src/db/` (e.g. `src/db/postgres`), then run it. diff --git a/rust/processor/diesel.toml b/rust/processor/diesel.toml deleted file mode 100644 index 92267c829..000000000 --- a/rust/processor/diesel.toml +++ /dev/null @@ -1,5 +0,0 @@ -# For documentation on how to configure this file, -# see diesel.rs/guides/configuring-diesel-cli - -[print_schema] -file = "src/schema.rs" diff --git a/rust/processor/parser.yaml b/rust/processor/parser.yaml index 37d1c73b6..25ed06b54 100644 --- a/rust/processor/parser.yaml +++ b/rust/processor/parser.yaml @@ -7,4 +7,4 @@ server_config: type: default_processor postgres_connection_string: postgresql://postgres:@localhost:5432/default_processor indexer_grpc_data_service_address: http://127.0.0.1:50051 - auth_token: AUTH_TOKEN + auth_token: AUTH_TOKEN \ No newline at end of file diff --git a/rust/processor/src/bq_analytics/gcs_handler.rs b/rust/processor/src/bq_analytics/gcs_handler.rs new file mode 100644 index 000000000..c038152a6 --- /dev/null +++ b/rust/processor/src/bq_analytics/gcs_handler.rs @@ -0,0 +1,121 @@ +use crate::bq_analytics::ParquetProcessorError; +use anyhow::{anyhow, Result}; +use chrono::{Datelike, Timelike}; +use google_cloud_storage::{ + client::Client as GCSClient, + http::objects::upload::{Media, UploadObjectRequest, UploadType}, +}; +use hyper::Body; +use std::path::PathBuf; +use tokio::io::AsyncReadExt; // for read_to_end() +use tokio::{ + fs::File as TokioFile, + time::{sleep, timeout, Duration}, +}; +use tracing::{debug, error, info}; +const BUCKET_REGULAR_TRAFFIC: &str = "devnet-airflow-continue"; +const MAX_RETRIES: usize = 3; +const INITIAL_DELAY_MS: u64 = 500; +const TIMEOUT_SECONDS: u64 = 300; +pub async fn upload_parquet_to_gcs( + client: &GCSClient, + file_path: &PathBuf, + table_name: &str, + bucket_name: &str, +) -> Result<(), ParquetProcessorError> { + let mut file = TokioFile::open(&file_path) + .await + .map_err(|e| anyhow!("Failed to open file for reading: {}", e))?; + + let mut buffer = Vec::new(); + file.read_to_end(&mut buffer) + .await + .map_err(|e| anyhow!("Failed to read file: {}", e))?; + + if buffer.is_empty() { + error!("The file is empty and has no data to upload.",); + return Err(ParquetProcessorError::Other( + "The file is empty and has no data to upload.".to_string(), + )); + } + + let now = chrono::Utc::now(); + let start_of_month = now + .with_day(1) + .unwrap() + .with_hour(0) + .unwrap() + .with_minute(0) + .unwrap() + .with_second(0) + .unwrap() + .with_nanosecond(0) + .unwrap(); + let highwater_s = start_of_month.timestamp_millis(); + let highwater_ms = now.timestamp_millis(); + let counter = 0; // THIS NEED TO BE REPLACED OR REIMPLEMENTED WITH AN ACTUAL LOGIC TO ENSURE FILE UNIQUENESS. 
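The `counter` above is a placeholder, as the TODO notes. One possible approach, sketched below purely as an illustration, is a process-wide atomic counter; the handler elsewhere in this change already appends a `Uuid::new_v4()` to local file names, so a UUID suffix on the object name would be another option.

```rust
use std::sync::atomic::{AtomicU32, Ordering};

// Hypothetical process-wide counter so concurrent uploads of the same table
// do not collide on the generated object name.
static UPLOAD_COUNTER: AtomicU32 = AtomicU32::new(0);

fn next_upload_counter() -> u32 {
    UPLOAD_COUNTER.fetch_add(1, Ordering::Relaxed)
}
```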
+ let object_name: PathBuf = generate_parquet_file_path( + BUCKET_REGULAR_TRAFFIC, + table_name, + highwater_s, + highwater_ms, + counter, + ); + + let file_name = object_name.to_str().unwrap().to_owned(); + let upload_type: UploadType = UploadType::Simple(Media::new(file_name.clone())); + + let upload_request = UploadObjectRequest { + bucket: bucket_name.to_string(), + ..Default::default() + }; + + let mut retry_count = 0; + let mut delay = INITIAL_DELAY_MS; + + loop { + let data = Body::from(buffer.clone()); + let upload_result = timeout( + Duration::from_secs(TIMEOUT_SECONDS), + client.upload_object(&upload_request, data, &upload_type), + ) + .await; + + match upload_result { + Ok(Ok(result)) => { + info!("File uploaded successfully to GCS: {}", result.name); + return Ok(()); + }, + Ok(Err(e)) => { + error!("Failed to upload file to GCS: {}", e); + if retry_count >= MAX_RETRIES { + return Err(ParquetProcessorError::StorageError(e)); + } + }, + Err(e) => { + error!("Upload timed out: {}", e); + if retry_count >= MAX_RETRIES { + return Err(ParquetProcessorError::TimeoutError(e)); + } + }, + } + + retry_count += 1; + sleep(Duration::from_millis(delay)).await; + delay *= 2; + debug!("Retrying upload operation. Retry count: {}", retry_count); + } +} + +fn generate_parquet_file_path( + gcs_bucket_root: &str, + table: &str, + highwater_s: i64, + highwater_ms: i64, + counter: u32, +) -> PathBuf { + PathBuf::from(format!( + "{}/{}/{}/{}_{}.parquet", + gcs_bucket_root, table, highwater_s, highwater_ms, counter + )) +} diff --git a/rust/processor/src/bq_analytics/generic_parquet_processor.rs b/rust/processor/src/bq_analytics/generic_parquet_processor.rs new file mode 100644 index 000000000..f10c21259 --- /dev/null +++ b/rust/processor/src/bq_analytics/generic_parquet_processor.rs @@ -0,0 +1,241 @@ +use super::ParquetProcessingResult; +use crate::{ + bq_analytics::gcs_handler::upload_parquet_to_gcs, + gap_detectors::ProcessingResult, + utils::counters::{PARQUET_HANDLER_BUFFER_SIZE, PARQUET_STRUCT_SIZE}, +}; +use ahash::AHashMap; +use allocative::Allocative; +use anyhow::{anyhow, Result}; +use google_cloud_storage::client::Client as GCSClient; +use parquet::{ + file::{properties::WriterProperties, writer::SerializedFileWriter}, + record::RecordWriter, + schema::types::Type, +}; +use std::{ + fs::{remove_file, rename, File}, + path::PathBuf, + sync::Arc, +}; +use tracing::{debug, error}; +use uuid::Uuid; + +#[derive(Debug, Default, Clone)] +pub struct ParquetDataGeneric { + pub data: Vec, + pub first_txn_version: u64, + pub last_txn_version: u64, + pub last_transaction_timestamp: Option, + pub transaction_version_to_struct_count: AHashMap, +} + +pub trait NamedTable { + const TABLE_NAME: &'static str; +} + +pub trait HasVersion { + fn version(&self) -> i64; +} + +pub trait HasParquetSchema { + fn schema() -> Arc; +} + +/// Auto-implement this for all types that implement `Default` and `RecordWriter` +impl HasParquetSchema for ParquetType +where + ParquetType: std::fmt::Debug + Default + Sync + Send, + for<'a> &'a [ParquetType]: RecordWriter, +{ + fn schema() -> Arc { + let example: Self = Default::default(); + [example].as_slice().schema().unwrap() + } +} + +pub struct ParquetHandler +where + ParquetType: NamedTable + HasVersion + HasParquetSchema + 'static + Allocative, + for<'a> &'a [ParquetType]: RecordWriter, +{ + pub schema: Arc, + pub writer: SerializedFileWriter, + pub buffer: Vec, + pub buffer_size_bytes: usize, + + pub transaction_version_to_struct_count: AHashMap, + pub bucket_name: String, 
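+    // Sends flushed version ranges to the parquet gap detector once a file has been uploaded to GCS.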
+ pub gap_detector_sender: kanal::AsyncSender, + pub file_path: String, +} + +fn create_new_writer( + file_path: &str, + schema: Arc, +) -> Result> { + let props = WriterProperties::builder() + .set_compression(parquet::basic::Compression::LZ4) + .build(); + let props_arc = Arc::new(props); + let file: File = File::options() + .create(true) + .truncate(true) + .write(true) + .open(file_path)?; + + Ok(SerializedFileWriter::new( + file.try_clone()?, + schema, + props_arc, + )?) +} + +impl ParquetHandler +where + ParquetType: NamedTable + HasVersion + HasParquetSchema + 'static + Allocative, + for<'a> &'a [ParquetType]: RecordWriter, +{ + fn create_new_writer(&self) -> Result> { + let file_path = &self.file_path; + create_new_writer(file_path, self.schema.clone()) + } + + fn close_writer(&mut self) -> Result<()> { + let mut writer = self.create_new_writer()?; + std::mem::swap(&mut self.writer, &mut writer); + writer.close()?; + Ok(()) + } + + pub fn new( + bucket_name: String, + gap_detector_sender: kanal::AsyncSender, + schema: Arc, + ) -> Result { + // had to append unique id to avoid concurrent write issues + let file_path = format!("{}_{}.parquet", ParquetType::TABLE_NAME, Uuid::new_v4()); + let writer = create_new_writer(&file_path, schema.clone())?; + + Ok(Self { + writer, + buffer: Vec::new(), + buffer_size_bytes: 0, + transaction_version_to_struct_count: AHashMap::new(), + bucket_name, + gap_detector_sender, + schema, + file_path, + }) + } + + pub async fn handle( + &mut self, + gcs_client: &GCSClient, + changes: ParquetDataGeneric, + max_buffer_size: usize, + ) -> Result<()> { + let last_transaction_timestamp = changes.last_transaction_timestamp; + let parquet_structs = changes.data; + self.transaction_version_to_struct_count + .extend(changes.transaction_version_to_struct_count); + + for parquet_struct in parquet_structs { + let size_of_struct = allocative::size_of_unique(&parquet_struct); + PARQUET_STRUCT_SIZE + .with_label_values(&[ParquetType::TABLE_NAME]) + .set(size_of_struct as i64); + self.buffer_size_bytes += size_of_struct; + self.buffer.push(parquet_struct); + } + + // for now, it's okay to go little above the buffer_size, given that we will keep max size as 200 MB + if self.buffer_size_bytes >= max_buffer_size { + let start_version = self.buffer.first().unwrap().version(); + let end_version = self.buffer.last().unwrap().version(); + + let txn_version_to_struct_count = process_struct_count_map( + &self.buffer, + &mut self.transaction_version_to_struct_count, + ); + + let new_file_path: PathBuf = PathBuf::from(format!( + "{}_{}.parquet", + ParquetType::TABLE_NAME, + Uuid::new_v4() + )); + rename(&self.file_path, &new_file_path)?; // this fixes an issue with concurrent file access issues + + let struct_buffer = std::mem::take(&mut self.buffer); + + let mut row_group_writer = self.writer.next_row_group()?; + struct_buffer + .as_slice() + .write_to_row_group(&mut row_group_writer) + .unwrap(); + row_group_writer.close()?; + self.close_writer()?; + + debug!( + table_name = ParquetType::TABLE_NAME, + start_version = start_version, + end_version = end_version, + "Max buffer size reached, uploading to GCS." 
+ ); + let upload_result = upload_parquet_to_gcs( + gcs_client, + &new_file_path, + ParquetType::TABLE_NAME, + &self.bucket_name, + ) + .await; + self.buffer_size_bytes = 0; + remove_file(&new_file_path)?; + + return match upload_result { + Ok(_) => { + let parquet_processing_result = ParquetProcessingResult { + start_version, + end_version, + last_transaction_timestamp: last_transaction_timestamp.clone(), + txn_version_to_struct_count, + }; + + self.gap_detector_sender + .send(ProcessingResult::ParquetProcessingResult( + parquet_processing_result, + )) + .await + .expect("[Parser] Failed to send versions to gap detector"); + Ok(()) + }, + Err(e) => { + error!("Failed to upload file to GCS: {}", e); + Err(anyhow!("Failed to upload file to GCS: {}", e)) + }, + }; + } + + PARQUET_HANDLER_BUFFER_SIZE + .with_label_values(&[ParquetType::TABLE_NAME]) + .set(self.buffer.len() as i64); + Ok(()) + } +} + +fn process_struct_count_map( + buffer: &[ParquetType], + txn_version_to_struct_count: &mut AHashMap, +) -> AHashMap { + let mut txn_version_to_struct_count_for_gap_detector = AHashMap::new(); + + for item in buffer.iter() { + let version = item.version(); + + if let Some(count) = txn_version_to_struct_count.get(&(version)) { + txn_version_to_struct_count_for_gap_detector.insert(version, *count); + txn_version_to_struct_count.remove(&(version)); + } + } + txn_version_to_struct_count_for_gap_detector +} diff --git a/rust/processor/src/bq_analytics/mod.rs b/rust/processor/src/bq_analytics/mod.rs new file mode 100644 index 000000000..2bb132fca --- /dev/null +++ b/rust/processor/src/bq_analytics/mod.rs @@ -0,0 +1,68 @@ +pub mod gcs_handler; +pub mod generic_parquet_processor; +pub mod parquet_handler; + +use ahash::AHashMap; +use google_cloud_storage::http::Error as StorageError; +use serde::{Deserialize, Serialize}; +use std::fmt::{Debug, Display, Formatter, Result as FormatResult}; +use tokio::io; + +#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] +pub struct ParquetProcessingResult { + pub start_version: i64, + pub end_version: i64, + pub last_transaction_timestamp: Option, + pub txn_version_to_struct_count: AHashMap, +} + +#[derive(Debug)] +pub enum ParquetProcessorError { + ParquetError(parquet::errors::ParquetError), + StorageError(StorageError), + TimeoutError(tokio::time::error::Elapsed), + IoError(io::Error), + Other(String), +} + +impl std::error::Error for ParquetProcessorError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match *self { + ParquetProcessorError::ParquetError(ref err) => Some(err), + ParquetProcessorError::StorageError(ref err) => Some(err), + ParquetProcessorError::TimeoutError(ref err) => Some(err), + ParquetProcessorError::IoError(ref err) => Some(err), + ParquetProcessorError::Other(_) => None, + } + } +} + +impl Display for ParquetProcessorError { + fn fmt(&self, f: &mut Formatter<'_>) -> FormatResult { + match *self { + ParquetProcessorError::ParquetError(ref err) => write!(f, "Parquet error: {}", err), + ParquetProcessorError::StorageError(ref err) => write!(f, "Storage error: {}", err), + ParquetProcessorError::TimeoutError(ref err) => write!(f, "Timeout error: {}", err), + ParquetProcessorError::IoError(ref err) => write!(f, "IO error: {}", err), + ParquetProcessorError::Other(ref desc) => write!(f, "Error: {}", desc), + } + } +} + +impl From for ParquetProcessorError { + fn from(err: std::io::Error) -> Self { + ParquetProcessorError::IoError(err) + } +} + +impl From for ParquetProcessorError { + fn from(err: anyhow::Error) -> Self 
{ + ParquetProcessorError::Other(err.to_string()) + } +} + +impl From for ParquetProcessorError { + fn from(err: parquet::errors::ParquetError) -> Self { + ParquetProcessorError::ParquetError(err) + } +} diff --git a/rust/processor/src/bq_analytics/parquet_handler.rs b/rust/processor/src/bq_analytics/parquet_handler.rs new file mode 100644 index 000000000..785bdb8bd --- /dev/null +++ b/rust/processor/src/bq_analytics/parquet_handler.rs @@ -0,0 +1,80 @@ +use crate::{ + bq_analytics::generic_parquet_processor::{ + HasParquetSchema, HasVersion, NamedTable, ParquetDataGeneric, + ParquetHandler as GenericParquetHandler, + }, + gap_detectors::ProcessingResult, + worker::PROCESSOR_SERVICE_TYPE, +}; +use allocative::Allocative; +use google_cloud_storage::client::{Client as GCSClient, ClientConfig as GcsClientConfig}; +use kanal::AsyncSender; +use parquet::record::RecordWriter; +use std::sync::Arc; +use tracing::{debug, error, info}; + +pub fn create_parquet_handler_loop( + new_gap_detector_sender: AsyncSender, + processor_name: &str, + bucket_name: String, + parquet_handler_response_channel_size: usize, + max_buffer_size: usize, +) -> AsyncSender> +where + ParquetType: NamedTable + HasVersion + HasParquetSchema + Send + Sync + 'static + Allocative, + for<'a> &'a [ParquetType]: RecordWriter, +{ + let processor_name = processor_name.to_owned(); + + let (parquet_sender, parquet_receiver) = kanal::bounded_async::>( + parquet_handler_response_channel_size, + ); + + debug!( + processor_name = processor_name.clone(), + service_type = PROCESSOR_SERVICE_TYPE, + "[Parquet Handler] Starting parquet handler loop", + ); + + let mut parquet_manager = GenericParquetHandler::new( + bucket_name.clone(), + new_gap_detector_sender.clone(), + ParquetType::schema(), + ) + .expect("Failed to create parquet manager"); + + tokio::spawn(async move { + let gcs_config = GcsClientConfig::default() + .with_auth() + .await + .expect("Failed to create GCS client config"); + let gcs_client = Arc::new(GCSClient::new(gcs_config)); + + loop { + let txn_pb_res = parquet_receiver.recv().await.unwrap(); // handle error properly + + let result = parquet_manager + .handle(&gcs_client, txn_pb_res, max_buffer_size) + .await; + match result { + Ok(_) => { + info!( + processor_name = processor_name.clone(), + service_type = PROCESSOR_SERVICE_TYPE, + "[Parquet Handler] Successfully processed parquet files", + ); + }, + Err(e) => { + error!( + processor_name = processor_name.clone(), + service_type = PROCESSOR_SERVICE_TYPE, + "[Parquet Handler] Error processing parquet files: {:?}", + e + ); + }, + } + } + }); + + parquet_sender +} diff --git a/rust/processor/src/config.rs b/rust/processor/src/config.rs index 35a5f8cb6..785130334 100644 --- a/rust/processor/src/config.rs +++ b/rust/processor/src/config.rs @@ -2,16 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - gap_detector::DEFAULT_GAP_DETECTION_BATCH_SIZE, processors::ProcessorConfig, + gap_detectors::DEFAULT_GAP_DETECTION_BATCH_SIZE, processors::ProcessorConfig, transaction_filter::TransactionFilter, worker::Worker, }; use ahash::AHashMap; use anyhow::{Context, Result}; use serde::{Deserialize, Serialize}; use server_framework::RunnableConfig; -use std::time::Duration; +use std::{collections::HashSet, time::Duration}; use url::Url; +pub const QUERY_DEFAULT_RETRIES: u32 = 5; +pub const QUERY_DEFAULT_RETRY_DELAY_MS: u64 = 500; + #[derive(Clone, Debug, Deserialize, Serialize)] #[serde(deny_unknown_fields)] pub struct IndexerGrpcProcessorConfig { @@ -33,6 +36,9 @@ pub struct 
IndexerGrpcProcessorConfig { // Maximum number of batches "missing" before we assume we have an issue with gaps and abort #[serde(default = "IndexerGrpcProcessorConfig::default_gap_detection_batch_size")] pub gap_detection_batch_size: u64, + // Maximum number of batches "missing" before we assume we have an issue with gaps and abort + #[serde(default = "IndexerGrpcProcessorConfig::default_gap_detection_batch_size")] + pub parquet_gap_detection_batch_size: u64, // Number of protobuff transactions to send per chunk to the processor tasks #[serde(default = "IndexerGrpcProcessorConfig::default_pb_channel_txn_chunk_size")] pub pb_channel_txn_chunk_size: usize, @@ -40,8 +46,15 @@ pub struct IndexerGrpcProcessorConfig { #[serde(default = "AHashMap::new")] pub per_table_chunk_sizes: AHashMap, pub enable_verbose_logging: Option, + + #[serde(default = "IndexerGrpcProcessorConfig::default_grpc_response_item_timeout_in_secs")] + pub grpc_response_item_timeout_in_secs: u64, + #[serde(default)] pub transaction_filter: TransactionFilter, + // String vector for deprecated tables to skip db writes + #[serde(default)] + pub deprecated_tables: HashSet, } impl IndexerGrpcProcessorConfig { @@ -49,11 +62,24 @@ impl IndexerGrpcProcessorConfig { DEFAULT_GAP_DETECTION_BATCH_SIZE } + pub const fn default_query_retries() -> u32 { + QUERY_DEFAULT_RETRIES + } + + pub const fn default_query_retry_delay_ms() -> u64 { + QUERY_DEFAULT_RETRY_DELAY_MS + } + /// Make the default very large on purpose so that by default it's not chunked /// This prevents any unexpected changes in behavior pub const fn default_pb_channel_txn_chunk_size() -> usize { 100_000 } + + /// Default timeout for grpc response item in seconds. Defaults to 60 seconds. + pub const fn default_grpc_response_item_timeout_in_secs() -> u64 { + 60 + } } #[async_trait::async_trait] @@ -70,10 +96,13 @@ impl RunnableConfig for IndexerGrpcProcessorConfig { self.number_concurrent_processing_tasks, self.db_pool_size, self.gap_detection_batch_size, + self.parquet_gap_detection_batch_size, self.pb_channel_txn_chunk_size, self.per_table_chunk_sizes.clone(), self.enable_verbose_logging, self.transaction_filter.clone(), + self.grpc_response_item_timeout_in_secs, + self.deprecated_tables.clone(), ) .await .context("Failed to build worker")?; @@ -103,6 +132,9 @@ pub struct IndexerGrpcHttp2Config { /// Indexer GRPC http2 ping timeout in seconds. Defaults to 10. indexer_grpc_http2_ping_timeout_in_secs: u64, + + /// Seconds before timeout for grpc connection. 
+ indexer_grpc_connection_timeout_secs: u64, } impl IndexerGrpcHttp2Config { @@ -113,6 +145,10 @@ impl IndexerGrpcHttp2Config { pub fn grpc_http2_ping_timeout_in_secs(&self) -> Duration { Duration::from_secs(self.indexer_grpc_http2_ping_timeout_in_secs) } + + pub fn grpc_connection_timeout_secs(&self) -> Duration { + Duration::from_secs(self.indexer_grpc_connection_timeout_secs) + } } impl Default for IndexerGrpcHttp2Config { @@ -120,6 +156,7 @@ impl Default for IndexerGrpcHttp2Config { Self { indexer_grpc_http2_ping_interval_in_secs: 30, indexer_grpc_http2_ping_timeout_in_secs: 10, + indexer_grpc_connection_timeout_secs: 5, } } } diff --git a/rust/processor/src/db/common/mod.rs b/rust/processor/src/db/common/mod.rs new file mode 100644 index 000000000..c446ac883 --- /dev/null +++ b/rust/processor/src/db/common/mod.rs @@ -0,0 +1 @@ +pub mod models; diff --git a/rust/processor/src/models/account_transaction_models/account_transactions.rs b/rust/processor/src/db/common/models/account_transaction_models/account_transactions.rs similarity index 99% rename from rust/processor/src/models/account_transaction_models/account_transactions.rs rename to rust/processor/src/db/common/models/account_transaction_models/account_transactions.rs index 6903c7773..fcf0c4666 100644 --- a/rust/processor/src/models/account_transaction_models/account_transactions.rs +++ b/rust/processor/src/db/common/models/account_transaction_models/account_transactions.rs @@ -6,7 +6,7 @@ #![allow(clippy::unused_unit)] use crate::{ - models::{ + db::common::models::{ object_models::v2_object_utils::ObjectWithMetadata, user_transactions_models::user_transactions::UserTransaction, }, diff --git a/rust/processor/src/models/account_transaction_models/mod.rs b/rust/processor/src/db/common/models/account_transaction_models/mod.rs similarity index 100% rename from rust/processor/src/models/account_transaction_models/mod.rs rename to rust/processor/src/db/common/models/account_transaction_models/mod.rs diff --git a/rust/processor/src/models/ans_models/ans_lookup.rs b/rust/processor/src/db/common/models/ans_models/ans_lookup.rs similarity index 100% rename from rust/processor/src/models/ans_models/ans_lookup.rs rename to rust/processor/src/db/common/models/ans_models/ans_lookup.rs diff --git a/rust/processor/src/models/ans_models/ans_lookup_v2.rs b/rust/processor/src/db/common/models/ans_models/ans_lookup_v2.rs similarity index 95% rename from rust/processor/src/models/ans_models/ans_lookup_v2.rs rename to rust/processor/src/db/common/models/ans_models/ans_lookup_v2.rs index 090cb00e5..7b68d7891 100644 --- a/rust/processor/src/models/ans_models/ans_lookup_v2.rs +++ b/rust/processor/src/db/common/models/ans_models/ans_lookup_v2.rs @@ -10,7 +10,7 @@ use super::{ ans_utils::{get_token_name, NameRecordV2, SetReverseLookupEvent, SubdomainExtV2}, }; use crate::{ - models::token_v2_models::v2_token_utils::TokenStandard, + db::common::models::token_v2_models::v2_token_utils::TokenStandard, schema::{ ans_lookup_v2, ans_primary_name_v2, current_ans_lookup_v2, current_ans_primary_name_v2, }, @@ -55,6 +55,7 @@ pub struct CurrentAnsLookupV2 { pub expiration_timestamp: chrono::NaiveDateTime, pub token_name: String, pub is_deleted: bool, + pub subdomain_expiration_policy: Option, } #[derive(Clone, Default, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] @@ -71,6 +72,7 @@ pub struct AnsLookupV2 { pub expiration_timestamp: chrono::NaiveDateTime, pub token_name: String, pub is_deleted: bool, + pub subdomain_expiration_policy: Option, 
} #[derive( @@ -150,6 +152,7 @@ impl CurrentAnsLookupV2 { expiration_timestamp: v1_current_ans_lookup.expiration_timestamp, token_name: v1_current_ans_lookup.token_name, is_deleted: v1_current_ans_lookup.is_deleted, + subdomain_expiration_policy: None, }, AnsLookupV2 { transaction_version: v1_ans_lookup.transaction_version, @@ -161,6 +164,7 @@ impl CurrentAnsLookupV2 { expiration_timestamp: v1_ans_lookup.expiration_timestamp, token_name: v1_ans_lookup.token_name, is_deleted: v1_ans_lookup.is_deleted, + subdomain_expiration_policy: None, }, ) } @@ -177,12 +181,11 @@ impl CurrentAnsLookupV2 { .unwrap() { // If this resource account has a SubdomainExt, then it's a subdomain - let maybe_subdomain_name = address_to_subdomain_ext + let (subdomain_name, subdomain_expiration_policy) = match address_to_subdomain_ext .get(&standardize_address(write_resource.address.as_str())) - .map(|subdomain_ext| subdomain_ext.get_subdomain_trunc()); - let subdomain_name = match maybe_subdomain_name.clone() { - Some(subdomain) => subdomain, - None => "".to_string(), + { + Some(s) => (s.get_subdomain_trunc(), Some(s.subdomain_expiration_policy)), + None => ("".to_string(), None), }; let token_name = get_token_name( @@ -200,6 +203,7 @@ impl CurrentAnsLookupV2 { token_name: token_name.clone(), last_transaction_version: txn_version, is_deleted: false, + subdomain_expiration_policy, }, AnsLookupV2 { transaction_version: txn_version, @@ -211,6 +215,7 @@ impl CurrentAnsLookupV2 { expiration_timestamp: inner.get_expiration_time(), token_name, is_deleted: false, + subdomain_expiration_policy, }, ))); } diff --git a/rust/processor/src/models/ans_models/ans_utils.rs b/rust/processor/src/db/common/models/ans_models/ans_utils.rs similarity index 87% rename from rust/processor/src/models/ans_models/ans_utils.rs rename to rust/processor/src/db/common/models/ans_models/ans_utils.rs index b1f800531..4f6ee6ed9 100644 --- a/rust/processor/src/models/ans_models/ans_utils.rs +++ b/rust/processor/src/db/common/models/ans_models/ans_utils.rs @@ -5,7 +5,7 @@ #![allow(clippy::extra_unused_lifetimes)] use crate::{ - models::default_models::move_resources::MoveResource, + db::common::models::default_models::move_resources::MoveResource, utils::util::{ bigdecimal_to_u64, deserialize_from_string, parse_timestamp_secs, standardize_address, truncate_str, @@ -38,12 +38,6 @@ pub struct OptionalBigDecimal { vec: Vec, } -impl OptionalBigDecimal { - fn get_big_decimal(&self) -> Option { - self.vec.first().map(|x| x.0.clone()) - } -} - pub fn get_token_name(domain_name: &str, subdomain_name: &str) -> String { let domain = truncate_str(domain_name, DOMAIN_LENGTH); let subdomain = truncate_str(subdomain_name, DOMAIN_LENGTH); @@ -99,10 +93,6 @@ impl NameRecordV1 { parse_timestamp_secs(bigdecimal_to_u64(&self.expiration_time_sec), 0) } - pub fn get_property_version(&self) -> u64 { - bigdecimal_to_u64(&self.property_version) - } - pub fn get_target_address(&self) -> Option { self.target_address .get_string() @@ -181,7 +171,7 @@ impl NameRecordV2 { #[derive(Serialize, Deserialize, Debug, Clone)] pub struct SubdomainExtV2 { - subdomain_expiration_policy: BigDecimal, + pub subdomain_expiration_policy: i64, subdomain_name: String, } @@ -220,7 +210,7 @@ impl AnsWriteResource { ans_v2_contract_address: &str, txn_version: i64, ) -> anyhow::Result> { - let type_str = MoveResource::get_outer_type_from_resource(write_resource); + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); let data = write_resource.data.as_str(); match 
type_str.clone() { @@ -309,44 +299,6 @@ impl SetReverseLookupEvent { get_token_name(&domain, &subdomain) } - pub fn get_curr_expiration_time(&self) -> Option { - self.curr_expiration_time_secs - .get_big_decimal() - .map(|x| parse_timestamp_secs(bigdecimal_to_u64(&x), 0)) - } - - pub fn get_prev_domain_trunc(&self) -> String { - truncate_str( - self.prev_domain_name - .get_string() - .unwrap_or_default() - .as_str(), - DOMAIN_LENGTH, - ) - } - - pub fn get_prev_subdomain_trunc(&self) -> String { - truncate_str( - self.prev_subdomain_name - .get_string() - .unwrap_or_default() - .as_str(), - DOMAIN_LENGTH, - ) - } - - pub fn get_prev_token_name(&self) -> String { - let domain = self.get_prev_domain_trunc(); - let subdomain = self.get_prev_subdomain_trunc(); - get_token_name(&domain, &subdomain) - } - - pub fn get_prev_expiration_time(&self) -> Option { - self.prev_expiration_time_secs - .get_big_decimal() - .map(|x| parse_timestamp_secs(bigdecimal_to_u64(&x), 0)) - } - pub fn from_event( event: &Event, ans_v2_contract_address: &str, diff --git a/rust/processor/src/models/ans_models/mod.rs b/rust/processor/src/db/common/models/ans_models/mod.rs similarity index 100% rename from rust/processor/src/models/ans_models/mod.rs rename to rust/processor/src/db/common/models/ans_models/mod.rs diff --git a/rust/processor/src/models/coin_models/coin_activities.rs b/rust/processor/src/db/common/models/coin_models/coin_activities.rs similarity index 89% rename from rust/processor/src/models/coin_models/coin_activities.rs rename to rust/processor/src/db/common/models/coin_models/coin_activities.rs index 86e750117..e7467761f 100644 --- a/rust/processor/src/models/coin_models/coin_activities.rs +++ b/rust/processor/src/db/common/models/coin_models/coin_activities.rs @@ -8,11 +8,10 @@ use super::{ coin_balances::{CoinBalance, CurrentCoinBalance}, coin_infos::CoinInfo, - coin_supply::CoinSupply, coin_utils::{CoinEvent, EventGuidResource}, }; use crate::{ - models::{ + db::common::models::{ fungible_asset_models::{ v2_fungible_asset_activities::{ CoinType, CurrentCoinBalancePK, EventToCoinType, BURN_GAS_EVENT_CREATION_NUM, @@ -22,11 +21,13 @@ use crate::{ }, user_transactions_models::signatures::Signature, }, - processors::coin_processor::APTOS_COIN_TYPE_STR, schema::coin_activities, utils::{ counters::PROCESSOR_UNKNOWN_TYPE_COUNT, - util::{get_entry_function_from_user_request, standardize_address, u64_to_bigdecimal}, + util::{ + get_entry_function_from_user_request, standardize_address, u64_to_bigdecimal, + APTOS_COIN_TYPE_STR, + }, }, }; use ahash::AHashMap; @@ -80,7 +81,6 @@ impl CoinActivity { Vec, AHashMap, AHashMap, - Vec, ) { // All the items we want to track let mut coin_activities = Vec::new(); @@ -90,7 +90,7 @@ impl CoinActivity { AHashMap::new(); // This will help us get the coin type when we see coin deposit/withdraw events for coin activities let mut all_event_to_coin_type: EventToCoinType = AHashMap::new(); - let mut all_coin_supply = Vec::new(); + // Extracts events and user request from genesis and user transactions. 
Other transactions won't have coin events let txn_data = match transaction.txn_data.as_ref() { Some(data) => data, @@ -114,7 +114,6 @@ impl CoinActivity { // The rest are fields common to all transactions let txn_version = transaction.version as i64; - let txn_epoch = transaction.epoch as i64; let block_height = transaction.block_height as i64; let transaction_info = transaction .info @@ -125,6 +124,7 @@ impl CoinActivity { .as_ref() .expect("Transaction timestamp doesn't exist!") .seconds; + #[allow(deprecated)] let txn_timestamp = NaiveDateTime::from_timestamp_opt(txn_timestamp, 0).expect("Txn Timestamp is invalid!"); @@ -168,15 +168,6 @@ impl CoinActivity { (None, None) }; - let maybe_coin_supply = if let WriteSetChangeEnum::WriteTableItem(table_item) = - wsc.change.as_ref().unwrap() - { - CoinSupply::from_write_table_item(table_item, txn_version, txn_timestamp, txn_epoch) - .unwrap() - } else { - None - }; - if let Some(coin_info) = maybe_coin_info { coin_infos.insert(coin_info.coin_type.clone(), coin_info); } @@ -193,9 +184,6 @@ impl CoinActivity { coin_balances.push(coin_balance); all_event_to_coin_type.extend(event_to_coin_type); } - if let Some(coin_supply) = maybe_coin_supply { - all_coin_supply.push(coin_supply); - } } for (index, event) in events.iter().enumerate() { let event_type = event.type_str.clone(); @@ -220,7 +208,6 @@ impl CoinActivity { coin_balances, coin_infos, current_coin_balances, - all_coin_supply, ) } @@ -286,18 +273,10 @@ impl CoinActivity { ) -> Self { let aptos_coin_burned = BigDecimal::from(txn_info.gas_used * user_transaction_request.gas_unit_price); - let signature = user_transaction_request - .signature - .as_ref() - .unwrap_or_else(|| { - tracing::error!( - transaction_version = transaction_version, - "User transaction must have signature" - ); - panic!("User transaction must have signature") - }); - let gas_fee_payer_address = - Signature::get_fee_payer_address(signature, transaction_version); + let gas_fee_payer_address = match user_transaction_request.signature.as_ref() { + Some(signature) => Signature::get_fee_payer_address(signature, transaction_version), + None => None, + }; Self { transaction_version, diff --git a/rust/processor/src/models/coin_models/coin_balances.rs b/rust/processor/src/db/common/models/coin_models/coin_balances.rs similarity index 97% rename from rust/processor/src/models/coin_models/coin_balances.rs rename to rust/processor/src/db/common/models/coin_models/coin_balances.rs index 365730f04..a50691a77 100644 --- a/rust/processor/src/models/coin_models/coin_balances.rs +++ b/rust/processor/src/db/common/models/coin_models/coin_balances.rs @@ -7,7 +7,7 @@ use super::coin_utils::{CoinInfoType, CoinResource}; use crate::{ - models::fungible_asset_models::v2_fungible_asset_activities::EventToCoinType, + db::common::models::fungible_asset_models::v2_fungible_asset_activities::EventToCoinType, schema::{coin_balances, current_coin_balances}, utils::util::standardize_address, }; diff --git a/rust/processor/src/models/coin_models/coin_infos.rs b/rust/processor/src/db/common/models/coin_models/coin_infos.rs similarity index 100% rename from rust/processor/src/models/coin_models/coin_infos.rs rename to rust/processor/src/db/common/models/coin_models/coin_infos.rs diff --git a/rust/processor/src/models/coin_models/coin_supply.rs b/rust/processor/src/db/common/models/coin_models/coin_supply.rs similarity index 95% rename from rust/processor/src/models/coin_models/coin_supply.rs rename to 
rust/processor/src/db/common/models/coin_models/coin_supply.rs index a6b4503af..f3942dae8 100644 --- a/rust/processor/src/models/coin_models/coin_supply.rs +++ b/rust/processor/src/db/common/models/coin_models/coin_supply.rs @@ -6,8 +6,9 @@ #![allow(clippy::unused_unit)] use crate::{ - models::default_models::move_tables::TableItem, - processors::coin_processor::APTOS_COIN_TYPE_STR, schema::coin_supply, utils::util::hash_str, + db::common::models::default_models::move_tables::TableItem, + schema::coin_supply, + utils::util::{hash_str, APTOS_COIN_TYPE_STR}, }; use anyhow::Context; use aptos_protos::transaction::v1::WriteTableItem; diff --git a/rust/processor/src/models/coin_models/coin_utils.rs b/rust/processor/src/db/common/models/coin_models/coin_utils.rs similarity index 82% rename from rust/processor/src/models/coin_models/coin_utils.rs rename to rust/processor/src/db/common/models/coin_models/coin_utils.rs index 30d1b4d6f..ec46f532c 100644 --- a/rust/processor/src/models/coin_models/coin_utils.rs +++ b/rust/processor/src/db/common/models/coin_models/coin_utils.rs @@ -5,12 +5,13 @@ #![allow(clippy::extra_unused_lifetimes)] use crate::{ - models::default_models::move_resources::MoveResource, + db::common::models::default_models::move_resources::MoveResource, utils::util::{deserialize_from_string, hash_str, standardize_address, truncate_str}, }; -use anyhow::{Context, Result}; -use aptos_protos::transaction::v1::{move_type::Content, MoveType, WriteResource}; +use anyhow::{bail, Context, Result}; +use aptos_protos::transaction::v1::{move_type::Content, DeleteResource, MoveType, WriteResource}; use bigdecimal::BigDecimal; +use once_cell::sync::Lazy; use regex::Regex; use serde::{Deserialize, Serialize}; use tracing::error; @@ -77,13 +78,6 @@ pub struct IntegerWrapperResource { pub vec: Vec, } -impl IntegerWrapperResource { - /// In case we do want to track supply - pub fn get_supply(&self) -> Option { - self.vec.first().map(|inner| inner.value.clone()) - } -} - #[derive(Serialize, Deserialize, Debug, Clone)] pub struct AggregatorResource { pub handle: String, @@ -162,15 +156,22 @@ pub struct CoinInfoType { creator_address: String, } +static RE: Lazy = Lazy::new(|| Regex::new(r"(<(.*)>)").unwrap()); + +static COIN_RESOURCES: Lazy<[String; 2]> = Lazy::new(|| { + [ + format!("{}::coin::CoinInfo", COIN_ADDR), + format!("{}::coin::CoinStore", COIN_ADDR), + ] +}); + impl CoinInfoType { /// get creator address from move_type, and get coin type from move_type_str /// Since move_type_str will contain things we don't need, e.g. 0x1::coin::CoinInfo. We will use /// regex to extract T. 
pub fn from_move_type(move_type: &MoveType, move_type_str: &str, txn_version: i64) -> Self { if let Content::Struct(struct_tag) = move_type.content.as_ref().unwrap() { - let re = Regex::new(r"(<(.*)>)").unwrap(); - - let matched = re.captures(move_type_str).unwrap_or_else(|| { + let matched = RE.captures(move_type_str).unwrap_or_else(|| { error!( txn_version = txn_version, move_type_str = move_type_str, @@ -221,15 +222,13 @@ impl CoinInfoType { pub enum CoinResource { CoinInfoResource(CoinInfoResource), CoinStoreResource(CoinStoreResource), + CoinInfoDeletion, + CoinStoreDeletion, } impl CoinResource { pub fn is_resource_supported(data_type: &str) -> bool { - [ - format!("{}::coin::CoinInfo", COIN_ADDR), - format!("{}::coin::CoinStore", COIN_ADDR), - ] - .contains(&data_type.to_string()) + COIN_RESOURCES.contains(&data_type.to_string()) } pub fn from_resource( @@ -258,11 +257,27 @@ impl CoinResource { )) } + fn from_delete_resource_internal(data_type: &str, txn_version: i64) -> Result { + match data_type { + x if x == format!("{}::coin::CoinInfo", COIN_ADDR) => { + Ok(CoinResource::CoinInfoDeletion) + }, + x if x == format!("{}::coin::CoinStore", COIN_ADDR) => { + Ok(CoinResource::CoinStoreDeletion) + }, + _ => bail!( + "Resource unsupported! Call is_resource_supported first. version {} type {}", + txn_version, + data_type + ), + } + } + pub fn from_write_resource( write_resource: &WriteResource, txn_version: i64, ) -> Result> { - let type_str = MoveResource::get_outer_type_from_resource(write_resource); + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); if !CoinResource::is_resource_supported(type_str.as_str()) { return Ok(None); } @@ -278,6 +293,20 @@ impl CoinResource { txn_version, )?)) } + + pub fn from_delete_resource( + delete_resource: &DeleteResource, + txn_version: i64, + ) -> Result> { + let type_str = MoveResource::get_outer_type_from_delete_resource(delete_resource); + if !CoinResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + Ok(Some(Self::from_delete_resource_internal( + &type_str, + txn_version, + )?)) + } } #[derive(Serialize, Deserialize, Debug, Clone)] diff --git a/rust/processor/src/models/coin_models/mod.rs b/rust/processor/src/db/common/models/coin_models/mod.rs similarity index 100% rename from rust/processor/src/models/coin_models/mod.rs rename to rust/processor/src/db/common/models/coin_models/mod.rs diff --git a/rust/processor/src/models/default_models/block_metadata_transactions.rs b/rust/processor/src/db/common/models/default_models/block_metadata_transactions.rs similarity index 100% rename from rust/processor/src/models/default_models/block_metadata_transactions.rs rename to rust/processor/src/db/common/models/default_models/block_metadata_transactions.rs diff --git a/rust/processor/src/models/default_models/mod.rs b/rust/processor/src/db/common/models/default_models/mod.rs similarity index 60% rename from rust/processor/src/models/default_models/mod.rs rename to rust/processor/src/db/common/models/default_models/mod.rs index 22f3722d4..d3d54d58f 100644 --- a/rust/processor/src/models/default_models/mod.rs +++ b/rust/processor/src/db/common/models/default_models/mod.rs @@ -7,3 +7,9 @@ pub mod move_resources; pub mod move_tables; pub mod transactions; pub mod write_set_changes; + +// parquet models +pub mod parquet_move_resources; +pub mod parquet_move_tables; +pub mod parquet_transactions; +pub mod parquet_write_set_changes; diff --git a/rust/processor/src/models/default_models/move_modules.rs 
b/rust/processor/src/db/common/models/default_models/move_modules.rs similarity index 100% rename from rust/processor/src/models/default_models/move_modules.rs rename to rust/processor/src/db/common/models/default_models/move_modules.rs diff --git a/rust/processor/src/models/default_models/move_resources.rs b/rust/processor/src/db/common/models/default_models/move_resources.rs similarity index 90% rename from rust/processor/src/models/default_models/move_resources.rs rename to rust/processor/src/db/common/models/default_models/move_resources.rs index b588aba27..ac73336bd 100644 --- a/rust/processor/src/models/default_models/move_resources.rs +++ b/rust/processor/src/db/common/models/default_models/move_resources.rs @@ -116,7 +116,7 @@ impl MoveResource { } } - pub fn get_outer_type_from_resource(write_resource: &WriteResource) -> String { + pub fn get_outer_type_from_write_resource(write_resource: &WriteResource) -> String { let move_struct_tag = Self::convert_move_struct_tag(write_resource.r#type.as_ref().unwrap()); @@ -127,6 +127,18 @@ impl MoveResource { move_struct_tag.name, ) } + + pub fn get_outer_type_from_delete_resource(delete_resource: &DeleteResource) -> String { + let move_struct_tag = + Self::convert_move_struct_tag(delete_resource.r#type.as_ref().unwrap()); + + format!( + "{}::{}::{}", + move_struct_tag.get_address(), + move_struct_tag.module, + move_struct_tag.name, + ) + } } impl MoveStructTag { diff --git a/rust/processor/src/models/default_models/move_tables.rs b/rust/processor/src/db/common/models/default_models/move_tables.rs similarity index 100% rename from rust/processor/src/models/default_models/move_tables.rs rename to rust/processor/src/db/common/models/default_models/move_tables.rs diff --git a/rust/processor/src/db/common/models/default_models/parquet_move_resources.rs b/rust/processor/src/db/common/models/default_models/parquet_move_resources.rs new file mode 100644 index 000000000..134127add --- /dev/null +++ b/rust/processor/src/db/common/models/default_models/parquet_move_resources.rs @@ -0,0 +1,153 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +#![allow(clippy::extra_unused_lifetimes)] + +use crate::{ + bq_analytics::generic_parquet_processor::{HasVersion, NamedTable}, + utils::util::standardize_address, +}; +use allocative_derive::Allocative; +use anyhow::{Context, Result}; +use aptos_protos::transaction::v1::{ + DeleteResource, MoveStructTag as MoveStructTagPB, WriteResource, +}; +use field_count::FieldCount; +use parquet_derive::ParquetRecordWriter; +use serde::{Deserialize, Serialize}; + +#[derive( + Allocative, Clone, Debug, Default, Deserialize, FieldCount, Serialize, ParquetRecordWriter, +)] +pub struct MoveResource { + pub txn_version: i64, + pub write_set_change_index: i64, + pub block_height: i64, + #[allocative(skip)] + pub block_timestamp: chrono::NaiveDateTime, + pub resource_address: String, + pub resource_type: String, + pub module: String, + pub fun: String, + pub is_deleted: bool, + pub generic_type_params: Option, + pub data: Option, + pub state_key_hash: String, +} + +impl NamedTable for MoveResource { + const TABLE_NAME: &'static str = "move_resources"; +} + +impl HasVersion for MoveResource { + fn version(&self) -> i64 { + self.txn_version + } +} + +pub struct MoveStructTag { + resource_address: String, + pub module: String, + pub fun: String, + pub generic_type_params: Option, +} + +impl MoveResource { + pub fn from_write_resource( + write_resource: &WriteResource, + write_set_change_index: i64, + 
txn_version: i64, + block_height: i64, + block_timestamp: chrono::NaiveDateTime, + ) -> Self { + let parsed_data = Self::convert_move_struct_tag( + write_resource + .r#type + .as_ref() + .expect("MoveStructTag Not Exists."), + ); + Self { + txn_version, + block_height, + write_set_change_index, + resource_type: write_resource.type_str.clone(), + fun: parsed_data.fun.clone(), + resource_address: standardize_address(&write_resource.address.to_string()), + module: parsed_data.module.clone(), + generic_type_params: parsed_data.generic_type_params, + data: Some(write_resource.data.clone()), + is_deleted: false, + state_key_hash: standardize_address( + hex::encode(write_resource.state_key_hash.as_slice()).as_str(), + ), + block_timestamp, + } + } + + pub fn from_delete_resource( + delete_resource: &DeleteResource, + write_set_change_index: i64, + txn_version: i64, + block_height: i64, + block_timestamp: chrono::NaiveDateTime, + ) -> Self { + let parsed_data = Self::convert_move_struct_tag( + delete_resource + .r#type + .as_ref() + .expect("MoveStructTag Not Exists."), + ); + Self { + txn_version, + block_height, + write_set_change_index, + resource_type: delete_resource.type_str.clone(), + fun: parsed_data.fun.clone(), + resource_address: standardize_address(&delete_resource.address.to_string()), + module: parsed_data.module.clone(), + generic_type_params: parsed_data.generic_type_params, + data: None, + is_deleted: true, + state_key_hash: standardize_address( + hex::encode(delete_resource.state_key_hash.as_slice()).as_str(), + ), + block_timestamp, + } + } + + pub fn convert_move_struct_tag(struct_tag: &MoveStructTagPB) -> MoveStructTag { + MoveStructTag { + resource_address: standardize_address(struct_tag.address.as_str()), + module: struct_tag.module.to_string(), + fun: struct_tag.name.to_string(), + generic_type_params: struct_tag + .generic_type_params + .iter() + .map(|move_type| -> Result> { + Ok(Some( + serde_json::to_string(move_type).context("Failed to parse move type")?, + )) + }) + .collect::>>() + .unwrap_or(None), + } + } + + pub fn get_outer_type_from_resource(write_resource: &WriteResource) -> String { + let move_struct_tag = + Self::convert_move_struct_tag(write_resource.r#type.as_ref().unwrap()); + + format!( + "{}::{}::{}", + move_struct_tag.get_address(), + move_struct_tag.module, + move_struct_tag.fun, + ) + } +} + +impl MoveStructTag { + pub fn get_address(&self) -> String { + standardize_address(self.resource_address.as_str()) + } +} diff --git a/rust/processor/src/db/common/models/default_models/parquet_move_tables.rs b/rust/processor/src/db/common/models/default_models/parquet_move_tables.rs new file mode 100644 index 000000000..014f00fef --- /dev/null +++ b/rust/processor/src/db/common/models/default_models/parquet_move_tables.rs @@ -0,0 +1,139 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +#![allow(clippy::extra_unused_lifetimes)] + +use crate::{ + bq_analytics::generic_parquet_processor::{HasVersion, NamedTable}, + utils::util::{hash_str, standardize_address}, +}; +use allocative_derive::Allocative; +use aptos_protos::transaction::v1::{DeleteTableItem, WriteTableItem}; +use field_count::FieldCount; +use parquet_derive::ParquetRecordWriter; +use serde::{Deserialize, Serialize}; + +#[derive( + Allocative, Clone, Debug, Default, Deserialize, FieldCount, Serialize, ParquetRecordWriter, +)] +pub struct TableItem { + pub txn_version: i64, + #[allocative(skip)] + pub block_timestamp: chrono::NaiveDateTime, + pub write_set_change_index: i64, + 
pub transaction_block_height: i64, + pub table_key: String, + pub table_handle: String, + pub decoded_key: String, + pub decoded_value: Option, + pub is_deleted: bool, +} + +impl NamedTable for TableItem { + const TABLE_NAME: &'static str = "table_items"; +} + +impl HasVersion for TableItem { + fn version(&self) -> i64 { + self.txn_version + } +} +#[derive(Clone, Debug, Deserialize, FieldCount, Serialize)] +pub struct CurrentTableItem { + pub table_handle: String, + pub key_hash: String, + pub key: String, + pub decoded_key: serde_json::Value, + pub decoded_value: Option, + pub last_transaction_version: i64, + pub is_deleted: bool, +} +#[derive(Clone, Debug, Deserialize, FieldCount, Serialize)] +pub struct TableMetadata { + pub handle: String, + pub key_type: String, + pub value_type: String, +} + +impl TableItem { + pub fn from_write_table_item( + write_table_item: &WriteTableItem, + write_set_change_index: i64, + txn_version: i64, + transaction_block_height: i64, + block_timestamp: chrono::NaiveDateTime, + ) -> (Self, CurrentTableItem) { + ( + Self { + txn_version, + write_set_change_index, + transaction_block_height, + table_key: write_table_item.key.to_string(), + table_handle: standardize_address(&write_table_item.handle.to_string()), + decoded_key: write_table_item.data.as_ref().unwrap().key.clone(), + decoded_value: Some(write_table_item.data.as_ref().unwrap().value.clone()), + is_deleted: false, + block_timestamp, + }, + CurrentTableItem { + table_handle: standardize_address(&write_table_item.handle.to_string()), + key_hash: hash_str(&write_table_item.key.to_string()), + key: write_table_item.key.to_string(), + decoded_key: serde_json::from_str( + write_table_item.data.as_ref().unwrap().key.as_str(), + ) + .unwrap(), + decoded_value: serde_json::from_str( + write_table_item.data.as_ref().unwrap().value.as_str(), + ) + .unwrap(), + last_transaction_version: txn_version, + is_deleted: false, + }, + ) + } + + pub fn from_delete_table_item( + delete_table_item: &DeleteTableItem, + write_set_change_index: i64, + txn_version: i64, + transaction_block_height: i64, + block_timestamp: chrono::NaiveDateTime, + ) -> (Self, CurrentTableItem) { + ( + Self { + txn_version, + write_set_change_index, + transaction_block_height, + table_key: delete_table_item.key.to_string(), + table_handle: standardize_address(&delete_table_item.handle.to_string()), + decoded_key: delete_table_item.data.as_ref().unwrap().key.clone(), + decoded_value: None, + is_deleted: true, + block_timestamp, + }, + CurrentTableItem { + table_handle: standardize_address(&delete_table_item.handle.to_string()), + key_hash: hash_str(&delete_table_item.key.to_string()), + key: delete_table_item.key.to_string(), + decoded_key: serde_json::from_str( + delete_table_item.data.as_ref().unwrap().key.as_str(), + ) + .unwrap(), + decoded_value: None, + last_transaction_version: txn_version, + is_deleted: true, + }, + ) + } +} + +impl TableMetadata { + pub fn from_write_table_item(table_item: &WriteTableItem) -> Self { + Self { + handle: table_item.handle.to_string(), + key_type: table_item.data.as_ref().unwrap().key_type.clone(), + value_type: table_item.data.as_ref().unwrap().value_type.clone(), + } + } +} diff --git a/rust/processor/src/db/common/models/default_models/parquet_transactions.rs b/rust/processor/src/db/common/models/default_models/parquet_transactions.rs new file mode 100644 index 000000000..7a18d621a --- /dev/null +++ b/rust/processor/src/db/common/models/default_models/parquet_transactions.rs @@ -0,0 +1,374 @@ +// Copyright 
© Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use super::{ + block_metadata_transactions::BlockMetadataTransaction, + parquet_write_set_changes::{WriteSetChangeDetail, WriteSetChangeModel}, +}; +use crate::{ + bq_analytics::generic_parquet_processor::{HasVersion, NamedTable}, + utils::{ + counters::PROCESSOR_UNKNOWN_TYPE_COUNT, + util::{get_clean_payload, get_clean_writeset, get_payload_type, standardize_address}, + }, +}; +use ahash::AHashMap; +use allocative_derive::Allocative; +use aptos_protos::transaction::v1::{ + transaction::{TransactionType, TxnData}, + Transaction as TransactionPB, TransactionInfo, +}; +use field_count::FieldCount; +use parquet_derive::ParquetRecordWriter; +use serde::{Deserialize, Serialize}; + +#[derive( + Allocative, Clone, Debug, Default, Deserialize, FieldCount, Serialize, ParquetRecordWriter, +)] +pub struct Transaction { + pub txn_version: i64, + pub block_height: i64, + pub epoch: i64, + pub txn_type: String, + pub payload: Option, + pub payload_type: Option, + pub gas_used: u64, + pub success: bool, + pub vm_status: String, + pub num_events: i64, + pub num_write_set_changes: i64, + pub txn_hash: String, + pub state_change_hash: String, + pub event_root_hash: String, + pub state_checkpoint_hash: Option, + pub accumulator_root_hash: String, + #[allocative(skip)] + pub block_timestamp: chrono::NaiveDateTime, +} + +impl NamedTable for Transaction { + const TABLE_NAME: &'static str = "transactions"; +} + +impl HasVersion for Transaction { + fn version(&self) -> i64 { + self.txn_version + } +} + +impl Transaction { + fn from_transaction_info( + info: &TransactionInfo, + txn_version: i64, + epoch: i64, + block_height: i64, + ) -> Self { + Self { + txn_version, + block_height, + txn_hash: standardize_address(hex::encode(info.hash.as_slice()).as_str()), + state_change_hash: standardize_address( + hex::encode(info.state_change_hash.as_slice()).as_str(), + ), + event_root_hash: standardize_address( + hex::encode(info.event_root_hash.as_slice()).as_str(), + ), + state_checkpoint_hash: info + .state_checkpoint_hash + .as_ref() + .map(|hash| standardize_address(hex::encode(hash).as_str())), + gas_used: info.gas_used, + success: info.success, + vm_status: info.vm_status.clone(), + accumulator_root_hash: standardize_address( + hex::encode(info.accumulator_root_hash.as_slice()).as_str(), + ), + num_write_set_changes: info.changes.len() as i64, + epoch, + ..Default::default() + } + } + + fn from_transaction_info_with_data( + info: &TransactionInfo, + payload: Option, + payload_type: Option, + txn_version: i64, + txn_type: String, + num_events: i64, + block_height: i64, + epoch: i64, + block_timestamp: chrono::NaiveDateTime, + ) -> Self { + Self { + txn_type, + payload, + txn_version, + block_height, + txn_hash: standardize_address(hex::encode(info.hash.as_slice()).as_str()), + state_change_hash: standardize_address( + hex::encode(info.state_change_hash.as_slice()).as_str(), + ), + event_root_hash: standardize_address( + hex::encode(info.event_root_hash.as_slice()).as_str(), + ), + state_checkpoint_hash: info + .state_checkpoint_hash + .as_ref() + .map(|hash| standardize_address(hex::encode(hash).as_str())), + gas_used: info.gas_used, + success: info.success, + vm_status: info.vm_status.clone(), + accumulator_root_hash: standardize_address( + hex::encode(info.accumulator_root_hash.as_slice()).as_str(), + ), + num_events, + 
num_write_set_changes: info.changes.len() as i64, + epoch, + payload_type, + block_timestamp, + } + } + + pub fn from_transaction( + transaction: &TransactionPB, + ) -> ( + Self, + Option, + Vec, + Vec, + ) { + let block_height = transaction.block_height as i64; + let epoch = transaction.epoch as i64; + let transaction_info = transaction + .info + .as_ref() + .expect("Transaction info doesn't exist!"); + let txn_data = match transaction.txn_data.as_ref() { + Some(txn_data) => txn_data, + None => { + PROCESSOR_UNKNOWN_TYPE_COUNT + .with_label_values(&["Transaction"]) + .inc(); + tracing::warn!( + transaction_version = transaction.version, + "Transaction data doesn't exist", + ); + let transaction_out = Self::from_transaction_info( + transaction_info, + transaction.version as i64, + epoch, + block_height, + ); + return (transaction_out, None, Vec::new(), Vec::new()); + }, + }; + let txn_version = transaction.version as i64; + let transaction_type = TransactionType::try_from(transaction.r#type) + .expect("Transaction type doesn't exist!") + .as_str_name() + .to_string(); + let timestamp = transaction + .timestamp + .as_ref() + .expect("Transaction timestamp doesn't exist!"); + #[allow(deprecated)] + let block_timestamp = chrono::NaiveDateTime::from_timestamp_opt(timestamp.seconds, 0) + .expect("Txn Timestamp is invalid!"); + match txn_data { + TxnData::User(user_txn) => { + let (wsc, wsc_detail) = WriteSetChangeModel::from_write_set_changes( + &transaction_info.changes, + txn_version, + block_height, + block_timestamp, + ); + let payload = user_txn + .request + .as_ref() + .expect("Getting user request failed.") + .payload + .as_ref() + .expect("Getting payload failed."); + let payload_cleaned = get_clean_payload(payload, txn_version); + let payload_type = get_payload_type(payload); + + // let serialized_payload = serde_json::to_string(&payload_cleaned).unwrap(); // Handle errors as needed) + let serialized_payload = + payload_cleaned.map(|payload| canonical_json::to_string(&payload).unwrap()); + ( + Self::from_transaction_info_with_data( + transaction_info, + serialized_payload, + Some(payload_type), + txn_version, + transaction_type, + user_txn.events.len() as i64, + block_height, + epoch, + block_timestamp, + ), + None, + wsc, + wsc_detail, + ) + }, + TxnData::Genesis(genesis_txn) => { + let (wsc, wsc_detail) = WriteSetChangeModel::from_write_set_changes( + &transaction_info.changes, + txn_version, + block_height, + block_timestamp, + ); + let payload = genesis_txn.payload.as_ref().unwrap(); + let payload_cleaned = get_clean_writeset(payload, txn_version); + // It's genesis so no big deal + // let serialized_payload = serde_json::to_string(&payload_cleaned).unwrap(); // Handle errors as needed + let serialized_payload = + payload_cleaned.map(|payload| canonical_json::to_string(&payload).unwrap()); + + let payload_type = None; + ( + Self::from_transaction_info_with_data( + transaction_info, + serialized_payload, + payload_type, + txn_version, + transaction_type, + genesis_txn.events.len() as i64, + block_height, + epoch, + block_timestamp, + ), + None, + wsc, + wsc_detail, + ) + }, + TxnData::BlockMetadata(block_metadata_txn) => { + let (wsc, wsc_detail) = WriteSetChangeModel::from_write_set_changes( + &transaction_info.changes, + txn_version, + block_height, + block_timestamp, + ); + ( + Self::from_transaction_info_with_data( + transaction_info, + None, + None, + txn_version, + transaction_type, + block_metadata_txn.events.len() as i64, + block_height, + epoch, + block_timestamp, + ), + 
Some(BlockMetadataTransaction::from_transaction( + block_metadata_txn, + txn_version, + block_height, + epoch, + timestamp, + )), + wsc, + wsc_detail, + ) + }, + TxnData::StateCheckpoint(_) => ( + Self::from_transaction_info_with_data( + transaction_info, + None, + None, + txn_version, + transaction_type, + 0, + block_height, + epoch, + block_timestamp, + ), + None, + vec![], + vec![], + ), + TxnData::Validator(_) => ( + Self::from_transaction_info_with_data( + transaction_info, + None, + None, + txn_version, + transaction_type, + 0, + block_height, + epoch, + block_timestamp, + ), + None, + vec![], + vec![], + ), + TxnData::BlockEpilogue(_) => ( + Self::from_transaction_info_with_data( + transaction_info, + None, + None, + txn_version, + transaction_type, + 0, + block_height, + epoch, + block_timestamp, + ), + None, + vec![], + vec![], + ), + } + } + + pub fn from_transactions( + transactions: &[TransactionPB], + transaction_version_to_struct_count: &mut AHashMap, + ) -> ( + Vec, + Vec, + Vec, + Vec, + ) { + let mut txns = vec![]; + let mut block_metadata_txns = vec![]; + let mut wscs = vec![]; + let mut wsc_details = vec![]; + + for txn in transactions { + let (txn, block_metadata, mut wsc_list, mut wsc_detail_list) = + Self::from_transaction(txn); + txns.push(txn.clone()); + transaction_version_to_struct_count + .entry(txn.txn_version) + .and_modify(|e| *e += 1) + .or_insert(1); + + if let Some(a) = block_metadata { + block_metadata_txns.push(a.clone()); + // transaction_version_to_struct_count.entry(a.version).and_modify(|e| *e += 1); + } + wscs.append(&mut wsc_list); + + if !wsc_list.is_empty() { + transaction_version_to_struct_count + .entry(wsc_list[0].txn_version) + .and_modify(|e| *e += wsc_list.len() as i64); + } + wsc_details.append(&mut wsc_detail_list); + } + (txns, block_metadata_txns, wscs, wsc_details) + } +} + +// Prevent conflicts with other things named `Transaction` +pub type TransactionModel = Transaction; diff --git a/rust/processor/src/db/common/models/default_models/parquet_write_set_changes.rs b/rust/processor/src/db/common/models/default_models/parquet_write_set_changes.rs new file mode 100644 index 000000000..8507a20cb --- /dev/null +++ b/rust/processor/src/db/common/models/default_models/parquet_write_set_changes.rs @@ -0,0 +1,254 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +#![allow(clippy::extra_unused_lifetimes)] + +use super::{ + move_modules::MoveModule, + parquet_move_resources::MoveResource, + parquet_move_tables::{CurrentTableItem, TableItem, TableMetadata}, +}; +use crate::{ + bq_analytics::generic_parquet_processor::{HasVersion, NamedTable}, + utils::util::standardize_address, +}; +use allocative_derive::Allocative; +use aptos_protos::transaction::v1::{ + write_set_change::{Change as WriteSetChangeEnum, Type as WriteSetChangeTypeEnum}, + WriteSetChange as WriteSetChangePB, +}; +use field_count::FieldCount; +use parquet_derive::ParquetRecordWriter; +use serde::{Deserialize, Serialize}; + +#[derive(Allocative, Clone, Debug, Deserialize, FieldCount, Serialize, ParquetRecordWriter)] +pub struct WriteSetChange { + pub txn_version: i64, + pub write_set_change_index: i64, + pub state_key_hash: String, + pub change_type: String, + pub resource_address: String, + pub block_height: i64, + #[allocative(skip)] + pub block_timestamp: chrono::NaiveDateTime, +} + +impl NamedTable for WriteSetChange { + const TABLE_NAME: &'static str = "write_set_changes"; +} + +impl HasVersion for WriteSetChange { + fn version(&self) -> i64 { + 
self.txn_version + } +} + +impl Default for WriteSetChange { + fn default() -> Self { + Self { + txn_version: 0, + write_set_change_index: 0, + state_key_hash: "".to_string(), + change_type: "".to_string(), + resource_address: "".to_string(), + block_height: 0, + #[allow(deprecated)] + block_timestamp: chrono::NaiveDateTime::from_timestamp(0, 0), + } + } +} + +impl WriteSetChange { + pub fn from_write_set_change( + write_set_change: &WriteSetChangePB, + write_set_change_index: i64, + txn_version: i64, + block_height: i64, + block_timestamp: chrono::NaiveDateTime, + ) -> (Self, WriteSetChangeDetail) { + let change_type = Self::get_write_set_change_type(write_set_change); + let change = write_set_change + .change + .as_ref() + .expect("WriteSetChange must have a change"); + match change { + WriteSetChangeEnum::WriteModule(inner) => ( + Self { + txn_version, + state_key_hash: standardize_address( + hex::encode(inner.state_key_hash.as_slice()).as_str(), + ), + block_height, + change_type, + resource_address: standardize_address(&inner.address.to_string()), + write_set_change_index, + block_timestamp, + }, + WriteSetChangeDetail::Module(MoveModule::from_write_module( + inner, + write_set_change_index, + txn_version, + block_height, + )), + ), + WriteSetChangeEnum::DeleteModule(inner) => ( + Self { + txn_version, + state_key_hash: standardize_address( + hex::encode(inner.state_key_hash.as_slice()).as_str(), + ), + block_height, + change_type, + resource_address: standardize_address(&inner.address.to_string()), + write_set_change_index, + block_timestamp, + }, + WriteSetChangeDetail::Module(MoveModule::from_delete_module( + inner, + write_set_change_index, + txn_version, + block_height, + )), + ), + WriteSetChangeEnum::WriteResource(inner) => ( + Self { + txn_version, + state_key_hash: standardize_address( + hex::encode(inner.state_key_hash.as_slice()).as_str(), + ), + block_height, + change_type, + resource_address: standardize_address(&inner.address.to_string()), + write_set_change_index, + block_timestamp, + }, + WriteSetChangeDetail::Resource(MoveResource::from_write_resource( + inner, + write_set_change_index, + txn_version, + block_height, + block_timestamp, + )), + ), + WriteSetChangeEnum::DeleteResource(inner) => ( + Self { + txn_version, + state_key_hash: standardize_address( + hex::encode(inner.state_key_hash.as_slice()).as_str(), + ), + block_height, + change_type, + resource_address: standardize_address(&inner.address.to_string()), + write_set_change_index, + block_timestamp, + }, + WriteSetChangeDetail::Resource(MoveResource::from_delete_resource( + inner, + write_set_change_index, + txn_version, + block_height, + block_timestamp, + )), + ), + WriteSetChangeEnum::WriteTableItem(inner) => { + let (ti, cti) = TableItem::from_write_table_item( + inner, + write_set_change_index, + txn_version, + block_height, + block_timestamp, + ); + ( + Self { + txn_version, + state_key_hash: standardize_address( + hex::encode(inner.state_key_hash.as_slice()).as_str(), + ), + block_height, + change_type, + resource_address: String::default(), + write_set_change_index, + block_timestamp, + }, + WriteSetChangeDetail::Table( + ti, + cti, + Some(TableMetadata::from_write_table_item(inner)), + ), + ) + }, + WriteSetChangeEnum::DeleteTableItem(inner) => { + let (ti, cti) = TableItem::from_delete_table_item( + inner, + write_set_change_index, + txn_version, + block_height, + block_timestamp, + ); + ( + Self { + txn_version, + state_key_hash: standardize_address( + 
hex::encode(inner.state_key_hash.as_slice()).as_str(), + ), + block_height, + change_type, + resource_address: String::default(), + write_set_change_index, + block_timestamp, + }, + WriteSetChangeDetail::Table(ti, cti, None), + ) + }, + } + } + + pub fn from_write_set_changes( + write_set_changes: &[WriteSetChangePB], + txn_version: i64, + block_height: i64, + timestamp: chrono::NaiveDateTime, + ) -> (Vec, Vec) { + write_set_changes + .iter() + .enumerate() + .map(|(write_set_change_index, write_set_change)| { + Self::from_write_set_change( + write_set_change, + write_set_change_index as i64, + txn_version, + block_height, + timestamp, + ) + }) + .collect::>() + .into_iter() + .unzip() + } + + fn get_write_set_change_type(t: &WriteSetChangePB) -> String { + match WriteSetChangeTypeEnum::try_from(t.r#type) + .expect("WriteSetChange must have a valid type.") + { + WriteSetChangeTypeEnum::DeleteModule => "delete_module".to_string(), + WriteSetChangeTypeEnum::DeleteResource => "delete_resource".to_string(), + WriteSetChangeTypeEnum::DeleteTableItem => "delete_table_item".to_string(), + WriteSetChangeTypeEnum::WriteModule => "write_module".to_string(), + WriteSetChangeTypeEnum::WriteResource => "write_resource".to_string(), + WriteSetChangeTypeEnum::WriteTableItem => "write_table_item".to_string(), + WriteSetChangeTypeEnum::Unspecified => { + panic!("WriteSetChange type must be specified.") + }, + } + } +} + +#[derive(Deserialize, Serialize)] +pub enum WriteSetChangeDetail { + Module(MoveModule), + Resource(MoveResource), + Table(TableItem, CurrentTableItem, Option), +} + +// Prevent conflicts with other things named `WriteSetChange` +pub type WriteSetChangeModel = WriteSetChange; diff --git a/rust/processor/src/models/default_models/transactions.rs b/rust/processor/src/db/common/models/default_models/transactions.rs similarity index 84% rename from rust/processor/src/models/default_models/transactions.rs rename to rust/processor/src/db/common/models/default_models/transactions.rs index dade5448f..5b4c17eb2 100644 --- a/rust/processor/src/models/default_models/transactions.rs +++ b/rust/processor/src/db/common/models/default_models/transactions.rs @@ -186,28 +186,33 @@ impl Transaction { .timestamp .as_ref() .expect("Transaction timestamp doesn't exist!"); + + let (wsc, wsc_detail) = WriteSetChangeModel::from_write_set_changes( + &transaction_info.changes, + version, + block_height, + ); + match txn_data { TxnData::User(user_txn) => { - let (wsc, wsc_detail) = WriteSetChangeModel::from_write_set_changes( - &transaction_info.changes, - version, - block_height, - ); - let payload = user_txn + let request = &user_txn .request .as_ref() - .expect("Getting user request failed.") - .payload - .as_ref() - .expect("Getting payload failed."); - let payload_cleaned = get_clean_payload(payload, version); - let payload_type = get_payload_type(payload); + .expect("Getting user request failed."); + + let (payload_cleaned, payload_type) = match request.payload.as_ref() { + Some(payload) => { + let payload_cleaned = get_clean_payload(payload, version); + (payload_cleaned, Some(get_payload_type(payload))) + }, + None => (None, None), + }; ( Self::from_transaction_info_with_data( transaction_info, payload_cleaned, - Some(payload_type), + payload_type, version, transaction_type, user_txn.events.len() as i64, @@ -220,13 +225,11 @@ impl Transaction { ) }, TxnData::Genesis(genesis_txn) => { - let (wsc, wsc_detail) = WriteSetChangeModel::from_write_set_changes( - &transaction_info.changes, - version, - block_height, 
- ); - let payload = genesis_txn.payload.as_ref().unwrap(); - let payload_cleaned = get_clean_writeset(payload, version); + let payload_cleaned = genesis_txn + .payload + .as_ref() + .map(|payload| get_clean_writeset(payload, version)) + .unwrap_or(None); // It's genesis so no big deal let payload_type = None; ( @@ -245,34 +248,27 @@ impl Transaction { wsc_detail, ) }, - TxnData::BlockMetadata(block_metadata_txn) => { - let (wsc, wsc_detail) = WriteSetChangeModel::from_write_set_changes( - &transaction_info.changes, + TxnData::BlockMetadata(block_metadata_txn) => ( + Self::from_transaction_info_with_data( + transaction_info, + None, + None, version, + transaction_type, + block_metadata_txn.events.len() as i64, block_height, - ); - ( - Self::from_transaction_info_with_data( - transaction_info, - None, - None, - version, - transaction_type, - block_metadata_txn.events.len() as i64, - block_height, - epoch, - ), - Some(BlockMetadataTransaction::from_transaction( - block_metadata_txn, - version, - block_height, - epoch, - timestamp, - )), - wsc, - wsc_detail, - ) - }, + epoch, + ), + Some(BlockMetadataTransaction::from_transaction( + block_metadata_txn, + version, + block_height, + epoch, + timestamp, + )), + wsc, + wsc_detail, + ), TxnData::StateCheckpoint(_) => ( Self::from_transaction_info_with_data( transaction_info, @@ -303,6 +299,21 @@ impl Transaction { vec![], vec![], ), + TxnData::BlockEpilogue(_) => ( + Self::from_transaction_info_with_data( + transaction_info, + None, + None, + version, + transaction_type, + 0, + block_height, + epoch, + ), + None, + vec![], + vec![], + ), } } diff --git a/rust/processor/src/models/default_models/write_set_changes.rs b/rust/processor/src/db/common/models/default_models/write_set_changes.rs similarity index 99% rename from rust/processor/src/models/default_models/write_set_changes.rs rename to rust/processor/src/db/common/models/default_models/write_set_changes.rs index 79d271fcb..c28a97c51 100644 --- a/rust/processor/src/models/default_models/write_set_changes.rs +++ b/rust/processor/src/db/common/models/default_models/write_set_changes.rs @@ -44,6 +44,7 @@ impl WriteSetChange { .change .as_ref() .expect("WriteSetChange must have a change"); + match change { WriteSetChangeEnum::WriteModule(inner) => ( Self { diff --git a/rust/processor/src/db/common/models/events_models/events.rs b/rust/processor/src/db/common/models/events_models/events.rs new file mode 100644 index 000000000..15fc2f7bd --- /dev/null +++ b/rust/processor/src/db/common/models/events_models/events.rs @@ -0,0 +1,163 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +#![allow(clippy::extra_unused_lifetimes)] + +use crate::{ + schema::events, + utils::util::{standardize_address, truncate_str}, +}; +use aptos_protos::transaction::v1::Event as EventPB; +use field_count::FieldCount; +use get_size::GetSize; +use serde::{Deserialize, Serialize}; +use std::sync::Arc; + +// p99 currently is 303 so using 300 as a safe max length +const EVENT_TYPE_MAX_LENGTH: usize = 300; + +#[derive( + Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize, Eq, PartialEq, +)] +#[diesel(primary_key(transaction_version, event_index))] +#[diesel(table_name = events)] +pub struct Event { + pub sequence_number: i64, + pub creation_number: i64, + pub account_address: String, + pub transaction_version: i64, + pub transaction_block_height: i64, + pub type_: String, + pub data: serde_json::Value, + pub event_index: i64, + pub indexed_type: String, +} + +impl Event { + pub 
fn from_event( + event: &EventPB, + transaction_version: i64, + transaction_block_height: i64, + event_index: i64, + ) -> Self { + let t: &str = event.type_str.as_ref(); + Event { + account_address: standardize_address( + event.key.as_ref().unwrap().account_address.as_str(), + ), + creation_number: event.key.as_ref().unwrap().creation_number as i64, + sequence_number: event.sequence_number as i64, + transaction_version, + transaction_block_height, + type_: t.to_string(), + data: serde_json::from_str(event.data.as_str()).unwrap(), + event_index, + indexed_type: truncate_str(t, EVENT_TYPE_MAX_LENGTH), + } + } + + pub fn from_events( + events: &[EventPB], + transaction_version: i64, + transaction_block_height: i64, + ) -> Vec { + events + .iter() + .enumerate() + .map(|(index, event)| { + Self::from_event( + event, + transaction_version, + transaction_block_height, + index as i64, + ) + }) + .collect::>() + } +} + +// Prevent conflicts with other things named `Event` +pub type EventModel = Event; + +#[derive(Clone, Debug, GetSize, Deserialize, Serialize, Eq, PartialEq)] +pub struct EventContext { + pub coin_type: String, +} + +#[derive(Clone, Debug, GetSize, Serialize, Deserialize, Eq, PartialEq)] +pub struct EventStreamMessage { + pub sequence_number: i64, + pub creation_number: i64, + pub account_address: String, + pub transaction_version: i64, + pub transaction_block_height: i64, + pub type_: String, + #[get_size(size_fn = get_serde_json_size_estimate)] + pub data: serde_json::Value, + pub event_index: i64, + pub indexed_type: String, + #[get_size(size = 12)] + pub transaction_timestamp: chrono::NaiveDateTime, + pub context: Option, +} + +fn get_serde_json_size_estimate(value: &serde_json::Value) -> usize { + match value { + serde_json::Value::Null => 0, + serde_json::Value::Bool(_) => 1, + serde_json::Value::Number(_) => 8, + serde_json::Value::String(s) => s.len(), + serde_json::Value::Array(arr) => arr.iter().map(get_serde_json_size_estimate).sum(), + serde_json::Value::Object(obj) => obj + .iter() + .map(|(k, v)| k.len() + get_serde_json_size_estimate(v)) + .sum(), + } +} + +impl EventStreamMessage { + pub fn from_event( + event: &Event, + context: Option, + transaction_timestamp: chrono::NaiveDateTime, + ) -> Self { + EventStreamMessage { + account_address: event.account_address.clone(), + creation_number: event.creation_number, + sequence_number: event.sequence_number, + transaction_version: event.transaction_version, + transaction_block_height: event.transaction_block_height, + type_: event.type_.clone(), + data: event.data.clone(), + event_index: event.event_index, + indexed_type: event.indexed_type.clone(), + transaction_timestamp, + context: context.clone(), + } + } +} + +#[derive(Clone, Debug, GetSize, Serialize, Deserialize, Eq, PartialEq)] +pub struct CachedEvents { + pub transaction_version: i64, + pub events: Vec>, +} + +impl CachedEvents { + pub fn from_event_stream_message( + transaction_version: i64, + event_stream_message: Vec>, + ) -> Self { + CachedEvents { + transaction_version, + events: event_stream_message.clone(), + } + } + + pub fn empty(transaction_version: i64) -> Self { + CachedEvents { + transaction_version, + events: Vec::with_capacity(0), + } + } +} diff --git a/rust/processor/src/models/events_models/mod.rs b/rust/processor/src/db/common/models/events_models/mod.rs similarity index 100% rename from rust/processor/src/models/events_models/mod.rs rename to rust/processor/src/db/common/models/events_models/mod.rs diff --git 
a/rust/processor/src/models/fungible_asset_models/mod.rs b/rust/processor/src/db/common/models/fungible_asset_models/mod.rs similarity index 100% rename from rust/processor/src/models/fungible_asset_models/mod.rs rename to rust/processor/src/db/common/models/fungible_asset_models/mod.rs diff --git a/rust/processor/src/models/fungible_asset_models/v2_fungible_asset_activities.rs b/rust/processor/src/db/common/models/fungible_asset_models/v2_fungible_asset_activities.rs similarity index 88% rename from rust/processor/src/models/fungible_asset_models/v2_fungible_asset_activities.rs rename to rust/processor/src/db/common/models/fungible_asset_models/v2_fungible_asset_activities.rs index a2566892b..c4ee7880b 100644 --- a/rust/processor/src/models/fungible_asset_models/v2_fungible_asset_activities.rs +++ b/rust/processor/src/db/common/models/fungible_asset_models/v2_fungible_asset_activities.rs @@ -5,12 +5,9 @@ #![allow(clippy::extra_unused_lifetimes)] #![allow(clippy::unused_unit)] -use super::{ - v2_fungible_asset_utils::{FeeStatement, FungibleAssetEvent}, - v2_fungible_metadata::FungibleAssetMetadataModel, -}; +use super::v2_fungible_asset_utils::{FeeStatement, FungibleAssetEvent}; use crate::{ - models::{ + db::common::models::{ coin_models::{ coin_activities::CoinActivity, coin_utils::{CoinEvent, CoinInfoType, EventGuidResource}, @@ -19,7 +16,7 @@ use crate::{ token_v2_models::v2_token_utils::TokenStandard, }, schema::fungible_asset_activities, - utils::{database::PgPoolConnection, util::standardize_address}, + utils::util::standardize_address, }; use ahash::AHashMap; use anyhow::Context; @@ -70,7 +67,6 @@ impl FungibleAssetActivity { event_index: i64, entry_function_id_str: &Option, object_aggregated_data_mapping: &ObjectAggregatedDataMapping, - conn: &mut PgPoolConnection<'_>, ) -> anyhow::Result> { let event_type = event.type_str.clone(); if let Some(fa_event) = @@ -84,17 +80,6 @@ impl FungibleAssetActivity { let object_core = &object_metadata.object.object_core; let fungible_asset = object_metadata.fungible_asset_store.as_ref().unwrap(); let asset_type = fungible_asset.metadata.get_reference_address(); - // If it's a fungible token, return early - if !FungibleAssetMetadataModel::is_address_fungible_asset( - conn, - &asset_type, - object_aggregated_data_mapping, - txn_version, - ) - .await - { - return Ok(None); - } let (is_frozen, amount) = match fa_event { FungibleAssetEvent::WithdrawEvent(inner) => (None, Some(inner.amount.clone())), @@ -146,15 +131,18 @@ impl FungibleAssetActivity { addr: standardize_address(event_key.account_address.as_str()), creation_num: event_key.creation_number as i64, }; - let coin_type = - event_to_coin_type - .get(&event_move_guid) - .unwrap_or_else(|| { - panic!( - "Could not find event in resources (CoinStore), version: {}, event guid: {:?}, mapping: {:?}", - txn_version, event_move_guid, event_to_coin_type - ) - }).clone(); + // Given this mapping only contains coin type < 1000 length, we should not assume that the mapping exists. + // If it doesn't exist, skip. 
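// Editorial aside (not part of the diff): a minimal, self-contained sketch of the
// skip-on-miss lookup the comment above describes. The `EventGuidResource` stand-in
// below only mirrors the two fields the real key is built from (account address and
// creation number); the real type lives in coin_utils and the real mapping is an
// AHashMap populated from CoinStore resources.
use std::collections::HashMap;

#[derive(Clone, Debug, Hash, PartialEq, Eq)]
struct EventGuidResource {
    addr: String,
    creation_num: i64,
}

/// Returns None instead of panicking, so the caller can skip events whose
/// CoinStore was excluded from the mapping (e.g. overly long coin types).
fn lookup_coin_type(
    event_to_coin_type: &HashMap<EventGuidResource, String>,
    guid: &EventGuidResource,
) -> Option<String> {
    event_to_coin_type.get(guid).cloned()
}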
+ let coin_type = match event_to_coin_type.get(&event_move_guid) { + Some(coin_type) => coin_type.clone(), + None => { + tracing::warn!( + "Could not find event in resources (CoinStore), version: {}, event guid: {:?}, mapping: {:?}", + txn_version, event_move_guid, event_to_coin_type + ); + return Ok(None); + }, + }; let storage_id = CoinInfoType::get_storage_id(coin_type.as_str(), event_move_guid.addr.as_str()); Ok(Some(Self { diff --git a/rust/processor/src/models/fungible_asset_models/v2_fungible_asset_balances.rs b/rust/processor/src/db/common/models/fungible_asset_models/v2_fungible_asset_balances.rs similarity index 53% rename from rust/processor/src/models/fungible_asset_models/v2_fungible_asset_balances.rs rename to rust/processor/src/db/common/models/fungible_asset_models/v2_fungible_asset_balances.rs index 0da57d55d..33819b470 100644 --- a/rust/processor/src/models/fungible_asset_models/v2_fungible_asset_balances.rs +++ b/rust/processor/src/db/common/models/fungible_asset_models/v2_fungible_asset_balances.rs @@ -7,25 +7,28 @@ use super::{ v2_fungible_asset_activities::EventToCoinType, v2_fungible_asset_utils::FungibleAssetStore, - v2_fungible_metadata::FungibleAssetMetadataModel, }; use crate::{ - models::{ + db::common::models::{ coin_models::coin_utils::{CoinInfoType, CoinResource}, object_models::v2_object_utils::ObjectAggregatedDataMapping, - token_v2_models::v2_token_utils::TokenStandard, + token_v2_models::v2_token_utils::{TokenStandard, V2_STANDARD}, + }, + schema::{ + current_fungible_asset_balances, current_unified_fungible_asset_balances_to_be_renamed, + fungible_asset_balances, + }, + utils::util::{ + hex_to_raw_bytes, sha3_256, standardize_address, APTOS_COIN_TYPE_STR, + APT_METADATA_ADDRESS_HEX, APT_METADATA_ADDRESS_RAW, }, - schema::{current_fungible_asset_balances, fungible_asset_balances}, - utils::{database::PgPoolConnection, util::standardize_address}, }; use ahash::AHashMap; -use aptos_protos::transaction::v1::WriteResource; -use bigdecimal::BigDecimal; +use aptos_protos::transaction::v1::{DeleteResource, WriteResource}; +use bigdecimal::{BigDecimal, Zero}; use field_count::FieldCount; -use hex::FromHex; use serde::{Deserialize, Serialize}; -use sha2::Digest; -use sha3::Sha3_256; +use std::borrow::Borrow; // Storage id pub type CurrentFungibleAssetBalancePK = String; @@ -63,6 +66,86 @@ pub struct CurrentFungibleAssetBalance { pub token_standard: String, } +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize, Default)] +#[diesel(primary_key(storage_id))] +#[diesel(table_name = current_unified_fungible_asset_balances_to_be_renamed)] +#[diesel(treat_none_as_null = true)] +pub struct CurrentUnifiedFungibleAssetBalance { + pub storage_id: String, + pub owner_address: String, + // metadata address for (paired) Fungible Asset + pub asset_type_v1: Option, + pub asset_type_v2: Option, + pub is_primary: Option, + pub is_frozen: bool, + pub amount_v1: Option, + pub amount_v2: Option, + pub last_transaction_version_v1: Option, + pub last_transaction_version_v2: Option, + pub last_transaction_timestamp_v1: Option, + pub last_transaction_timestamp_v2: Option, +} + +fn get_paired_metadata_address(coin_type_name: &str) -> String { + if coin_type_name == APTOS_COIN_TYPE_STR { + APT_METADATA_ADDRESS_HEX.clone() + } else { + let mut preimage = APT_METADATA_ADDRESS_RAW.to_vec(); + preimage.extend(coin_type_name.as_bytes()); + preimage.push(0xFE); + format!("0x{}", hex::encode(sha3_256(&preimage))) + } +} + +fn get_primary_fungible_store_address( + 
owner_address: &str, + metadata_address: &str, +) -> anyhow::Result { + let mut preimage = hex_to_raw_bytes(owner_address)?; + preimage.append(&mut hex_to_raw_bytes(metadata_address)?); + preimage.push(0xFC); + Ok(standardize_address(&hex::encode(sha3_256(&preimage)))) +} + +impl From<&CurrentFungibleAssetBalance> for CurrentUnifiedFungibleAssetBalance { + fn from(cfab: &CurrentFungibleAssetBalance) -> Self { + if cfab.token_standard.as_str() == V2_STANDARD.borrow().as_str() { + Self { + storage_id: cfab.storage_id.clone(), + owner_address: cfab.owner_address.clone(), + asset_type_v2: Some(cfab.asset_type.clone()), + asset_type_v1: None, + is_primary: Some(cfab.is_primary), + is_frozen: cfab.is_frozen, + amount_v1: None, + amount_v2: Some(cfab.amount.clone()), + last_transaction_version_v1: None, + last_transaction_version_v2: Some(cfab.last_transaction_version), + last_transaction_timestamp_v1: None, + last_transaction_timestamp_v2: Some(cfab.last_transaction_timestamp), + } + } else { + let metadata_addr = get_paired_metadata_address(&cfab.asset_type); + let pfs_addr = get_primary_fungible_store_address(&cfab.owner_address, &metadata_addr) + .expect("calculate pfs_address failed"); + Self { + storage_id: pfs_addr, + owner_address: cfab.owner_address.clone(), + asset_type_v2: None, + asset_type_v1: Some(cfab.asset_type.clone()), + is_primary: None, + is_frozen: cfab.is_frozen, + amount_v1: Some(cfab.amount.clone()), + amount_v2: None, + last_transaction_version_v1: Some(cfab.last_transaction_version), + last_transaction_version_v2: None, + last_transaction_timestamp_v1: Some(cfab.last_transaction_timestamp), + last_transaction_timestamp_v2: None, + } + } + } +} + impl FungibleAssetBalance { /// Basically just need to index FA Store, but we'll need to look up FA metadata pub async fn get_v2_from_write_resource( @@ -71,7 +154,6 @@ impl FungibleAssetBalance { txn_version: i64, txn_timestamp: chrono::NaiveDateTime, object_metadatas: &ObjectAggregatedDataMapping, - conn: &mut PgPoolConnection<'_>, ) -> anyhow::Result> { if let Some(inner) = &FungibleAssetStore::from_write_resource(write_resource, txn_version)? 
{ @@ -81,19 +163,15 @@ impl FungibleAssetBalance { let object = &object_data.object.object_core; let owner_address = object.get_owner_address(); let asset_type = inner.metadata.get_reference_address(); - // If it's a fungible token, return early - if !FungibleAssetMetadataModel::is_address_fungible_asset( - conn, - &asset_type, - object_metadatas, - txn_version, - ) - .await - { - return Ok(None); - } let is_primary = Self::is_primary(&owner_address, &asset_type, &storage_id); + let concurrent_balance = object_data + .concurrent_fungible_asset_balance + .as_ref() + .map(|concurrent_fungible_asset_balance| { + concurrent_fungible_asset_balance.balance.value.clone() + }); + let coin_balance = Self { transaction_version: txn_version, write_set_change_index, @@ -102,7 +180,9 @@ impl FungibleAssetBalance { asset_type: asset_type.clone(), is_primary, is_frozen: inner.frozen, - amount: inner.balance.clone(), + amount: concurrent_balance + .clone() + .unwrap_or_else(|| inner.balance.clone()), transaction_timestamp: txn_timestamp, token_standard: TokenStandard::V2.to_string(), }; @@ -112,7 +192,7 @@ impl FungibleAssetBalance { asset_type: asset_type.clone(), is_primary, is_frozen: inner.frozen, - amount: inner.balance.clone(), + amount: concurrent_balance.unwrap_or_else(|| inner.balance.clone()), last_transaction_version: txn_version, last_transaction_timestamp: txn_timestamp, token_standard: TokenStandard::V2.to_string(), @@ -124,7 +204,59 @@ impl FungibleAssetBalance { Ok(None) } + pub fn get_v1_from_delete_resource( + delete_resource: &DeleteResource, + write_set_change_index: i64, + txn_version: i64, + txn_timestamp: chrono::NaiveDateTime, + ) -> anyhow::Result> { + if let Some(CoinResource::CoinStoreDeletion) = + &CoinResource::from_delete_resource(delete_resource, txn_version)? 
+ { + let coin_info_type = &CoinInfoType::from_move_type( + &delete_resource.r#type.as_ref().unwrap().generic_type_params[0], + delete_resource.type_str.as_ref(), + txn_version, + ); + if let Some(coin_type) = coin_info_type.get_coin_type_below_max() { + let owner_address = standardize_address(delete_resource.address.as_str()); + let storage_id = + CoinInfoType::get_storage_id(coin_type.as_str(), owner_address.as_str()); + let coin_balance = Self { + transaction_version: txn_version, + write_set_change_index, + storage_id: storage_id.clone(), + owner_address: owner_address.clone(), + asset_type: coin_type.clone(), + is_primary: true, + is_frozen: false, + amount: BigDecimal::zero(), + transaction_timestamp: txn_timestamp, + token_standard: TokenStandard::V1.to_string(), + }; + let current_coin_balance = CurrentFungibleAssetBalance { + storage_id, + owner_address, + asset_type: coin_type.clone(), + is_primary: true, + is_frozen: false, + amount: BigDecimal::zero(), + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + token_standard: TokenStandard::V1.to_string(), + }; + return Ok(Some(( + coin_balance, + current_coin_balance, + AHashMap::default(), + ))); + } + } + Ok(None) + } + /// Getting coin balances from resources for v1 + /// If the fully qualified coin type is too long (currently 1000 length), we exclude from indexing pub fn get_v1_from_write_resource( write_resource: &WriteResource, write_set_change_index: i64, @@ -190,17 +322,8 @@ impl FungibleAssetBalance { metadata_address: &str, fungible_store_address: &str, ) -> bool { - let owner_address_bytes = <[u8; 32]>::from_hex(&owner_address[2..]).unwrap(); - let metadata_address_bytes = <[u8; 32]>::from_hex(&metadata_address[2..]).unwrap(); - - // construct the expected metadata address - let mut hasher = Sha3_256::new(); - hasher.update(owner_address_bytes); - hasher.update(metadata_address_bytes); - hasher.update([0xFC]); - let hash_result = hasher.finalize(); - // compare address to actual metadata address - hex::encode(hash_result) == fungible_store_address[2..] 
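// Editorial aside (not part of the diff): the two address derivations this hunk
// consolidates, shown as a standalone sketch over raw 32-byte addresses. It assumes
// only the sha3 and hex crates; the 0xFE / 0xFC domain separators and the preimage
// layouts mirror get_paired_metadata_address and get_primary_fungible_store_address
// introduced above, which additionally handle 0x-prefixed hex strings and the
// AptosCoin special case.
use sha3::{Digest, Sha3_256};

fn sha3_256(buf: &[u8]) -> Vec<u8> {
    let mut hasher = Sha3_256::new();
    hasher.update(buf);
    hasher.finalize().to_vec()
}

/// Paired FA metadata address for a v1 coin type:
/// sha3_256(APT metadata address bytes || coin type name || 0xFE).
fn paired_metadata_address(apt_metadata_address_raw: &[u8; 32], coin_type_name: &str) -> String {
    let mut preimage = apt_metadata_address_raw.to_vec();
    preimage.extend(coin_type_name.as_bytes());
    preimage.push(0xFE);
    format!("0x{}", hex::encode(sha3_256(&preimage)))
}

/// Primary fungible store address for (owner, metadata):
/// sha3_256(owner address bytes || metadata address bytes || 0xFC).
fn primary_fungible_store_address(owner: &[u8; 32], metadata: &[u8; 32]) -> String {
    let mut preimage = owner.to_vec();
    preimage.extend_from_slice(metadata);
    preimage.push(0xFC);
    format!("0x{}", hex::encode(sha3_256(&preimage)))
}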
+ fungible_store_address + == get_primary_fungible_store_address(owner_address, metadata_address).unwrap() } } @@ -248,4 +371,13 @@ mod tests { fungible_store_address, )); } + + #[test] + fn test_paired_metadata_address() { + assert_eq!( + get_paired_metadata_address("0x1::aptos_coin::AptosCoin"), + *APT_METADATA_ADDRESS_HEX + ); + assert_eq!(get_paired_metadata_address("0x66c34778730acbb120cefa57a3d98fd21e0c8b3a51e9baee530088b2e444e94c::moon_coin::MoonCoin"), "0xf772c28c069aa7e4417d85d771957eb3c5c11b5bf90b1965cda23b899ebc0384"); + } } diff --git a/rust/processor/src/models/fungible_asset_models/v2_fungible_asset_utils.rs b/rust/processor/src/db/common/models/fungible_asset_models/v2_fungible_asset_utils.rs similarity index 74% rename from rust/processor/src/models/fungible_asset_models/v2_fungible_asset_utils.rs rename to rust/processor/src/db/common/models/fungible_asset_models/v2_fungible_asset_utils.rs index da43ca2c0..5de2a9d49 100644 --- a/rust/processor/src/models/fungible_asset_models/v2_fungible_asset_utils.rs +++ b/rust/processor/src/db/common/models/fungible_asset_models/v2_fungible_asset_utils.rs @@ -5,11 +5,11 @@ #![allow(clippy::extra_unused_lifetimes)] use crate::{ - models::{ + db::common::models::{ coin_models::coin_utils::COIN_ADDR, default_models::move_resources::MoveResource, token_models::token_utils::URI_LENGTH, token_v2_models::v2_token_utils::ResourceReference, }, - utils::util::{deserialize_from_string, truncate_str}, + utils::util::{deserialize_from_string, truncate_str, Aggregator}, }; use anyhow::{Context, Result}; use aptos_protos::transaction::v1::WriteResource; @@ -59,7 +59,7 @@ impl FungibleAssetMetadata { write_resource: &WriteResource, txn_version: i64, ) -> anyhow::Result> { - let type_str = MoveResource::get_outer_type_from_resource(write_resource); + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); if !V2FungibleAssetResource::is_resource_supported(type_str.as_str()) { return Ok(None); } @@ -113,7 +113,7 @@ impl FungibleAssetStore { write_resource: &WriteResource, txn_version: i64, ) -> anyhow::Result> { - let type_str = MoveResource::get_outer_type_from_resource(write_resource); + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); if !V2FungibleAssetResource::is_resource_supported(type_str.as_str()) { return Ok(None); } @@ -158,7 +158,7 @@ impl FungibleAssetSupply { write_resource: &WriteResource, txn_version: i64, ) -> anyhow::Result> { - let type_str: String = MoveResource::get_outer_type_from_resource(write_resource); + let type_str: String = MoveResource::get_outer_type_from_write_resource(write_resource); if !V2FungibleAssetResource::is_resource_supported(type_str.as_str()) { return Ok(None); } @@ -187,6 +187,76 @@ impl FungibleAssetSupply { } } +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct ConcurrentFungibleAssetSupply { + pub current: Aggregator, +} + +impl ConcurrentFungibleAssetSupply { + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + ) -> anyhow::Result> { + let type_str: String = MoveResource::get_outer_type_from_write_resource(write_resource); + if !V2FungibleAssetResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + + if let V2FungibleAssetResource::ConcurrentFungibleAssetSupply(inner) = + V2FungibleAssetResource::from_resource( 
+ &type_str, + resource.data.as_ref().unwrap(), + txn_version, + )? + { + Ok(Some(inner)) + } else { + Ok(None) + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct ConcurrentFungibleAssetBalance { + pub balance: Aggregator, +} + +impl ConcurrentFungibleAssetBalance { + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + ) -> anyhow::Result> { + let type_str: String = MoveResource::get_outer_type_from_write_resource(write_resource); + if !V2FungibleAssetResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + + if let V2FungibleAssetResource::ConcurrentFungibleAssetBalance(inner) = + V2FungibleAssetResource::from_resource( + &type_str, + resource.data.as_ref().unwrap(), + txn_version, + )? + { + Ok(Some(inner)) + } else { + Ok(None) + } + } +} + #[derive(Serialize, Deserialize, Debug, Clone)] pub struct DepositEvent { #[serde(deserialize_with = "deserialize_from_string")] @@ -209,14 +279,18 @@ pub enum V2FungibleAssetResource { FungibleAssetMetadata(FungibleAssetMetadata), FungibleAssetStore(FungibleAssetStore), FungibleAssetSupply(FungibleAssetSupply), + ConcurrentFungibleAssetSupply(ConcurrentFungibleAssetSupply), + ConcurrentFungibleAssetBalance(ConcurrentFungibleAssetBalance), } impl V2FungibleAssetResource { pub fn is_resource_supported(data_type: &str) -> bool { [ format!("{}::fungible_asset::Supply", COIN_ADDR), + format!("{}::fungible_asset::ConcurrentSupply", COIN_ADDR), format!("{}::fungible_asset::Metadata", COIN_ADDR), format!("{}::fungible_asset::FungibleStore", COIN_ADDR), + format!("{}::fungible_asset::ConcurrentFungibleBalance", COIN_ADDR), ] .contains(&data_type.to_string()) } @@ -231,6 +305,10 @@ impl V2FungibleAssetResource { serde_json::from_value(data.clone()) .map(|inner| Some(Self::FungibleAssetSupply(inner))) }, + x if x == format!("{}::fungible_asset::ConcurrentSupply", COIN_ADDR) => { + serde_json::from_value(data.clone()) + .map(|inner| Some(Self::ConcurrentFungibleAssetSupply(inner))) + }, x if x == format!("{}::fungible_asset::Metadata", COIN_ADDR) => { serde_json::from_value(data.clone()) .map(|inner| Some(Self::FungibleAssetMetadata(inner))) @@ -239,6 +317,10 @@ impl V2FungibleAssetResource { serde_json::from_value(data.clone()) .map(|inner| Some(Self::FungibleAssetStore(inner))) }, + x if x == format!("{}::fungible_asset::ConcurrentFungibleBalance", COIN_ADDR) => { + serde_json::from_value(data.clone()) + .map(|inner| Some(Self::ConcurrentFungibleAssetBalance(inner))) + }, _ => Ok(None), } .context(format!( @@ -312,4 +394,6 @@ mod tests { panic!("Wrong type") } } + + // TODO: Add similar tests for ConcurrentFungibleAssetSupply. 
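// Editorial aside (not part of the diff): a rough shape for the test the TODO above
// asks for. The JSON fixture and the current.value / current.max_value field names
// follow how ConcurrentFungibleAssetSupply is read elsewhere in this PR; the exact
// on-chain payload and the Aggregator serde behaviour should be confirmed before
// adopting this as-is.
#[test]
fn test_concurrent_fungible_asset_supply() {
    let data =
        r#"{"current": {"value": "100", "max_value": "340282366920938463463374607431768211455"}}"#;
    let supply: ConcurrentFungibleAssetSupply =
        serde_json::from_str(data).expect("failed to deserialize ConcurrentSupply fixture");
    assert_eq!(supply.current.value.to_string(), "100");
}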
} diff --git a/rust/processor/src/models/fungible_asset_models/v2_fungible_metadata.rs b/rust/processor/src/db/common/models/fungible_asset_models/v2_fungible_metadata.rs similarity index 61% rename from rust/processor/src/models/fungible_asset_models/v2_fungible_metadata.rs rename to rust/processor/src/db/common/models/fungible_asset_models/v2_fungible_metadata.rs index 1332ffa88..bcb1df18d 100644 --- a/rust/processor/src/models/fungible_asset_models/v2_fungible_metadata.rs +++ b/rust/processor/src/db/common/models/fungible_asset_models/v2_fungible_metadata.rs @@ -7,18 +7,18 @@ use super::v2_fungible_asset_utils::FungibleAssetMetadata; use crate::{ - models::{ + db::common::models::{ coin_models::coin_utils::{CoinInfoType, CoinResource}, object_models::v2_object_utils::ObjectAggregatedDataMapping, token_v2_models::v2_token_utils::TokenStandard, }, schema::fungible_asset_metadata, - utils::{database::PgPoolConnection, util::standardize_address}, + utils::util::standardize_address, }; use ahash::AHashMap; -use aptos_protos::transaction::v1::WriteResource; +use aptos_protos::transaction::v1::{DeleteResource, WriteResource}; +use bigdecimal::BigDecimal; use diesel::prelude::*; -use diesel_async::RunQueryDsl; use field_count::FieldCount; use serde::{Deserialize, Serialize}; @@ -44,26 +44,8 @@ pub struct FungibleAssetMetadataModel { pub supply_aggregator_table_key_v1: Option, pub token_standard: String, pub is_token_v2: Option, -} - -#[derive(Debug, Deserialize, Identifiable, Queryable, Serialize)] -#[diesel(primary_key(asset_type))] -#[diesel(table_name = fungible_asset_metadata)] -pub struct FungibleAssetMetadataQuery { - pub asset_type: String, - pub creator_address: String, - pub name: String, - pub symbol: String, - pub decimals: i32, - pub icon_uri: Option, - pub project_uri: Option, - pub last_transaction_version: i64, - pub last_transaction_timestamp: chrono::NaiveDateTime, - pub supply_aggregator_table_handle_v1: Option, - pub supply_aggregator_table_key_v1: Option, - pub token_standard: String, - pub inserted_at: chrono::NaiveDateTime, - pub is_token_v2: Option, + pub supply_v2: Option, + pub maximum_v2: Option, } impl FungibleAssetMetadataModel { @@ -81,7 +63,23 @@ impl FungibleAssetMetadataModel { let asset_type = standardize_address(&write_resource.address.to_string()); if let Some(object_metadata) = object_metadatas.get(&asset_type) { let object = &object_metadata.object.object_core; - let is_token_v2 = object_metadata.token.is_some(); + let (maximum_v2, supply_v2) = if let Some(fungible_asset_supply) = + object_metadata.fungible_asset_supply.as_ref() + { + ( + fungible_asset_supply.get_maximum(), + Some(fungible_asset_supply.current.clone()), + ) + } else if let Some(concurrent_fungible_asset_supply) = + object_metadata.concurrent_fungible_asset_supply.as_ref() + { + ( + Some(concurrent_fungible_asset_supply.current.max_value.clone()), + Some(concurrent_fungible_asset_supply.current.value.clone()), + ) + } else { + (None, None) + }; return Ok(Some(Self { asset_type: asset_type.clone(), @@ -96,7 +94,9 @@ impl FungibleAssetMetadataModel { supply_aggregator_table_handle_v1: None, supply_aggregator_table_key_v1: None, token_standard: TokenStandard::V2.to_string(), - is_token_v2: Some(is_token_v2), + is_token_v2: None, + supply_v2, + maximum_v2, })); } } @@ -135,7 +135,9 @@ impl FungibleAssetMetadataModel { supply_aggregator_table_handle_v1: supply_aggregator_table_handle, supply_aggregator_table_key_v1: supply_aggregator_table_key, token_standard: TokenStandard::V1.to_string(), - 
is_token_v2: Some(false), + is_token_v2: None, + supply_v2: None, + maximum_v2: None, })) } else { Ok(None) @@ -145,53 +147,46 @@ impl FungibleAssetMetadataModel { } } - /// A fungible asset can also be a token. We will make a best effort guess at whether this is a fungible token. - /// 1. If metadata is present without token object, then it's not a token - /// 2. If metadata is not present, we will do a lookup in the db. - pub async fn is_address_fungible_asset( - conn: &mut PgPoolConnection<'_>, - asset_type: &str, - object_aggregated_data_mapping: &ObjectAggregatedDataMapping, + pub fn get_v1_from_delete_resource( + delete_resource: &DeleteResource, txn_version: i64, - ) -> bool { - // 1. If metadata is present without token object, then it's not a token - if let Some(object_data) = object_aggregated_data_mapping.get(asset_type) { - if object_data.fungible_asset_metadata.is_some() { - return object_data.token.is_none(); - } - } - // 2. If metadata is not present, we will do a lookup in the db. - match FungibleAssetMetadataQuery::get_by_asset_type(conn, asset_type).await { - Ok(metadata) => { - if let Some(is_token_v2) = metadata.is_token_v2 { - return !is_token_v2; - } - - // If is_token_v2 is null, then the metadata is a v1 coin info, and it's not a token - true - }, - Err(_) => { - tracing::error!( - transaction_version = txn_version, - lookup_key = asset_type, - "Missing fungible_asset_metadata for asset_type: {}. You probably should backfill db.", - asset_type, + txn_timestamp: chrono::NaiveDateTime, + ) -> anyhow::Result> { + match &CoinResource::from_delete_resource(delete_resource, txn_version)? { + Some(CoinResource::CoinInfoResource(inner)) => { + let coin_info_type = &CoinInfoType::from_move_type( + &delete_resource.r#type.as_ref().unwrap().generic_type_params[0], + delete_resource.type_str.as_ref(), + txn_version, ); - // Default - true + let (supply_aggregator_table_handle, supply_aggregator_table_key) = inner + .get_aggregator_metadata() + .map(|agg| (Some(agg.handle), Some(agg.key))) + .unwrap_or((None, None)); + // If asset type is too long, just ignore + if let Some(asset_type) = coin_info_type.get_coin_type_below_max() { + Ok(Some(Self { + asset_type, + creator_address: coin_info_type.get_creator_address(), + name: inner.get_name_trunc(), + symbol: inner.get_symbol_trunc(), + decimals: inner.decimals, + icon_uri: None, + project_uri: None, + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + supply_aggregator_table_handle_v1: supply_aggregator_table_handle, + supply_aggregator_table_key_v1: supply_aggregator_table_key, + token_standard: TokenStandard::V1.to_string(), + is_token_v2: None, + supply_v2: None, + maximum_v2: None, + })) + } else { + Ok(None) + } }, + _ => Ok(None), } } } - -impl FungibleAssetMetadataQuery { - pub async fn get_by_asset_type( - conn: &mut PgPoolConnection<'_>, - asset_type: &str, - ) -> diesel::QueryResult { - fungible_asset_metadata::table - .filter(fungible_asset_metadata::asset_type.eq(asset_type)) - .first::(conn) - .await - } -} diff --git a/rust/processor/src/models/ledger_info.rs b/rust/processor/src/db/common/models/ledger_info.rs similarity index 81% rename from rust/processor/src/models/ledger_info.rs rename to rust/processor/src/db/common/models/ledger_info.rs index 17ca11add..f25759ec5 100644 --- a/rust/processor/src/models/ledger_info.rs +++ b/rust/processor/src/db/common/models/ledger_info.rs @@ -3,7 +3,7 @@ #![allow(clippy::extra_unused_lifetimes)] -use crate::{schema::ledger_infos, 
utils::database::PgPoolConnection}; +use crate::{schema::ledger_infos, utils::database::DbPoolConnection}; use diesel::{OptionalExtension, QueryDsl}; use diesel_async::RunQueryDsl; @@ -15,7 +15,7 @@ pub struct LedgerInfo { } impl LedgerInfo { - pub async fn get(conn: &mut PgPoolConnection<'_>) -> diesel::QueryResult> { + pub async fn get(conn: &mut DbPoolConnection<'_>) -> diesel::QueryResult> { ledger_infos::table .select(ledger_infos::all_columns) .first::(conn) diff --git a/rust/processor/src/models/mod.rs b/rust/processor/src/db/common/models/mod.rs similarity index 100% rename from rust/processor/src/models/mod.rs rename to rust/processor/src/db/common/models/mod.rs diff --git a/rust/processor/src/models/object_models/mod.rs b/rust/processor/src/db/common/models/object_models/mod.rs similarity index 100% rename from rust/processor/src/models/object_models/mod.rs rename to rust/processor/src/db/common/models/object_models/mod.rs diff --git a/rust/processor/src/models/object_models/v2_object_utils.rs b/rust/processor/src/db/common/models/object_models/v2_object_utils.rs similarity index 59% rename from rust/processor/src/models/object_models/v2_object_utils.rs rename to rust/processor/src/db/common/models/object_models/v2_object_utils.rs index e2a397d53..617e98f00 100644 --- a/rust/processor/src/models/object_models/v2_object_utils.rs +++ b/rust/processor/src/db/common/models/object_models/v2_object_utils.rs @@ -6,10 +6,11 @@ #![allow(clippy::unused_unit)] use crate::{ - models::{ + db::common::models::{ default_models::move_resources::MoveResource, fungible_asset_models::v2_fungible_asset_utils::{ - FungibleAssetMetadata, FungibleAssetStore, FungibleAssetSupply, + ConcurrentFungibleAssetBalance, ConcurrentFungibleAssetSupply, FungibleAssetMetadata, + FungibleAssetStore, FungibleAssetSupply, }, token_v2_models::v2_token_utils::{ AptosCollection, ConcurrentSupply, FixedSupply, PropertyMapModel, TokenIdentifiers, @@ -38,10 +39,14 @@ pub struct ObjectAggregatedData { pub object: ObjectWithMetadata, // There could be more than one transfers on the same transaction pub transfer_events: Vec<(EventIndex, TransferEvent)>, + // This would make transfers impossible + pub untransferable: Option, // Fungible asset structs pub fungible_asset_metadata: Option, pub fungible_asset_supply: Option, + pub concurrent_fungible_asset_supply: Option, pub fungible_asset_store: Option, + pub concurrent_fungible_asset_balance: Option, // Token v2 structs pub aptos_collection: Option, pub fixed_supply: Option, @@ -52,6 +57,35 @@ pub struct ObjectAggregatedData { pub token_identifier: Option, } +impl Default for ObjectAggregatedData { + fn default() -> Self { + Self { + object: ObjectWithMetadata { + object_core: ObjectCore { + allow_ungated_transfer: false, + guid_creation_num: BigDecimal::default(), + owner: String::default(), + }, + state_key_hash: String::default(), + }, + transfer_events: Vec::new(), + untransferable: None, + fungible_asset_metadata: None, + fungible_asset_supply: None, + concurrent_fungible_asset_supply: None, + concurrent_fungible_asset_balance: None, + fungible_asset_store: None, + aptos_collection: None, + fixed_supply: None, + property_map: None, + token: None, + unlimited_supply: None, + concurrent_supply: None, + token_identifier: None, + } + } +} + #[derive(Serialize, Deserialize, Debug, Clone)] pub struct ObjectCore { pub allow_ungated_transfer: bool, @@ -77,7 +111,7 @@ impl ObjectWithMetadata { write_resource: &WriteResource, txn_version: i64, ) -> anyhow::Result> { - let type_str = 
MoveResource::get_outer_type_from_resource(write_resource); + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); if !V2TokenResource::is_resource_supported(type_str.as_str()) { return Ok(None); } @@ -101,3 +135,32 @@ impl ObjectWithMetadata { standardize_address(&self.state_key_hash) } } + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct Untransferable {} + +impl Untransferable { + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + ) -> anyhow::Result> { + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); + if !V2TokenResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + + if let V2TokenResource::Untransferable(inner) = + V2TokenResource::from_resource(&type_str, resource.data.as_ref().unwrap(), txn_version)? + { + Ok(Some(inner)) + } else { + Ok(None) + } + } +} diff --git a/rust/processor/src/models/object_models/v2_objects.rs b/rust/processor/src/db/common/models/object_models/v2_objects.rs similarity index 83% rename from rust/processor/src/models/object_models/v2_objects.rs rename to rust/processor/src/db/common/models/object_models/v2_objects.rs index ad1fd605d..66d67cc63 100644 --- a/rust/processor/src/models/object_models/v2_objects.rs +++ b/rust/processor/src/db/common/models/object_models/v2_objects.rs @@ -7,12 +7,9 @@ use super::v2_object_utils::{CurrentObjectPK, ObjectAggregatedDataMapping}; use crate::{ - models::{ - default_models::move_resources::MoveResource, - token_models::collection_datas::{QUERY_RETRIES, QUERY_RETRY_DELAY_MS}, - }, + db::common::models::default_models::move_resources::MoveResource, schema::{current_objects, objects}, - utils::{database::PgPoolConnection, util::standardize_address}, + utils::{database::DbPoolConnection, util::standardize_address}, }; use ahash::AHashMap; use aptos_protos::transaction::v1::{DeleteResource, WriteResource}; @@ -21,7 +18,6 @@ use diesel::prelude::*; use diesel_async::RunQueryDsl; use field_count::FieldCount; use serde::{Deserialize, Serialize}; -use tracing::warn; #[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] #[diesel(primary_key(transaction_version, write_set_change_index))] @@ -35,6 +31,7 @@ pub struct Object { pub guid_creation_num: BigDecimal, pub allow_ungated_transfer: bool, pub is_deleted: bool, + pub untransferrable: bool, } #[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] @@ -48,6 +45,7 @@ pub struct CurrentObject { pub last_guid_creation_num: BigDecimal, pub last_transaction_version: i64, pub is_deleted: bool, + pub untransferrable: bool, } #[derive(Debug, Deserialize, Identifiable, Queryable, Serialize)] @@ -62,6 +60,7 @@ pub struct CurrentObjectQuery { pub last_transaction_version: i64, pub is_deleted: bool, pub inserted_at: chrono::NaiveDateTime, + pub untransferrable: bool, } impl Object { @@ -76,6 +75,12 @@ impl Object { // do something let object_with_metadata = object_aggregated_metadata.object.clone(); let object_core = object_with_metadata.object_core; + + let untransferrable = if object_aggregated_metadata.untransferable.as_ref().is_some() { + true + } else { + !object_core.allow_ungated_transfer + }; Ok(Some(( Self { transaction_version: txn_version, @@ -86,6 +91,7 @@ impl Object { guid_creation_num: 
object_core.guid_creation_num.clone(), allow_ungated_transfer: object_core.allow_ungated_transfer, is_deleted: false, + untransferrable, }, CurrentObject { object_address: address, @@ -95,6 +101,7 @@ impl Object { last_guid_creation_num: object_core.guid_creation_num.clone(), last_transaction_version: txn_version, is_deleted: false, + untransferrable, }, ))) } else { @@ -110,7 +117,9 @@ impl Object { txn_version: i64, write_set_change_index: i64, object_mapping: &AHashMap, - conn: &mut PgPoolConnection<'_>, + conn: &mut DbPoolConnection<'_>, + query_retries: u32, + query_retry_delay_ms: u64, ) -> anyhow::Result> { if delete_resource.type_str == "0x1::object::ObjectGroup" { let resource = MoveResource::from_delete_resource( @@ -122,7 +131,14 @@ impl Object { let previous_object = if let Some(object) = object_mapping.get(&resource.address) { object.clone() } else { - match Self::get_current_object(conn, &resource.address, txn_version).await { + match Self::get_current_object( + conn, + &resource.address, + query_retries, + query_retry_delay_ms, + ) + .await + { Ok(object) => object, Err(_) => { tracing::error!( @@ -145,6 +161,7 @@ impl Object { guid_creation_num: previous_object.last_guid_creation_num.clone(), allow_ungated_transfer: previous_object.allow_ungated_transfer, is_deleted: true, + untransferrable: previous_object.untransferrable, }, CurrentObject { object_address: resource.address, @@ -154,6 +171,7 @@ impl Object { allow_ungated_transfer: previous_object.allow_ungated_transfer, last_transaction_version: txn_version, is_deleted: true, + untransferrable: previous_object.untransferrable, }, ))) } else { @@ -164,13 +182,14 @@ impl Object { /// This is actually not great because object owner can change. The best we can do now though. /// This will loop forever until we get the object from the db pub async fn get_current_object( - conn: &mut PgPoolConnection<'_>, + conn: &mut DbPoolConnection<'_>, object_address: &str, - transaction_version: i64, + query_retries: u32, + query_retry_delay_ms: u64, ) -> anyhow::Result { - let mut retries = 0; - while retries < QUERY_RETRIES { - retries += 1; + let mut tried = 0; + while tried < query_retries { + tried += 1; match CurrentObjectQuery::get_by_address(object_address, conn).await { Ok(res) => { return Ok(CurrentObject { @@ -181,20 +200,14 @@ impl Object { last_guid_creation_num: res.last_guid_creation_num, last_transaction_version: res.last_transaction_version, is_deleted: res.is_deleted, + untransferrable: res.untransferrable, }); }, - Err(e) => { - warn!( - transaction_version, - error = ?e, - object_address, - retry_ms = QUERY_RETRY_DELAY_MS, - "Failed to get object from current_objects table for object_address: {}, retrying in {} ms. 
", - object_address, - QUERY_RETRY_DELAY_MS, - ); - tokio::time::sleep(std::time::Duration::from_millis(QUERY_RETRY_DELAY_MS)) - .await; + Err(_) => { + if tried < query_retries { + tokio::time::sleep(std::time::Duration::from_millis(query_retry_delay_ms)) + .await; + } }, } } @@ -206,7 +219,7 @@ impl CurrentObjectQuery { /// TODO: Change this to a KV store pub async fn get_by_address( object_address: &str, - conn: &mut PgPoolConnection<'_>, + conn: &mut DbPoolConnection<'_>, ) -> diesel::QueryResult { current_objects::table .filter(current_objects::object_address.eq(object_address)) diff --git a/rust/processor/src/models/processor_status.rs b/rust/processor/src/db/common/models/processor_status.rs similarity index 91% rename from rust/processor/src/models/processor_status.rs rename to rust/processor/src/db/common/models/processor_status.rs index 0141a4eef..2d7928511 100644 --- a/rust/processor/src/models/processor_status.rs +++ b/rust/processor/src/db/common/models/processor_status.rs @@ -3,7 +3,7 @@ #![allow(clippy::extra_unused_lifetimes)] -use crate::{schema::processor_status, utils::database::PgPoolConnection}; +use crate::{schema::processor_status, utils::database::DbPoolConnection}; use diesel::{ExpressionMethods, OptionalExtension, QueryDsl}; use diesel_async::RunQueryDsl; @@ -29,7 +29,7 @@ pub struct ProcessorStatusQuery { impl ProcessorStatusQuery { pub async fn get_by_processor( processor_name: &str, - conn: &mut PgPoolConnection<'_>, + conn: &mut DbPoolConnection<'_>, ) -> diesel::QueryResult> { processor_status::table .filter(processor_status::processor.eq(processor_name)) diff --git a/rust/processor/src/models/property_map.rs b/rust/processor/src/db/common/models/property_map.rs similarity index 100% rename from rust/processor/src/models/property_map.rs rename to rust/processor/src/db/common/models/property_map.rs diff --git a/rust/processor/src/models/stake_models/current_delegated_voter.rs b/rust/processor/src/db/common/models/stake_models/current_delegated_voter.rs similarity index 85% rename from rust/processor/src/models/stake_models/current_delegated_voter.rs rename to rust/processor/src/db/common/models/stake_models/current_delegated_voter.rs index 772216eb1..e87dcde71 100644 --- a/rust/processor/src/models/stake_models/current_delegated_voter.rs +++ b/rust/processor/src/db/common/models/stake_models/current_delegated_voter.rs @@ -9,9 +9,8 @@ use super::{ stake_utils::VoteDelegationTableItem, }; use crate::{ - models::token_models::collection_datas::{QUERY_RETRIES, QUERY_RETRY_DELAY_MS}, schema::current_delegated_voter, - utils::{database::PgPoolConnection, util::standardize_address}, + utils::{database::DbPoolConnection, util::standardize_address}, }; use ahash::AHashMap; use aptos_protos::transaction::v1::WriteTableItem; @@ -68,7 +67,7 @@ impl CurrentDelegatedVoter { /// There are 3 pieces of information we need in order to get the delegated voters /// 1. We need the mapping between pool address and table handle of the governance record. This will help us - /// figure out what the pool address it is + /// figure out what the pool address it is /// 2. We need to parse the governance record itself /// 3. 
All active shares prior to governance contract need to be tracked as well, the default voters are the delegators themselves pub async fn from_write_table_item( @@ -76,7 +75,9 @@ impl CurrentDelegatedVoter { txn_version: i64, txn_timestamp: chrono::NaiveDateTime, vote_delegation_handle_to_pool_address: &VoteDelegationTableHandleToPool, - conn: &mut PgPoolConnection<'_>, + conn: &mut DbPoolConnection<'_>, + query_retries: u32, + query_retry_delay_ms: u64, ) -> anyhow::Result { let mut delegated_voter_map: CurrentDelegatedVoterMap = AHashMap::new(); @@ -93,7 +94,7 @@ impl CurrentDelegatedVoter { Some(pool_address) => pool_address.clone(), None => { // look up from db - Self::get_delegation_pool_address_by_table_handle(conn, &table_handle).await + Self::get_delegation_pool_address_by_table_handle(conn, &table_handle, query_retries, query_retry_delay_ms).await .unwrap_or_else(|_| { tracing::error!( transaction_version = txn_version, @@ -136,7 +137,9 @@ impl CurrentDelegatedVoter { txn_timestamp: chrono::NaiveDateTime, active_pool_to_staking_pool: &ShareToStakingPoolMapping, previous_delegated_voters: &CurrentDelegatedVoterMap, - conn: &mut PgPoolConnection<'_>, + conn: &mut DbPoolConnection<'_>, + query_retries: u32, + query_retry_delay_ms: u64, ) -> anyhow::Result> { if let Some((_, active_balance)) = CurrentDelegatorBalance::get_active_share_from_write_table_item( @@ -156,7 +159,14 @@ impl CurrentDelegatedVoter { Some(_) => true, None => { // look up from db - Self::get_existence_by_pk(conn, &delegator_address, &pool_address).await + Self::get_existence_by_pk( + conn, + &delegator_address, + &pool_address, + query_retries, + query_retry_delay_ms, + ) + .await }, }; if !already_exists { @@ -175,19 +185,23 @@ impl CurrentDelegatedVoter { } pub async fn get_delegation_pool_address_by_table_handle( - conn: &mut PgPoolConnection<'_>, + conn: &mut DbPoolConnection<'_>, table_handle: &str, + query_retries: u32, + query_retry_delay_ms: u64, ) -> anyhow::Result { - let mut retried = 0; - while retried < QUERY_RETRIES { - retried += 1; + let mut tried = 0; + while tried < query_retries { + tried += 1; match CurrentDelegatedVoterQuery::get_by_table_handle(conn, table_handle).await { Ok(current_delegated_voter_query_result) => { return Ok(current_delegated_voter_query_result.delegation_pool_address); }, Err(_) => { - tokio::time::sleep(std::time::Duration::from_millis(QUERY_RETRY_DELAY_MS)) - .await; + if tried < query_retries { + tokio::time::sleep(std::time::Duration::from_millis(query_retry_delay_ms)) + .await; + } }, } } @@ -197,13 +211,15 @@ impl CurrentDelegatedVoter { } pub async fn get_existence_by_pk( - conn: &mut PgPoolConnection<'_>, + conn: &mut DbPoolConnection<'_>, delegator_address: &str, delegation_pool_address: &str, + query_retries: u32, + query_retry_delay_ms: u64, ) -> bool { - let mut retried = 0; - while retried < QUERY_RETRIES { - retried += 1; + let mut tried = 0; + while tried < query_retries { + tried += 1; match CurrentDelegatedVoterQuery::get_by_pk( conn, delegator_address, @@ -213,8 +229,10 @@ impl CurrentDelegatedVoter { { Ok(_) => return true, Err(_) => { - tokio::time::sleep(std::time::Duration::from_millis(QUERY_RETRY_DELAY_MS)) - .await; + if tried < query_retries { + tokio::time::sleep(std::time::Duration::from_millis(query_retry_delay_ms)) + .await; + } }, } } @@ -224,7 +242,7 @@ impl CurrentDelegatedVoter { impl CurrentDelegatedVoterQuery { pub async fn get_by_table_handle( - conn: &mut PgPoolConnection<'_>, + conn: &mut DbPoolConnection<'_>, table_handle: &str, ) -> 
diesel::QueryResult { current_delegated_voter::table @@ -234,7 +252,7 @@ impl CurrentDelegatedVoterQuery { } pub async fn get_by_pk( - conn: &mut PgPoolConnection<'_>, + conn: &mut DbPoolConnection<'_>, delegator_address: &str, delegation_pool_address: &str, ) -> diesel::QueryResult { diff --git a/rust/processor/src/models/stake_models/delegator_activities.rs b/rust/processor/src/db/common/models/stake_models/delegator_activities.rs similarity index 100% rename from rust/processor/src/models/stake_models/delegator_activities.rs rename to rust/processor/src/db/common/models/stake_models/delegator_activities.rs diff --git a/rust/processor/src/models/stake_models/delegator_balances.rs b/rust/processor/src/db/common/models/stake_models/delegator_balances.rs similarity index 92% rename from rust/processor/src/models/stake_models/delegator_balances.rs rename to rust/processor/src/db/common/models/stake_models/delegator_balances.rs index 916c3d7ff..54790f43b 100644 --- a/rust/processor/src/models/stake_models/delegator_balances.rs +++ b/rust/processor/src/db/common/models/stake_models/delegator_balances.rs @@ -5,12 +5,9 @@ use super::delegator_pools::{DelegatorPool, DelegatorPoolBalanceMetadata, PoolBalanceMetadata}; use crate::{ - models::{ - default_models::move_tables::TableItem, - token_models::collection_datas::{QUERY_RETRIES, QUERY_RETRY_DELAY_MS}, - }, + db::common::models::default_models::move_tables::TableItem, schema::{current_delegator_balances, delegator_balances}, - utils::{database::PgPoolConnection, util::standardize_address}, + utils::{database::DbPoolConnection, util::standardize_address}, }; use ahash::AHashMap; use anyhow::Context; @@ -136,7 +133,9 @@ impl CurrentDelegatorBalance { write_set_change_index: i64, inactive_pool_to_staking_pool: &ShareToStakingPoolMapping, inactive_share_to_pool: &ShareToPoolMapping, - conn: &mut PgPoolConnection<'_>, + conn: &mut DbPoolConnection<'_>, + query_retries: u32, + query_retry_delay_ms: u64, ) -> anyhow::Result> { let table_handle = standardize_address(&write_table_item.handle.to_string()); // The mapping will tell us if the table item belongs to an inactive pool @@ -153,6 +152,8 @@ impl CurrentDelegatorBalance { match Self::get_staking_pool_from_inactive_share_handle( conn, &inactive_pool_handle, + query_retries, + query_retry_delay_ms, ) .await { @@ -256,7 +257,9 @@ impl CurrentDelegatorBalance { write_set_change_index: i64, inactive_pool_to_staking_pool: &ShareToStakingPoolMapping, inactive_share_to_pool: &ShareToPoolMapping, - conn: &mut PgPoolConnection<'_>, + conn: &mut DbPoolConnection<'_>, + query_retries: u32, + query_retry_delay_ms: u64, ) -> anyhow::Result> { let table_handle = standardize_address(&delete_table_item.handle.to_string()); // The mapping will tell us if the table item belongs to an inactive pool @@ -269,13 +272,17 @@ impl CurrentDelegatorBalance { .map(|metadata| metadata.staking_pool_address.clone()) { Some(pool_address) => pool_address, - None => { - Self::get_staking_pool_from_inactive_share_handle(conn, &inactive_pool_handle) - .await - .context(format!("Failed to get staking pool address from inactive share handle {}, txn version {}", - inactive_pool_handle, txn_version - ))? 
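The hunks around this point replace the hard-coded QUERY_RETRIES and QUERY_RETRY_DELAY_MS constants with query_retries and query_retry_delay_ms arguments threaded through every lookup helper, and they only sleep between attempts instead of after the final failure. A minimal sketch of the shared shape, assuming the tokio and anyhow crates already used in this workspace; with_query_retries and fetch are illustrative names, not helpers introduced by this PR:

    use std::{future::Future, time::Duration};

    // Illustrative helper capturing the loop each lookup method now writes inline.
    async fn with_query_retries<T, F, Fut>(
        query_retries: u32,
        query_retry_delay_ms: u64,
        mut fetch: F,
    ) -> anyhow::Result<T>
    where
        F: FnMut() -> Fut,
        Fut: Future<Output = anyhow::Result<T>>,
    {
        let mut tried = 0;
        while tried < query_retries {
            tried += 1;
            match fetch().await {
                Ok(value) => return Ok(value),
                Err(e) => {
                    // Sleep only between attempts; the last failure returns immediately.
                    if tried < query_retries {
                        tokio::time::sleep(Duration::from_millis(query_retry_delay_ms)).await;
                    } else {
                        return Err(e);
                    }
                },
            }
        }
        Err(anyhow::anyhow!("query_retries was 0"))
    }

The PR keeps the loop inlined at each call site; the sketch only isolates the common control flow it now follows.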
- } + None => Self::get_staking_pool_from_inactive_share_handle( + conn, + &inactive_pool_handle, + query_retries, + query_retry_delay_ms, + ) + .await + .context(format!( + "Failed to get staking pool from inactive share handle {}, txn version {}", + inactive_pool_handle, txn_version + ))?, }; let delegator_address = standardize_address(&delete_table_item.key.to_string()); @@ -362,19 +369,23 @@ impl CurrentDelegatorBalance { } pub async fn get_staking_pool_from_inactive_share_handle( - conn: &mut PgPoolConnection<'_>, + conn: &mut DbPoolConnection<'_>, table_handle: &str, + query_retries: u32, + query_retry_delay_ms: u64, ) -> anyhow::Result { - let mut retried = 0; - while retried < QUERY_RETRIES { - retried += 1; + let mut tried = 0; + while tried < query_retries { + tried += 1; match CurrentDelegatorBalanceQuery::get_by_inactive_share_handle(conn, table_handle) .await { Ok(current_delegator_balance) => return Ok(current_delegator_balance.pool_address), Err(_) => { - tokio::time::sleep(std::time::Duration::from_millis(QUERY_RETRY_DELAY_MS)) - .await; + if tried < query_retries { + tokio::time::sleep(std::time::Duration::from_millis(query_retry_delay_ms)) + .await; + } }, } } @@ -386,7 +397,9 @@ impl CurrentDelegatorBalance { pub async fn from_transaction( transaction: &Transaction, active_pool_to_staking_pool: &ShareToStakingPoolMapping, - conn: &mut PgPoolConnection<'_>, + conn: &mut DbPoolConnection<'_>, + query_retries: u32, + query_retry_delay_ms: u64, ) -> anyhow::Result<(Vec, CurrentDelegatorBalanceMap)> { let mut inactive_pool_to_staking_pool: ShareToStakingPoolMapping = AHashMap::new(); let mut inactive_share_to_pool: ShareToPoolMapping = AHashMap::new(); @@ -436,6 +449,8 @@ impl CurrentDelegatorBalance { &inactive_pool_to_staking_pool, &inactive_share_to_pool, conn, + query_retries, + query_retry_delay_ms, ) .await .unwrap() @@ -461,6 +476,8 @@ impl CurrentDelegatorBalance { &inactive_pool_to_staking_pool, &inactive_share_to_pool, conn, + query_retries, + query_retry_delay_ms, ) .await .unwrap() @@ -486,7 +503,7 @@ impl CurrentDelegatorBalance { impl CurrentDelegatorBalanceQuery { pub async fn get_by_inactive_share_handle( - conn: &mut PgPoolConnection<'_>, + conn: &mut DbPoolConnection<'_>, table_handle: &str, ) -> diesel::QueryResult { current_delegator_balances::table diff --git a/rust/processor/src/models/stake_models/delegator_pools.rs b/rust/processor/src/db/common/models/stake_models/delegator_pools.rs similarity index 100% rename from rust/processor/src/models/stake_models/delegator_pools.rs rename to rust/processor/src/db/common/models/stake_models/delegator_pools.rs diff --git a/rust/processor/src/models/stake_models/mod.rs b/rust/processor/src/db/common/models/stake_models/mod.rs similarity index 100% rename from rust/processor/src/models/stake_models/mod.rs rename to rust/processor/src/db/common/models/stake_models/mod.rs diff --git a/rust/processor/src/models/stake_models/proposal_votes.rs b/rust/processor/src/db/common/models/stake_models/proposal_votes.rs similarity index 100% rename from rust/processor/src/models/stake_models/proposal_votes.rs rename to rust/processor/src/db/common/models/stake_models/proposal_votes.rs diff --git a/rust/processor/src/models/stake_models/stake_utils.rs b/rust/processor/src/db/common/models/stake_models/stake_utils.rs similarity index 97% rename from rust/processor/src/models/stake_models/stake_utils.rs rename to rust/processor/src/db/common/models/stake_models/stake_utils.rs index 40cf75fc2..f623d3ffb 100644 --- 
a/rust/processor/src/models/stake_models/stake_utils.rs +++ b/rust/processor/src/db/common/models/stake_models/stake_utils.rs @@ -2,7 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - models::{default_models::move_resources::MoveResource, token_models::token_utils::Table}, + db::common::models::{ + default_models::move_resources::MoveResource, token_models::token_utils::Table, + }, utils::util::{deserialize_from_string, standardize_address}, }; use anyhow::{Context, Result}; @@ -167,7 +169,7 @@ impl StakeResource { write_resource: &WriteResource, txn_version: i64, ) -> Result> { - let type_str = MoveResource::get_outer_type_from_resource(write_resource); + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); if !Self::is_resource_supported(type_str.as_str()) { return Ok(None); } @@ -323,7 +325,7 @@ impl DelegationVoteGovernanceRecordsResource { write_resource: &WriteResource, txn_version: i64, ) -> Result> { - let type_str = MoveResource::get_outer_type_from_resource(write_resource); + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); let resource = MoveResource::from_write_resource( write_resource, 0, // Placeholder, this isn't used anyway diff --git a/rust/processor/src/models/stake_models/staking_pool_voter.rs b/rust/processor/src/db/common/models/stake_models/staking_pool_voter.rs similarity index 100% rename from rust/processor/src/models/stake_models/staking_pool_voter.rs rename to rust/processor/src/db/common/models/stake_models/staking_pool_voter.rs diff --git a/rust/processor/src/models/token_models/collection_datas.rs b/rust/processor/src/db/common/models/token_models/collection_datas.rs similarity index 89% rename from rust/processor/src/models/token_models/collection_datas.rs rename to rust/processor/src/db/common/models/token_models/collection_datas.rs index 5ba8d7fd6..977fa8fc8 100644 --- a/rust/processor/src/models/token_models/collection_datas.rs +++ b/rust/processor/src/db/common/models/token_models/collection_datas.rs @@ -11,7 +11,7 @@ use super::{ }; use crate::{ schema::{collection_datas, current_collection_datas}, - utils::{database::PgPoolConnection, util::standardize_address}, + utils::{database::DbPoolConnection, util::standardize_address}, }; use aptos_protos::transaction::v1::WriteTableItem; use bigdecimal::BigDecimal; @@ -20,9 +20,6 @@ use diesel_async::RunQueryDsl; use field_count::FieldCount; use serde::{Deserialize, Serialize}; -pub const QUERY_RETRIES: u32 = 5; -pub const QUERY_RETRY_DELAY_MS: u64 = 500; - #[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] #[diesel(primary_key(collection_data_id_hash, transaction_version))] #[diesel(table_name = collection_datas)] @@ -88,7 +85,9 @@ impl CollectionData { txn_version: i64, txn_timestamp: chrono::NaiveDateTime, table_handle_to_owner: &TableHandleToOwner, - conn: &mut PgPoolConnection<'_>, + conn: &mut DbPoolConnection<'_>, + query_retries: u32, + query_retry_delay_ms: u64, ) -> anyhow::Result> { let table_item_data = table_item.data.as_ref().unwrap(); @@ -107,7 +106,14 @@ impl CollectionData { .map(|table_metadata| table_metadata.get_owner_address()); let mut creator_address = match maybe_creator_address { Some(ca) => ca, - None => match Self::get_collection_creator(conn, &table_handle).await { + None => match Self::get_collection_creator( + conn, + &table_handle, + query_retries, + query_retry_delay_ms, + ) + .await + { Ok(creator) => creator, Err(_) => { tracing::error!( @@ -167,17 +173,21 @@ impl 
CollectionData { /// cannot change, we can just look in the current_collection_datas table. /// Retrying a few times since this collection could've been written in a separate thread. pub async fn get_collection_creator( - conn: &mut PgPoolConnection<'_>, + conn: &mut DbPoolConnection<'_>, table_handle: &str, + query_retries: u32, + query_retry_delay_ms: u64, ) -> anyhow::Result { - let mut retried = 0; - while retried < QUERY_RETRIES { - retried += 1; + let mut tried = 0; + while tried < query_retries { + tried += 1; match CurrentCollectionDataQuery::get_by_table_handle(conn, table_handle).await { Ok(current_collection_data) => return Ok(current_collection_data.creator_address), Err(_) => { - tokio::time::sleep(std::time::Duration::from_millis(QUERY_RETRY_DELAY_MS)) - .await; + if tried < query_retries { + tokio::time::sleep(std::time::Duration::from_millis(query_retry_delay_ms)) + .await; + } }, } } @@ -187,7 +197,7 @@ impl CollectionData { impl CurrentCollectionDataQuery { pub async fn get_by_table_handle( - conn: &mut PgPoolConnection<'_>, + conn: &mut DbPoolConnection<'_>, table_handle: &str, ) -> diesel::QueryResult { current_collection_datas::table diff --git a/rust/processor/src/models/token_models/mod.rs b/rust/processor/src/db/common/models/token_models/mod.rs similarity index 100% rename from rust/processor/src/models/token_models/mod.rs rename to rust/processor/src/db/common/models/token_models/mod.rs diff --git a/rust/processor/src/models/token_models/nft_points.rs b/rust/processor/src/db/common/models/token_models/nft_points.rs similarity index 94% rename from rust/processor/src/models/token_models/nft_points.rs rename to rust/processor/src/db/common/models/token_models/nft_points.rs index 0224a0f1c..1debccb11 100644 --- a/rust/processor/src/models/token_models/nft_points.rs +++ b/rust/processor/src/db/common/models/token_models/nft_points.rs @@ -66,13 +66,14 @@ impl NftPoints { .request .as_ref() .expect("Sends is not present in user txn"); - let payload = user_txn - .request - .as_ref() - .expect("Getting user request failed.") - .payload - .as_ref() - .expect("Getting payload failed."); + + let payload = match user_request.payload.as_ref() { + Some(payload) => payload, + None => { + tracing::warn!(transaction_version = version, "Payload is empty."); + return None; + }, + }; let entry_function_id_str = get_entry_function_from_user_request(user_request).unwrap_or_default(); diff --git a/rust/processor/src/models/token_models/token_activities.rs b/rust/processor/src/db/common/models/token_models/token_activities.rs similarity index 100% rename from rust/processor/src/models/token_models/token_activities.rs rename to rust/processor/src/db/common/models/token_models/token_activities.rs diff --git a/rust/processor/src/models/token_models/token_claims.rs b/rust/processor/src/db/common/models/token_models/token_claims.rs similarity index 100% rename from rust/processor/src/models/token_models/token_claims.rs rename to rust/processor/src/db/common/models/token_models/token_claims.rs diff --git a/rust/processor/src/models/token_models/token_datas.rs b/rust/processor/src/db/common/models/token_models/token_datas.rs similarity index 100% rename from rust/processor/src/models/token_models/token_datas.rs rename to rust/processor/src/db/common/models/token_models/token_datas.rs diff --git a/rust/processor/src/models/token_models/token_ownerships.rs b/rust/processor/src/db/common/models/token_models/token_ownerships.rs similarity index 100% rename from 
rust/processor/src/models/token_models/token_ownerships.rs rename to rust/processor/src/db/common/models/token_models/token_ownerships.rs diff --git a/rust/processor/src/models/token_models/token_utils.rs b/rust/processor/src/db/common/models/token_models/token_utils.rs similarity index 98% rename from rust/processor/src/models/token_models/token_utils.rs rename to rust/processor/src/db/common/models/token_models/token_utils.rs index 94efbc77d..aa2d74c18 100644 --- a/rust/processor/src/models/token_models/token_utils.rs +++ b/rust/processor/src/db/common/models/token_models/token_utils.rs @@ -95,10 +95,6 @@ impl CollectionDataIdType { hash_str(&self.to_string()) } - pub fn get_name_trunc(&self) -> String { - truncate_str(&self.name, NAME_LENGTH) - } - pub fn to_id(&self) -> String { format!("0x{}", self.to_hash()) } @@ -149,10 +145,6 @@ impl TokenDataType { pub fn get_uri_trunc(&self) -> String { truncate_str(&self.uri, URI_LENGTH) } - - pub fn get_name_trunc(&self) -> String { - truncate_str(&self.name, NAME_LENGTH) - } } #[derive(Serialize, Deserialize, Debug, Clone)] diff --git a/rust/processor/src/models/token_models/tokens.rs b/rust/processor/src/db/common/models/token_models/tokens.rs similarity index 97% rename from rust/processor/src/models/token_models/tokens.rs rename to rust/processor/src/db/common/models/token_models/tokens.rs index 38728b2ef..f6d2e8fab 100644 --- a/rust/processor/src/models/token_models/tokens.rs +++ b/rust/processor/src/db/common/models/token_models/tokens.rs @@ -13,11 +13,11 @@ use super::{ token_utils::{TokenResource, TokenWriteSet}, }; use crate::{ - models::default_models::move_resources::MoveResource, + db::common::models::default_models::move_resources::MoveResource, schema::tokens, utils::{ counters::PROCESSOR_UNKNOWN_TYPE_COUNT, - database::PgPoolConnection, + database::DbPoolConnection, util::{ensure_not_negative, parse_timestamp, standardize_address}, }, }; @@ -72,7 +72,9 @@ impl Token { pub async fn from_transaction( transaction: &Transaction, table_handle_to_owner: &TableHandleToOwner, - conn: &mut PgPoolConnection<'_>, + conn: &mut DbPoolConnection<'_>, + query_retries: u32, + query_retry_delay_ms: u64, ) -> ( Vec, Vec, @@ -156,6 +158,8 @@ impl Token { txn_timestamp, table_handle_to_owner, conn, + query_retries, + query_retry_delay_ms, ) .await .unwrap(), @@ -415,7 +419,7 @@ impl TableMetadataForToken { write_resource: &WriteResource, txn_version: i64, ) -> anyhow::Result> { - let type_str = MoveResource::get_outer_type_from_resource(write_resource); + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); if !TokenResource::is_resource_supported(type_str.as_str()) { return Ok(None); } diff --git a/rust/processor/src/models/token_v2_models/mod.rs b/rust/processor/src/db/common/models/token_v2_models/mod.rs similarity index 89% rename from rust/processor/src/models/token_v2_models/mod.rs rename to rust/processor/src/db/common/models/token_v2_models/mod.rs index ab7c4616c..49bd71da5 100644 --- a/rust/processor/src/models/token_v2_models/mod.rs +++ b/rust/processor/src/db/common/models/token_v2_models/mod.rs @@ -1,6 +1,7 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 +pub mod v1_token_royalty; pub mod v2_collections; pub mod v2_token_activities; pub mod v2_token_datas; diff --git a/rust/processor/src/db/common/models/token_v2_models/v1_token_royalty.rs b/rust/processor/src/db/common/models/token_v2_models/v1_token_royalty.rs new file mode 100644 index 000000000..f7e1cb124 --- /dev/null +++ 
b/rust/processor/src/db/common/models/token_v2_models/v1_token_royalty.rs @@ -0,0 +1,100 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use crate::{ + db::common::models::token_models::token_utils::TokenWriteSet, schema::current_token_royalty_v1, +}; +use aptos_protos::transaction::v1::WriteTableItem; +use bigdecimal::BigDecimal; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +#[derive( + Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize, PartialEq, Eq, +)] +#[diesel(primary_key(token_data_id))] +#[diesel(table_name = current_token_royalty_v1)] +pub struct CurrentTokenRoyaltyV1 { + pub token_data_id: String, + pub payee_address: String, + pub royalty_points_numerator: BigDecimal, + pub royalty_points_denominator: BigDecimal, + pub last_transaction_version: i64, + pub last_transaction_timestamp: chrono::NaiveDateTime, +} + +impl Ord for CurrentTokenRoyaltyV1 { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.token_data_id.cmp(&other.token_data_id) + } +} +impl PartialOrd for CurrentTokenRoyaltyV1 { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl CurrentTokenRoyaltyV1 { + pub fn pk(&self) -> String { + self.token_data_id.clone() + } + + // Royalty for v2 token is more complicated and not supported yet. For token v2, royalty can be on the collection (default) or on + // the token (override). + pub fn get_v1_from_write_table_item( + write_table_item: &WriteTableItem, + transaction_version: i64, + transaction_timestamp: chrono::NaiveDateTime, + ) -> anyhow::Result> { + let table_item_data = write_table_item.data.as_ref().unwrap(); + + let maybe_token_data = match TokenWriteSet::from_table_item_type( + table_item_data.value_type.as_str(), + &table_item_data.value, + transaction_version, + )? { + Some(TokenWriteSet::TokenData(inner)) => Some(inner), + _ => None, + }; + + if let Some(token_data) = maybe_token_data { + let maybe_token_data_id = match TokenWriteSet::from_table_item_type( + table_item_data.key_type.as_str(), + &table_item_data.key, + transaction_version, + )? 
{ + Some(TokenWriteSet::TokenDataId(inner)) => Some(inner), + _ => None, + }; + if let Some(token_data_id_struct) = maybe_token_data_id { + // token data id is the 0x{hash} version of the creator, collection name, and token name + let token_data_id = token_data_id_struct.to_id(); + let payee_address = token_data.royalty.get_payee_address(); + let royalty_points_numerator = token_data.royalty.royalty_points_numerator.clone(); + let royalty_points_denominator = + token_data.royalty.royalty_points_denominator.clone(); + + return Ok(Some(Self { + token_data_id, + payee_address, + royalty_points_numerator, + royalty_points_denominator, + last_transaction_version: transaction_version, + last_transaction_timestamp: transaction_timestamp, + })); + } else { + tracing::warn!( + transaction_version, + key_type = table_item_data.key_type, + key = table_item_data.key, + "Expecting token_data_id as key for value = token_data" + ); + } + } + Ok(None) + } +} diff --git a/rust/processor/src/models/token_v2_models/v2_collections.rs b/rust/processor/src/db/common/models/token_v2_models/v2_collections.rs similarity index 89% rename from rust/processor/src/models/token_v2_models/v2_collections.rs rename to rust/processor/src/db/common/models/token_v2_models/v2_collections.rs index 601547fdf..5d3a394a9 100644 --- a/rust/processor/src/models/token_v2_models/v2_collections.rs +++ b/rust/processor/src/db/common/models/token_v2_models/v2_collections.rs @@ -7,17 +7,17 @@ use super::v2_token_utils::{TokenStandard, V2TokenResource}; use crate::{ - models::{ + db::common::models::{ default_models::move_resources::MoveResource, object_models::v2_object_utils::ObjectAggregatedDataMapping, token_models::{ - collection_datas::{CollectionData, QUERY_RETRIES, QUERY_RETRY_DELAY_MS}, + collection_datas::CollectionData, token_utils::{CollectionDataIdType, TokenWriteSet}, tokens::TableHandleToOwner, }, }, schema::{collections_v2, current_collections_v2}, - utils::{database::PgPoolConnection, util::standardize_address}, + utils::{database::DbPoolConnection, util::standardize_address}, }; use anyhow::Context; use aptos_protos::transaction::v1::{WriteResource, WriteTableItem}; @@ -85,7 +85,7 @@ impl CollectionV2 { txn_timestamp: chrono::NaiveDateTime, object_metadatas: &ObjectAggregatedDataMapping, ) -> anyhow::Result> { - let type_str = MoveResource::get_outer_type_from_resource(write_resource); + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); if !V2TokenResource::is_resource_supported(type_str.as_str()) { return Ok(None); } @@ -200,7 +200,9 @@ impl CollectionV2 { write_set_change_index: i64, txn_timestamp: chrono::NaiveDateTime, table_handle_to_owner: &TableHandleToOwner, - conn: &mut PgPoolConnection<'_>, + conn: &mut DbPoolConnection<'_>, + query_retries: u32, + query_retry_delay_ms: u64, ) -> anyhow::Result> { let table_item_data = table_item.data.as_ref().unwrap(); @@ -220,16 +222,27 @@ impl CollectionV2 { let mut creator_address = match maybe_creator_address { Some(ca) => ca, None => { - match Self::get_collection_creator_for_v1(conn, &table_handle) - .await - .context(format!( - "Failed to get collection creator for table handle {}, txn version {}", - table_handle, txn_version - )) { + match Self::get_collection_creator_for_v1( + conn, + &table_handle, + query_retries, + query_retry_delay_ms, + ) + .await + .context(format!( + "Failed to get collection creator for table handle {}, txn version {}", + table_handle, txn_version + )) { Ok(ca) => ca, Err(_) => { // Try our best by getting from 
the older collection data - match CollectionData::get_collection_creator(conn, &table_handle).await + match CollectionData::get_collection_creator( + conn, + &table_handle, + query_retries, + query_retry_delay_ms, + ) + .await { Ok(creator) => creator, Err(_) => { @@ -296,17 +309,21 @@ impl CollectionV2 { /// cannot change, we can just look in the current_collection_datas table. /// Retrying a few times since this collection could've been written in a separate thread. async fn get_collection_creator_for_v1( - conn: &mut PgPoolConnection<'_>, + conn: &mut DbPoolConnection<'_>, table_handle: &str, + query_retries: u32, + query_retry_delay_ms: u64, ) -> anyhow::Result { - let mut retried = 0; - while retried < QUERY_RETRIES { - retried += 1; + let mut tried = 0; + while tried < query_retries { + tried += 1; match Self::get_by_table_handle(conn, table_handle).await { Ok(creator) => return Ok(creator), Err(_) => { - tokio::time::sleep(std::time::Duration::from_millis(QUERY_RETRY_DELAY_MS)) - .await; + if tried < query_retries { + tokio::time::sleep(std::time::Duration::from_millis(query_retry_delay_ms)) + .await; + } }, } } @@ -315,7 +332,7 @@ impl CollectionV2 { /// TODO: Change this to a KV store async fn get_by_table_handle( - conn: &mut PgPoolConnection<'_>, + conn: &mut DbPoolConnection<'_>, table_handle: &str, ) -> anyhow::Result { let mut res: Vec> = sql_query( diff --git a/rust/processor/src/models/token_v2_models/v2_token_activities.rs b/rust/processor/src/db/common/models/token_v2_models/v2_token_activities.rs similarity index 74% rename from rust/processor/src/models/token_v2_models/v2_token_activities.rs rename to rust/processor/src/db/common/models/token_v2_models/v2_token_activities.rs index b311bc0c2..83ee38388 100644 --- a/rust/processor/src/models/token_v2_models/v2_token_activities.rs +++ b/rust/processor/src/db/common/models/token_v2_models/v2_token_activities.rs @@ -5,18 +5,14 @@ #![allow(clippy::extra_unused_lifetimes)] #![allow(clippy::unused_unit)] -use super::{ - v2_token_datas::TokenDataV2, - v2_token_utils::{TokenStandard, V2TokenEvent}, -}; +use super::v2_token_utils::{TokenStandard, V2TokenEvent}; use crate::{ - models::{ - fungible_asset_models::v2_fungible_asset_utils::FungibleAssetEvent, + db::common::models::{ object_models::v2_object_utils::ObjectAggregatedDataMapping, token_models::token_utils::{TokenDataIdType, TokenEvent}, }, schema::token_activities_v2, - utils::{database::PgPoolConnection, util::standardize_address}, + utils::util::standardize_address, }; use aptos_protos::transaction::v1::Event; use bigdecimal::{BigDecimal, One, Zero}; @@ -64,88 +60,6 @@ struct TokenActivityHelperV2 { } impl TokenActivityV2 { - /// We'll go from 0x1::fungible_asset::withdraw/deposit events. - /// We're guaranteed to find a 0x1::fungible_asset::FungibleStore which has a pointer to the - /// fungible asset metadata which could be a token. We'll either find that token in token_v2_metadata - /// or by looking up the postgres table. - /// TODO: Create artificial events for mint and burn. There are no mint and burn events so we'll have to - /// add all the deposits/withdrawals and if it's positive/negative it's a mint/burn. 
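Stepping back to the new v1_token_royalty.rs model added earlier in this diff: CurrentTokenRoyaltyV1 is keyed on token_data_id only and gains an Ord impl plus a pk() helper, which suggests per-batch deduplication where the latest transaction version wins. A rough usage sketch under that assumption; collect_latest_royalties and the trimmed struct are illustrative, not code from this PR:

    use std::collections::HashMap;

    // Stand-in for the crate's CurrentTokenRoyaltyV1; only the fields needed here.
    #[derive(Clone)]
    struct CurrentTokenRoyaltyV1 {
        token_data_id: String,
        last_transaction_version: i64,
    }

    impl CurrentTokenRoyaltyV1 {
        fn pk(&self) -> String {
            self.token_data_id.clone()
        }
    }

    // Keep only the newest row per primary key, mirroring how "current_" tables are built.
    fn collect_latest_royalties(
        rows: impl IntoIterator<Item = CurrentTokenRoyaltyV1>,
    ) -> Vec<CurrentTokenRoyaltyV1> {
        let mut latest: HashMap<String, CurrentTokenRoyaltyV1> = HashMap::new();
        for row in rows {
            let newer = latest.get(&row.pk()).map_or(true, |existing| {
                row.last_transaction_version > existing.last_transaction_version
            });
            if newer {
                latest.insert(row.pk(), row);
            }
        }
        latest.into_values().collect()
    }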
- pub async fn get_ft_v2_from_parsed_event( - event: &Event, - txn_version: i64, - txn_timestamp: chrono::NaiveDateTime, - event_index: i64, - entry_function_id_str: &Option, - object_metadatas: &ObjectAggregatedDataMapping, - conn: &mut PgPoolConnection<'_>, - ) -> anyhow::Result> { - let event_type = event.type_str.clone(); - if let Some(fa_event) = - &FungibleAssetEvent::from_event(event_type.as_str(), &event.data, txn_version)? - { - let event_account_address = - standardize_address(&event.key.as_ref().unwrap().account_address); - - // The event account address will also help us find fungible store which tells us where to find - // the metadata - if let Some(object_data) = object_metadatas.get(&event_account_address) { - let object_core = &object_data.object.object_core; - let fungible_asset = object_data.fungible_asset_store.as_ref().unwrap(); - let token_data_id = fungible_asset.metadata.get_reference_address(); - // Exit early if it's not a token - if !TokenDataV2::is_address_fungible_token( - conn, - &token_data_id, - object_metadatas, - txn_version, - ) - .await - { - return Ok(None); - } - - let token_activity_helper = match fa_event { - FungibleAssetEvent::WithdrawEvent(inner) => TokenActivityHelperV2 { - from_address: Some(object_core.get_owner_address()), - to_address: None, - token_amount: inner.amount.clone(), - before_value: None, - after_value: None, - event_type: event_type.clone(), - }, - FungibleAssetEvent::DepositEvent(inner) => TokenActivityHelperV2 { - from_address: None, - to_address: Some(object_core.get_owner_address()), - token_amount: inner.amount.clone(), - before_value: None, - after_value: None, - event_type: event_type.clone(), - }, - _ => return Ok(None), - }; - - return Ok(Some(Self { - transaction_version: txn_version, - event_index, - event_account_address, - token_data_id: token_data_id.clone(), - property_version_v1: BigDecimal::zero(), - type_: token_activity_helper.event_type, - from_address: token_activity_helper.from_address, - to_address: token_activity_helper.to_address, - token_amount: token_activity_helper.token_amount, - before_value: token_activity_helper.before_value, - after_value: token_activity_helper.after_value, - entry_function_id_str: entry_function_id_str.clone(), - token_standard: TokenStandard::V2.to_string(), - is_fungible_v2: Some(true), - transaction_timestamp: txn_timestamp, - })); - } - } - Ok(None) - } - pub async fn get_nft_v2_from_parsed_event( event: &Event, txn_version: i64, @@ -236,7 +150,7 @@ impl TokenActivityV2 { after_value: token_activity_helper.after_value, entry_function_id_str: entry_function_id_str.clone(), token_standard: TokenStandard::V2.to_string(), - is_fungible_v2: Some(false), + is_fungible_v2: None, transaction_timestamp: txn_timestamp, })); } else { @@ -244,7 +158,7 @@ impl TokenActivityV2 { // the new burn event has owner address now! 
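The `if let` continued just below relies on Burn::get_previous_owner_address now returning an Option (see the v2_token_utils hunk later in this diff, where an empty previous_owner maps to None). The fuller fallback order used for the ownership rows later in this patch can be sketched with plain Option parameters standing in for the event, the in-batch map, and the database lookup; resolve_previous_owner is an illustrative name:

    // Illustrative only: the order in which a burned token's previous owner is resolved.
    fn resolve_previous_owner(
        from_burn_event: Option<String>,    // Burn::get_previous_owner_address()
        from_current_batch: Option<String>, // prior_nft_ownership map built in this batch
        from_database: Option<String>,      // CurrentTokenOwnershipV2Query lookup with retries
    ) -> String {
        from_burn_event
            .or(from_current_batch)
            .or(from_database)
            // DEFAULT_OWNER_ADDRESS ("unknown") is the last resort in the crate.
            .unwrap_or_else(|| "unknown".to_string())
    }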
let owner_address = if let V2TokenEvent::Burn(inner) = token_event { - Some(inner.get_previous_owner_address()) + inner.get_previous_owner_address() } else { // To handle a case with the old burn events, when a token is minted and burnt in the same transaction None @@ -264,7 +178,7 @@ impl TokenActivityV2 { after_value: None, entry_function_id_str: entry_function_id_str.clone(), token_standard: TokenStandard::V2.to_string(), - is_fungible_v2: Some(false), + is_fungible_v2: None, transaction_timestamp: txn_timestamp, })); } diff --git a/rust/processor/src/models/token_v2_models/v2_token_datas.rs b/rust/processor/src/db/common/models/token_v2_models/v2_token_datas.rs similarity index 57% rename from rust/processor/src/models/token_v2_models/v2_token_datas.rs rename to rust/processor/src/db/common/models/token_v2_models/v2_token_datas.rs index b1c497ddc..3be211eb0 100644 --- a/rust/processor/src/models/token_v2_models/v2_token_datas.rs +++ b/rust/processor/src/db/common/models/token_v2_models/v2_token_datas.rs @@ -5,22 +5,18 @@ #![allow(clippy::extra_unused_lifetimes)] #![allow(clippy::unused_unit)] -use super::v2_token_utils::{TokenStandard, TokenV2}; +use super::v2_token_utils::{TokenStandard, TokenV2, TokenV2Burned}; use crate::{ - models::{ + db::common::models::{ object_models::v2_object_utils::ObjectAggregatedDataMapping, - token_models::{ - collection_datas::{QUERY_RETRIES, QUERY_RETRY_DELAY_MS}, - token_utils::TokenWriteSet, - }, + token_models::token_utils::TokenWriteSet, }, schema::{current_token_datas_v2, token_datas_v2}, - utils::{database::PgPoolConnection, util::standardize_address}, + utils::util::standardize_address, }; -use aptos_protos::transaction::v1::{WriteResource, WriteTableItem}; -use bigdecimal::{BigDecimal, Zero}; +use aptos_protos::transaction::v1::{DeleteResource, WriteResource, WriteTableItem}; +use bigdecimal::BigDecimal; use diesel::prelude::*; -use diesel_async::RunQueryDsl; use field_count::FieldCount; use serde::{Deserialize, Serialize}; @@ -37,7 +33,7 @@ pub struct TokenDataV2 { pub collection_id: String, pub token_name: String, pub maximum: Option, - pub supply: BigDecimal, + pub supply: Option, pub largest_property_version_v1: Option, pub token_uri: String, pub token_properties: serde_json::Value, @@ -45,7 +41,10 @@ pub struct TokenDataV2 { pub token_standard: String, pub is_fungible_v2: Option, pub transaction_timestamp: chrono::NaiveDateTime, - pub decimals: i64, + // Deprecated, but still here for backwards compatibility + pub decimals: Option, + // Here for consistency but we don't need to actually fill it + // pub is_deleted_v2: Option, } #[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] @@ -56,7 +55,7 @@ pub struct CurrentTokenDataV2 { pub collection_id: String, pub token_name: String, pub maximum: Option, - pub supply: BigDecimal, + pub supply: Option, pub largest_property_version_v1: Option, pub token_uri: String, pub token_properties: serde_json::Value, @@ -65,28 +64,9 @@ pub struct CurrentTokenDataV2 { pub is_fungible_v2: Option, pub last_transaction_version: i64, pub last_transaction_timestamp: chrono::NaiveDateTime, - pub decimals: i64, -} - -#[derive(Debug, Deserialize, Identifiable, Queryable, Serialize)] -#[diesel(primary_key(token_data_id))] -#[diesel(table_name = current_token_datas_v2)] -pub struct CurrentTokenDataV2Query { - pub token_data_id: String, - pub collection_id: String, - pub token_name: String, - pub maximum: Option, - pub supply: BigDecimal, - pub largest_property_version_v1: Option, - pub 
token_uri: String, - pub description: String, - pub token_properties: serde_json::Value, - pub token_standard: String, - pub is_fungible_v2: Option, - pub last_transaction_version: i64, - pub last_transaction_timestamp: chrono::NaiveDateTime, - pub inserted_at: chrono::NaiveDateTime, - pub decimals: i64, + // Deprecated, but still here for backwards compatibility + pub decimals: Option, + pub is_deleted_v2: Option, } impl TokenDataV2 { @@ -102,21 +82,15 @@ impl TokenDataV2 { if let Some(inner) = &TokenV2::from_write_resource(write_resource, txn_version)? { let token_data_id = standardize_address(&write_resource.address.to_string()); let mut token_name = inner.get_name_trunc(); - // Get maximum, supply, and is fungible from fungible asset if this is a fungible token - let (mut maximum, mut supply, mut decimals, mut is_fungible_v2) = - (None, BigDecimal::zero(), 0, Some(false)); + let is_fungible_v2; // Get token properties from 0x4::property_map::PropertyMap let mut token_properties = serde_json::Value::Null; if let Some(object_metadata) = object_metadatas.get(&token_data_id) { let fungible_asset_metadata = object_metadata.fungible_asset_metadata.as_ref(); - let fungible_asset_supply = object_metadata.fungible_asset_supply.as_ref(); - if let Some(metadata) = fungible_asset_metadata { - if let Some(fa_supply) = fungible_asset_supply { - maximum = fa_supply.get_maximum(); - supply = fa_supply.current.clone(); - decimals = metadata.decimals as i64; - is_fungible_v2 = Some(true); - } + if fungible_asset_metadata.is_some() { + is_fungible_v2 = Some(true); + } else { + is_fungible_v2 = Some(false); } token_properties = object_metadata .property_map @@ -142,8 +116,8 @@ impl TokenDataV2 { token_data_id: token_data_id.clone(), collection_id: collection_id.clone(), token_name: token_name.clone(), - maximum: maximum.clone(), - supply: supply.clone(), + maximum: None, + supply: None, largest_property_version_v1: None, token_uri: token_uri.clone(), token_properties: token_properties.clone(), @@ -151,14 +125,14 @@ impl TokenDataV2 { token_standard: TokenStandard::V2.to_string(), is_fungible_v2, transaction_timestamp: txn_timestamp, - decimals, + decimals: None, }, CurrentTokenDataV2 { token_data_id, collection_id, token_name, - maximum, - supply, + maximum: None, + supply: None, largest_property_version_v1: None, token_uri, token_properties, @@ -167,7 +141,8 @@ impl TokenDataV2 { is_fungible_v2, last_transaction_version: txn_version, last_transaction_timestamp: txn_timestamp, - decimals, + decimals: None, + is_deleted_v2: Some(false), }, ))) } else { @@ -175,6 +150,70 @@ impl TokenDataV2 { } } + /// This handles the case where token is burned but objectCore is still there + pub async fn get_burned_nft_v2_from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + txn_timestamp: chrono::NaiveDateTime, + tokens_burned: &TokenV2Burned, + ) -> anyhow::Result> { + let token_data_id = standardize_address(&write_resource.address.to_string()); + // reminder that v1 events won't get to this codepath + if let Some(burn_event_v2) = tokens_burned.get(&standardize_address(&token_data_id)) { + Ok(Some(CurrentTokenDataV2 { + token_data_id, + collection_id: burn_event_v2.get_collection_address(), + token_name: "".to_string(), + maximum: None, + supply: None, + largest_property_version_v1: None, + token_uri: "".to_string(), + token_properties: serde_json::Value::Null, + description: "".to_string(), + token_standard: TokenStandard::V2.to_string(), + is_fungible_v2: Some(false), + last_transaction_version: 
txn_version, + last_transaction_timestamp: txn_timestamp, + decimals: None, + is_deleted_v2: Some(true), + })) + } else { + Ok(None) + } + } + + /// This handles the case where token is burned and objectCore is deleted + pub async fn get_burned_nft_v2_from_delete_resource( + delete_resource: &DeleteResource, + txn_version: i64, + txn_timestamp: chrono::NaiveDateTime, + tokens_burned: &TokenV2Burned, + ) -> anyhow::Result> { + let token_data_id = standardize_address(&delete_resource.address.to_string()); + // reminder that v1 events won't get to this codepath + if let Some(burn_event_v2) = tokens_burned.get(&standardize_address(&token_data_id)) { + Ok(Some(CurrentTokenDataV2 { + token_data_id, + collection_id: burn_event_v2.get_collection_address(), + token_name: "".to_string(), + maximum: None, + supply: None, + largest_property_version_v1: None, + token_uri: "".to_string(), + token_properties: serde_json::Value::Null, + description: "".to_string(), + token_standard: TokenStandard::V2.to_string(), + is_fungible_v2: Some(false), + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + decimals: None, + is_deleted_v2: Some(true), + })) + } else { + Ok(None) + } + } + pub fn get_v1_from_write_table_item( table_item: &WriteTableItem, txn_version: i64, @@ -215,7 +254,7 @@ impl TokenDataV2 { collection_id: collection_id.clone(), token_name: token_name.clone(), maximum: Some(token_data.maximum.clone()), - supply: token_data.supply.clone(), + supply: Some(token_data.supply.clone()), largest_property_version_v1: Some( token_data.largest_property_version.clone(), ), @@ -225,14 +264,14 @@ impl TokenDataV2 { token_standard: TokenStandard::V1.to_string(), is_fungible_v2: None, transaction_timestamp: txn_timestamp, - decimals: 0, + decimals: None, }, CurrentTokenDataV2 { token_data_id, collection_id, token_name, maximum: Some(token_data.maximum), - supply: token_data.supply, + supply: Some(token_data.supply), largest_property_version_v1: Some(token_data.largest_property_version), token_uri, token_properties: token_data.default_properties, @@ -241,7 +280,8 @@ impl TokenDataV2 { is_fungible_v2: None, last_transaction_version: txn_version, last_transaction_timestamp: txn_timestamp, - decimals: 0, + decimals: None, + is_deleted_v2: None, }, ))); } else { @@ -255,98 +295,4 @@ impl TokenDataV2 { } Ok(None) } - - /// A fungible asset can also be a token. We will make a best effort guess at whether this is a fungible token. - /// 1. If metadata is present with a token object, then is a token - /// 2. If metadata is not present, we will do a lookup in the db. - pub async fn is_address_fungible_token( - conn: &mut PgPoolConnection<'_>, - token_data_id: &str, - object_aggregated_data_mapping: &ObjectAggregatedDataMapping, - txn_version: i64, - ) -> bool { - // 1. If metadata is present, the object is a token iff token struct is also present in the object - if let Some(object_data) = object_aggregated_data_mapping.get(token_data_id) { - if object_data.fungible_asset_metadata.is_some() { - return object_data.token.is_some(); - } - } - // 2. If metadata is not present, we will do a lookup in the db. 
- match CurrentTokenDataV2::get_current_token_data_v2(conn, txn_version, token_data_id).await - { - Ok(token_data) => { - if let Some(is_fungible_v2) = token_data.is_fungible_v2 { - return is_fungible_v2; - } - // If is_fungible_v2 is null, that's likely because it's a v1 token, which are not fungible - false - }, - Err(_) => { - tracing::error!( - transaction_version = txn_version, - lookup_key = token_data_id, - "Missing current_token_data_v2 for token_data_id: {}. You probably should backfill db.", - token_data_id, - ); - // Default - false - }, - } - } -} - -impl CurrentTokenDataV2 { - pub async fn get_current_token_data_v2( - conn: &mut PgPoolConnection<'_>, - txn_version: i64, - token_data_id: &str, - ) -> anyhow::Result { - let mut retries = 0; - while retries < QUERY_RETRIES { - retries += 1; - match CurrentTokenDataV2Query::get_by_token_data_id(conn, token_data_id).await { - Ok(res) => { - return Ok(CurrentTokenDataV2 { - token_data_id: res.token_data_id, - collection_id: res.collection_id, - token_name: res.token_name, - maximum: res.maximum, - supply: res.supply, - largest_property_version_v1: res.largest_property_version_v1, - token_uri: res.token_uri, - token_properties: res.token_properties, - description: res.description, - token_standard: res.token_standard, - is_fungible_v2: res.is_fungible_v2, - last_transaction_version: res.last_transaction_version, - last_transaction_timestamp: res.last_transaction_timestamp, - decimals: res.decimals, - }); - }, - Err(_) => { - tracing::error!( - transaction_version = txn_version, - lookup_key = token_data_id, - "Missing current_token_data_v2 for token_data_id: {}. You probably should backfill db.", - token_data_id, - ); - tokio::time::sleep(std::time::Duration::from_millis(QUERY_RETRY_DELAY_MS)) - .await; - }, - } - } - Err(anyhow::anyhow!("Failed to get token data")) - } -} - -impl CurrentTokenDataV2Query { - pub async fn get_by_token_data_id( - conn: &mut PgPoolConnection<'_>, - token_data_id: &str, - ) -> diesel::QueryResult { - current_token_datas_v2::table - .filter(current_token_datas_v2::token_data_id.eq(token_data_id)) - .first::(conn) - .await - } } diff --git a/rust/processor/src/models/token_v2_models/v2_token_metadata.rs b/rust/processor/src/db/common/models/token_v2_models/v2_token_metadata.rs similarity index 99% rename from rust/processor/src/models/token_v2_models/v2_token_metadata.rs rename to rust/processor/src/db/common/models/token_v2_models/v2_token_metadata.rs index 3a2599b28..d059a3da9 100644 --- a/rust/processor/src/models/token_v2_models/v2_token_metadata.rs +++ b/rust/processor/src/db/common/models/token_v2_models/v2_token_metadata.rs @@ -7,7 +7,7 @@ use super::v2_token_utils::TOKEN_V2_ADDR; use crate::{ - models::{ + db::common::models::{ coin_models::coin_utils::COIN_ADDR, default_models::move_resources::MoveResource, object_models::v2_object_utils::ObjectAggregatedDataMapping, diff --git a/rust/processor/src/models/token_v2_models/v2_token_ownerships.rs b/rust/processor/src/db/common/models/token_v2_models/v2_token_ownerships.rs similarity index 82% rename from rust/processor/src/models/token_v2_models/v2_token_ownerships.rs rename to rust/processor/src/db/common/models/token_v2_models/v2_token_ownerships.rs index e7059ac0c..9dbc94533 100644 --- a/rust/processor/src/models/token_v2_models/v2_token_ownerships.rs +++ b/rust/processor/src/db/common/models/token_v2_models/v2_token_ownerships.rs @@ -10,20 +10,14 @@ use super::{ v2_token_utils::{TokenStandard, TokenV2Burned}, }; use crate::{ - models::{ - 
default_models::move_resources::MoveResource, - fungible_asset_models::v2_fungible_asset_utils::V2FungibleAssetResource, + db::common::models::{ object_models::v2_object_utils::{ObjectAggregatedDataMapping, ObjectWithMetadata}, - token_models::{ - collection_datas::{QUERY_RETRIES, QUERY_RETRY_DELAY_MS}, - token_utils::TokenWriteSet, - tokens::TableHandleToOwner, - }, + token_models::{token_utils::TokenWriteSet, tokens::TableHandleToOwner}, token_v2_models::v2_token_utils::DEFAULT_OWNER_ADDRESS, }, schema::{current_token_ownerships_v2, token_ownerships_v2}, utils::{ - database::PgPoolConnection, + database::DbPoolConnection, util::{ensure_not_negative, standardize_address}, }, }; @@ -119,10 +113,6 @@ impl TokenOwnershipV2 { Vec, AHashMap, )> { - // We should be indexing v1 token or v2 fungible token here - if token_data.is_fungible_v2 != Some(false) { - return Ok((vec![], AHashMap::new())); - } let mut ownerships = vec![]; let mut current_ownerships = AHashMap::new(); @@ -133,7 +123,16 @@ impl TokenOwnershipV2 { let token_data_id = token_data.token_data_id.clone(); let owner_address = object_core.get_owner_address(); let storage_id = token_data_id.clone(); - let is_soulbound = !object_core.allow_ungated_transfer; + + // is_soulbound currently means if an object is completely untransferrable + // OR if only admin can transfer. Only the former is true soulbound but + // people might already be using it with the latter meaning so let's include both. + let is_soulbound = if object_data.untransferable.as_ref().is_some() { + true + } else { + !object_core.allow_ungated_transfer + }; + let non_transferrable_by_owner = !object_core.allow_ungated_transfer; ownerships.push(Self { transaction_version: token_data.transaction_version, @@ -147,9 +146,9 @@ impl TokenOwnershipV2 { token_properties_mutated_v1: None, is_soulbound_v2: Some(is_soulbound), token_standard: TokenStandard::V2.to_string(), - is_fungible_v2: token_data.is_fungible_v2, + is_fungible_v2: None, transaction_timestamp: token_data.transaction_timestamp, - non_transferrable_by_owner: Some(is_soulbound), + non_transferrable_by_owner: Some(non_transferrable_by_owner), }); current_ownerships.insert( ( @@ -168,10 +167,10 @@ impl TokenOwnershipV2 { token_properties_mutated_v1: None, is_soulbound_v2: Some(is_soulbound), token_standard: TokenStandard::V2.to_string(), - is_fungible_v2: token_data.is_fungible_v2, + is_fungible_v2: None, last_transaction_version: token_data.transaction_version, last_transaction_timestamp: token_data.transaction_timestamp, - non_transferrable_by_owner: Some(is_soulbound), + non_transferrable_by_owner: Some(non_transferrable_by_owner), }, ); @@ -196,7 +195,7 @@ impl TokenOwnershipV2 { token_properties_mutated_v1: None, is_soulbound_v2: Some(is_soulbound), token_standard: TokenStandard::V2.to_string(), - is_fungible_v2: token_data.is_fungible_v2, + is_fungible_v2: None, transaction_timestamp: token_data.transaction_timestamp, non_transferrable_by_owner: Some(is_soulbound), }); @@ -219,7 +218,7 @@ impl TokenOwnershipV2 { token_properties_mutated_v1: None, is_soulbound_v2: Some(is_soulbound), token_standard: TokenStandard::V2.to_string(), - is_fungible_v2: token_data.is_fungible_v2, + is_fungible_v2: None, last_transaction_version: token_data.transaction_version, last_transaction_timestamp: token_data.transaction_timestamp, non_transferrable_by_owner: Some(is_soulbound), @@ -230,12 +229,17 @@ impl TokenOwnershipV2 { } /// This handles the case where token is burned but objectCore is still there - pub fn 
get_burned_nft_v2_from_write_resource( + pub async fn get_burned_nft_v2_from_write_resource( write_resource: &WriteResource, txn_version: i64, write_set_change_index: i64, txn_timestamp: chrono::NaiveDateTime, + prior_nft_ownership: &AHashMap, tokens_burned: &TokenV2Burned, + object_metadatas: &ObjectAggregatedDataMapping, + conn: &mut DbPoolConnection<'_>, + query_retries: u32, + query_retry_delay_ms: u64, ) -> anyhow::Result> { let token_data_id = standardize_address(&write_resource.address.to_string()); if tokens_burned @@ -248,7 +252,20 @@ impl TokenOwnershipV2 { let object_core = &object.object_core; let owner_address = object_core.get_owner_address(); let storage_id = token_data_id.clone(); - let is_soulbound = !object_core.allow_ungated_transfer; + + // is_soulbound currently means if an object is completely untransferrable + // OR if only admin can transfer. Only the former is true soulbound but + // people might already be using it with the latter meaning so let's include both. + let is_soulbound = if object_metadatas + .get(&token_data_id) + .map(|obj| obj.untransferable.as_ref()) + .is_some() + { + true + } else { + !object_core.allow_ungated_transfer + }; + let non_transferrable_by_owner = !object_core.allow_ungated_transfer; return Ok(Some(( Self { @@ -265,7 +282,7 @@ impl TokenOwnershipV2 { token_standard: TokenStandard::V2.to_string(), is_fungible_v2: Some(false), transaction_timestamp: txn_timestamp, - non_transferrable_by_owner: Some(is_soulbound), + non_transferrable_by_owner: Some(non_transferrable_by_owner), }, CurrentTokenOwnershipV2 { token_data_id, @@ -280,9 +297,22 @@ impl TokenOwnershipV2 { is_fungible_v2: Some(false), last_transaction_version: txn_version, last_transaction_timestamp: txn_timestamp, - non_transferrable_by_owner: Some(is_soulbound), + non_transferrable_by_owner: Some(non_transferrable_by_owner), }, ))); + } else { + return Self::get_burned_nft_v2_helper( + &token_data_id, + txn_version, + write_set_change_index, + txn_timestamp, + prior_nft_ownership, + tokens_burned, + conn, + query_retries, + query_retry_delay_ms, + ) + .await; } } Ok(None) @@ -290,28 +320,60 @@ impl TokenOwnershipV2 { /// This handles the case where token is burned and objectCore is deleted pub async fn get_burned_nft_v2_from_delete_resource( - write_resource: &DeleteResource, + delete_resource: &DeleteResource, txn_version: i64, write_set_change_index: i64, txn_timestamp: chrono::NaiveDateTime, prior_nft_ownership: &AHashMap, tokens_burned: &TokenV2Burned, - conn: &mut PgPoolConnection<'_>, + conn: &mut DbPoolConnection<'_>, + query_retries: u32, + query_retry_delay_ms: u64, ) -> anyhow::Result> { - let token_address = standardize_address(&write_resource.address.to_string()); + let token_address = standardize_address(&delete_resource.address.to_string()); + Self::get_burned_nft_v2_helper( + &token_address, + txn_version, + write_set_change_index, + txn_timestamp, + prior_nft_ownership, + tokens_burned, + conn, + query_retries, + query_retry_delay_ms, + ) + .await + } + + async fn get_burned_nft_v2_helper( + token_address: &str, + txn_version: i64, + write_set_change_index: i64, + txn_timestamp: chrono::NaiveDateTime, + prior_nft_ownership: &AHashMap, + tokens_burned: &TokenV2Burned, + conn: &mut DbPoolConnection<'_>, + query_retries: u32, + query_retry_delay_ms: u64, + ) -> anyhow::Result> { + let token_address = standardize_address(token_address); if let Some(burn_event) = tokens_burned.get(&token_address) { // 1. 
Try to lookup token address in burn event mapping - let previous_owner = if let Some(burn_event) = burn_event { + let previous_owner = if let Some(previous_owner) = burn_event.get_previous_owner_address() + { + previous_owner } else { // 2. If it doesn't exist in burn event mapping, then it must be an old burn event that doesn't contain previous_owner. - // Do a lookup to get previous owner. This is necessary because preivous owner is part of current token ownerships primary key. + // Do a lookup to get previous owner. This is necessary because previous owner is part of current token ownerships primary key. match prior_nft_ownership.get(&token_address) { Some(inner) => inner.owner_address.clone(), None => { match CurrentTokenOwnershipV2Query::get_latest_owned_nft_by_token_data_id( conn, &token_address, + query_retries, + query_retry_delay_ms, ) .await { @@ -369,90 +431,6 @@ impl TokenOwnershipV2 { Ok(None) } - // Getting this from 0x1::fungible_asset::FungibleStore - pub async fn get_ft_v2_from_write_resource( - write_resource: &WriteResource, - txn_version: i64, - write_set_change_index: i64, - txn_timestamp: chrono::NaiveDateTime, - object_metadatas: &ObjectAggregatedDataMapping, - conn: &mut PgPoolConnection<'_>, - ) -> anyhow::Result> { - let type_str = MoveResource::get_outer_type_from_resource(write_resource); - if !V2FungibleAssetResource::is_resource_supported(type_str.as_str()) { - return Ok(None); - } - let resource = MoveResource::from_write_resource( - write_resource, - 0, // Placeholder, this isn't used anyway - txn_version, - 0, // Placeholder, this isn't used anyway - ); - - if let V2FungibleAssetResource::FungibleAssetStore(inner) = - V2FungibleAssetResource::from_resource( - &type_str, - resource.data.as_ref().unwrap(), - txn_version, - )? 
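A note on the ownership hunks a little earlier: what used to be a single flag is now split. is_soulbound is set when an Untransferable resource sits on the object or when ungated transfer is disabled, while non_transferrable_by_owner keeps only the latter, narrower meaning. A self-contained sketch of that distinction; the field name mirrors 0x1::object::ObjectCore and the helper itself is illustrative:

    // Stand-in for the relevant part of 0x1::object::ObjectCore.
    struct ObjectCore {
        allow_ungated_transfer: bool,
    }

    // Illustrative helper showing how the two flags now differ.
    fn ownership_flags(
        object_core: &ObjectCore,
        has_untransferable_resource: bool,
    ) -> (bool, bool) {
        // "Soulbound" is read broadly: the object carries the Untransferable resource,
        // or ungated transfer is disabled (admin-only transfer).
        let is_soulbound = has_untransferable_resource || !object_core.allow_ungated_transfer;
        // This flag keeps the strict meaning: the owner cannot transfer without admin help.
        let non_transferrable_by_owner = !object_core.allow_ungated_transfer;
        (is_soulbound, non_transferrable_by_owner)
    }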
- { - if let Some(object_data) = object_metadatas.get(&resource.address) { - let object_core = &object_data.object.object_core; - let token_data_id = inner.metadata.get_reference_address(); - // Exit early if it's not a token - if !TokenDataV2::is_address_fungible_token( - conn, - &token_data_id, - object_metadatas, - txn_version, - ) - .await - { - return Ok(None); - } - let storage_id = resource.address.clone(); - let is_soulbound = inner.frozen; - let amount = inner.balance; - let owner_address = object_core.get_owner_address(); - - return Ok(Some(( - Self { - transaction_version: txn_version, - write_set_change_index, - token_data_id: token_data_id.clone(), - property_version_v1: BigDecimal::zero(), - owner_address: Some(owner_address.clone()), - storage_id: storage_id.clone(), - amount: amount.clone(), - table_type_v1: None, - token_properties_mutated_v1: None, - is_soulbound_v2: Some(is_soulbound), - token_standard: TokenStandard::V2.to_string(), - is_fungible_v2: Some(true), - transaction_timestamp: txn_timestamp, - non_transferrable_by_owner: Some(is_soulbound), - }, - CurrentTokenOwnershipV2 { - token_data_id, - property_version_v1: BigDecimal::zero(), - owner_address, - storage_id, - amount, - table_type_v1: None, - token_properties_mutated_v1: None, - is_soulbound_v2: Some(is_soulbound), - token_standard: TokenStandard::V2.to_string(), - is_fungible_v2: Some(true), - last_transaction_version: txn_version, - last_transaction_timestamp: txn_timestamp, - non_transferrable_by_owner: Some(is_soulbound), - }, - ))); - } - } - Ok(None) - } - /// We want to track tokens in any offer/claims and tokenstore pub fn get_v1_from_write_table_item( table_item: &WriteTableItem, @@ -614,12 +592,14 @@ impl TokenOwnershipV2 { impl CurrentTokenOwnershipV2Query { pub async fn get_latest_owned_nft_by_token_data_id( - conn: &mut PgPoolConnection<'_>, + conn: &mut DbPoolConnection<'_>, token_data_id: &str, + query_retries: u32, + query_retry_delay_ms: u64, ) -> anyhow::Result { - let mut retried = 0; - while retried < QUERY_RETRIES { - retried += 1; + let mut tried = 0; + while tried < query_retries { + tried += 1; match Self::get_latest_owned_nft_by_token_data_id_impl(conn, token_data_id).await { Ok(inner) => { return Ok(NFTOwnershipV2 { @@ -629,8 +609,10 @@ impl CurrentTokenOwnershipV2Query { }); }, Err(_) => { - tokio::time::sleep(std::time::Duration::from_millis(QUERY_RETRY_DELAY_MS)) - .await; + if tried < query_retries { + tokio::time::sleep(std::time::Duration::from_millis(query_retry_delay_ms)) + .await; + } }, } } @@ -641,7 +623,7 @@ impl CurrentTokenOwnershipV2Query { } async fn get_latest_owned_nft_by_token_data_id_impl( - conn: &mut PgPoolConnection<'_>, + conn: &mut DbPoolConnection<'_>, token_data_id: &str, ) -> diesel::QueryResult { current_token_ownerships_v2::table diff --git a/rust/processor/src/models/token_v2_models/v2_token_utils.rs b/rust/processor/src/db/common/models/token_v2_models/v2_token_utils.rs similarity index 90% rename from rust/processor/src/models/token_v2_models/v2_token_utils.rs rename to rust/processor/src/db/common/models/token_v2_models/v2_token_utils.rs index e39be436a..714d852f2 100644 --- a/rust/processor/src/models/token_v2_models/v2_token_utils.rs +++ b/rust/processor/src/db/common/models/token_v2_models/v2_token_utils.rs @@ -5,22 +5,22 @@ #![allow(clippy::extra_unused_lifetimes)] use crate::{ - models::{ + db::common::models::{ coin_models::coin_utils::COIN_ADDR, default_models::move_resources::MoveResource, - 
object_models::v2_object_utils::{CurrentObjectPK, ObjectCore}, + object_models::v2_object_utils::{CurrentObjectPK, ObjectCore, Untransferable}, token_models::token_utils::{NAME_LENGTH, URI_LENGTH}, }, utils::util::{ deserialize_from_string, deserialize_token_object_property_map_from_bcs_hexstring, - standardize_address, truncate_str, AggregatorSnapshotU64, AggregatorU64, - DerivedStringSnapshot, + standardize_address, truncate_str, Aggregator, AggregatorSnapshot, DerivedStringSnapshot, }, }; use ahash::{AHashMap, AHashSet}; use anyhow::{Context, Result}; use aptos_protos::transaction::v1::{Event, WriteResource}; use bigdecimal::BigDecimal; +use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; use std::fmt::{self, Formatter}; @@ -29,11 +29,14 @@ pub const TOKEN_V2_ADDR: &str = pub const DEFAULT_OWNER_ADDRESS: &str = "unknown"; +lazy_static! { + pub static ref V2_STANDARD: String = TokenStandard::V2.to_string(); +} + /// Tracks all token related data in a hashmap for quick access (keyed on address of the object core) -/// Maps address to burn event (new). The event is None if it's an old burn event. -pub type TokenV2Burned = AHashMap>; +/// Maps address to burn event. If it's an old event previous_owner will be empty +pub type TokenV2Burned = AHashMap; pub type TokenV2Minted = AHashSet; -pub type TokenV2MintedPK = (CurrentObjectPK, i64); /// Tracks which token standard a token / collection is built upon #[derive(Serialize)] @@ -87,7 +90,7 @@ impl AptosCollection { write_resource: &WriteResource, txn_version: i64, ) -> anyhow::Result> { - let type_str = MoveResource::get_outer_type_from_resource(write_resource); + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); if !V2TokenResource::is_resource_supported(type_str.as_str()) { return Ok(None); } @@ -134,7 +137,7 @@ impl TokenV2 { write_resource: &WriteResource, txn_version: i64, ) -> anyhow::Result> { - let type_str = MoveResource::get_outer_type_from_resource(write_resource); + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); if !V2TokenResource::is_resource_supported(type_str.as_str()) { return Ok(None); } @@ -191,7 +194,7 @@ impl FixedSupply { write_resource: &WriteResource, txn_version: i64, ) -> anyhow::Result> { - let type_str = MoveResource::get_outer_type_from_resource(write_resource); + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); if !V2TokenResource::is_resource_supported(type_str.as_str()) { return Ok(None); } @@ -225,7 +228,7 @@ impl UnlimitedSupply { write_resource: &WriteResource, txn_version: i64, ) -> anyhow::Result> { - let type_str = MoveResource::get_outer_type_from_resource(write_resource); + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); if !V2TokenResource::is_resource_supported(type_str.as_str()) { return Ok(None); } @@ -248,8 +251,8 @@ impl UnlimitedSupply { #[derive(Serialize, Deserialize, Debug, Clone)] pub struct ConcurrentSupply { - pub current_supply: AggregatorU64, - pub total_minted: AggregatorU64, + pub current_supply: Aggregator, + pub total_minted: Aggregator, } impl ConcurrentSupply { @@ -257,7 +260,7 @@ impl ConcurrentSupply { write_resource: &WriteResource, txn_version: i64, ) -> anyhow::Result> { - let type_str = MoveResource::get_outer_type_from_resource(write_resource); + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); if !V2TokenResource::is_resource_supported(type_str.as_str()) { return Ok(None); } @@ -305,7 +308,7 @@ impl 
MintEvent { #[derive(Serialize, Deserialize, Debug, Clone)] pub struct Mint { collection: String, - pub index: AggregatorSnapshotU64, + pub index: AggregatorSnapshot, token: String, } @@ -348,13 +351,19 @@ impl BurnEvent { #[derive(Serialize, Deserialize, Debug, Clone)] pub struct Burn { collection: String, - #[serde(deserialize_with = "deserialize_from_string")] - index: BigDecimal, token: String, previous_owner: String, } impl Burn { + pub fn new(collection: String, token: String, previous_owner: String) -> Self { + Burn { + collection, + token, + previous_owner, + } + } + pub fn from_event(event: &Event, txn_version: i64) -> anyhow::Result> { if let Some(V2TokenEvent::Burn(inner)) = V2TokenEvent::from_event(event.type_str.as_str(), &event.data, txn_version).unwrap() @@ -369,8 +378,16 @@ impl Burn { standardize_address(&self.token) } - pub fn get_previous_owner_address(&self) -> String { - standardize_address(&self.previous_owner) + pub fn get_previous_owner_address(&self) -> Option { + if self.previous_owner.is_empty() { + None + } else { + Some(standardize_address(&self.previous_owner)) + } + } + + pub fn get_collection_address(&self) -> String { + standardize_address(&self.collection) } } @@ -417,7 +434,7 @@ impl PropertyMapModel { write_resource: &WriteResource, txn_version: i64, ) -> anyhow::Result> { - let type_str = MoveResource::get_outer_type_from_resource(write_resource); + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); if !V2TokenResource::is_resource_supported(type_str.as_str()) { return Ok(None); } @@ -448,7 +465,7 @@ impl TokenIdentifiers { write_resource: &WriteResource, txn_version: i64, ) -> anyhow::Result> { - let type_str = MoveResource::get_outer_type_from_resource(write_resource); + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); if !V2TokenResource::is_resource_supported(type_str.as_str()) { return Ok(None); } @@ -481,6 +498,7 @@ pub enum V2TokenResource { FixedSupply(FixedSupply), ObjectCore(ObjectCore), UnlimitedSupply(UnlimitedSupply), + Untransferable(Untransferable), TokenV2(TokenV2), PropertyMapModel(PropertyMapModel), TokenIdentifiers(TokenIdentifiers), @@ -490,6 +508,7 @@ impl V2TokenResource { pub fn is_resource_supported(data_type: &str) -> bool { [ format!("{}::object::ObjectCore", COIN_ADDR), + format!("{}::object::Untransferable", COIN_ADDR), format!("{}::collection::Collection", TOKEN_V2_ADDR), format!("{}::collection::ConcurrentSupply", TOKEN_V2_ADDR), format!("{}::collection::FixedSupply", TOKEN_V2_ADDR), @@ -511,6 +530,9 @@ impl V2TokenResource { x if x == format!("{}::object::ObjectCore", COIN_ADDR) => { serde_json::from_value(data.clone()).map(|inner| Some(Self::ObjectCore(inner))) }, + x if x == format!("{}::object::Untransferable", COIN_ADDR) => { + serde_json::from_value(data.clone()).map(|inner| Some(Self::Untransferable(inner))) + }, x if x == format!("{}::collection::Collection", TOKEN_V2_ADDR) => { serde_json::from_value(data.clone()).map(|inner| Some(Self::Collection(inner))) }, diff --git a/rust/processor/src/models/transaction_metadata_model/event_size_info.rs b/rust/processor/src/db/common/models/transaction_metadata_model/event_size_info.rs similarity index 100% rename from rust/processor/src/models/transaction_metadata_model/event_size_info.rs rename to rust/processor/src/db/common/models/transaction_metadata_model/event_size_info.rs diff --git a/rust/processor/src/models/transaction_metadata_model/mod.rs 
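A hedged usage sketch of the reworked Burn wrapper above: burn events emitted before previous_owner existed can be folded into the same TokenV2Burned map by constructing a Burn with an empty previous owner, and get_previous_owner_address() then yields None for them instead of a fabricated address. The addresses below are placeholders.

// Assumes the Burn type shown in the diff above is in scope.
fn example_burn_handling() {
    // Legacy burn event: no previous owner recorded on-chain.
    let legacy = Burn::new(
        "0x1234".to_string(), // collection
        "0xabcd".to_string(), // token
        String::new(),        // empty => previous owner unknown
    );
    assert_eq!(legacy.get_previous_owner_address(), None);

    // Post-upgrade burn event carries the previous owner.
    let modern = Burn::new(
        "0x1234".to_string(),
        "0xabcd".to_string(),
        "0x9999".to_string(),
    );
    assert!(modern.get_previous_owner_address().is_some());
}

Both shapes end up in the single TokenV2Burned map keyed on the token object address, which is why the separate Option wrapper and the (address, index) key could be dropped.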
b/rust/processor/src/db/common/models/transaction_metadata_model/mod.rs similarity index 100% rename from rust/processor/src/models/transaction_metadata_model/mod.rs rename to rust/processor/src/db/common/models/transaction_metadata_model/mod.rs diff --git a/rust/processor/src/models/transaction_metadata_model/transaction_size_info.rs b/rust/processor/src/db/common/models/transaction_metadata_model/transaction_size_info.rs similarity index 100% rename from rust/processor/src/models/transaction_metadata_model/transaction_size_info.rs rename to rust/processor/src/db/common/models/transaction_metadata_model/transaction_size_info.rs diff --git a/rust/processor/src/models/transaction_metadata_model/write_set_size_info.rs b/rust/processor/src/db/common/models/transaction_metadata_model/write_set_size_info.rs similarity index 100% rename from rust/processor/src/models/transaction_metadata_model/write_set_size_info.rs rename to rust/processor/src/db/common/models/transaction_metadata_model/write_set_size_info.rs diff --git a/rust/processor/src/models/user_transactions_models/mod.rs b/rust/processor/src/db/common/models/user_transactions_models/mod.rs similarity index 100% rename from rust/processor/src/models/user_transactions_models/mod.rs rename to rust/processor/src/db/common/models/user_transactions_models/mod.rs diff --git a/rust/processor/src/models/user_transactions_models/signatures.rs b/rust/processor/src/db/common/models/user_transactions_models/signatures.rs similarity index 82% rename from rust/processor/src/models/user_transactions_models/signatures.rs rename to rust/processor/src/db/common/models/user_transactions_models/signatures.rs index fe2e3a5f2..0ff2ea1c1 100644 --- a/rust/processor/src/models/user_transactions_models/signatures.rs +++ b/rust/processor/src/db/common/models/user_transactions_models/signatures.rs @@ -9,10 +9,11 @@ use crate::{ }; use anyhow::{Context, Result}; use aptos_protos::transaction::v1::{ - account_signature::Signature as AccountSignatureEnum, any_signature::SignatureVariant, - signature::Signature as SignatureEnum, AccountSignature as ProtoAccountSignature, - Ed25519Signature as Ed25519SignaturePB, FeePayerSignature as ProtoFeePayerSignature, - MultiAgentSignature as ProtoMultiAgentSignature, + account_signature::Signature as AccountSignatureEnum, + any_signature::{SignatureVariant, Type as AnySignatureTypeEnumPb}, + signature::Signature as SignatureEnum, + AccountSignature as ProtoAccountSignature, Ed25519Signature as Ed25519SignaturePB, + FeePayerSignature as ProtoFeePayerSignature, MultiAgentSignature as ProtoMultiAgentSignature, MultiEd25519Signature as MultiEd25519SignaturePb, MultiKeySignature as MultiKeySignaturePb, Signature as TransactionSignaturePb, SingleKeySignature as SingleKeySignaturePb, SingleSender as SingleSenderPb, @@ -326,6 +327,7 @@ impl Signature { } } + #[allow(deprecated)] fn parse_single_key_signature( s: &SingleKeySignaturePb, sender: &String, @@ -338,9 +340,33 @@ impl Signature { let signer = standardize_address(override_address.unwrap_or(sender)); let signature = s.signature.as_ref().unwrap(); let signature_bytes = - Self::get_any_signature_bytes(&signature.signature_variant, transaction_version); - let type_ = - Self::get_any_signature_type(&signature.signature_variant, true, transaction_version); + Self::get_any_signature_bytes(&signature.signature_variant, transaction_version) + // old way of getting signature bytes prior to node 1.10 + .unwrap_or(signature.signature.clone()); + let type_ = if let Some(t) = + 
Self::get_any_signature_type(&signature.signature_variant, true, transaction_version) + { + t + } else { + // old way of getting signature type prior to node 1.10 + match AnySignatureTypeEnumPb::try_from(signature.r#type) { + Ok(AnySignatureTypeEnumPb::Ed25519) => String::from("single_key_ed25519_signature"), + Ok(AnySignatureTypeEnumPb::Secp256k1Ecdsa) => { + String::from("single_key_secp256k1_ecdsa_signature") + }, + wildcard => { + tracing::warn!( + transaction_version = transaction_version, + "Unspecified signature type or un-recognized type is not supported: {:?}", + wildcard + ); + PROCESSOR_UNKNOWN_TYPE_COUNT + .with_label_values(&["unspecified_signature_type"]) + .inc(); + "".to_string() + }, + } + }; Self { transaction_version, transaction_block_height, @@ -359,6 +385,7 @@ impl Signature { } } + #[allow(deprecated)] fn parse_multi_key_signature( s: &MultiKeySignaturePb, sender: &String, @@ -385,12 +412,39 @@ impl Signature { let signature_bytes = Self::get_any_signature_bytes( &signature.signature.as_ref().unwrap().signature_variant, transaction_version, - ); - let type_ = Self::get_any_signature_type( + ) + // old way of getting signature bytes prior to node 1.10 + .unwrap_or(signature.signature.as_ref().unwrap().signature.clone()); + + let type_ = if let Some(t) = Self::get_any_signature_type( &signature.signature.as_ref().unwrap().signature_variant, false, transaction_version, - ); + ) { + t + } else { + // old way of getting signature type prior to node 1.10 + match AnySignatureTypeEnumPb::try_from(signature.signature.as_ref().unwrap().r#type) + { + Ok(AnySignatureTypeEnumPb::Ed25519) => { + String::from("multi_key_ed25519_signature") + }, + Ok(AnySignatureTypeEnumPb::Secp256k1Ecdsa) => { + String::from("multi_key_secp256k1_ecdsa_signature") + }, + wildcard => { + tracing::warn!( + transaction_version = transaction_version, + "Unspecified signature type or un-recognized type is not supported: {:?}", + wildcard + ); + PROCESSOR_UNKNOWN_TYPE_COUNT + .with_label_values(&["unspecified_signature_type"]) + .inc(); + "unknown".to_string() + }, + } + }; signatures.push(Self { transaction_version, transaction_block_height, @@ -418,12 +472,12 @@ impl Signature { fn get_any_signature_bytes( signature_variant: &Option, transaction_version: i64, - ) -> Vec { + ) -> Option> { match signature_variant { - Some(SignatureVariant::Ed25519(sig)) => sig.signature.clone(), - Some(SignatureVariant::Keyless(sig)) => sig.signature.clone(), - Some(SignatureVariant::Webauthn(sig)) => sig.signature.clone(), - Some(SignatureVariant::Secp256k1Ecdsa(sig)) => sig.signature.clone(), + Some(SignatureVariant::Ed25519(sig)) => Some(sig.signature.clone()), + Some(SignatureVariant::Keyless(sig)) => Some(sig.signature.clone()), + Some(SignatureVariant::Webauthn(sig)) => Some(sig.signature.clone()), + Some(SignatureVariant::Secp256k1Ecdsa(sig)) => Some(sig.signature.clone()), None => { PROCESSOR_UNKNOWN_TYPE_COUNT .with_label_values(&["SignatureVariant"]) @@ -432,7 +486,7 @@ impl Signature { transaction_version = transaction_version, "Signature variant doesn't exist", ); - 0u8.to_be_bytes().to_vec() + None }, } } @@ -441,18 +495,18 @@ impl Signature { signature_variant: &Option, is_single_sender: bool, transaction_version: i64, - ) -> String { + ) -> Option { let prefix = if is_single_sender { "single_sender" } else { "multi_key" }; match signature_variant { - Some(SignatureVariant::Ed25519(_)) => format!("{}_ed25519_signature", prefix), - Some(SignatureVariant::Keyless(_)) => format!("{}_keyless_signature", 
prefix), - Some(SignatureVariant::Webauthn(_)) => format!("{}_webauthn_signature", prefix), + Some(SignatureVariant::Ed25519(_)) => Some(format!("{}_ed25519_signature", prefix)), + Some(SignatureVariant::Keyless(_)) => Some(format!("{}_keyless_signature", prefix)), + Some(SignatureVariant::Webauthn(_)) => Some(format!("{}_webauthn_signature", prefix)), Some(SignatureVariant::Secp256k1Ecdsa(_)) => { - format!("{}_secp256k1_ecdsa_signature", prefix) + Some(format!("{}_secp256k1_ecdsa_signature", prefix)) }, None => { PROCESSOR_UNKNOWN_TYPE_COUNT @@ -462,7 +516,7 @@ impl Signature { transaction_version = transaction_version, "Signature variant doesn't exist", ); - "unknown".to_string() + None }, } } diff --git a/rust/processor/src/models/user_transactions_models/user_transactions.rs b/rust/processor/src/db/common/models/user_transactions_models/user_transactions.rs similarity index 100% rename from rust/processor/src/models/user_transactions_models/user_transactions.rs rename to rust/processor/src/db/common/models/user_transactions_models/user_transactions.rs diff --git a/rust/processor/src/db/mod.rs b/rust/processor/src/db/mod.rs new file mode 100644 index 000000000..34994bf5a --- /dev/null +++ b/rust/processor/src/db/mod.rs @@ -0,0 +1 @@ +pub mod common; diff --git a/rust/processor/src/db/postgres/diesel.toml b/rust/processor/src/db/postgres/diesel.toml new file mode 100644 index 000000000..9a59970bb --- /dev/null +++ b/rust/processor/src/db/postgres/diesel.toml @@ -0,0 +1,8 @@ +# For documentation on how to configure this file, +# see https://diesel.rs/guides/configuring-diesel-cli + +[print_schema] +file = "schema.rs" + +[migrations_directory] +dir = "migrations" diff --git a/rust/processor/migrations/.keep b/rust/processor/src/db/postgres/migrations/.keep similarity index 100% rename from rust/processor/migrations/.keep rename to rust/processor/src/db/postgres/migrations/.keep diff --git a/rust/processor/migrations/00000000000000_diesel_initial_setup/down.sql b/rust/processor/src/db/postgres/migrations/00000000000000_diesel_initial_setup/down.sql similarity index 100% rename from rust/processor/migrations/00000000000000_diesel_initial_setup/down.sql rename to rust/processor/src/db/postgres/migrations/00000000000000_diesel_initial_setup/down.sql diff --git a/rust/processor/migrations/00000000000000_diesel_initial_setup/up.sql b/rust/processor/src/db/postgres/migrations/00000000000000_diesel_initial_setup/up.sql similarity index 100% rename from rust/processor/migrations/00000000000000_diesel_initial_setup/up.sql rename to rust/processor/src/db/postgres/migrations/00000000000000_diesel_initial_setup/up.sql diff --git a/rust/processor/migrations/2022-08-08-043603_core_tables/down.sql b/rust/processor/src/db/postgres/migrations/2022-08-08-043603_core_tables/down.sql similarity index 100% rename from rust/processor/migrations/2022-08-08-043603_core_tables/down.sql rename to rust/processor/src/db/postgres/migrations/2022-08-08-043603_core_tables/down.sql diff --git a/rust/processor/migrations/2022-08-08-043603_core_tables/up.sql b/rust/processor/src/db/postgres/migrations/2022-08-08-043603_core_tables/up.sql similarity index 99% rename from rust/processor/migrations/2022-08-08-043603_core_tables/up.sql rename to rust/processor/src/db/postgres/migrations/2022-08-08-043603_core_tables/up.sql index 9bcdd9c79..7bc2c6caa 100644 --- a/rust/processor/migrations/2022-08-08-043603_core_tables/up.sql +++ b/rust/processor/src/db/postgres/migrations/2022-08-08-043603_core_tables/up.sql @@ -302,4 +302,4 @@ 
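A hedged sketch of the fallback flow introduced in signatures.rs above: the Option-returning helpers cover the signature_variant field populated by node 1.10+, and a None result sends the processor down the deprecated pre-1.10 path (raw signature bytes plus the old r#type enum). The standalone types and function below are illustrative stand-ins for the protobuf structs; only the two-step resolution order comes from the diff, and multi-key signatures follow the same shape with a "multi_key_" prefix.

// Hypothetical shapes standing in for the protobuf fields used above.
struct AnySig {
    // Populated by node 1.10+; None on transactions from older nodes.
    variant_type: Option<String>,
    // Deprecated pre-1.10 field, still read as a fallback.
    legacy_type: Option<&'static str>,
}

fn resolve_single_key_type(sig: &AnySig) -> String {
    if let Some(t) = sig.variant_type.clone() {
        // New path: derived from SignatureVariant (ed25519, keyless, webauthn, secp256k1_ecdsa).
        t
    } else {
        // Old path: map the deprecated enum, falling back to a sentinel for unknown values.
        match sig.legacy_type {
            Some("ed25519") => "single_key_ed25519_signature".to_string(),
            Some("secp256k1_ecdsa") => "single_key_secp256k1_ecdsa_signature".to_string(),
            _ => "".to_string(),
        }
    }
}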
CREATE TABLE table_metadatas ( inserted_at TIMESTAMP NOT NULL DEFAULT NOW() ); CREATE INDEX tm_insat_index ON table_metadatas (inserted_at); -CREATE TABLE ledger_infos (chain_id BIGINT UNIQUE PRIMARY KEY NOT NULL); \ No newline at end of file +CREATE TABLE ledger_infos (chain_id BIGINT UNIQUE PRIMARY KEY NOT NULL); diff --git a/rust/processor/migrations/2022-09-04-194128_add_token_data/down.sql b/rust/processor/src/db/postgres/migrations/2022-09-04-194128_add_token_data/down.sql similarity index 100% rename from rust/processor/migrations/2022-09-04-194128_add_token_data/down.sql rename to rust/processor/src/db/postgres/migrations/2022-09-04-194128_add_token_data/down.sql diff --git a/rust/processor/migrations/2022-09-04-194128_add_token_data/up.sql b/rust/processor/src/db/postgres/migrations/2022-09-04-194128_add_token_data/up.sql similarity index 100% rename from rust/processor/migrations/2022-09-04-194128_add_token_data/up.sql rename to rust/processor/src/db/postgres/migrations/2022-09-04-194128_add_token_data/up.sql diff --git a/rust/processor/migrations/2022-09-20-055651_add_current_token_data/down.sql b/rust/processor/src/db/postgres/migrations/2022-09-20-055651_add_current_token_data/down.sql similarity index 100% rename from rust/processor/migrations/2022-09-20-055651_add_current_token_data/down.sql rename to rust/processor/src/db/postgres/migrations/2022-09-20-055651_add_current_token_data/down.sql diff --git a/rust/processor/migrations/2022-09-20-055651_add_current_token_data/up.sql b/rust/processor/src/db/postgres/migrations/2022-09-20-055651_add_current_token_data/up.sql similarity index 100% rename from rust/processor/migrations/2022-09-20-055651_add_current_token_data/up.sql rename to rust/processor/src/db/postgres/migrations/2022-09-20-055651_add_current_token_data/up.sql diff --git a/rust/processor/migrations/2022-09-22-185845_token_offers/down.sql b/rust/processor/src/db/postgres/migrations/2022-09-22-185845_token_offers/down.sql similarity index 100% rename from rust/processor/migrations/2022-09-22-185845_token_offers/down.sql rename to rust/processor/src/db/postgres/migrations/2022-09-22-185845_token_offers/down.sql diff --git a/rust/processor/migrations/2022-09-22-185845_token_offers/up.sql b/rust/processor/src/db/postgres/migrations/2022-09-22-185845_token_offers/up.sql similarity index 100% rename from rust/processor/migrations/2022-09-22-185845_token_offers/up.sql rename to rust/processor/src/db/postgres/migrations/2022-09-22-185845_token_offers/up.sql diff --git a/rust/processor/migrations/2022-10-02-011015_add_table_handle_to_collection/down.sql b/rust/processor/src/db/postgres/migrations/2022-10-02-011015_add_table_handle_to_collection/down.sql similarity index 100% rename from rust/processor/migrations/2022-10-02-011015_add_table_handle_to_collection/down.sql rename to rust/processor/src/db/postgres/migrations/2022-10-02-011015_add_table_handle_to_collection/down.sql diff --git a/rust/processor/migrations/2022-10-02-011015_add_table_handle_to_collection/up.sql b/rust/processor/src/db/postgres/migrations/2022-10-02-011015_add_table_handle_to_collection/up.sql similarity index 100% rename from rust/processor/migrations/2022-10-02-011015_add_table_handle_to_collection/up.sql rename to rust/processor/src/db/postgres/migrations/2022-10-02-011015_add_table_handle_to_collection/up.sql diff --git a/rust/processor/migrations/2022-10-02-011020_ans_lookup_table/down.sql b/rust/processor/src/db/postgres/migrations/2022-10-02-011020_ans_lookup_table/down.sql similarity index 
100% rename from rust/processor/migrations/2022-10-02-011020_ans_lookup_table/down.sql rename to rust/processor/src/db/postgres/migrations/2022-10-02-011020_ans_lookup_table/down.sql diff --git a/rust/processor/migrations/2022-10-02-011020_ans_lookup_table/up.sql b/rust/processor/src/db/postgres/migrations/2022-10-02-011020_ans_lookup_table/up.sql similarity index 100% rename from rust/processor/migrations/2022-10-02-011020_ans_lookup_table/up.sql rename to rust/processor/src/db/postgres/migrations/2022-10-02-011020_ans_lookup_table/up.sql diff --git a/rust/processor/migrations/2022-10-04-073529_add_coin_tables/down.sql b/rust/processor/src/db/postgres/migrations/2022-10-04-073529_add_coin_tables/down.sql similarity index 100% rename from rust/processor/migrations/2022-10-04-073529_add_coin_tables/down.sql rename to rust/processor/src/db/postgres/migrations/2022-10-04-073529_add_coin_tables/down.sql diff --git a/rust/processor/migrations/2022-10-04-073529_add_coin_tables/up.sql b/rust/processor/src/db/postgres/migrations/2022-10-04-073529_add_coin_tables/up.sql similarity index 100% rename from rust/processor/migrations/2022-10-04-073529_add_coin_tables/up.sql rename to rust/processor/src/db/postgres/migrations/2022-10-04-073529_add_coin_tables/up.sql diff --git a/rust/processor/migrations/2022-10-06-193846_add_indexer_status/down.sql b/rust/processor/src/db/postgres/migrations/2022-10-06-193846_add_indexer_status/down.sql similarity index 100% rename from rust/processor/migrations/2022-10-06-193846_add_indexer_status/down.sql rename to rust/processor/src/db/postgres/migrations/2022-10-06-193846_add_indexer_status/down.sql diff --git a/rust/processor/migrations/2022-10-06-193846_add_indexer_status/up.sql b/rust/processor/src/db/postgres/migrations/2022-10-06-193846_add_indexer_status/up.sql similarity index 100% rename from rust/processor/migrations/2022-10-06-193846_add_indexer_status/up.sql rename to rust/processor/src/db/postgres/migrations/2022-10-06-193846_add_indexer_status/up.sql diff --git a/rust/processor/migrations/2022-10-07-231825_add_coin_supply/down.sql b/rust/processor/src/db/postgres/migrations/2022-10-07-231825_add_coin_supply/down.sql similarity index 100% rename from rust/processor/migrations/2022-10-07-231825_add_coin_supply/down.sql rename to rust/processor/src/db/postgres/migrations/2022-10-07-231825_add_coin_supply/down.sql diff --git a/rust/processor/migrations/2022-10-07-231825_add_coin_supply/up.sql b/rust/processor/src/db/postgres/migrations/2022-10-07-231825_add_coin_supply/up.sql similarity index 100% rename from rust/processor/migrations/2022-10-07-231825_add_coin_supply/up.sql rename to rust/processor/src/db/postgres/migrations/2022-10-07-231825_add_coin_supply/up.sql diff --git a/rust/processor/migrations/2022-10-15-185912_improve_processor_recovery/down.sql b/rust/processor/src/db/postgres/migrations/2022-10-15-185912_improve_processor_recovery/down.sql similarity index 100% rename from rust/processor/migrations/2022-10-15-185912_improve_processor_recovery/down.sql rename to rust/processor/src/db/postgres/migrations/2022-10-15-185912_improve_processor_recovery/down.sql diff --git a/rust/processor/migrations/2022-10-15-185912_improve_processor_recovery/up.sql b/rust/processor/src/db/postgres/migrations/2022-10-15-185912_improve_processor_recovery/up.sql similarity index 100% rename from rust/processor/migrations/2022-10-15-185912_improve_processor_recovery/up.sql rename to 
rust/processor/src/db/postgres/migrations/2022-10-15-185912_improve_processor_recovery/up.sql diff --git a/rust/processor/migrations/2022-10-21-055518_stake_to_voter/down.sql b/rust/processor/src/db/postgres/migrations/2022-10-21-055518_stake_to_voter/down.sql similarity index 100% rename from rust/processor/migrations/2022-10-21-055518_stake_to_voter/down.sql rename to rust/processor/src/db/postgres/migrations/2022-10-21-055518_stake_to_voter/down.sql diff --git a/rust/processor/migrations/2022-10-21-055518_stake_to_voter/up.sql b/rust/processor/src/db/postgres/migrations/2022-10-21-055518_stake_to_voter/up.sql similarity index 100% rename from rust/processor/migrations/2022-10-21-055518_stake_to_voter/up.sql rename to rust/processor/src/db/postgres/migrations/2022-10-21-055518_stake_to_voter/up.sql diff --git a/rust/processor/migrations/2022-10-30-053525_add_vote_data/down.sql b/rust/processor/src/db/postgres/migrations/2022-10-30-053525_add_vote_data/down.sql similarity index 100% rename from rust/processor/migrations/2022-10-30-053525_add_vote_data/down.sql rename to rust/processor/src/db/postgres/migrations/2022-10-30-053525_add_vote_data/down.sql diff --git a/rust/processor/migrations/2022-10-30-053525_add_vote_data/up.sql b/rust/processor/src/db/postgres/migrations/2022-10-30-053525_add_vote_data/up.sql similarity index 100% rename from rust/processor/migrations/2022-10-30-053525_add_vote_data/up.sql rename to rust/processor/src/db/postgres/migrations/2022-10-30-053525_add_vote_data/up.sql diff --git a/rust/processor/migrations/2022-12-29-222902_curr_table_items/down.sql b/rust/processor/src/db/postgres/migrations/2022-12-29-222902_curr_table_items/down.sql similarity index 100% rename from rust/processor/migrations/2022-12-29-222902_curr_table_items/down.sql rename to rust/processor/src/db/postgres/migrations/2022-12-29-222902_curr_table_items/down.sql diff --git a/rust/processor/migrations/2022-12-29-222902_curr_table_items/up.sql b/rust/processor/src/db/postgres/migrations/2022-12-29-222902_curr_table_items/up.sql similarity index 100% rename from rust/processor/migrations/2022-12-29-222902_curr_table_items/up.sql rename to rust/processor/src/db/postgres/migrations/2022-12-29-222902_curr_table_items/up.sql diff --git a/rust/processor/migrations/2023-02-15-070116_stake_delegation/down.sql b/rust/processor/src/db/postgres/migrations/2023-02-15-070116_stake_delegation/down.sql similarity index 100% rename from rust/processor/migrations/2023-02-15-070116_stake_delegation/down.sql rename to rust/processor/src/db/postgres/migrations/2023-02-15-070116_stake_delegation/down.sql diff --git a/rust/processor/migrations/2023-02-15-070116_stake_delegation/up.sql b/rust/processor/src/db/postgres/migrations/2023-02-15-070116_stake_delegation/up.sql similarity index 100% rename from rust/processor/migrations/2023-02-15-070116_stake_delegation/up.sql rename to rust/processor/src/db/postgres/migrations/2023-02-15-070116_stake_delegation/up.sql diff --git a/rust/processor/migrations/2023-03-08-205402_nft_points/down.sql b/rust/processor/src/db/postgres/migrations/2023-03-08-205402_nft_points/down.sql similarity index 100% rename from rust/processor/migrations/2023-03-08-205402_nft_points/down.sql rename to rust/processor/src/db/postgres/migrations/2023-03-08-205402_nft_points/down.sql diff --git a/rust/processor/migrations/2023-03-08-205402_nft_points/up.sql b/rust/processor/src/db/postgres/migrations/2023-03-08-205402_nft_points/up.sql similarity index 100% rename from 
rust/processor/migrations/2023-03-08-205402_nft_points/up.sql rename to rust/processor/src/db/postgres/migrations/2023-03-08-205402_nft_points/up.sql diff --git a/rust/processor/migrations/2023-04-02-032121_delegator_pools/down.sql b/rust/processor/src/db/postgres/migrations/2023-04-02-032121_delegator_pools/down.sql similarity index 100% rename from rust/processor/migrations/2023-04-02-032121_delegator_pools/down.sql rename to rust/processor/src/db/postgres/migrations/2023-04-02-032121_delegator_pools/down.sql diff --git a/rust/processor/migrations/2023-04-02-032121_delegator_pools/up.sql b/rust/processor/src/db/postgres/migrations/2023-04-02-032121_delegator_pools/up.sql similarity index 100% rename from rust/processor/migrations/2023-04-02-032121_delegator_pools/up.sql rename to rust/processor/src/db/postgres/migrations/2023-04-02-032121_delegator_pools/up.sql diff --git a/rust/processor/migrations/2023-04-14-033932_optimize_queries/down.sql b/rust/processor/src/db/postgres/migrations/2023-04-14-033932_optimize_queries/down.sql similarity index 100% rename from rust/processor/migrations/2023-04-14-033932_optimize_queries/down.sql rename to rust/processor/src/db/postgres/migrations/2023-04-14-033932_optimize_queries/down.sql diff --git a/rust/processor/migrations/2023-04-14-033932_optimize_queries/up.sql b/rust/processor/src/db/postgres/migrations/2023-04-14-033932_optimize_queries/up.sql similarity index 100% rename from rust/processor/migrations/2023-04-14-033932_optimize_queries/up.sql rename to rust/processor/src/db/postgres/migrations/2023-04-14-033932_optimize_queries/up.sql diff --git a/rust/processor/migrations/2023-04-27-233343_delegation_pool_balances/down.sql b/rust/processor/src/db/postgres/migrations/2023-04-27-233343_delegation_pool_balances/down.sql similarity index 100% rename from rust/processor/migrations/2023-04-27-233343_delegation_pool_balances/down.sql rename to rust/processor/src/db/postgres/migrations/2023-04-27-233343_delegation_pool_balances/down.sql diff --git a/rust/processor/migrations/2023-04-27-233343_delegation_pool_balances/up.sql b/rust/processor/src/db/postgres/migrations/2023-04-27-233343_delegation_pool_balances/up.sql similarity index 100% rename from rust/processor/migrations/2023-04-27-233343_delegation_pool_balances/up.sql rename to rust/processor/src/db/postgres/migrations/2023-04-27-233343_delegation_pool_balances/up.sql diff --git a/rust/processor/migrations/2023-04-28-053048_object_token_v2/down.sql b/rust/processor/src/db/postgres/migrations/2023-04-28-053048_object_token_v2/down.sql similarity index 100% rename from rust/processor/migrations/2023-04-28-053048_object_token_v2/down.sql rename to rust/processor/src/db/postgres/migrations/2023-04-28-053048_object_token_v2/down.sql diff --git a/rust/processor/migrations/2023-04-28-053048_object_token_v2/up.sql b/rust/processor/src/db/postgres/migrations/2023-04-28-053048_object_token_v2/up.sql similarity index 100% rename from rust/processor/migrations/2023-04-28-053048_object_token_v2/up.sql rename to rust/processor/src/db/postgres/migrations/2023-04-28-053048_object_token_v2/up.sql diff --git a/rust/processor/migrations/2023-05-17-010107_activities_v2/down.sql b/rust/processor/src/db/postgres/migrations/2023-05-17-010107_activities_v2/down.sql similarity index 100% rename from rust/processor/migrations/2023-05-17-010107_activities_v2/down.sql rename to rust/processor/src/db/postgres/migrations/2023-05-17-010107_activities_v2/down.sql diff --git 
a/rust/processor/migrations/2023-05-17-010107_activities_v2/up.sql b/rust/processor/src/db/postgres/migrations/2023-05-17-010107_activities_v2/up.sql similarity index 100% rename from rust/processor/migrations/2023-05-17-010107_activities_v2/up.sql rename to rust/processor/src/db/postgres/migrations/2023-05-17-010107_activities_v2/up.sql diff --git a/rust/processor/migrations/2023-05-22-234344_delegated_staking_improvements/down.sql b/rust/processor/src/db/postgres/migrations/2023-05-22-234344_delegated_staking_improvements/down.sql similarity index 100% rename from rust/processor/migrations/2023-05-22-234344_delegated_staking_improvements/down.sql rename to rust/processor/src/db/postgres/migrations/2023-05-22-234344_delegated_staking_improvements/down.sql diff --git a/rust/processor/migrations/2023-05-22-234344_delegated_staking_improvements/up.sql b/rust/processor/src/db/postgres/migrations/2023-05-22-234344_delegated_staking_improvements/up.sql similarity index 100% rename from rust/processor/migrations/2023-05-22-234344_delegated_staking_improvements/up.sql rename to rust/processor/src/db/postgres/migrations/2023-05-22-234344_delegated_staking_improvements/up.sql diff --git a/rust/processor/migrations/2023-05-24-052435_token_properties_v2/down.sql b/rust/processor/src/db/postgres/migrations/2023-05-24-052435_token_properties_v2/down.sql similarity index 100% rename from rust/processor/migrations/2023-05-24-052435_token_properties_v2/down.sql rename to rust/processor/src/db/postgres/migrations/2023-05-24-052435_token_properties_v2/down.sql diff --git a/rust/processor/migrations/2023-05-24-052435_token_properties_v2/up.sql b/rust/processor/src/db/postgres/migrations/2023-05-24-052435_token_properties_v2/up.sql similarity index 100% rename from rust/processor/migrations/2023-05-24-052435_token_properties_v2/up.sql rename to rust/processor/src/db/postgres/migrations/2023-05-24-052435_token_properties_v2/up.sql diff --git a/rust/processor/migrations/2023-07-06-042159_minor_optimizations/down.sql b/rust/processor/src/db/postgres/migrations/2023-07-06-042159_minor_optimizations/down.sql similarity index 100% rename from rust/processor/migrations/2023-07-06-042159_minor_optimizations/down.sql rename to rust/processor/src/db/postgres/migrations/2023-07-06-042159_minor_optimizations/down.sql diff --git a/rust/processor/migrations/2023-07-06-042159_minor_optimizations/up.sql b/rust/processor/src/db/postgres/migrations/2023-07-06-042159_minor_optimizations/up.sql similarity index 100% rename from rust/processor/migrations/2023-07-06-042159_minor_optimizations/up.sql rename to rust/processor/src/db/postgres/migrations/2023-07-06-042159_minor_optimizations/up.sql diff --git a/rust/processor/migrations/2023-07-13-060328_transactions_by_address/down.sql b/rust/processor/src/db/postgres/migrations/2023-07-13-060328_transactions_by_address/down.sql similarity index 100% rename from rust/processor/migrations/2023-07-13-060328_transactions_by_address/down.sql rename to rust/processor/src/db/postgres/migrations/2023-07-13-060328_transactions_by_address/down.sql diff --git a/rust/processor/migrations/2023-07-13-060328_transactions_by_address/up.sql b/rust/processor/src/db/postgres/migrations/2023-07-13-060328_transactions_by_address/up.sql similarity index 100% rename from rust/processor/migrations/2023-07-13-060328_transactions_by_address/up.sql rename to rust/processor/src/db/postgres/migrations/2023-07-13-060328_transactions_by_address/up.sql diff --git 
a/rust/processor/migrations/2023-07-28-053854_entry_function/down.sql b/rust/processor/src/db/postgres/migrations/2023-07-28-053854_entry_function/down.sql similarity index 100% rename from rust/processor/migrations/2023-07-28-053854_entry_function/down.sql rename to rust/processor/src/db/postgres/migrations/2023-07-28-053854_entry_function/down.sql diff --git a/rust/processor/migrations/2023-07-28-053854_entry_function/up.sql b/rust/processor/src/db/postgres/migrations/2023-07-28-053854_entry_function/up.sql similarity index 100% rename from rust/processor/migrations/2023-07-28-053854_entry_function/up.sql rename to rust/processor/src/db/postgres/migrations/2023-07-28-053854_entry_function/up.sql diff --git a/rust/processor/migrations/2023-08-01-042050_fungible_assets/down.sql b/rust/processor/src/db/postgres/migrations/2023-08-01-042050_fungible_assets/down.sql similarity index 100% rename from rust/processor/migrations/2023-08-01-042050_fungible_assets/down.sql rename to rust/processor/src/db/postgres/migrations/2023-08-01-042050_fungible_assets/down.sql diff --git a/rust/processor/migrations/2023-08-01-042050_fungible_assets/up.sql b/rust/processor/src/db/postgres/migrations/2023-08-01-042050_fungible_assets/up.sql similarity index 100% rename from rust/processor/migrations/2023-08-01-042050_fungible_assets/up.sql rename to rust/processor/src/db/postgres/migrations/2023-08-01-042050_fungible_assets/up.sql diff --git a/rust/processor/migrations/2023-08-12-190707_add_ans_is_primary/down.sql b/rust/processor/src/db/postgres/migrations/2023-08-12-190707_add_ans_is_primary/down.sql similarity index 100% rename from rust/processor/migrations/2023-08-12-190707_add_ans_is_primary/down.sql rename to rust/processor/src/db/postgres/migrations/2023-08-12-190707_add_ans_is_primary/down.sql diff --git a/rust/processor/migrations/2023-08-12-190707_add_ans_is_primary/up.sql b/rust/processor/src/db/postgres/migrations/2023-08-12-190707_add_ans_is_primary/up.sql similarity index 100% rename from rust/processor/migrations/2023-08-12-190707_add_ans_is_primary/up.sql rename to rust/processor/src/db/postgres/migrations/2023-08-12-190707_add_ans_is_primary/up.sql diff --git a/rust/processor/migrations/2023-08-14-235438_add_current_delegated_voter_table/down.sql b/rust/processor/src/db/postgres/migrations/2023-08-14-235438_add_current_delegated_voter_table/down.sql similarity index 100% rename from rust/processor/migrations/2023-08-14-235438_add_current_delegated_voter_table/down.sql rename to rust/processor/src/db/postgres/migrations/2023-08-14-235438_add_current_delegated_voter_table/down.sql diff --git a/rust/processor/migrations/2023-08-14-235438_add_current_delegated_voter_table/up.sql b/rust/processor/src/db/postgres/migrations/2023-08-14-235438_add_current_delegated_voter_table/up.sql similarity index 100% rename from rust/processor/migrations/2023-08-14-235438_add_current_delegated_voter_table/up.sql rename to rust/processor/src/db/postgres/migrations/2023-08-14-235438_add_current_delegated_voter_table/up.sql diff --git a/rust/processor/migrations/2023-08-22-232603_add_ans_view/down.sql b/rust/processor/src/db/postgres/migrations/2023-08-22-232603_add_ans_view/down.sql similarity index 100% rename from rust/processor/migrations/2023-08-22-232603_add_ans_view/down.sql rename to rust/processor/src/db/postgres/migrations/2023-08-22-232603_add_ans_view/down.sql diff --git a/rust/processor/migrations/2023-08-22-232603_add_ans_view/up.sql 
b/rust/processor/src/db/postgres/migrations/2023-08-22-232603_add_ans_view/up.sql similarity index 100% rename from rust/processor/migrations/2023-08-22-232603_add_ans_view/up.sql rename to rust/processor/src/db/postgres/migrations/2023-08-22-232603_add_ans_view/up.sql diff --git a/rust/processor/migrations/2023-08-23-192343_fix_ans_view/down.sql b/rust/processor/src/db/postgres/migrations/2023-08-23-192343_fix_ans_view/down.sql similarity index 100% rename from rust/processor/migrations/2023-08-23-192343_fix_ans_view/down.sql rename to rust/processor/src/db/postgres/migrations/2023-08-23-192343_fix_ans_view/down.sql diff --git a/rust/processor/migrations/2023-08-23-192343_fix_ans_view/up.sql b/rust/processor/src/db/postgres/migrations/2023-08-23-192343_fix_ans_view/up.sql similarity index 100% rename from rust/processor/migrations/2023-08-23-192343_fix_ans_view/up.sql rename to rust/processor/src/db/postgres/migrations/2023-08-23-192343_fix_ans_view/up.sql diff --git a/rust/processor/migrations/2023-09-01-231248_events_v2/down.sql b/rust/processor/src/db/postgres/migrations/2023-09-01-231248_events_v2/down.sql similarity index 100% rename from rust/processor/migrations/2023-09-01-231248_events_v2/down.sql rename to rust/processor/src/db/postgres/migrations/2023-09-01-231248_events_v2/down.sql diff --git a/rust/processor/migrations/2023-09-01-231248_events_v2/up.sql b/rust/processor/src/db/postgres/migrations/2023-09-01-231248_events_v2/up.sql similarity index 100% rename from rust/processor/migrations/2023-09-01-231248_events_v2/up.sql rename to rust/processor/src/db/postgres/migrations/2023-09-01-231248_events_v2/up.sql diff --git a/rust/processor/migrations/2023-09-07-175640_storage_refund/down.sql b/rust/processor/src/db/postgres/migrations/2023-09-07-175640_storage_refund/down.sql similarity index 100% rename from rust/processor/migrations/2023-09-07-175640_storage_refund/down.sql rename to rust/processor/src/db/postgres/migrations/2023-09-07-175640_storage_refund/down.sql diff --git a/rust/processor/migrations/2023-09-07-175640_storage_refund/up.sql b/rust/processor/src/db/postgres/migrations/2023-09-07-175640_storage_refund/up.sql similarity index 100% rename from rust/processor/migrations/2023-09-07-175640_storage_refund/up.sql rename to rust/processor/src/db/postgres/migrations/2023-09-07-175640_storage_refund/up.sql diff --git a/rust/processor/migrations/2023-09-11-164718_ut_remove_constraint/down.sql b/rust/processor/src/db/postgres/migrations/2023-09-11-164718_ut_remove_constraint/down.sql similarity index 100% rename from rust/processor/migrations/2023-09-11-164718_ut_remove_constraint/down.sql rename to rust/processor/src/db/postgres/migrations/2023-09-11-164718_ut_remove_constraint/down.sql diff --git a/rust/processor/migrations/2023-09-11-164718_ut_remove_constraint/up.sql b/rust/processor/src/db/postgres/migrations/2023-09-11-164718_ut_remove_constraint/up.sql similarity index 100% rename from rust/processor/migrations/2023-09-11-164718_ut_remove_constraint/up.sql rename to rust/processor/src/db/postgres/migrations/2023-09-11-164718_ut_remove_constraint/up.sql diff --git a/rust/processor/migrations/2023-09-22-161603_add_ans_v2/down.sql b/rust/processor/src/db/postgres/migrations/2023-09-22-161603_add_ans_v2/down.sql similarity index 100% rename from rust/processor/migrations/2023-09-22-161603_add_ans_v2/down.sql rename to rust/processor/src/db/postgres/migrations/2023-09-22-161603_add_ans_v2/down.sql diff --git 
a/rust/processor/migrations/2023-09-22-161603_add_ans_v2/up.sql b/rust/processor/src/db/postgres/migrations/2023-09-22-161603_add_ans_v2/up.sql similarity index 100% rename from rust/processor/migrations/2023-09-22-161603_add_ans_v2/up.sql rename to rust/processor/src/db/postgres/migrations/2023-09-22-161603_add_ans_v2/up.sql diff --git a/rust/processor/migrations/2023-09-28-210956_nft_metadata/down.sql b/rust/processor/src/db/postgres/migrations/2023-09-28-210956_nft_metadata/down.sql similarity index 100% rename from rust/processor/migrations/2023-09-28-210956_nft_metadata/down.sql rename to rust/processor/src/db/postgres/migrations/2023-09-28-210956_nft_metadata/down.sql diff --git a/rust/processor/migrations/2023-09-28-210956_nft_metadata/up.sql b/rust/processor/src/db/postgres/migrations/2023-09-28-210956_nft_metadata/up.sql similarity index 100% rename from rust/processor/migrations/2023-09-28-210956_nft_metadata/up.sql rename to rust/processor/src/db/postgres/migrations/2023-09-28-210956_nft_metadata/up.sql diff --git a/rust/processor/migrations/2023-10-16-002253_alter_the_pubkey_column_length/down.sql b/rust/processor/src/db/postgres/migrations/2023-10-16-002253_alter_the_pubkey_column_length/down.sql similarity index 100% rename from rust/processor/migrations/2023-10-16-002253_alter_the_pubkey_column_length/down.sql rename to rust/processor/src/db/postgres/migrations/2023-10-16-002253_alter_the_pubkey_column_length/down.sql diff --git a/rust/processor/migrations/2023-10-16-002253_alter_the_pubkey_column_length/up.sql b/rust/processor/src/db/postgres/migrations/2023-10-16-002253_alter_the_pubkey_column_length/up.sql similarity index 100% rename from rust/processor/migrations/2023-10-16-002253_alter_the_pubkey_column_length/up.sql rename to rust/processor/src/db/postgres/migrations/2023-10-16-002253_alter_the_pubkey_column_length/up.sql diff --git a/rust/processor/migrations/2023-10-27-030502_event_type/down.sql b/rust/processor/src/db/postgres/migrations/2023-10-27-030502_event_type/down.sql similarity index 100% rename from rust/processor/migrations/2023-10-27-030502_event_type/down.sql rename to rust/processor/src/db/postgres/migrations/2023-10-27-030502_event_type/down.sql diff --git a/rust/processor/migrations/2023-10-27-030502_event_type/up.sql b/rust/processor/src/db/postgres/migrations/2023-10-27-030502_event_type/up.sql similarity index 100% rename from rust/processor/migrations/2023-10-27-030502_event_type/up.sql rename to rust/processor/src/db/postgres/migrations/2023-10-27-030502_event_type/up.sql diff --git a/rust/processor/migrations/2023-11-09-234724_delegator_balances/down.sql b/rust/processor/src/db/postgres/migrations/2023-11-09-234724_delegator_balances/down.sql similarity index 100% rename from rust/processor/migrations/2023-11-09-234724_delegator_balances/down.sql rename to rust/processor/src/db/postgres/migrations/2023-11-09-234724_delegator_balances/down.sql diff --git a/rust/processor/migrations/2023-11-09-234724_delegator_balances/up.sql b/rust/processor/src/db/postgres/migrations/2023-11-09-234724_delegator_balances/up.sql similarity index 100% rename from rust/processor/migrations/2023-11-09-234724_delegator_balances/up.sql rename to rust/processor/src/db/postgres/migrations/2023-11-09-234724_delegator_balances/up.sql diff --git a/rust/processor/migrations/2023-12-15-221028_payload_type/down.sql b/rust/processor/src/db/postgres/migrations/2023-12-15-221028_payload_type/down.sql similarity index 100% rename from 
rust/processor/migrations/2023-12-15-221028_payload_type/down.sql rename to rust/processor/src/db/postgres/migrations/2023-12-15-221028_payload_type/down.sql diff --git a/rust/processor/migrations/2023-12-15-221028_payload_type/up.sql b/rust/processor/src/db/postgres/migrations/2023-12-15-221028_payload_type/up.sql similarity index 100% rename from rust/processor/migrations/2023-12-15-221028_payload_type/up.sql rename to rust/processor/src/db/postgres/migrations/2023-12-15-221028_payload_type/up.sql diff --git a/rust/processor/migrations/2023-12-16-233224_add_objects_model/down.sql b/rust/processor/src/db/postgres/migrations/2023-12-16-233224_add_objects_model/down.sql similarity index 100% rename from rust/processor/migrations/2023-12-16-233224_add_objects_model/down.sql rename to rust/processor/src/db/postgres/migrations/2023-12-16-233224_add_objects_model/down.sql diff --git a/rust/processor/migrations/2023-12-16-233224_add_objects_model/up.sql b/rust/processor/src/db/postgres/migrations/2023-12-16-233224_add_objects_model/up.sql similarity index 100% rename from rust/processor/migrations/2023-12-16-233224_add_objects_model/up.sql rename to rust/processor/src/db/postgres/migrations/2023-12-16-233224_add_objects_model/up.sql diff --git a/rust/processor/migrations/2024-01-11-224315_update_process_status/down.sql b/rust/processor/src/db/postgres/migrations/2024-01-11-224315_update_process_status/down.sql similarity index 100% rename from rust/processor/migrations/2024-01-11-224315_update_process_status/down.sql rename to rust/processor/src/db/postgres/migrations/2024-01-11-224315_update_process_status/down.sql diff --git a/rust/processor/migrations/2024-01-11-224315_update_process_status/up.sql b/rust/processor/src/db/postgres/migrations/2024-01-11-224315_update_process_status/up.sql similarity index 100% rename from rust/processor/migrations/2024-01-11-224315_update_process_status/up.sql rename to rust/processor/src/db/postgres/migrations/2024-01-11-224315_update_process_status/up.sql diff --git a/rust/processor/migrations/2024-02-16-234847_any_signature/down.sql b/rust/processor/src/db/postgres/migrations/2024-02-16-234847_any_signature/down.sql similarity index 100% rename from rust/processor/migrations/2024-02-16-234847_any_signature/down.sql rename to rust/processor/src/db/postgres/migrations/2024-02-16-234847_any_signature/down.sql diff --git a/rust/processor/migrations/2024-02-16-234847_any_signature/up.sql b/rust/processor/src/db/postgres/migrations/2024-02-16-234847_any_signature/up.sql similarity index 100% rename from rust/processor/migrations/2024-02-16-234847_any_signature/up.sql rename to rust/processor/src/db/postgres/migrations/2024-02-16-234847_any_signature/up.sql diff --git a/rust/processor/migrations/2024-02-29-210322_transaction_metadata/down.sql b/rust/processor/src/db/postgres/migrations/2024-02-29-210322_transaction_metadata/down.sql similarity index 100% rename from rust/processor/migrations/2024-02-29-210322_transaction_metadata/down.sql rename to rust/processor/src/db/postgres/migrations/2024-02-29-210322_transaction_metadata/down.sql diff --git a/rust/processor/migrations/2024-02-29-210322_transaction_metadata/up.sql b/rust/processor/src/db/postgres/migrations/2024-02-29-210322_transaction_metadata/up.sql similarity index 100% rename from rust/processor/migrations/2024-02-29-210322_transaction_metadata/up.sql rename to rust/processor/src/db/postgres/migrations/2024-02-29-210322_transaction_metadata/up.sql diff --git 
a/rust/processor/migrations/2024-03-07-224504_fungible_asset_metadata_is_token_v2/down.sql b/rust/processor/src/db/postgres/migrations/2024-03-07-224504_fungible_asset_metadata_is_token_v2/down.sql similarity index 100% rename from rust/processor/migrations/2024-03-07-224504_fungible_asset_metadata_is_token_v2/down.sql rename to rust/processor/src/db/postgres/migrations/2024-03-07-224504_fungible_asset_metadata_is_token_v2/down.sql diff --git a/rust/processor/migrations/2024-03-07-224504_fungible_asset_metadata_is_token_v2/up.sql b/rust/processor/src/db/postgres/migrations/2024-03-07-224504_fungible_asset_metadata_is_token_v2/up.sql similarity index 100% rename from rust/processor/migrations/2024-03-07-224504_fungible_asset_metadata_is_token_v2/up.sql rename to rust/processor/src/db/postgres/migrations/2024-03-07-224504_fungible_asset_metadata_is_token_v2/up.sql diff --git a/rust/processor/src/db/postgres/migrations/2024-04-09-204519_ans_expiration_policy/down.sql b/rust/processor/src/db/postgres/migrations/2024-04-09-204519_ans_expiration_policy/down.sql new file mode 100644 index 000000000..29d6c0618 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-04-09-204519_ans_expiration_policy/down.sql @@ -0,0 +1,40 @@ +-- This file should undo anything in `up.sql` +DROP VIEW IF EXISTS current_aptos_names; +CREATE OR REPLACE VIEW current_aptos_names AS +SELECT cal.domain, + cal.subdomain, + cal.token_name, + cal.token_standard, + cal.registered_address, + cal.expiration_timestamp, + greatest( + cal.last_transaction_version, + capn.last_transaction_version + ) as last_transaction_version, + coalesce(not capn.is_deleted, false) as is_primary, + concat(cal.domain, '.apt') as domain_with_suffix, + c.owner_address as owner_address, + cal.expiration_timestamp >= CURRENT_TIMESTAMP as is_active +FROM current_ans_lookup_v2 cal + LEFT JOIN current_ans_primary_name_v2 capn ON cal.token_name = capn.token_name + AND cal.token_standard = capn.token_standard + JOIN current_token_datas_v2 b ON cal.token_name = b.token_name + AND cal.token_standard = b.token_standard + JOIN current_token_ownerships_v2 c ON b.token_data_id = c.token_data_id + AND b.token_standard = c.token_standard +WHERE cal.is_deleted IS false + AND c.amount > 0 + AND b.collection_id IN ( + '0x1c380887f0cfcc8a82c0df44b24116985a92c58e686a0ea4a441c9f423a72b47', + -- Testnet ANS v1 domain collection + '0x56654f4bf4e528bfef33094d11a3475f0638e949b0976ec831ca0d66a2efb673', + -- Testnet ANS v2 domain collection + '0x3a2c902067bb4f0e37a2a89675d5cbceb07cf1a27479229b269fb1afffa62230', + -- Testnet ANS v2 subdomain collection + '0x09e63a48047b1c2bc51c0abc4b67ffcd9922e0adc99a6cc36532662172976a4b', + -- Mainnet ANS v1 domain collection + '0x63d26a4e3a8aeececf9b878e46bad78997fb38e50936efeabb2c4453f4d7f746', + -- Mainnet ANS v2 domain collection + '0x63d26a4e3a8aeececf9b878e46bad78997fb38e50936efeabb2c4453f4d7f746' -- Mainnet ANS v2 subdomain collection + ); +ALTER TABLE current_ans_lookup_v2 DROP COLUMN IF EXISTS subdomain_expiration_policy; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-04-09-204519_ans_expiration_policy/up.sql b/rust/processor/src/db/postgres/migrations/2024-04-09-204519_ans_expiration_policy/up.sql new file mode 100644 index 000000000..7933cc972 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-04-09-204519_ans_expiration_policy/up.sql @@ -0,0 +1,47 @@ +ALTER TABLE current_ans_lookup_v2 +ADD COLUMN IF NOT EXISTS subdomain_expiration_policy BIGINT; +ALTER TABLE 
ans_lookup_v2 +ADD COLUMN IF NOT EXISTS subdomain_expiration_policy BIGINT; +CREATE OR REPLACE VIEW current_aptos_names AS +SELECT cal.domain, + cal.subdomain, + cal.token_name, + cal.token_standard, + cal.registered_address, + cal.expiration_timestamp, + greatest( + cal.last_transaction_version, + capn.last_transaction_version + ) as last_transaction_version, + coalesce(not capn.is_deleted, false) as is_primary, + concat(cal.domain, '.apt') as domain_with_suffix, + c.owner_address as owner_address, + cal.expiration_timestamp >= CURRENT_TIMESTAMP as is_active, + cal2.expiration_timestamp as domain_expiration_timestamp, + b.token_data_id as token_data_id, + cal.subdomain_expiration_policy as subdomain_expiration_policy +FROM current_ans_lookup_v2 cal + LEFT JOIN current_ans_primary_name_v2 capn ON cal.token_name = capn.token_name + AND cal.token_standard = capn.token_standard + JOIN current_token_datas_v2 b ON cal.token_name = b.token_name + AND cal.token_standard = b.token_standard + JOIN current_token_ownerships_v2 c ON b.token_data_id = c.token_data_id + AND b.token_standard = c.token_standard + LEFT JOIN current_ans_lookup_v2 cal2 ON cal.domain = cal2.domain + AND cal2.subdomain = '' + AND cal.token_standard = cal2.token_standard +WHERE cal.is_deleted IS false + AND c.amount > 0 + AND b.collection_id IN ( + '0x1c380887f0cfcc8a82c0df44b24116985a92c58e686a0ea4a441c9f423a72b47', + -- Testnet ANS v1 domain collection + '0x56654f4bf4e528bfef33094d11a3475f0638e949b0976ec831ca0d66a2efb673', + -- Testnet ANS v2 domain collection + '0x3a2c902067bb4f0e37a2a89675d5cbceb07cf1a27479229b269fb1afffa62230', + -- Testnet ANS v2 subdomain collection + '0x09e63a48047b1c2bc51c0abc4b67ffcd9922e0adc99a6cc36532662172976a4b', + -- Mainnet ANS v1 domain collection + '0x63d26a4e3a8aeececf9b878e46bad78997fb38e50936efeabb2c4453f4d7f746', + -- Mainnet ANS v2 domain collection + '0x30fbc956f0f38db2d314bd9c018d34be3e047a804a71e30a4e5d43d8b7c539eb' -- Mainnet ANS v2 subdomain collection + ); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-04-18-173631_fungible_token/down.sql b/rust/processor/src/db/postgres/migrations/2024-04-18-173631_fungible_token/down.sql new file mode 100644 index 000000000..a97ee07d9 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-04-18-173631_fungible_token/down.sql @@ -0,0 +1,4 @@ +-- This file should undo anything in `up.sql` +ALTER TABLE fungible_asset_metadata +DROP COLUMN supply_v2, +DROP COLUMN maximum_v2; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-04-18-173631_fungible_token/up.sql b/rust/processor/src/db/postgres/migrations/2024-04-18-173631_fungible_token/up.sql new file mode 100644 index 000000000..186d5fa15 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-04-18-173631_fungible_token/up.sql @@ -0,0 +1,12 @@ +-- Your SQL goes here +ALTER TABLE fungible_asset_metadata +ADD COLUMN supply_v2 NUMERIC, +ADD COLUMN maximum_v2 NUMERIC; + +ALTER TABLE current_token_datas_v2 +ALTER COLUMN supply DROP NOT NULL, +ALTER COLUMN decimals DROP NOT NULL; + +ALTER TABLE token_datas_v2 +ALTER COLUMN supply DROP NOT NULL, +ALTER COLUMN decimals DROP NOT NULL; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-04-29-215042_token_datas_burn/down.sql b/rust/processor/src/db/postgres/migrations/2024-04-29-215042_token_datas_burn/down.sql new file mode 100644 index 000000000..cd15c8946 --- /dev/null +++ 
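A hedged example of reading the recreated current_aptos_names view above: the self-join on the parent domain row (subdomain = '') surfaces domain_expiration_timestamp next to each name, alongside the new subdomain_expiration_policy column. The domain value is a placeholder.

SELECT domain,
       subdomain,
       registered_address,
       expiration_timestamp,
       domain_expiration_timestamp,
       subdomain_expiration_policy
FROM current_aptos_names
WHERE is_active = true
  AND domain = 'example';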
b/rust/processor/src/db/postgres/migrations/2024-04-29-215042_token_datas_burn/down.sql @@ -0,0 +1,2 @@ +-- This file should undo anything in `up.sql` +ALTER TABLE IF EXISTS current_token_datas_v2 DROP COLUMN is_deleted_v2; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-04-29-215042_token_datas_burn/up.sql b/rust/processor/src/db/postgres/migrations/2024-04-29-215042_token_datas_burn/up.sql new file mode 100644 index 000000000..f7fac2f87 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-04-29-215042_token_datas_burn/up.sql @@ -0,0 +1,3 @@ +-- Your SQL goes here +ALTER TABLE current_token_datas_v2 +ADD COLUMN IF NOT EXISTS is_deleted_v2 BOOLEAN; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-05-04-025823_current_unified_fungible_asset_balance/down.sql b/rust/processor/src/db/postgres/migrations/2024-05-04-025823_current_unified_fungible_asset_balance/down.sql new file mode 100644 index 000000000..425c1d6da --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-05-04-025823_current_unified_fungible_asset_balance/down.sql @@ -0,0 +1,4 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS current_unified_fungible_asset_balances; +DROP INDEX IF EXISTS cufab_owner_at_index; +DROP INDEX IF EXISTS cufab_insat_index; diff --git a/rust/processor/src/db/postgres/migrations/2024-05-04-025823_current_unified_fungible_asset_balance/up.sql b/rust/processor/src/db/postgres/migrations/2024-05-04-025823_current_unified_fungible_asset_balance/up.sql new file mode 100644 index 000000000..073cf8014 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-05-04-025823_current_unified_fungible_asset_balance/up.sql @@ -0,0 +1,21 @@ +-- current fungible asset balances +CREATE TABLE IF NOT EXISTS current_unified_fungible_asset_balances ( + storage_id VARCHAR(66) PRIMARY KEY NOT NULL, + owner_address VARCHAR(66) NOT NULL, + asset_type VARCHAR(66) NOT NULL, + coin_type VARCHAR(1000), + is_primary BOOLEAN, + is_frozen BOOLEAN NOT NULL, + amount_v1 NUMERIC, + amount_v2 NUMERIC, + amount NUMERIC GENERATED ALWAYS AS (COALESCE(amount_v1, 0) + COALESCE(amount_v2, 0)) STORED, + last_transaction_version_v1 BIGINT, + last_transaction_version_v2 BIGINT, + last_transaction_version BIGINT GENERATED ALWAYS AS (GREATEST(last_transaction_version_v1, last_transaction_version_v2)) STORED, + last_transaction_timestamp_v1 TIMESTAMP, + last_transaction_timestamp_v2 TIMESTAMP, + last_transaction_timestamp TIMESTAMP GENERATED ALWAYS AS (GREATEST(last_transaction_timestamp_v1, last_transaction_timestamp_v2)) STORED, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW() +); +CREATE INDEX IF NOT EXISTS cufab_owner_at_index ON current_unified_fungible_asset_balances (owner_address, asset_type); +CREATE INDEX IF NOT EXISTS cufab_insat_index ON current_unified_fungible_asset_balances (inserted_at); diff --git a/rust/processor/src/db/postgres/migrations/2024-05-17-215042_token_datas_burn_2/down.sql b/rust/processor/src/db/postgres/migrations/2024-05-17-215042_token_datas_burn_2/down.sql new file mode 100644 index 000000000..6ec036407 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-05-17-215042_token_datas_burn_2/down.sql @@ -0,0 +1,2 @@ +-- This file should undo anything in `up.sql` +ALTER TABLE IF EXISTS token_datas_v2 DROP COLUMN is_deleted_v2; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-05-17-215042_token_datas_burn_2/up.sql 
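A hedged illustration of the generated columns in current_unified_fungible_asset_balances above: Postgres derives amount, last_transaction_version and last_transaction_timestamp from the v1/v2 halves, so writers only populate the per-standard columns. The inserted values are made up.

INSERT INTO current_unified_fungible_asset_balances
    (storage_id, owner_address, asset_type, is_frozen, amount_v1, amount_v2)
VALUES
    ('0x11', '0x22', '0x1::aptos_coin::AptosCoin', false, 100, NULL);

-- amount is COALESCE(100, 0) + COALESCE(NULL, 0) = 100, computed by the database.
SELECT storage_id, amount, last_transaction_version
FROM current_unified_fungible_asset_balances
WHERE owner_address = '0x22';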
b/rust/processor/src/db/postgres/migrations/2024-05-17-215042_token_datas_burn_2/up.sql new file mode 100644 index 000000000..266d4375c --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-05-17-215042_token_datas_burn_2/up.sql @@ -0,0 +1,3 @@ +-- Your SQL goes here +ALTER TABLE token_datas_v2 +ADD COLUMN IF NOT EXISTS is_deleted_v2 BOOLEAN; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-05-21-221101_add_royalty_v1/down.sql b/rust/processor/src/db/postgres/migrations/2024-05-21-221101_add_royalty_v1/down.sql new file mode 100644 index 000000000..dadb0f6d6 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-05-21-221101_add_royalty_v1/down.sql @@ -0,0 +1,2 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS current_token_royalty_v1; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-05-21-221101_add_royalty_v1/up.sql b/rust/processor/src/db/postgres/migrations/2024-05-21-221101_add_royalty_v1/up.sql new file mode 100644 index 000000000..d7b57adc9 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-05-21-221101_add_royalty_v1/up.sql @@ -0,0 +1,11 @@ +-- Your SQL goes here +-- This'll only work with royalty v1 because royalty_v2 requires collection id +CREATE TABLE IF NOT EXISTS current_token_royalty_v1 ( + token_data_id VARCHAR(66) UNIQUE PRIMARY KEY NOT NULL, + payee_address VARCHAR(66) NOT NULL, + royalty_points_numerator NUMERIC NOT NULL, + royalty_points_denominator NUMERIC NOT NULL, + last_transaction_version BIGINT NOT NULL, + last_transaction_timestamp TIMESTAMP NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW() +); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-05-22-200847_add_v1_migration_views/down.sql b/rust/processor/src/db/postgres/migrations/2024-05-22-200847_add_v1_migration_views/down.sql new file mode 100644 index 000000000..af3ba489d --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-05-22-200847_add_v1_migration_views/down.sql @@ -0,0 +1,30 @@ +-- This file should undo anything in `up.sql` +DROP VIEW IF EXISTS legacy_migration_v1.move_resources; +DROP VIEW IF EXISTS legacy_migration_v1.address_version_from_move_resources; +DROP VIEW IF EXISTS legacy_migration_v1.coin_activities; +DROP VIEW IF EXISTS legacy_migration_v1.coin_balances; +DROP VIEW IF EXISTS legacy_migration_v1.coin_infos; +DROP VIEW IF EXISTS legacy_migration_v1.current_coin_balances; +DROP VIEW IF EXISTS legacy_migration_v1.token_activities; +DROP VIEW IF EXISTS legacy_migration_v1.token_ownerships; +DROP VIEW IF EXISTS legacy_migration_v1.current_token_ownerships; +DROP VIEW IF EXISTS legacy_migration_v1.tokens; +DROP VIEW IF EXISTS legacy_migration_v1.token_datas; +DROP VIEW IF EXISTS legacy_migration_v1.current_token_datas; +DROP VIEW IF EXISTS legacy_migration_v1.collection_datas; +DROP VIEW IF EXISTS legacy_migration_v1.current_ans_primary_name; +DROP VIEW IF EXISTS legacy_migration_v1.current_ans_lookup; +DROP INDEX IF EXISTS lm1_cv_ci_tv_index; +DROP INDEX IF EXISTS lm1_ta_tdih_pv_index; +DROP INDEX IF EXISTS lm1_cb_tv_oa_ct_index; +DROP INDEX IF EXISTS lm1_curr_to_oa_tt_ltv_index; +DROP INDEX IF EXISTS lm1_ccb_ct_a_index; +DROP INDEX IF EXISTS lm1_tdv_tdi_tv_index; +DROP INDEX IF EXISTS lm1_curr_to_oa_tt_am_ltv_index; +DROP INDEX IF EXISTS lm1_ca_ct_a_index; +DROP INDEX IF EXISTS lm1_ca_ct_at_a_index; +DROP INDEX IF EXISTS lm1_ca_oa_ct_at_index; +DROP INDEX IF EXISTS lm1_ca_oa_igf_index; +DROP 
INDEX IF EXISTS lm1_ans_d_s_et_index; +DROP INDEX IF EXISTS lm1_ans_ra_et_index; +DROP SCHEMA IF EXISTS legacy_migration_v1; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-05-22-200847_add_v1_migration_views/up.sql b/rust/processor/src/db/postgres/migrations/2024-05-22-200847_add_v1_migration_views/up.sql new file mode 100644 index 000000000..4c8864c1c --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-05-22-200847_add_v1_migration_views/up.sql @@ -0,0 +1,285 @@ +-- Your SQL goes here +-- Create the schema +CREATE SCHEMA IF NOT EXISTS legacy_migration_v1; +-- Replace `move_resources` with account transactions +CREATE OR REPLACE VIEW legacy_migration_v1.move_resources AS +SELECT transaction_version, + account_address as address +FROM account_transactions at2; +CREATE OR REPLACE VIEW legacy_migration_v1.address_version_from_move_resources AS +SELECT transaction_version, + account_address as address +FROM account_transactions at2; +-- replace `coin_activities` with `fungible_asset_activities` +CREATE OR REPLACE VIEW legacy_migration_v1.coin_activities AS +SElECT transaction_version, + owner_address as event_account_address, + -- these two below are mildly concerning + 0 as event_creation_number, + 0 as event_sequence_number, + owner_address, + asset_type AS coin_type, + amount, + "type" AS activity_type, + is_gas_fee, + is_transaction_success, + entry_function_id_str, + block_height, + transaction_timestamp, + inserted_at, + event_index, + gas_fee_payer_address, + storage_refund_amount +FROM public.fungible_asset_activities +WHERE token_standard = 'v1'; +-- replace `coin_balances` with `fungible_asset_balances` +CREATE OR REPLACE VIEW legacy_migration_v1.coin_balances AS +SELECT transaction_version, + owner_address, + -- this is mainly for hashing the coin type for primary key + encode(sha256(asset_type::bytea), 'hex') as coin_type_hash, + asset_type as coin_type, + amount, + transaction_timestamp, + inserted_at +FROM public.fungible_asset_balances +WHERE token_standard = 'v1'; +-- replace `coin_infos` with `fungible_asset_metadata` +CREATE OR REPLACE VIEW legacy_migration_v1.coin_infos AS +SELECT encode(sha256(asset_type::bytea), 'hex') as coin_type_hash, + asset_type as coin_type, + last_transaction_version as transaction_version_created, + creator_address, + name, + symbol, + decimals, + last_transaction_timestamp as transaction_created_timestamp, + inserted_at, + supply_aggregator_table_handle_v1 as supply_aggregator_table_handle, + supply_aggregator_table_key_v1 as supply_aggregator_table_key +FROM public.fungible_asset_metadata +WHERE token_standard = 'v1'; +-- replace `current_coin_balances` with `current_fungible_asset_balances` +CREATE OR REPLACE VIEW legacy_migration_v1.current_coin_balances AS +SELECT owner_address, + encode(sha256(asset_type::bytea), 'hex') as coin_type_hash, + asset_type as coin_type, + amount, + last_transaction_version, + last_transaction_timestamp, + inserted_at +FROM public.current_fungible_asset_balances +WHERE token_standard = 'v1'; +-- replace `token_activities` with `token_activities_v2` +-- token_activities_v2.token_data_id is 0x prefixed, but token_activities.token_data_id is not. 
We need to create an index on the substring +CREATE OR REPLACE VIEW legacy_migration_v1.token_activities AS +SELECT tav.transaction_version, + event_account_address, + -- These were only used for hashing pk in v1 table + 0 as event_creation_number, + 0 as event_sequence_number, + tdv.collection_id as collection_data_id_hash, + ltrim(tav.token_data_id, '0x') as token_data_id_hash, + property_version_v1 AS property_version, + cv.creator_address, + cv.collection_name, + tdv.token_name AS "name", + "type" AS transfer_type, + from_address, + to_address, + token_amount, + -- These are not columns in v2 + NULL AS coin_type, + NULL AS coin_amount, + tav.inserted_at, + tav.transaction_timestamp, + event_index +FROM public.token_activities_v2 tav + JOIN token_datas_v2 tdv ON tav.token_data_id = tdv.token_data_id + AND tav.transaction_version = tdv.transaction_version + JOIN collections_v2 cv ON tdv.collection_id = cv.collection_id + AND tdv.transaction_version = cv.transaction_version +WHERE tav.token_standard = 'v1'; +-- replace `token_ownerships` with `token_ownerships_v2` +CREATE OR REPLACE VIEW legacy_migration_v1.token_ownerships AS +SELECT tov.token_data_id AS token_data_id_hash, + property_version_v1 AS property_version, + tov.transaction_version, + -- this is a bit concerning + '' AS table_handle, + creator_address, + collection_name, + tdv.token_name AS name, + owner_address, + amount, + table_type_v1 AS table_type, + tov.inserted_at, + tdv.collection_id AS collection_data_id_hash, + tov.transaction_timestamp +FROM public.token_ownerships_v2 tov + JOIN public.token_datas_v2 tdv ON tov.token_data_id = tdv.token_data_id + AND tov.transaction_version = tdv.transaction_version + JOIN public.collections_v2 cv ON tdv.collection_id = cv.collection_id + AND tdv.transaction_version = cv.transaction_version +WHERE tov.token_standard = 'v1'; +-- replace `current_token_ownerships` with `current_token_ownerships_v2` +CREATE OR REPLACE VIEW legacy_migration_v1.current_token_ownerships AS +SELECT ctov.token_data_id AS token_data_id_hash, + ctov.property_version_v1 AS property_version, + ctov.owner_address, + ccv.creator_address, + ccv.collection_name, + ctdv.token_name AS "name", + ctov.amount, + ctov.token_properties_mutated_v1 AS token_properties, + ctov.last_transaction_version, + ctov.inserted_at, + ctdv.collection_id AS collection_data_id_hash, + ctov.table_type_v1 AS table_type, + ctov.last_transaction_timestamp +FROM current_token_ownerships_v2 ctov + JOIN current_token_datas_v2 ctdv ON ctov.token_data_id = ctdv.token_data_id + JOIN current_collections_v2 ccv ON ctdv.collection_id = ccv.collection_id +WHERE ctov.token_standard = 'v1'; +-- replace `tokens` with `current_token_datas_v2` +CREATE OR REPLACE VIEW legacy_migration_v1.tokens AS +SELECT tdv.token_data_id AS token_data_id_hash, + tdv.largest_property_version_v1 AS property_version, + tdv.transaction_version, + ccv.creator_address, + ccv.collection_name, + tdv.token_name AS "name", + tdv.token_properties, + tdv.inserted_at, + tdv.collection_id AS collection_data_id_hash, + tdv.transaction_timestamp +FROM token_datas_v2 tdv + JOIN current_collections_v2 ccv ON tdv.collection_id = ccv.collection_id +WHERE tdv.token_standard = 'v1'; +-- replace `token_datas` with `token_datas_v2` +CREATE OR REPLACE VIEW legacy_migration_v1.token_datas AS +SELECT token_data_id AS token_data_id_hash, + tdv.transaction_version, + creator_address, + collection_name, + token_name AS "name", + maximum, + supply, + largest_property_version_v1 AS 
largest_property_version, + token_uri AS metadata_uri, + -- Null b/c we're not tracking royalty on transaction level + '' as payee_address, + null as royalty_points_numerator, + null as royalty_points_denominator, + -- Validated this is fine, since most are true anyway + TRUE AS maximum_mutable, + TRUE AS uri_mutable, + TRUE AS description_mutable, + TRUE AS properties_mutable, + TRUE AS royalty_mutable, + token_properties AS default_properties, + tdv.inserted_at, + tdv.collection_id AS collection_data_id_hash, + tdv.transaction_timestamp, + tdv.description +FROM token_datas_v2 tdv + JOIN collections_v2 cv ON tdv.collection_id = cv.collection_id + AND tdv.transaction_version = cv.transaction_version +WHERE tdv.token_standard = 'v1'; +-- replace `current_token_datas` with `current_token_datas_v2` +CREATE OR REPLACE VIEW legacy_migration_v1.current_token_datas AS +SELECT ctdv.token_data_id AS token_data_id_hash, + creator_address, + collection_name, + token_name AS "name", + COALESCE(maximum, 0) AS maximum, + COALESCE(supply, 0) AS supply, + largest_property_version_v1 AS largest_property_version, + token_uri AS metadata_uri, + COALESCE(payee_address, '') as payee_address, + royalty_points_numerator, + royalty_points_denominator, + -- Validated this is fine, since most are true anyway + TRUE AS maximum_mutable, + TRUE AS uri_mutable, + TRUE AS description_mutable, + TRUE AS properties_mutable, + TRUE AS royalty_mutable, + token_properties AS default_properties, + ctdv.last_transaction_version, + ctdv.inserted_at, + ctdv.collection_id AS collection_data_id_hash, + ctdv.last_transaction_timestamp, + ctdv."description" AS "description" +FROM current_token_datas_v2 ctdv + JOIN current_collections_v2 ccv ON ctdv.collection_id = ccv.collection_id + LEFT JOIN current_token_royalty_v1 ctrv on ctdv.token_data_id = ctrv.token_data_id +WHERE ctdv.token_standard = 'v1'; +-- replace `collection_datas` with `collection_v2` +CREATE OR REPLACE VIEW legacy_migration_v1.collection_datas AS +SELECT collection_id AS collection_data_id_hash, + transaction_version, + creator_address, + collection_name, + description, + uri AS metadata_uri, + current_supply AS supply, + max_supply AS maximum, + -- Validated this is fine, since most are true anyway + TRUE AS maximum_mutable, + TRUE AS uri_mutable, + TRUE AS description_mutable, + inserted_at, + table_handle_v1 AS table_handle, + transaction_timestamp +FROM collections_v2 +WHERE token_standard = 'v1'; +-- replace `current_ans_primary_name` with `current_ans_primary_name_v2` +CREATE OR REPLACE VIEW legacy_migration_v1.current_ans_primary_name AS +SELECT registered_address, + domain, + subdomain, + token_name, + is_deleted, + last_transaction_version, + 0 AS last_transaction_timestamp +FROM current_ans_primary_name_v2 +WHERE token_standard = 'v1'; +-- replace `current_ans_lookup` with `current_ans_lookup_v2` +CREATE OR REPLACE VIEW legacy_migration_v1.current_ans_lookup AS +SELECT domain, + subdomain, + registered_address, + expiration_timestamp, + last_transaction_version, + inserted_at, + token_name, + is_deleted +FROM current_ans_lookup_v2 +WHERE token_standard = 'v1'; +----- +----- +----- +-- If you would like to run these indices, please do it outside of diesel migration since it will be blocking processing +-- CREATE INDEX CONCURRENTLY IF NOT EXISTS lm1_ca_ct_a_index ON public.fungible_asset_activities USING btree (asset_type, amount); +-- CREATE INDEX CONCURRENTLY IF NOT EXISTS lm1_ca_ct_at_a_index ON public.fungible_asset_activities USING btree (asset_type, 
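The legacy_migration_v1 views above exist so that consumers of the old v1 tables can keep their queries while the data now lives in the v2 tables. A hedged example of reading one of them from Rust with diesel's sql_query; the LegacyCoinActivity struct and its column subset are illustrative, and a synchronous PgConnection is used for brevity even though the processor itself runs on async pools:

use diesel::{
    sql_query,
    sql_types::{BigInt, Text},
    PgConnection, QueryableByName, RunQueryDsl,
};

#[derive(QueryableByName, Debug)]
struct LegacyCoinActivity {
    #[diesel(sql_type = BigInt)]
    transaction_version: i64,
    #[diesel(sql_type = Text)]
    coin_type: String,
}

// The view maps fungible_asset_activities rows with token_standard = 'v1'
// back into the old coin_activities shape.
fn latest_coin_activities(conn: &mut PgConnection) -> diesel::QueryResult<Vec<LegacyCoinActivity>> {
    sql_query(
        "SELECT transaction_version, coin_type \
         FROM legacy_migration_v1.coin_activities \
         ORDER BY transaction_version DESC LIMIT 10",
    )
    .load(conn)
}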
"type", amount); +-- CREATE INDEX CONCURRENTLY IF NOT EXISTS lm1_ca_oa_ct_at_index ON public.fungible_asset_activities USING btree (owner_address, asset_type, "type", amount); +-- CREATE INDEX CONCURRENTLY IF NOT EXISTS lm1_ca_oa_igf_index ON public.fungible_asset_activities USING btree (owner_address, is_gas_fee); +-- CREATE INDEX CONCURRENTLY IF NOT EXISTS lm1_cb_tv_oa_ct_index ON public.fungible_asset_balances USING btree (transaction_version, owner_address, asset_type); +-- CREATE INDEX CONCURRENTLY IF NOT EXISTS lm1_ccb_ct_a_index ON public.current_fungible_asset_balances USING btree (asset_type, amount); +-- CREATE INDEX CONCURRENTLY IF NOT EXISTS lm1_tdv_tdi_tv_index ON public.token_datas_v2 USING btree (token_data_id, transaction_version); +-- CREATE INDEX CONCURRENTLY IF NOT EXISTS lm1_cv_ci_tv_index ON public.collections_v2 USING btree (collection_id, transaction_version); +-- CREATE INDEX CONCURRENTLY IF NOT EXISTS lm1_ta_tdih_pv_index ON public.token_activities_v2 USING btree (token_data_id, property_version_v1); +-- CREATE INDEX CONCURRENTLY IF NOT EXISTS lm1_ans_d_s_et_index ON public.current_ans_lookup_v2 USING btree (domain, subdomain, expiration_timestamp); +-- CREATE INDEX CONCURRENTLY IF NOT EXISTS lm1_ans_ra_et_index ON public.current_ans_lookup_v2 USING btree (registered_address, expiration_timestamp); +-- CREATE INDEX CONCURRENTLY IF NOT EXISTS lm1_curr_to_oa_tt_am_ltv_index ON current_token_ownerships_v2 USING btree ( +-- owner_address, +-- table_type_v1, +-- amount, +-- last_transaction_version DESC +-- ); +-- CREATE INDEX CONCURRENTLY IF NOT EXISTS lm1_curr_to_oa_tt_ltv_index ON current_token_ownerships_v2 USING btree ( +-- owner_address, +-- table_type_v1, +-- last_transaction_version DESC +-- ); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-06-13-061711_untransferrable/down.sql b/rust/processor/src/db/postgres/migrations/2024-06-13-061711_untransferrable/down.sql new file mode 100644 index 000000000..ed4fe2e46 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-06-13-061711_untransferrable/down.sql @@ -0,0 +1,3 @@ +-- This file should undo anything in `up.sql` +ALTER TABLE public.current_objects DROP COLUMN IF EXISTS untransferrable; +ALTER TABLE public.objects DROP COLUMN IF EXISTS untransferrable; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-06-13-061711_untransferrable/up.sql b/rust/processor/src/db/postgres/migrations/2024-06-13-061711_untransferrable/up.sql new file mode 100644 index 000000000..77d13bd34 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-06-13-061711_untransferrable/up.sql @@ -0,0 +1,5 @@ +-- Your SQL goes here +ALTER TABLE public.current_objects +ADD COLUMN IF NOT EXISTS untransferrable BOOLEAN NOT NULL DEFAULT FALSE; +ALTER TABLE public.objects +ADD COLUMN IF NOT EXISTS untransferrable BOOLEAN NOT NULL DEFAULT FALSE; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-06-13-065302_current_unified_fungible_asset_balance_edit/down.sql b/rust/processor/src/db/postgres/migrations/2024-06-13-065302_current_unified_fungible_asset_balance_edit/down.sql new file mode 100644 index 000000000..671e83e4d --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-06-13-065302_current_unified_fungible_asset_balance_edit/down.sql @@ -0,0 +1,13 @@ +-- This file should undo anything in `up.sql` +ALTER TABLE current_unified_fungible_asset_balances_to_be_renamed + RENAME TO 
current_unified_fungible_asset_balances; +DROP INDEX IF EXISTS cufab_owner_at_index; +ALTER TABLE current_unified_fungible_asset_balances DROP COLUMN asset_type; +ALTER TABLE current_unified_fungible_asset_balances + RENAME COLUMN asset_type_v2 TO asset_type; +ALTER TABLE current_unified_fungible_asset_balances + RENAME COLUMN asset_type_v1 TO coin_type; +ALTER TABLE current_unified_fungible_asset_balances +ALTER COLUMN asset_type +SET NOT NULL; +CREATE INDEX IF NOT EXISTS cufab_owner_at_index ON current_unified_fungible_asset_balances (owner_address, asset_type); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-06-13-065302_current_unified_fungible_asset_balance_edit/up.sql b/rust/processor/src/db/postgres/migrations/2024-06-13-065302_current_unified_fungible_asset_balance_edit/up.sql new file mode 100644 index 000000000..6aea4a3cc --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-06-13-065302_current_unified_fungible_asset_balance_edit/up.sql @@ -0,0 +1,15 @@ +-- Your SQL goes here +-- Rename asset_type and coin_type to v1 and v2, and make a generated asset_type to be v2 if exists, else v1. +DROP INDEX IF EXISTS cufab_owner_at_index; +ALTER TABLE current_unified_fungible_asset_balances +ALTER COLUMN asset_type DROP NOT NULL; +ALTER TABLE current_unified_fungible_asset_balances + RENAME COLUMN asset_type TO asset_type_v2; +ALTER TABLE current_unified_fungible_asset_balances + RENAME COLUMN coin_type TO asset_type_v1; +ALTER TABLE current_unified_fungible_asset_balances +ADD COLUMN asset_type VARCHAR(1000) GENERATED ALWAYS AS (COALESCE(asset_type_v2, asset_type_v1)) STORED; +CREATE INDEX IF NOT EXISTS cufab_owner_at_index ON current_unified_fungible_asset_balances (owner_address, asset_type); +-- Rename table to set expectation that we'll rename this table to current_fungible_asset_balances after testing +ALTER TABLE current_unified_fungible_asset_balances + RENAME TO current_unified_fungible_asset_balances_to_be_renamed; \ No newline at end of file diff --git a/rust/processor/src/schema.rs b/rust/processor/src/db/postgres/schema.rs similarity index 94% rename from rust/processor/src/schema.rs rename to rust/processor/src/db/postgres/schema.rs index 5fb567a42..5e23ad755 100644 --- a/rust/processor/src/schema.rs +++ b/rust/processor/src/db/postgres/schema.rs @@ -44,6 +44,7 @@ diesel::table! { token_name -> Varchar, is_deleted -> Bool, inserted_at -> Timestamp, + subdomain_expiration_policy -> Nullable, } } @@ -263,6 +264,7 @@ diesel::table! { last_transaction_version -> Int8, is_deleted -> Bool, inserted_at -> Timestamp, + subdomain_expiration_policy -> Nullable, } } @@ -449,6 +451,7 @@ diesel::table! { last_transaction_version -> Int8, is_deleted -> Bool, inserted_at -> Timestamp, + untransferrable -> Bool, } } @@ -523,7 +526,7 @@ diesel::table! { #[max_length = 128] token_name -> Varchar, maximum -> Nullable, - supply -> Numeric, + supply -> Nullable, largest_property_version_v1 -> Nullable, #[max_length = 512] token_uri -> Varchar, @@ -535,7 +538,8 @@ diesel::table! { last_transaction_version -> Int8, last_transaction_timestamp -> Timestamp, inserted_at -> Timestamp, - decimals -> Int8, + decimals -> Nullable, + is_deleted_v2 -> Nullable, } } @@ -617,6 +621,20 @@ diesel::table! { } } +diesel::table! 
{ + current_token_royalty_v1 (token_data_id) { + #[max_length = 66] + token_data_id -> Varchar, + #[max_length = 66] + payee_address -> Varchar, + royalty_points_numerator -> Numeric, + royalty_points_denominator -> Numeric, + last_transaction_version -> Int8, + last_transaction_timestamp -> Timestamp, + inserted_at -> Timestamp, + } +} + diesel::table! { current_token_v2_metadata (object_address, resource_type) { #[max_length = 66] @@ -631,6 +649,33 @@ diesel::table! { } } +diesel::table! { + current_unified_fungible_asset_balances_to_be_renamed (storage_id) { + #[max_length = 66] + storage_id -> Varchar, + #[max_length = 66] + owner_address -> Varchar, + #[max_length = 66] + asset_type_v2 -> Nullable, + #[max_length = 1000] + asset_type_v1 -> Nullable, + is_primary -> Nullable, + is_frozen -> Bool, + amount_v1 -> Nullable, + amount_v2 -> Nullable, + amount -> Nullable, + last_transaction_version_v1 -> Nullable, + last_transaction_version_v2 -> Nullable, + last_transaction_version -> Nullable, + last_transaction_timestamp_v1 -> Nullable, + last_transaction_timestamp_v2 -> Nullable, + last_transaction_timestamp -> Nullable, + inserted_at -> Timestamp, + #[max_length = 1000] + asset_type -> Nullable, + } +} + diesel::table! { delegated_staking_activities (transaction_version, event_index) { transaction_version -> Int8, @@ -790,6 +835,8 @@ diesel::table! { token_standard -> Varchar, inserted_at -> Timestamp, is_token_v2 -> Nullable, + supply_v2 -> Nullable, + maximum_v2 -> Nullable, } } @@ -872,6 +919,7 @@ diesel::table! { allow_ungated_transfer -> Bool, is_deleted -> Bool, inserted_at -> Timestamp, + untransferrable -> Bool, } } @@ -1060,7 +1108,7 @@ diesel::table! { #[max_length = 128] token_name -> Varchar, maximum -> Nullable, - supply -> Numeric, + supply -> Nullable, largest_property_version_v1 -> Nullable, #[max_length = 512] token_uri -> Varchar, @@ -1071,7 +1119,8 @@ diesel::table! 
{ is_fungible_v2 -> Nullable, transaction_timestamp -> Timestamp, inserted_at -> Timestamp, - decimals -> Int8, + decimals -> Nullable, + is_deleted_v2 -> Nullable, } } @@ -1259,7 +1308,9 @@ diesel::allow_tables_to_appear_in_same_query!( current_token_ownerships, current_token_ownerships_v2, current_token_pending_claims, + current_token_royalty_v1, current_token_v2_metadata, + current_unified_fungible_asset_balances_to_be_renamed, delegated_staking_activities, delegated_staking_pool_balances, delegated_staking_pools, diff --git a/rust/processor/src/gap_detector.rs b/rust/processor/src/gap_detector.rs deleted file mode 100644 index 96f84b3cc..000000000 --- a/rust/processor/src/gap_detector.rs +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use crate::{ - processors::{ProcessingResult, Processor, ProcessorTrait}, - utils::counters::PROCESSOR_DATA_GAP_COUNT, - worker::PROCESSOR_SERVICE_TYPE, -}; -use ahash::AHashMap; -use kanal::AsyncReceiver; -use tracing::{error, info}; - -// Size of a gap (in txn version) before gap detected -pub const DEFAULT_GAP_DETECTION_BATCH_SIZE: u64 = 500; -// Number of seconds between each processor status update -const UPDATE_PROCESSOR_STATUS_SECS: u64 = 1; - -pub struct GapDetector { - next_version_to_process: u64, - seen_versions: AHashMap, - last_success_batch: Option, -} - -pub struct GapDetectorResult { - pub next_version_to_process: u64, - pub num_gaps: u64, - pub last_success_batch: Option, -} - -impl GapDetector { - pub fn new(starting_version: u64) -> Self { - Self { - next_version_to_process: starting_version, - seen_versions: AHashMap::new(), - last_success_batch: None, - } - } - - pub fn process_versions( - &mut self, - result: ProcessingResult, - ) -> anyhow::Result { - // Check for gaps - if self.next_version_to_process != result.start_version { - self.seen_versions.insert(result.start_version, result); - tracing::debug!("Gap detected"); - } else { - // If no gap is detected, find the latest processed batch without gaps - self.update_prev_batch(result); - tracing::debug!("No gap detected"); - } - - Ok(GapDetectorResult { - next_version_to_process: self.next_version_to_process, - num_gaps: self.seen_versions.len() as u64, - last_success_batch: self.last_success_batch.clone(), - }) - } - - fn update_prev_batch(&mut self, result: ProcessingResult) { - let mut new_prev_batch = result; - while let Some(next_version) = self.seen_versions.remove(&(new_prev_batch.end_version + 1)) - { - new_prev_batch = next_version; - } - self.next_version_to_process = new_prev_batch.end_version + 1; - self.last_success_batch = Some(new_prev_batch); - } -} - -pub async fn create_gap_detector_status_tracker_loop( - gap_detector_receiver: AsyncReceiver, - processor: Processor, - starting_version: u64, - gap_detection_batch_size: u64, -) { - let processor_name = processor.name(); - info!( - processor_name = processor_name, - service_type = PROCESSOR_SERVICE_TYPE, - "[Parser] Starting gap detector task", - ); - - let mut gap_detector = GapDetector::new(starting_version); - let mut last_update_time = std::time::Instant::now(); - - loop { - let result = match gap_detector_receiver.recv().await { - Ok(result) => result, - Err(e) => { - info!( - processor_name, - service_type = PROCESSOR_SERVICE_TYPE, - error = ?e, - "[Parser] Gap detector channel has been closed", - ); - return; - }, - }; - - match gap_detector.process_versions(result) { - Ok(res) => { - PROCESSOR_DATA_GAP_COUNT - .with_label_values(&[processor_name]) - 
.set(res.num_gaps as i64); - if res.num_gaps >= gap_detection_batch_size { - tracing::debug!( - processor_name, - gap_start_version = res.next_version_to_process, - num_gaps = res.num_gaps, - "[Parser] Processed {gap_detection_batch_size} batches with a gap", - ); - // We don't panic as everything downstream will panic if it doesn't work/receive - } - - if let Some(res_last_success_batch) = res.last_success_batch { - if last_update_time.elapsed().as_secs() >= UPDATE_PROCESSOR_STATUS_SECS { - processor - .update_last_processed_version( - res_last_success_batch.end_version, - res_last_success_batch.last_transaction_timestamp.clone(), - ) - .await - .unwrap(); - last_update_time = std::time::Instant::now(); - } - } - }, - Err(e) => { - error!( - processor_name, - service_type = PROCESSOR_SERVICE_TYPE, - error = ?e, - "[Parser] Gap detector task has panicked" - ); - panic!("[Parser] Gap detector task has panicked: {:?}", e); - }, - } - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[tokio::test] - async fn detect_gap_test() { - let starting_version = 0; - let mut gap_detector = GapDetector::new(starting_version); - - // Processing batches with gaps - for i in 0..DEFAULT_GAP_DETECTION_BATCH_SIZE { - let result = ProcessingResult { - start_version: 100 + i * 100, - end_version: 199 + i * 100, - last_transaction_timestamp: None, - processing_duration_in_secs: 0.0, - db_insertion_duration_in_secs: 0.0, - }; - let gap_detector_result = gap_detector.process_versions(result).unwrap(); - assert_eq!(gap_detector_result.num_gaps, i + 1); - assert_eq!(gap_detector_result.next_version_to_process, 0); - assert_eq!(gap_detector_result.last_success_batch, None); - } - - // Process a batch without a gap - let gap_detector_result = gap_detector - .process_versions(ProcessingResult { - start_version: 0, - end_version: 99, - last_transaction_timestamp: None, - processing_duration_in_secs: 0.0, - db_insertion_duration_in_secs: 0.0, - }) - .unwrap(); - assert_eq!(gap_detector_result.num_gaps, 0); - assert_eq!( - gap_detector_result.next_version_to_process, - 100 + (DEFAULT_GAP_DETECTION_BATCH_SIZE) * 100 - ); - assert_eq!( - gap_detector_result - .last_success_batch - .clone() - .unwrap() - .start_version, - 100 + (DEFAULT_GAP_DETECTION_BATCH_SIZE - 1) * 100 - ); - assert_eq!( - gap_detector_result.last_success_batch.unwrap().end_version, - 199 + (DEFAULT_GAP_DETECTION_BATCH_SIZE - 1) * 100 - ); - } -} diff --git a/rust/processor/src/gap_detectors/gap_detector.rs b/rust/processor/src/gap_detectors/gap_detector.rs new file mode 100644 index 000000000..c5707f798 --- /dev/null +++ b/rust/processor/src/gap_detectors/gap_detector.rs @@ -0,0 +1,144 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + gap_detectors::{GapDetectorResult, ProcessingResult}, + processors::DefaultProcessingResult, +}; +use ahash::AHashMap; + +pub trait GapDetectorTrait { + fn process_versions(&mut self, result: ProcessingResult) -> anyhow::Result; +} + +pub struct DefaultGapDetector { + next_version_to_process: u64, + seen_versions: AHashMap, + last_success_batch: Option, +} + +pub struct DefaultGapDetectorResult { + pub next_version_to_process: u64, + pub num_gaps: u64, + pub last_success_batch: Option, +} + +impl GapDetectorTrait for DefaultGapDetector { + fn process_versions(&mut self, result: ProcessingResult) -> anyhow::Result { + match result { + ProcessingResult::DefaultProcessingResult(result) => { + // Check for gaps + if self.next_version_to_process != result.start_version { + 
self.seen_versions.insert(result.start_version, result); + tracing::debug!("Gap detected"); + } else { + // If no gap is detected, find the latest processed batch without gaps + self.update_prev_batch(result); + tracing::debug!("No gap detected"); + } + + Ok(GapDetectorResult::DefaultGapDetectorResult( + DefaultGapDetectorResult { + next_version_to_process: self.next_version_to_process, + num_gaps: self.seen_versions.len() as u64, + last_success_batch: self.last_success_batch.clone(), + }, + )) + }, + _ => { + panic!("Invalid result type"); + }, + } + } +} + +impl DefaultGapDetector { + pub fn new(starting_version: u64) -> Self { + Self { + next_version_to_process: starting_version, + seen_versions: AHashMap::new(), + last_success_batch: None, + } + } + + fn update_prev_batch(&mut self, result: DefaultProcessingResult) { + let mut new_prev_batch = result; + while let Some(next_version) = self.seen_versions.remove(&(new_prev_batch.end_version + 1)) + { + new_prev_batch = next_version; + } + self.next_version_to_process = new_prev_batch.end_version + 1; + self.last_success_batch = Some(new_prev_batch); + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::gap_detectors::DEFAULT_GAP_DETECTION_BATCH_SIZE; + + #[tokio::test] + async fn detect_gap_test() { + let starting_version = 0; + let mut default_gap_detector = DefaultGapDetector::new(starting_version); + + // Processing batches with gaps + for i in 0..DEFAULT_GAP_DETECTION_BATCH_SIZE { + let result = DefaultProcessingResult { + start_version: 100 + i * 100, + end_version: 199 + i * 100, + last_transaction_timestamp: None, + processing_duration_in_secs: 0.0, + db_insertion_duration_in_secs: 0.0, + }; + let default_gap_detector_result = default_gap_detector + .process_versions(ProcessingResult::DefaultProcessingResult(result)) + .unwrap(); + let default_gap_detector_result = match default_gap_detector_result { + GapDetectorResult::DefaultGapDetectorResult(res) => res, + _ => panic!("Invalid result type"), + }; + + assert_eq!(default_gap_detector_result.num_gaps, i + 1); + assert_eq!(default_gap_detector_result.next_version_to_process, 0); + assert_eq!(default_gap_detector_result.last_success_batch, None); + } + + // Process a batch without a gap + let default_gap_detector_result = default_gap_detector + .process_versions(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version: 0, + end_version: 99, + last_transaction_timestamp: None, + processing_duration_in_secs: 0.0, + db_insertion_duration_in_secs: 0.0, + }, + )) + .unwrap(); + let default_gap_detector_result = match default_gap_detector_result { + GapDetectorResult::DefaultGapDetectorResult(res) => res, + _ => panic!("Invalid result type"), + }; + assert_eq!(default_gap_detector_result.num_gaps, 0); + assert_eq!( + default_gap_detector_result.next_version_to_process, + 100 + (DEFAULT_GAP_DETECTION_BATCH_SIZE) * 100 + ); + assert_eq!( + default_gap_detector_result + .last_success_batch + .clone() + .unwrap() + .start_version, + 100 + (DEFAULT_GAP_DETECTION_BATCH_SIZE - 1) * 100 + ); + assert_eq!( + default_gap_detector_result + .last_success_batch + .unwrap() + .end_version, + 199 + (DEFAULT_GAP_DETECTION_BATCH_SIZE - 1) * 100 + ); + } +} diff --git a/rust/processor/src/gap_detectors/mod.rs b/rust/processor/src/gap_detectors/mod.rs new file mode 100644 index 000000000..d0b35c621 --- /dev/null +++ b/rust/processor/src/gap_detectors/mod.rs @@ -0,0 +1,182 @@ +use crate::{ + bq_analytics::ParquetProcessingResult, + gap_detectors::{ + 
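The gap_detectors module that begins here moves the detector plumbing onto enum_dispatch: the GapDetector and GapDetectorResult enums declared just below get a generated GapDetectorTrait implementation that forwards to whichever variant they hold, avoiding a Box<dyn ...>. A minimal standalone sketch of that pattern with shortened illustrative names (Detect, SimpleDetector and ParquetDetector are not the processor's types):

use enum_dispatch::enum_dispatch;

// Registering the trait lets enum_dispatch generate forwarding impls for enums
// that opt in below.
#[enum_dispatch]
trait Detect {
    fn process(&mut self, version: u64) -> u64;
}

struct SimpleDetector { next: u64 }
struct ParquetDetector { next: u64 }

impl Detect for SimpleDetector {
    fn process(&mut self, version: u64) -> u64 {
        if version == self.next {
            self.next += 1;
        }
        self.next
    }
}

impl Detect for ParquetDetector {
    fn process(&mut self, version: u64) -> u64 {
        self.next = self.next.max(version + 1);
        self.next
    }
}

// Variant names must match the wrapped types; enum_dispatch generates both the
// From impls and an `impl Detect for AnyDetector` that matches on the variant.
#[enum_dispatch(Detect)]
enum AnyDetector {
    SimpleDetector,
    ParquetDetector,
}

fn main() {
    let mut detector: AnyDetector = SimpleDetector { next: 0 }.into();
    assert_eq!(detector.process(0), 1);
}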
gap_detector::{DefaultGapDetector, DefaultGapDetectorResult, GapDetectorTrait}, + parquet_gap_detector::{ParquetFileGapDetector, ParquetFileGapDetectorResult}, + }, + processors::{DefaultProcessingResult, Processor, ProcessorTrait}, + utils::counters::{PARQUET_PROCESSOR_DATA_GAP_COUNT, PROCESSOR_DATA_GAP_COUNT}, + worker::PROCESSOR_SERVICE_TYPE, +}; +use enum_dispatch::enum_dispatch; +use kanal::AsyncReceiver; +use tracing::{error, info}; + +pub mod gap_detector; +pub mod parquet_gap_detector; + +// Size of a gap (in txn version) before gap detected +pub const DEFAULT_GAP_DETECTION_BATCH_SIZE: u64 = 500; +// Number of seconds between each processor status update +const UPDATE_PROCESSOR_STATUS_SECS: u64 = 1; + +#[enum_dispatch(GapDetectorTrait)] +pub enum GapDetector { + DefaultGapDetector, + ParquetFileGapDetector, +} + +#[enum_dispatch(GapDetectorTrait)] +pub enum GapDetectorResult { + DefaultGapDetectorResult, + ParquetFileGapDetectorResult, +} +pub enum ProcessingResult { + DefaultProcessingResult(DefaultProcessingResult), + ParquetProcessingResult(ParquetProcessingResult), +} + +pub async fn create_gap_detector_status_tracker_loop( + gap_detector_receiver: AsyncReceiver, + processor: Processor, + starting_version: u64, + gap_detection_batch_size: u64, +) { + let processor_name = processor.name(); + info!( + processor_name = processor_name, + service_type = PROCESSOR_SERVICE_TYPE, + "[Parser] Starting gap detector task", + ); + + let mut default_gap_detector = DefaultGapDetector::new(starting_version); + let mut parquet_gap_detector = ParquetFileGapDetector::new(starting_version); + let mut last_update_time = std::time::Instant::now(); + loop { + match gap_detector_receiver.recv().await { + Ok(ProcessingResult::DefaultProcessingResult(result)) => { + match default_gap_detector + .process_versions(ProcessingResult::DefaultProcessingResult(result)) + { + Ok(res) => { + match res { + GapDetectorResult::DefaultGapDetectorResult(res) => { + PROCESSOR_DATA_GAP_COUNT + .with_label_values(&[processor_name]) + .set(res.num_gaps as i64); + if res.num_gaps >= gap_detection_batch_size { + tracing::debug!( + processor_name, + gap_start_version = res.next_version_to_process, + num_gaps = res.num_gaps, + "[Parser] Processed {gap_detection_batch_size} batches with a gap", + ); + // We don't panic as everything downstream will panic if it doesn't work/receive + } + if let Some(res_last_success_batch) = res.last_success_batch { + if last_update_time.elapsed().as_secs() + >= UPDATE_PROCESSOR_STATUS_SECS + { + processor + .update_last_processed_version( + res_last_success_batch.end_version, + res_last_success_batch + .last_transaction_timestamp + .clone(), + ) + .await + .unwrap(); + last_update_time = std::time::Instant::now(); + } + } + }, + _ => { + panic!("Invalid result type"); + }, + } + }, + Err(e) => { + error!( + processor_name, + service_type = PROCESSOR_SERVICE_TYPE, + error = ?e, + "[Parser] Gap detector task has panicked" + ); + panic!("[Parser] Gap detector task has panicked: {:?}", e); + }, + } + }, + Ok(ProcessingResult::ParquetProcessingResult(result)) => { + info!( + processor_name, + service_type = PROCESSOR_SERVICE_TYPE, + "[ParquetGapDetector] received parquet gap detector task", + ); + match parquet_gap_detector + .process_versions(ProcessingResult::ParquetProcessingResult(result)) + { + Ok(res) => { + match res { + GapDetectorResult::ParquetFileGapDetectorResult(res) => { + PARQUET_PROCESSOR_DATA_GAP_COUNT + .with_label_values(&[processor_name]) + .set(res.num_gaps as i64); + // we 
need a new gap detection batch size + if res.num_gaps >= gap_detection_batch_size { + tracing::debug!( + processor_name, + gap_start_version = res.next_version_to_process, + num_gaps = res.num_gaps, + "[Parser] Processed {gap_detection_batch_size} batches with a gap", + ); + // We don't panic as everything downstream will panic if it doesn't work/receive + } + + if let Some(res_last_success_batch) = res.last_success_batch { + if last_update_time.elapsed().as_secs() + >= UPDATE_PROCESSOR_STATUS_SECS + { + tracing::info!("Updating last processed version"); + processor + .update_last_processed_version( + res_last_success_batch.end_version as u64, + res_last_success_batch + .last_transaction_timestamp + .clone(), + ) + .await + .unwrap(); + last_update_time = std::time::Instant::now(); + } else { + tracing::info!("Not Updating last processed version"); + } + } + }, + _ => { + panic!("Invalid result type"); + }, + } + }, + Err(e) => { + error!( + processor_name, + service_type = PROCESSOR_SERVICE_TYPE, + error = ?e, + "[Parser] Gap detector task has panicked" + ); + panic!("[Parser] Gap detector task has panicked: {:?}", e); + }, + } + }, + Err(e) => { + info!( + processor_name, + service_type = PROCESSOR_SERVICE_TYPE, + error = ?e, + "[Parser] Gap detector channel has been closed", + ); + return; + }, + }; + } +} diff --git a/rust/processor/src/gap_detectors/parquet_gap_detector.rs b/rust/processor/src/gap_detectors/parquet_gap_detector.rs new file mode 100644 index 000000000..c0b5ae6cc --- /dev/null +++ b/rust/processor/src/gap_detectors/parquet_gap_detector.rs @@ -0,0 +1,89 @@ +// // Copyright © Aptos Foundation +// // SPDX-License-Identifier: Apache-2.0 + +use crate::{ + bq_analytics::ParquetProcessingResult, + gap_detectors::{gap_detector::GapDetectorTrait, GapDetectorResult, ProcessingResult}, +}; +use ahash::AHashMap; +use std::cmp::max; +use tracing::{debug, info}; + +pub struct ParquetFileGapDetector { + next_version_to_process: i64, + last_success_batch: Option, + version_counters: AHashMap, + max_version: i64, +} + +pub struct ParquetFileGapDetectorResult { + pub next_version_to_process: u64, + pub num_gaps: u64, + pub last_success_batch: Option, +} + +impl ParquetFileGapDetector { + pub fn new(starting_version: u64) -> Self { + Self { + next_version_to_process: starting_version as i64, + last_success_batch: None, + version_counters: AHashMap::new(), + max_version: 0, + } + } +} +impl GapDetectorTrait for ParquetFileGapDetector { + fn process_versions(&mut self, result: ProcessingResult) -> anyhow::Result { + // Update counts of structures for each transaction version + let result = match result { + ProcessingResult::ParquetProcessingResult(r) => r, + _ => panic!("Invalid result type"), + }; + for (version, count) in result.txn_version_to_struct_count.iter() { + if !self.version_counters.contains_key(version) { + // info!("Inserting version {} with count {} into parquet gap detector", version, count); + self.version_counters.insert(*version, *count); + } + self.max_version = max(self.max_version, *version); + + *self.version_counters.entry(*version).or_default() -= 1; + } + + // Update next version to process and move forward + let mut current_version = result.start_version; + + while current_version <= result.end_version { + match self.version_counters.get_mut(¤t_version) { + Some(count) => { + if *count == 0 && current_version == self.next_version_to_process { + while let Some(&count) = + self.version_counters.get(&self.next_version_to_process) + { + if count == 0 { + 
self.version_counters.remove(&self.next_version_to_process); // Remove the fully processed version + self.next_version_to_process += 1; // Increment to the next version + info!("Version {} fully processed. Next version to process updated to {}", self.next_version_to_process - 1, self.next_version_to_process); + } else { + break; + } + } + } + }, + None => { + // TODO: validate this that we shouldn't reach this b/c we already added default count. + // or it could mean that we have duplicates. + debug!("No struct count found for version {}", current_version); + }, + } + current_version += 1; // Move to the next version in sequence + } + + Ok(GapDetectorResult::ParquetFileGapDetectorResult( + ParquetFileGapDetectorResult { + next_version_to_process: self.next_version_to_process as u64, + num_gaps: (self.max_version - self.next_version_to_process) as u64, + last_success_batch: self.last_success_batch.clone(), + }, + )) + } +} diff --git a/rust/processor/src/grpc_stream.rs b/rust/processor/src/grpc_stream.rs index fec7468ec..e1674dcd0 100644 --- a/rust/processor/src/grpc_stream.rs +++ b/rust/processor/src/grpc_stream.rs @@ -74,6 +74,7 @@ pub async fn get_stream( indexer_grpc_data_service_address: Url, indexer_grpc_http2_ping_interval: Duration, indexer_grpc_http2_ping_timeout: Duration, + indexer_grpc_reconnection_timeout_secs: Duration, starting_version: u64, ending_version: Option, auth_token: String, @@ -121,7 +122,7 @@ pub async fn get_stream( let mut connect_retries = 0; let connect_res = loop { let res = timeout( - Duration::from_secs(5), + indexer_grpc_reconnection_timeout_secs, RawDataClient::connect(channel.clone()), ) .await; @@ -150,7 +151,8 @@ pub async fn get_stream( let mut rpc_client = match connect_res { Ok(client) => client .accept_compressed(tonic::codec::CompressionEncoding::Gzip) - .send_compressed(tonic::codec::CompressionEncoding::Gzip) + .accept_compressed(tonic::codec::CompressionEncoding::Zstd) + .send_compressed(tonic::codec::CompressionEncoding::Zstd) .max_decoding_message_size(MAX_RESPONSE_SIZE) .max_encoding_message_size(MAX_RESPONSE_SIZE), Err(e) => { @@ -176,17 +178,65 @@ pub async fn get_stream( num_of_transactions = ?count, "[Parser] Setting up GRPC stream", ); - let request = grpc_request_builder(starting_version, count, auth_token, processor_name); - rpc_client - .get_transactions(request) - .await - .expect("[Parser] Failed to get grpc response. Is the server running?") + + // TODO: move this to a config file + // Retry this connection a few times before giving up + let mut connect_retries = 0; + let stream_res = loop { + let timeout_res = timeout(indexer_grpc_reconnection_timeout_secs, async { + let request = grpc_request_builder( + starting_version, + count, + auth_token.clone(), + processor_name.clone(), + ); + rpc_client.get_transactions(request).await + }) + .await; + match timeout_res { + Ok(client) => break Ok(client), + Err(e) => { + error!( + processor_name = processor_name, + service_type = crate::worker::PROCESSOR_SERVICE_TYPE, + stream_address = indexer_grpc_data_service_address.to_string(), + start_version = starting_version, + end_version = ending_version, + retries = connect_retries, + error = ?e, + "[Parser] Timeout making grpc request. 
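To restate the bookkeeping in ParquetFileGapDetector above in isolation: every transaction version carries a counter of how many parquet writes still have to land before the version is complete, and the processing watermark only advances across versions whose counter has drained to zero. A simplified, self-contained sketch of that idea; VersionTracker and record_file are illustrative names, and the counter semantics are deliberately reduced to "expected number of files per version":

use std::collections::HashMap;

struct VersionTracker {
    next_version: i64,
    counters: HashMap<i64, i64>,
}

impl VersionTracker {
    fn new(start: i64) -> Self {
        Self { next_version: start, counters: HashMap::new() }
    }

    // Called once per uploaded parquet file; `versions` lists (version, expected
    // number of files for that version). The expectation is recorded on first
    // sight and decremented on every file that touches the version.
    fn record_file(&mut self, versions: &[(i64, i64)]) {
        for &(version, expected) in versions {
            let counter = self.counters.entry(version).or_insert(expected);
            *counter -= 1;
        }
        // Advance the watermark across fully drained versions.
        while self.counters.get(&self.next_version) == Some(&0) {
            self.counters.remove(&self.next_version);
            self.next_version += 1;
        }
    }
}

fn main() {
    let mut tracker = VersionTracker::new(0);
    tracker.record_file(&[(0, 2), (1, 2)]); // first file touching versions 0 and 1
    assert_eq!(tracker.next_version, 0); // both still wait on one more file
    tracker.record_file(&[(0, 2), (1, 2)]); // second file completes them
    assert_eq!(tracker.next_version, 2);
}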
Retrying...", + ); + connect_retries += 1; + if connect_retries >= RECONNECTION_MAX_RETRIES { + break Err(e); + } + }, + } + } + .expect("[Parser] Timed out making grpc request after max retries."); + + match stream_res { + Ok(stream) => stream, + Err(e) => { + error!( + processor_name = processor_name, + service_type = crate::worker::PROCESSOR_SERVICE_TYPE, + stream_address = indexer_grpc_data_service_address.to_string(), + start_version = starting_version, + ending_version = ending_version, + error = ?e, + "[Parser] Failed to get grpc response. Is the server running?" + ); + panic!("[Parser] Failed to get grpc response. Is the server running?"); + }, + } } pub async fn get_chain_id( indexer_grpc_data_service_address: Url, indexer_grpc_http2_ping_interval: Duration, indexer_grpc_http2_ping_timeout: Duration, + indexer_grpc_reconnection_timeout_secs: Duration, auth_token: String, processor_name: String, ) -> u64 { @@ -200,6 +250,7 @@ pub async fn get_chain_id( indexer_grpc_data_service_address.clone(), indexer_grpc_http2_ping_interval, indexer_grpc_http2_ping_timeout, + indexer_grpc_reconnection_timeout_secs, 1, Some(2), auth_token.clone(), @@ -250,12 +301,14 @@ pub async fn get_chain_id( /// There could be several special scenarios: /// 1. If we lose the connection, we will try reconnecting X times within Y seconds before crashing. /// 2. If we specified an end version and we hit that, we will stop fetching, but we will make sure that -/// all existing transactions are processed +/// all existing transactions are processed pub async fn create_fetcher_loop( txn_sender: AsyncSender, indexer_grpc_data_service_address: Url, indexer_grpc_http2_ping_interval: Duration, indexer_grpc_http2_ping_timeout: Duration, + indexer_grpc_reconnection_timeout_secs: Duration, + indexer_grpc_response_item_timeout_secs: Duration, starting_version: u64, request_ending_version: Option, auth_token: String, @@ -276,6 +329,7 @@ pub async fn create_fetcher_loop( indexer_grpc_data_service_address.clone(), indexer_grpc_http2_ping_interval, indexer_grpc_http2_ping_timeout, + indexer_grpc_reconnection_timeout_secs, starting_version, request_ending_version, auth_token.clone(), @@ -305,202 +359,228 @@ pub async fn create_fetcher_loop( let mut send_ma = MovingAverage::new(3000); loop { - let is_success = match resp_stream.next().await { - Some(Ok(mut r)) => { - reconnection_retries = 0; - let start_version = r.transactions.as_slice().first().unwrap().version; - let start_txn_timestamp = - r.transactions.as_slice().first().unwrap().timestamp.clone(); - let end_version = r.transactions.as_slice().last().unwrap().version; - let end_txn_timestamp = r.transactions.as_slice().last().unwrap().timestamp.clone(); - - next_version_to_fetch = end_version + 1; - - let size_in_bytes = r.encoded_len() as u64; - let chain_id: u64 = r.chain_id.expect("[Parser] Chain Id doesn't exist."); - let num_txns = r.transactions.len(); - let duration_in_secs = grpc_channel_recv_latency.elapsed().as_secs_f64(); - fetch_ma.tick_now(num_txns as u64); - - let num_txns = r.transactions.len(); - - // Filter out the txns we don't care about - r.transactions.retain(|txn| transaction_filter.include(txn)); - - let num_txn_post_filter = r.transactions.len(); - let num_filtered_txns = num_txns - num_txn_post_filter; - let step = ProcessorStep::ReceivedTxnsFromGrpc.get_step(); - let label = ProcessorStep::ReceivedTxnsFromGrpc.get_label(); - - info!( - processor_name = processor_name, - service_type = crate::worker::PROCESSOR_SERVICE_TYPE, - stream_address = 
indexer_grpc_data_service_address.to_string(), - connection_id, - start_version, - end_version, - start_txn_timestamp_iso = start_txn_timestamp - .as_ref() - .map(timestamp_to_iso) - .unwrap_or_default(), - end_txn_timestamp_iso = end_txn_timestamp - .as_ref() - .map(timestamp_to_iso) - .unwrap_or_default(), - num_of_transactions = end_version - start_version + 1, - num_filtered_txns, - channel_size = txn_sender.len(), - size_in_bytes, - duration_in_secs, - tps = fetch_ma.avg() as u64, - bytes_per_sec = size_in_bytes as f64 / duration_in_secs, - step, - "{}", - label, - ); - - if last_fetched_version + 1 != start_version as i64 { - error!( - batch_start_version = last_fetched_version + 1, - last_fetched_version, - current_fetched_version = start_version, - "[Parser] Received batch with gap from GRPC stream" - ); - panic!("[Parser] Received batch with gap from GRPC stream"); - } - last_fetched_version = end_version as i64; - - LATEST_PROCESSED_VERSION - .with_label_values(&[&processor_name, step, label, "-"]) - .set(end_version as i64); - TRANSACTION_UNIX_TIMESTAMP - .with_label_values(&[&processor_name, step, label, "-"]) - .set( - start_txn_timestamp - .as_ref() - .map(timestamp_to_unixtime) - .unwrap_or_default(), - ); - PROCESSED_BYTES_COUNT - .with_label_values(&[&processor_name, step, label, "-"]) - .inc_by(size_in_bytes); - NUM_TRANSACTIONS_PROCESSED_COUNT - .with_label_values(&[&processor_name, step, label, "-"]) - .inc_by(end_version - start_version + 1); - - let txn_channel_send_latency = std::time::Instant::now(); - - //potentially break txn_pb into many `TransactionsPBResponse` that are each `pb_channel_txn_chunk_size` txns max in size - if num_txn_post_filter < pb_channel_txn_chunk_size { - // We only need to send one; avoid the chunk/clone - let txn_pb = TransactionsPBResponse { - transactions: r.transactions, - chain_id, - start_version, - end_version, - start_txn_timestamp, - end_txn_timestamp, - size_in_bytes, - }; - - match txn_sender.send(txn_pb).await { - Ok(()) => {}, - Err(e) => { + let is_success = match tokio::time::timeout( + indexer_grpc_response_item_timeout_secs, + resp_stream.next(), + ) + .await + { + // Received datastream response + Ok(response) => { + match response { + Some(Ok(mut r)) => { + reconnection_retries = 0; + let start_version = r.transactions.as_slice().first().unwrap().version; + let start_txn_timestamp = + r.transactions.as_slice().first().unwrap().timestamp.clone(); + let end_version = r.transactions.as_slice().last().unwrap().version; + let end_txn_timestamp = + r.transactions.as_slice().last().unwrap().timestamp.clone(); + + next_version_to_fetch = end_version + 1; + + let size_in_bytes = r.encoded_len() as u64; + let chain_id: u64 = r.chain_id.expect("[Parser] Chain Id doesn't exist."); + let num_txns = r.transactions.len(); + let duration_in_secs = grpc_channel_recv_latency.elapsed().as_secs_f64(); + fetch_ma.tick_now(num_txns as u64); + + let num_txns = r.transactions.len(); + + // Filter out the txns we don't care about + r.transactions.retain(|txn| transaction_filter.include(txn)); + + let num_txn_post_filter = r.transactions.len(); + let num_filtered_txns = num_txns - num_txn_post_filter; + let step = ProcessorStep::ReceivedTxnsFromGrpc.get_step(); + let label = ProcessorStep::ReceivedTxnsFromGrpc.get_label(); + + info!( + processor_name = processor_name, + service_type = crate::worker::PROCESSOR_SERVICE_TYPE, + stream_address = indexer_grpc_data_service_address.to_string(), + connection_id, + start_version, + end_version, + 
start_txn_timestamp_iso = start_txn_timestamp + .as_ref() + .map(timestamp_to_iso) + .unwrap_or_default(), + end_txn_timestamp_iso = end_txn_timestamp + .as_ref() + .map(timestamp_to_iso) + .unwrap_or_default(), + num_of_transactions = end_version - start_version + 1, + num_filtered_txns, + channel_size = txn_sender.len(), + size_in_bytes, + duration_in_secs, + tps = fetch_ma.avg().ceil() as u64, + bytes_per_sec = size_in_bytes as f64 / duration_in_secs, + step, + "{}", + label, + ); + + if last_fetched_version + 1 != start_version as i64 { error!( - processor_name = processor_name, - stream_address = indexer_grpc_data_service_address.to_string(), - connection_id, - error = ?e, - "[Parser] Error sending GRPC response to channel." + batch_start_version = last_fetched_version + 1, + last_fetched_version, + current_fetched_version = start_version, + "[Parser] Received batch with gap from GRPC stream" + ); + panic!("[Parser] Received batch with gap from GRPC stream"); + } + last_fetched_version = end_version as i64; + + LATEST_PROCESSED_VERSION + .with_label_values(&[&processor_name, step, label, "-"]) + .set(end_version as i64); + TRANSACTION_UNIX_TIMESTAMP + .with_label_values(&[&processor_name, step, label, "-"]) + .set( + start_txn_timestamp + .as_ref() + .map(timestamp_to_unixtime) + .unwrap_or_default(), ); - panic!("[Parser] Error sending GRPC response to channel.") - }, - } - } else { - // We are breaking down a big batch into small batches; this involves an iterator - let average_size_in_bytes = size_in_bytes / num_txns as u64; - - let pb_txn_chunks: Vec> = r - .transactions - .into_iter() - .chunks(pb_channel_txn_chunk_size) - .into_iter() - .map(|chunk| chunk.collect()) - .collect(); - for txns in pb_txn_chunks { - let size_in_bytes = average_size_in_bytes * txns.len() as u64; - let txn_pb = TransactionsPBResponse { - transactions: txns, - chain_id, + PROCESSED_BYTES_COUNT + .with_label_values(&[&processor_name, step, label, "-"]) + .inc_by(size_in_bytes); + NUM_TRANSACTIONS_PROCESSED_COUNT + .with_label_values(&[&processor_name, step, label, "-"]) + .inc_by(end_version - start_version + 1); + + let txn_channel_send_latency = std::time::Instant::now(); + + //potentially break txn_pb into many `TransactionsPBResponse` that are each `pb_channel_txn_chunk_size` txns max in size + if num_txn_post_filter < pb_channel_txn_chunk_size { + // We only need to send one; avoid the chunk/clone + let txn_pb = TransactionsPBResponse { + transactions: r.transactions, + chain_id, + start_version, + end_version, + start_txn_timestamp, + end_txn_timestamp, + size_in_bytes, + }; + + match txn_sender.send(txn_pb).await { + Ok(()) => {}, + Err(e) => { + error!( + processor_name = processor_name, + stream_address = indexer_grpc_data_service_address.to_string(), + connection_id, + error = ?e, + "[Parser] Error sending GRPC response to channel." 
+ ); + panic!("[Parser] Error sending GRPC response to channel.") + }, + } + } else { + // We are breaking down a big batch into small batches; this involves an iterator + let average_size_in_bytes = size_in_bytes / num_txns as u64; + + let pb_txn_chunks: Vec> = r + .transactions + .into_iter() + .chunks(pb_channel_txn_chunk_size) + .into_iter() + .map(|chunk| chunk.collect()) + .collect(); + for txns in pb_txn_chunks { + let size_in_bytes = average_size_in_bytes * txns.len() as u64; + let txn_pb = TransactionsPBResponse { + transactions: txns, + chain_id, + start_version, + end_version, + // TODO: this is only for gap checker + filtered txns, but this is wrong + start_txn_timestamp: start_txn_timestamp.clone(), + end_txn_timestamp: end_txn_timestamp.clone(), + size_in_bytes, + }; + + match txn_sender.send(txn_pb).await { + Ok(()) => {}, + Err(e) => { + error!( + processor_name = processor_name, + stream_address = indexer_grpc_data_service_address.to_string(), + connection_id, + error = ?e, + "[Parser] Error sending GRPC response to channel." + ); + panic!("[Parser] Error sending GRPC response to channel.") + }, + } + } + } + + let duration_in_secs = txn_channel_send_latency.elapsed().as_secs_f64(); + send_ma.tick_now(num_txns as u64); + let tps = send_ma.avg().ceil() as u64; + let bytes_per_sec = size_in_bytes as f64 / duration_in_secs; + + let channel_size = txn_sender.len(); + debug!( + processor_name = processor_name, + service_type = crate::worker::PROCESSOR_SERVICE_TYPE, + stream_address = indexer_grpc_data_service_address.to_string(), + connection_id, start_version, end_version, - // TODO: this is only for gap checker + filtered txns, but this is wrong - start_txn_timestamp: start_txn_timestamp.clone(), - end_txn_timestamp: end_txn_timestamp.clone(), + channel_size, size_in_bytes, - }; - - match txn_sender.send(txn_pb).await { - Ok(()) => {}, - Err(e) => { - error!( - processor_name = processor_name, - stream_address = indexer_grpc_data_service_address.to_string(), - connection_id, - error = ?e, - "[Parser] Error sending GRPC response to channel." - ); - panic!("[Parser] Error sending GRPC response to channel.") - }, - } - } + duration_in_secs, + bytes_per_sec, + tps, + num_filtered_txns, + "[Parser] Successfully sent transactions to channel." + ); + FETCHER_THREAD_CHANNEL_SIZE + .with_label_values(&[&processor_name]) + .set(channel_size as i64); + grpc_channel_recv_latency = std::time::Instant::now(); + + NUM_TRANSACTIONS_FILTERED_OUT_COUNT + .with_label_values(&[&processor_name]) + .inc_by(num_filtered_txns as u64); + true + }, + // Error receiving datastream response + Some(Err(rpc_error)) => { + tracing::warn!( + processor_name = processor_name, + service_type = crate::worker::PROCESSOR_SERVICE_TYPE, + stream_address = indexer_grpc_data_service_address.to_string(), + connection_id, + start_version = starting_version, + end_version = request_ending_version, + error = ?rpc_error, + "[Parser] Error receiving datastream response." + ); + false + }, + // Stream is finished + None => { + tracing::warn!( + processor_name = processor_name, + service_type = crate::worker::PROCESSOR_SERVICE_TYPE, + stream_address = indexer_grpc_data_service_address.to_string(), + connection_id, + start_version = starting_version, + end_version = request_ending_version, + "[Parser] Stream ended." 
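The re-indented branch above still splits an oversized gRPC response into chunks of at most pb_channel_txn_chunk_size transactions before sending them down the channel. For reference, a miniature of the itertools chunking pattern it relies on, with plain integers standing in for transactions:

use itertools::Itertools;

fn main() {
    let txns: Vec<u64> = (0..10).collect();
    let chunk_size = 4;

    // Same shape as the fetcher: consume the Vec, chunk it, collect each chunk.
    let chunks: Vec<Vec<u64>> = txns
        .into_iter()
        .chunks(chunk_size)
        .into_iter()
        .map(|chunk| chunk.collect())
        .collect();

    assert_eq!(chunks, vec![vec![0, 1, 2, 3], vec![4, 5, 6, 7], vec![8, 9]]);
}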
+ ); + false + }, } - - let duration_in_secs = txn_channel_send_latency.elapsed().as_secs_f64(); - send_ma.tick_now(num_txns as u64); - let tps = send_ma.avg() as u64; - let bytes_per_sec = size_in_bytes as f64 / duration_in_secs; - - let channel_size = txn_sender.len(); - debug!( - processor_name = processor_name, - service_type = crate::worker::PROCESSOR_SERVICE_TYPE, - stream_address = indexer_grpc_data_service_address.to_string(), - connection_id, - start_version, - end_version, - channel_size, - size_in_bytes, - duration_in_secs, - bytes_per_sec, - tps, - num_filtered_txns, - "[Parser] Successfully sent transactions to channel." - ); - FETCHER_THREAD_CHANNEL_SIZE - .with_label_values(&[&processor_name]) - .set(channel_size as i64); - grpc_channel_recv_latency = std::time::Instant::now(); - - NUM_TRANSACTIONS_FILTERED_OUT_COUNT - .with_label_values(&[&processor_name]) - .inc_by(num_filtered_txns as u64); - true - }, - Some(Err(rpc_error)) => { - tracing::warn!( - processor_name = processor_name, - service_type = crate::worker::PROCESSOR_SERVICE_TYPE, - stream_address = indexer_grpc_data_service_address.to_string(), - connection_id, - start_version = starting_version, - end_version = request_ending_version, - error = ?rpc_error, - "[Parser] Error receiving datastream response." - ); - false }, - None => { + // Timeout receiving datastream response + Err(e) => { tracing::warn!( processor_name = processor_name, service_type = crate::worker::PROCESSOR_SERVICE_TYPE, @@ -508,7 +588,8 @@ pub async fn create_fetcher_loop( connection_id, start_version = starting_version, end_version = request_ending_version, - "[Parser] Stream ended." + error = ?e, + "[Parser] Timeout receiving datastream response." ); false }, @@ -550,7 +631,7 @@ pub async fn create_fetcher_loop( service_type = crate::worker::PROCESSOR_SERVICE_TYPE, stream_address = indexer_grpc_data_service_address.to_string(), connection_id, - "[Parser] The stream is ended." + "[Parser] Transaction fetcher send channel is closed." ); break; } else { @@ -568,9 +649,9 @@ pub async fn create_fetcher_loop( processor_name = processor_name, service_type = crate::worker::PROCESSOR_SERVICE_TYPE, stream_address = indexer_grpc_data_service_address.to_string(), - "[Parser] Reconnected more than 100 times. Will not retry.", + "[Parser] Reconnected more than {RECONNECTION_MAX_RETRIES} times. Will not retry.", ); - panic!("[Parser] Reconnected more than 100 times. Will not retry.") + panic!("[Parser] Reconnected more than {RECONNECTION_MAX_RETRIES} times. 
Will not retry.") } reconnection_retries += 1; info!( @@ -586,6 +667,7 @@ pub async fn create_fetcher_loop( indexer_grpc_data_service_address.clone(), indexer_grpc_http2_ping_interval, indexer_grpc_http2_ping_timeout, + indexer_grpc_reconnection_timeout_secs, next_version_to_fetch, request_ending_version, auth_token.clone(), diff --git a/rust/processor/src/lib.rs b/rust/processor/src/lib.rs index ecb81ff3b..8dcb43fa3 100644 --- a/rust/processor/src/lib.rs +++ b/rust/processor/src/lib.rs @@ -7,17 +7,24 @@ // #[macro_use] // extern crate diesel_migrations; -// Need to use this for because src/schema.rs uses the macros and is autogenerated +// Need to use this for because schema.rs uses the macros and is autogenerated #[macro_use] extern crate diesel; +// for parquet_derive +extern crate canonical_json; +extern crate parquet; +extern crate parquet_derive; + pub use config::IndexerGrpcProcessorConfig; +pub mod bq_analytics; mod config; -pub mod gap_detector; +mod db; +pub mod gap_detectors; pub mod grpc_stream; -pub mod models; pub mod processors; +#[path = "db/postgres/schema.rs"] pub mod schema; pub mod transaction_filter; pub mod utils; diff --git a/rust/processor/src/main.rs b/rust/processor/src/main.rs index 3e4b6cfdf..f26edb921 100644 --- a/rust/processor/src/main.rs +++ b/rust/processor/src/main.rs @@ -6,6 +6,10 @@ use clap::Parser; use processor::IndexerGrpcProcessorConfig; use server_framework::ServerArgs; +#[cfg(unix)] +#[global_allocator] +static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc; + const RUNTIME_WORKER_MULTIPLIER: usize = 2; fn main() -> Result<()> { diff --git a/rust/processor/src/models/events_models/events.rs b/rust/processor/src/models/events_models/events.rs deleted file mode 100644 index 6747636ab..000000000 --- a/rust/processor/src/models/events_models/events.rs +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -#![allow(clippy::extra_unused_lifetimes)] - -use crate::{ - schema::events, - utils::util::{standardize_address, truncate_str}, -}; -use aptos_protos::transaction::v1::Event as EventPB; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; - -// p99 currently is 303 so using 300 as a safe max length -const EVENT_TYPE_MAX_LENGTH: usize = 300; - -#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(transaction_version, event_index))] -#[diesel(table_name = events)] -pub struct Event { - pub sequence_number: i64, - pub creation_number: i64, - pub account_address: String, - pub transaction_version: i64, - pub transaction_block_height: i64, - pub type_: String, - pub data: serde_json::Value, - pub event_index: i64, - pub indexed_type: String, -} - -impl Event { - pub fn from_event( - event: &EventPB, - transaction_version: i64, - transaction_block_height: i64, - event_index: i64, - ) -> Self { - let t: &str = event.type_str.as_ref(); - Event { - account_address: standardize_address( - event.key.as_ref().unwrap().account_address.as_str(), - ), - creation_number: event.key.as_ref().unwrap().creation_number as i64, - sequence_number: event.sequence_number as i64, - transaction_version, - transaction_block_height, - type_: t.to_string(), - data: serde_json::from_str(event.data.as_str()).unwrap(), - event_index, - indexed_type: truncate_str(t, EVENT_TYPE_MAX_LENGTH), - } - } - - pub fn from_events( - events: &[EventPB], - transaction_version: i64, - transaction_block_height: i64, - ) -> Vec { - events - .iter() - .enumerate() - 
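Both timeout knobs threaded through grpc_stream.rs above (indexer_grpc_reconnection_timeout_secs around connection and request setup, indexer_grpc_response_item_timeout_secs around each resp_stream.next()) follow the same shape: wrap the await in tokio::time::timeout and retry up to a cap before giving up. A self-contained sketch of that shape; flaky_op is a stand-in for the gRPC connect/request calls and MAX_RETRIES plays the role of RECONNECTION_MAX_RETRIES:

// Requires tokio with the rt, macros and time features enabled.
use std::time::Duration;
use tokio::time::timeout;

const MAX_RETRIES: u64 = 5;

// Stand-in for RawDataClient::connect / get_transactions: hangs on the first
// two attempts, succeeds afterwards.
async fn flaky_op(attempt: u64) -> Result<&'static str, &'static str> {
    if attempt < 2 {
        tokio::time::sleep(Duration::from_secs(10)).await;
    }
    Ok("connected")
}

#[tokio::main]
async fn main() {
    let per_attempt_timeout = Duration::from_secs(1);
    let mut retries = 0;

    let res = loop {
        match timeout(per_attempt_timeout, flaky_op(retries)).await {
            // The inner call finished within the deadline (it may still be an Err
            // from the call itself, which the real code handles separately).
            Ok(inner) => break inner,
            Err(_elapsed) => {
                retries += 1;
                if retries >= MAX_RETRIES {
                    panic!("timed out after {MAX_RETRIES} attempts");
                }
            },
        }
    };
    assert_eq!(res, Ok("connected"));
}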
.map(|(index, event)| { - Self::from_event( - event, - transaction_version, - transaction_block_height, - index as i64, - ) - }) - .collect::>() - } -} - -// Prevent conflicts with other things named `Event` -pub type EventModel = Event; diff --git a/rust/processor/src/processors/account_transactions_processor.rs b/rust/processor/src/processors/account_transactions_processor.rs index 1b984b17c..f7ef88344 100644 --- a/rust/processor/src/processors/account_transactions_processor.rs +++ b/rust/processor/src/processors/account_transactions_processor.rs @@ -1,11 +1,12 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use super::{ProcessingResult, ProcessorName, ProcessorTrait}; +use super::{DefaultProcessingResult, ProcessorName, ProcessorTrait}; use crate::{ - models::account_transaction_models::account_transactions::AccountTransaction, + db::common::models::account_transaction_models::account_transactions::AccountTransaction, + gap_detectors::ProcessingResult, schema, - utils::database::{execute_in_chunks, get_config_table_chunk_size, PgDbPool}, + utils::database::{execute_in_chunks, get_config_table_chunk_size, ArcDbPool}, }; use ahash::AHashMap; use anyhow::bail; @@ -16,12 +17,12 @@ use std::fmt::Debug; use tracing::error; pub struct AccountTransactionsProcessor { - connection_pool: PgDbPool, + connection_pool: ArcDbPool, per_table_chunk_sizes: AHashMap, } impl AccountTransactionsProcessor { - pub fn new(connection_pool: PgDbPool, per_table_chunk_sizes: AHashMap) -> Self { + pub fn new(connection_pool: ArcDbPool, per_table_chunk_sizes: AHashMap) -> Self { Self { connection_pool, per_table_chunk_sizes, @@ -41,7 +42,7 @@ impl Debug for AccountTransactionsProcessor { } async fn insert_to_db( - conn: PgDbPool, + conn: ArcDbPool, name: &'static str, start_version: u64, end_version: u64, @@ -129,13 +130,15 @@ impl ProcessorTrait for AccountTransactionsProcessor { let db_insertion_duration_in_secs = db_insertion_start.elapsed().as_secs_f64(); match tx_result { - Ok(_) => Ok(ProcessingResult { - start_version, - end_version, - processing_duration_in_secs, - db_insertion_duration_in_secs, - last_transaction_timestamp, - }), + Ok(_) => Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + processing_duration_in_secs, + db_insertion_duration_in_secs, + last_transaction_timestamp, + }, + )), Err(err) => { error!( start_version = start_version, @@ -149,7 +152,7 @@ impl ProcessorTrait for AccountTransactionsProcessor { } } - fn connection_pool(&self) -> &PgDbPool { + fn connection_pool(&self) -> &ArcDbPool { &self.connection_pool } } diff --git a/rust/processor/src/processors/ans_processor.rs b/rust/processor/src/processors/ans_processor.rs index a448ac3c6..a984c8de7 100644 --- a/rust/processor/src/processors/ans_processor.rs +++ b/rust/processor/src/processors/ans_processor.rs @@ -1,19 +1,20 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use super::{ProcessingResult, ProcessorName, ProcessorTrait}; +use super::{DefaultProcessingResult, ProcessorName, ProcessorTrait}; use crate::{ - models::ans_models::{ + db::common::models::ans_models::{ ans_lookup::{AnsLookup, AnsPrimaryName, CurrentAnsLookup, CurrentAnsPrimaryName}, ans_lookup_v2::{ AnsLookupV2, AnsPrimaryNameV2, CurrentAnsLookupV2, CurrentAnsPrimaryNameV2, }, ans_utils::{RenewNameEvent, SubdomainExtV2}, }, + gap_detectors::ProcessingResult, schema, utils::{ counters::PROCESSOR_UNKNOWN_TYPE_COUNT, - database::{execute_in_chunks, 
get_config_table_chunk_size, PgDbPool}, + database::{execute_in_chunks, get_config_table_chunk_size, ArcDbPool}, util::standardize_address, }, }; @@ -41,16 +42,16 @@ pub struct AnsProcessorConfig { } pub struct AnsProcessor { - connection_pool: PgDbPool, + connection_pool: ArcDbPool, config: AnsProcessorConfig, per_table_chunk_sizes: AHashMap, } impl AnsProcessor { pub fn new( - connection_pool: PgDbPool, - per_table_chunk_sizes: AHashMap, + connection_pool: ArcDbPool, config: AnsProcessorConfig, + per_table_chunk_sizes: AHashMap, ) -> Self { tracing::info!( ans_v1_primary_names_table_handle = config.ans_v1_primary_names_table_handle, @@ -60,8 +61,8 @@ impl AnsProcessor { ); Self { connection_pool, - per_table_chunk_sizes, config, + per_table_chunk_sizes, } } } @@ -78,7 +79,7 @@ impl Debug for AnsProcessor { } async fn insert_to_db( - conn: PgDbPool, + conn: ArcDbPool, name: &'static str, start_version: u64, end_version: u64, @@ -285,6 +286,7 @@ fn insert_current_ans_lookups_v2_query( token_name.eq(excluded(token_name)), is_deleted.eq(excluded(is_deleted)), inserted_at.eq(excluded(inserted_at)), + subdomain_expiration_policy.eq(excluded(subdomain_expiration_policy)), )), Some(" WHERE current_ans_lookup_v2.last_transaction_version <= excluded.last_transaction_version "), ) @@ -302,7 +304,11 @@ fn insert_ans_lookups_v2_query( diesel::insert_into(schema::ans_lookup_v2::table) .values(item_to_insert) .on_conflict((transaction_version, write_set_change_index)) - .do_nothing(), + .do_update() + .set(( + inserted_at.eq(excluded(inserted_at)), + subdomain_expiration_policy.eq(excluded(subdomain_expiration_policy)), + )), None, ) } @@ -405,13 +411,15 @@ impl ProcessorTrait for AnsProcessor { let db_insertion_duration_in_secs = db_insertion_start.elapsed().as_secs_f64(); match tx_result { - Ok(_) => Ok(ProcessingResult { - start_version, - end_version, - processing_duration_in_secs, - db_insertion_duration_in_secs, - last_transaction_timestamp, - }), + Ok(_) => Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + processing_duration_in_secs, + db_insertion_duration_in_secs, + last_transaction_timestamp, + }, + )), Err(e) => { error!( start_version = start_version, @@ -425,7 +433,7 @@ impl ProcessorTrait for AnsProcessor { } } - fn connection_pool(&self) -> &PgDbPool { + fn connection_pool(&self) -> &ArcDbPool { &self.connection_pool } } diff --git a/rust/processor/src/processors/coin_processor.rs b/rust/processor/src/processors/coin_processor.rs index 99928e5b2..f351ce40d 100644 --- a/rust/processor/src/processors/coin_processor.rs +++ b/rust/processor/src/processors/coin_processor.rs @@ -1,19 +1,19 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use super::{ProcessingResult, ProcessorName, ProcessorTrait}; +use super::{DefaultProcessingResult, ProcessorName, ProcessorTrait}; use crate::{ - models::{ + db::common::models::{ coin_models::{ coin_activities::CoinActivity, coin_balances::{CoinBalance, CurrentCoinBalance}, coin_infos::CoinInfo, - coin_supply::CoinSupply, }, fungible_asset_models::v2_fungible_asset_activities::CurrentCoinBalancePK, }, + gap_detectors::ProcessingResult, schema, - utils::database::{execute_in_chunks, get_config_table_chunk_size, PgDbPool}, + utils::database::{execute_in_chunks, get_config_table_chunk_size, ArcDbPool}, }; use ahash::AHashMap; use anyhow::{bail, Context}; @@ -27,15 +27,13 @@ use diesel::{ use std::fmt::Debug; use tracing::error; -pub const APTOS_COIN_TYPE_STR: &str = 
"0x1::aptos_coin::AptosCoin"; - pub struct CoinProcessor { - connection_pool: PgDbPool, + connection_pool: ArcDbPool, per_table_chunk_sizes: AHashMap, } impl CoinProcessor { - pub fn new(connection_pool: PgDbPool, per_table_chunk_sizes: AHashMap) -> Self { + pub fn new(connection_pool: ArcDbPool, per_table_chunk_sizes: AHashMap) -> Self { Self { connection_pool, per_table_chunk_sizes, @@ -55,7 +53,7 @@ impl Debug for CoinProcessor { } async fn insert_to_db( - conn: PgDbPool, + conn: ArcDbPool, name: &'static str, start_version: u64, end_version: u64, @@ -63,7 +61,6 @@ async fn insert_to_db( coin_infos: &[CoinInfo], coin_balances: &[CoinBalance], current_coin_balances: &[CurrentCoinBalance], - coin_supply: &[CoinSupply], per_table_chunk_sizes: &AHashMap, ) -> Result<(), diesel::result::Error> { tracing::trace!( @@ -100,15 +97,9 @@ async fn insert_to_db( per_table_chunk_sizes, ), ); - let cs = execute_in_chunks( - conn, - inset_coin_supply_query, - coin_supply, - get_config_table_chunk_size::("coin_supply", per_table_chunk_sizes), - ); - let (ca_res, ci_res, cb_res, ccb_res, cs_res) = tokio::join!(ca, ci, cb, ccb, cs); - for res in [ca_res, ci_res, cb_res, ccb_res, cs_res] { + let (ca_res, ci_res, cb_res, ccb_res) = tokio::join!(ca, ci, cb, ccb); + for res in [ca_res, ci_res, cb_res, ccb_res] { res?; } Ok(()) @@ -208,23 +199,6 @@ fn insert_current_coin_balances_query( ) } -fn inset_coin_supply_query( - items_to_insert: Vec, -) -> ( - impl QueryFragment + diesel::query_builder::QueryId + Send, - Option<&'static str>, -) { - use schema::coin_supply::dsl::*; - - ( - diesel::insert_into(schema::coin_supply::table) - .values(items_to_insert) - .on_conflict((transaction_version, coin_type_hash)) - .do_nothing(), - None, - ) -} - #[async_trait] impl ProcessorTrait for CoinProcessor { fn name(&self) -> &'static str { @@ -246,26 +220,18 @@ impl ProcessorTrait for CoinProcessor { all_coin_infos, all_coin_balances, all_current_coin_balances, - all_coin_supply, ) = tokio::task::spawn_blocking(move || { let mut all_coin_activities = vec![]; let mut all_coin_balances = vec![]; let mut all_coin_infos: AHashMap = AHashMap::new(); let mut all_current_coin_balances: AHashMap = AHashMap::new(); - let mut all_coin_supply = vec![]; for txn in &transactions { - let ( - mut coin_activities, - mut coin_balances, - coin_infos, - current_coin_balances, - mut coin_supply, - ) = CoinActivity::from_transaction(txn); + let (mut coin_activities, mut coin_balances, coin_infos, current_coin_balances) = + CoinActivity::from_transaction(txn); all_coin_activities.append(&mut coin_activities); all_coin_balances.append(&mut coin_balances); - all_coin_supply.append(&mut coin_supply); // For coin infos, we only want to keep the first version, so insert only if key is not present already for (key, value) in coin_infos { all_coin_infos.entry(key).or_insert(value); @@ -288,7 +254,6 @@ impl ProcessorTrait for CoinProcessor { all_coin_infos, all_coin_balances, all_current_coin_balances, - all_coin_supply, ) }) .await @@ -306,7 +271,6 @@ impl ProcessorTrait for CoinProcessor { &all_coin_infos, &all_coin_balances, &all_current_coin_balances, - &all_coin_supply, &self.per_table_chunk_sizes, ) .await; @@ -314,13 +278,15 @@ impl ProcessorTrait for CoinProcessor { let db_insertion_duration_in_secs = db_insertion_start.elapsed().as_secs_f64(); match tx_result { - Ok(_) => Ok(ProcessingResult { - start_version, - end_version, - processing_duration_in_secs, - db_insertion_duration_in_secs, - last_transaction_timestamp, - }), + Ok(_) => 
Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + processing_duration_in_secs, + db_insertion_duration_in_secs, + last_transaction_timestamp, + }, + )), Err(err) => { error!( start_version = start_version, @@ -334,7 +300,7 @@ impl ProcessorTrait for CoinProcessor { } } - fn connection_pool(&self) -> &PgDbPool { + fn connection_pool(&self) -> &ArcDbPool { &self.connection_pool } } diff --git a/rust/processor/src/processors/default_processor.rs b/rust/processor/src/processors/default_processor.rs index eb685e197..cf2bd3ce4 100644 --- a/rust/processor/src/processors/default_processor.rs +++ b/rust/processor/src/processors/default_processor.rs @@ -1,9 +1,9 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use super::{ProcessingResult, ProcessorName, ProcessorTrait}; +use super::{DefaultProcessingResult, ProcessorName, ProcessorTrait}; use crate::{ - models::default_models::{ + db::common::models::default_models::{ block_metadata_transactions::{BlockMetadataTransaction, BlockMetadataTransactionModel}, move_modules::MoveModule, move_resources::MoveResource, @@ -11,8 +11,10 @@ use crate::{ transactions::TransactionModel, write_set_changes::{WriteSetChangeDetail, WriteSetChangeModel}, }, + gap_detectors::ProcessingResult, schema, - utils::database::{execute_in_chunks, get_config_table_chunk_size, PgDbPool}, + utils::database::{execute_in_chunks, get_config_table_chunk_size, ArcDbPool}, + worker::TableFlags, }; use ahash::AHashMap; use anyhow::bail; @@ -28,15 +30,21 @@ use tokio::join; use tracing::error; pub struct DefaultProcessor { - connection_pool: PgDbPool, + connection_pool: ArcDbPool, per_table_chunk_sizes: AHashMap, + deprecated_tables: TableFlags, } impl DefaultProcessor { - pub fn new(connection_pool: PgDbPool, per_table_chunk_sizes: AHashMap) -> Self { + pub fn new( + connection_pool: ArcDbPool, + per_table_chunk_sizes: AHashMap, + deprecated_tables: TableFlags, + ) -> Self { Self { connection_pool, per_table_chunk_sizes, + deprecated_tables, } } } @@ -53,7 +61,7 @@ impl Debug for DefaultProcessor { } async fn insert_to_db( - conn: PgDbPool, + conn: ArcDbPool, name: &'static str, start_version: u64, end_version: u64, @@ -82,6 +90,7 @@ async fn insert_to_db( txns, get_config_table_chunk_size::("transactions", per_table_chunk_sizes), ); + let bmt_res = execute_in_chunks( conn.clone(), insert_block_metadata_transactions_query, @@ -91,6 +100,7 @@ async fn insert_to_db( per_table_chunk_sizes, ), ); + let wst_res = execute_in_chunks( conn.clone(), insert_write_set_changes_query, @@ -100,6 +110,7 @@ async fn insert_to_db( per_table_chunk_sizes, ), ); + let mm_res = execute_in_chunks( conn.clone(), insert_move_modules_query, @@ -138,11 +149,11 @@ async fn insert_to_db( get_config_table_chunk_size::("table_metadatas", per_table_chunk_sizes), ); - let (txns_res, bmt_res, wst_res, mm_res, mr_res, ti_res, cti_res, tm_res) = - join!(txns_res, bmt_res, wst_res, mm_res, mr_res, ti_res, cti_res, tm_res); + let (txns_res, wst_res, bmt_res, mm_res, mr_res, ti_res, cti_res, tm_res) = + join!(txns_res, wst_res, bmt_res, mm_res, mr_res, ti_res, cti_res, tm_res); for res in [ - txns_res, bmt_res, wst_res, mm_res, mr_res, ti_res, cti_res, tm_res, + txns_res, wst_res, bmt_res, mm_res, mr_res, ti_res, cti_res, tm_res, ] { res?; } @@ -313,15 +324,15 @@ impl ProcessorTrait for DefaultProcessor { ) -> anyhow::Result { let processing_start = std::time::Instant::now(); let last_transaction_timestamp = 
transactions.last().unwrap().timestamp.clone(); + let flags = self.deprecated_tables; let ( txns, block_metadata_transactions, write_set_changes, (move_modules, move_resources, table_items, current_table_items, table_metadata), - ) = tokio::task::spawn_blocking(move || process_transactions(transactions)) + ) = tokio::task::spawn_blocking(move || process_transactions(transactions, flags)) .await .expect("Failed to spawn_blocking for TransactionModel::from_transactions"); - let processing_duration_in_secs = processing_start.elapsed().as_secs_f64(); let db_insertion_start = std::time::Instant::now(); @@ -346,13 +357,15 @@ impl ProcessorTrait for DefaultProcessor { let db_insertion_duration_in_secs = db_insertion_start.elapsed().as_secs_f64(); match tx_result { - Ok(_) => Ok(ProcessingResult { - start_version, - end_version, - processing_duration_in_secs, - db_insertion_duration_in_secs, - last_transaction_timestamp, - }), + Ok(_) => Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + processing_duration_in_secs, + db_insertion_duration_in_secs, + last_transaction_timestamp, + }, + )), Err(e) => { error!( start_version = start_version, @@ -366,15 +379,16 @@ impl ProcessorTrait for DefaultProcessor { } } - fn connection_pool(&self) -> &PgDbPool { + fn connection_pool(&self) -> &ArcDbPool { &self.connection_pool } } fn process_transactions( transactions: Vec, + flags: TableFlags, ) -> ( - Vec, + Vec, Vec, Vec, ( @@ -385,7 +399,7 @@ fn process_transactions( Vec, ), ) { - let (txns, block_metadata_txns, write_set_changes, wsc_details) = + let (mut txns, block_metadata_txns, mut write_set_changes, wsc_details) = TransactionModel::from_transactions(&transactions); let mut block_metadata_transactions = vec![]; for block_metadata_txn in block_metadata_txns { @@ -426,6 +440,19 @@ fn process_transactions( .sort_by(|a, b| (&a.table_handle, &a.key_hash).cmp(&(&b.table_handle, &b.key_hash))); table_metadata.sort_by(|a, b| a.handle.cmp(&b.handle)); + if flags.contains(TableFlags::MOVE_RESOURCES) { + move_resources.clear(); + } + if flags.contains(TableFlags::TRANSACTIONS) { + txns.clear(); + } + if flags.contains(TableFlags::WRITE_SET_CHANGES) { + write_set_changes.clear(); + } + if flags.contains(TableFlags::TABLE_ITEMS) { + table_items.clear(); + } + ( txns, block_metadata_transactions, diff --git a/rust/processor/src/processors/event_stream_processor.rs b/rust/processor/src/processors/event_stream_processor.rs new file mode 100644 index 000000000..0004149cf --- /dev/null +++ b/rust/processor/src/processors/event_stream_processor.rs @@ -0,0 +1,170 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + db::common::models::{ + events_models::events::{CachedEvents, EventContext, EventModel, EventStreamMessage}, + fungible_asset_models::{ + v2_fungible_asset_activities::{EventToCoinType, FungibleAssetActivity}, + v2_fungible_asset_balances::FungibleAssetBalance, + }, + }, + processors::{DefaultProcessingResult, ProcessingResult, ProcessorName, ProcessorTrait}, + utils::{ + database::ArcDbPool, + in_memory_cache::InMemoryCache, + util::{get_entry_function_from_user_request, parse_timestamp}, + }, +}; +use ahash::AHashMap; +use aptos_in_memory_cache::Cache; +use aptos_protos::transaction::v1::{transaction::TxnData, write_set_change::Change, Transaction}; +use async_trait::async_trait; +use std::{fmt::Debug, sync::Arc}; + +pub struct EventStreamProcessor { + connection_pool: ArcDbPool, + cache: Arc, +} + +impl 
EventStreamProcessor { + pub fn new(connection_pool: ArcDbPool, cache: Arc) -> Self { + Self { + connection_pool, + cache, + } + } +} + +impl Debug for EventStreamProcessor { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let state = &self.connection_pool.state(); + write!( + f, + "EventStreamProcessor {{ connections: {:?} idle_connections: {:?} }}", + state.connections, state.idle_connections + ) + } +} + +#[async_trait] +impl ProcessorTrait for EventStreamProcessor { + fn name(&self) -> &'static str { + ProcessorName::EventStreamProcessor.into() + } + + async fn process_transactions( + &self, + transactions: Vec, + start_version: u64, + end_version: u64, + _: Option, + ) -> anyhow::Result { + let processing_start = std::time::Instant::now(); + let mut batch = vec![]; + for txn in &transactions { + let txn_version = txn.version as i64; + let block_height = txn.block_height as i64; + let txn_data = txn.txn_data.as_ref().expect("Txn Data doesn't exit!"); + let transaction_info = txn.info.as_ref().expect("Transaction info doesn't exist!"); + let txn_timestamp = parse_timestamp(txn.timestamp.as_ref().unwrap(), txn_version); + let default = vec![]; + let (raw_events, _user_request, entry_function_id_str) = match txn_data { + TxnData::BlockMetadata(tx_inner) => (&tx_inner.events, None, None), + TxnData::Genesis(tx_inner) => (&tx_inner.events, None, None), + TxnData::User(tx_inner) => { + let user_request = tx_inner + .request + .as_ref() + .expect("Sends is not present in user txn"); + let entry_function_id_str = get_entry_function_from_user_request(user_request); + (&tx_inner.events, Some(user_request), entry_function_id_str) + }, + _ => (&default, None, None), + }; + + // This is because v1 events (deposit/withdraw) don't have coin type so the only way is to match + // the event to the resource using the event guid + let mut event_to_v1_coin_type: EventToCoinType = AHashMap::new(); + + for (index, wsc) in transaction_info.changes.iter().enumerate() { + if let Change::WriteResource(write_resource) = wsc.change.as_ref().unwrap() { + if let Some((_balance, _current_balance, event_to_coin)) = + FungibleAssetBalance::get_v1_from_write_resource( + write_resource, + index as i64, + txn_version, + txn_timestamp, + ) + .unwrap() + { + event_to_v1_coin_type.extend(event_to_coin); + } + } + } + + let mut event_context = AHashMap::new(); + for (index, event) in raw_events.iter().enumerate() { + // Only support v1 for now + if let Some(v1_activity) = FungibleAssetActivity::get_v1_from_event( + event, + txn_version, + block_height, + txn_timestamp, + &entry_function_id_str, + &event_to_v1_coin_type, + index as i64, + ) + .unwrap_or_else(|e| { + tracing::error!( + transaction_version = txn_version, + index = index, + error = ?e, + "[Parser] error parsing fungible asset activity v1"); + panic!("[Parser] error parsing fungible asset activity v1"); + }) { + event_context.insert((txn_version, index as i64), EventContext { + coin_type: v1_activity.asset_type.clone(), + }); + } + } + + batch.push(CachedEvents { + transaction_version: txn_version, + events: EventModel::from_events(raw_events, txn_version, block_height) + .iter() + .map(|event| { + let context = event_context + .get(&(txn_version, event.event_index)) + .cloned(); + Arc::new(EventStreamMessage::from_event( + event, + context, + txn_timestamp.clone(), + )) + }) + .collect(), + }); + } + + for events in batch { + self.cache + .insert(events.transaction_version, events.clone()); + } + + let processing_duration_in_secs = 
processing_start.elapsed().as_secs_f64(); + Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + last_transaction_timestamp: transactions.last().unwrap().timestamp.clone(), + processing_duration_in_secs, + db_insertion_duration_in_secs: 0.0, + }, + )) + } + + fn connection_pool(&self) -> &ArcDbPool { + &self.connection_pool + } +} diff --git a/rust/processor/src/processors/events_processor.rs b/rust/processor/src/processors/events_processor.rs index c382f82ec..409914275 100644 --- a/rust/processor/src/processors/events_processor.rs +++ b/rust/processor/src/processors/events_processor.rs @@ -1,13 +1,14 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use super::{ProcessingResult, ProcessorName, ProcessorTrait}; +use super::{DefaultProcessingResult, ProcessorName, ProcessorTrait}; use crate::{ - models::events_models::events::EventModel, + db::common::models::events_models::events::EventModel, + gap_detectors::ProcessingResult, schema, utils::{ counters::PROCESSOR_UNKNOWN_TYPE_COUNT, - database::{execute_in_chunks, get_config_table_chunk_size, PgDbPool}, + database::{execute_in_chunks, get_config_table_chunk_size, ArcDbPool}, }, }; use ahash::AHashMap; @@ -23,12 +24,12 @@ use std::fmt::Debug; use tracing::error; pub struct EventsProcessor { - connection_pool: PgDbPool, + connection_pool: ArcDbPool, per_table_chunk_sizes: AHashMap, } impl EventsProcessor { - pub fn new(connection_pool: PgDbPool, per_table_chunk_sizes: AHashMap) -> Self { + pub fn new(connection_pool: ArcDbPool, per_table_chunk_sizes: AHashMap) -> Self { Self { connection_pool, per_table_chunk_sizes, @@ -48,7 +49,7 @@ impl Debug for EventsProcessor { } async fn insert_to_db( - conn: PgDbPool, + conn: ArcDbPool, name: &'static str, start_version: u64, end_version: u64, @@ -151,13 +152,15 @@ impl ProcessorTrait for EventsProcessor { let db_insertion_duration_in_secs = db_insertion_start.elapsed().as_secs_f64(); match tx_result { - Ok(_) => Ok(ProcessingResult { - start_version, - end_version, - processing_duration_in_secs, - db_insertion_duration_in_secs, - last_transaction_timestamp, - }), + Ok(_) => Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + processing_duration_in_secs, + db_insertion_duration_in_secs, + last_transaction_timestamp, + }, + )), Err(e) => { error!( start_version = start_version, @@ -171,7 +174,7 @@ impl ProcessorTrait for EventsProcessor { } } - fn connection_pool(&self) -> &PgDbPool { + fn connection_pool(&self) -> &ArcDbPool { &self.connection_pool } } diff --git a/rust/processor/src/processors/fungible_asset_processor.rs b/rust/processor/src/processors/fungible_asset_processor.rs index af5683749..12c36600a 100644 --- a/rust/processor/src/processors/fungible_asset_processor.rs +++ b/rust/processor/src/processors/fungible_asset_processor.rs @@ -1,26 +1,31 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use super::{ProcessingResult, ProcessorName, ProcessorTrait}; +use super::{DefaultProcessingResult, ProcessorName, ProcessorTrait}; use crate::{ - models::{ + db::common::models::{ + coin_models::coin_supply::CoinSupply, fungible_asset_models::{ v2_fungible_asset_activities::{EventToCoinType, FungibleAssetActivity}, v2_fungible_asset_balances::{ - CurrentFungibleAssetBalance, CurrentFungibleAssetMapping, FungibleAssetBalance, + CurrentFungibleAssetBalance, CurrentFungibleAssetMapping, + CurrentUnifiedFungibleAssetBalance, FungibleAssetBalance, 
+ }, + v2_fungible_asset_utils::{ + ConcurrentFungibleAssetBalance, ConcurrentFungibleAssetSupply, FeeStatement, + FungibleAssetMetadata, FungibleAssetStore, FungibleAssetSupply, }, - v2_fungible_asset_utils::{FeeStatement, FungibleAssetMetadata, FungibleAssetStore}, v2_fungible_metadata::{FungibleAssetMetadataMapping, FungibleAssetMetadataModel}, }, object_models::v2_object_utils::{ - ObjectAggregatedData, ObjectAggregatedDataMapping, ObjectWithMetadata, + ObjectAggregatedData, ObjectAggregatedDataMapping, ObjectWithMetadata, Untransferable, }, - token_v2_models::v2_token_utils::TokenV2, }, + gap_detectors::ProcessingResult, schema, utils::{ counters::PROCESSOR_UNKNOWN_TYPE_COUNT, - database::{execute_in_chunks, get_config_table_chunk_size, PgDbPool, PgPoolConnection}, + database::{execute_in_chunks, get_config_table_chunk_size, ArcDbPool}, util::{get_entry_function_from_user_request, standardize_address}, }, }; @@ -37,15 +42,13 @@ use diesel::{ use std::fmt::Debug; use tracing::error; -pub const APTOS_COIN_TYPE_STR: &str = "0x1::aptos_coin::AptosCoin"; - pub struct FungibleAssetProcessor { - connection_pool: PgDbPool, + connection_pool: ArcDbPool, per_table_chunk_sizes: AHashMap, } impl FungibleAssetProcessor { - pub fn new(connection_pool: PgDbPool, per_table_chunk_sizes: AHashMap) -> Self { + pub fn new(connection_pool: ArcDbPool, per_table_chunk_sizes: AHashMap) -> Self { Self { connection_pool, per_table_chunk_sizes, @@ -65,7 +68,7 @@ impl Debug for FungibleAssetProcessor { } async fn insert_to_db( - conn: PgDbPool, + conn: ArcDbPool, name: &'static str, start_version: u64, end_version: u64, @@ -73,6 +76,11 @@ async fn insert_to_db( fungible_asset_metadata: &[FungibleAssetMetadataModel], fungible_asset_balances: &[FungibleAssetBalance], current_fungible_asset_balances: &[CurrentFungibleAssetBalance], + current_unified_fungible_asset_balances: ( + &[CurrentUnifiedFungibleAssetBalance], + &[CurrentUnifiedFungibleAssetBalance], + ), + coin_supply: &[CoinSupply], per_table_chunk_sizes: &AHashMap, ) -> Result<(), diesel::result::Error> { tracing::trace!( @@ -110,7 +118,7 @@ async fn insert_to_db( ), ); let cfab = execute_in_chunks( - conn, + conn.clone(), insert_current_fungible_asset_balances_query, current_fungible_asset_balances, get_config_table_chunk_size::( @@ -118,8 +126,35 @@ async fn insert_to_db( per_table_chunk_sizes, ), ); - let (faa_res, fam_res, fab_res, cfab_res) = tokio::join!(faa, fam, fab, cfab); - for res in [faa_res, fam_res, fab_res, cfab_res] { + let cufab_v1 = execute_in_chunks( + conn.clone(), + insert_current_unified_fungible_asset_balances_v1_query, + current_unified_fungible_asset_balances.0, + get_config_table_chunk_size::( + "current_unified_fungible_asset_balances", + per_table_chunk_sizes, + ), + ); + let cufab_v2 = execute_in_chunks( + conn.clone(), + insert_current_unified_fungible_asset_balances_v2_query, + current_unified_fungible_asset_balances.1, + get_config_table_chunk_size::( + "current_unified_fungible_asset_balances", + per_table_chunk_sizes, + ), + ); + let cs = execute_in_chunks( + conn, + insert_coin_supply_query, + coin_supply, + get_config_table_chunk_size::("coin_supply", per_table_chunk_sizes), + ); + let (faa_res, fam_res, fab_res, cfab_res, cufab1_res, cufab2_res, cs_res) = + tokio::join!(faa, fam, fab, cfab, cufab_v1, cufab_v2, cs); + for res in [ + faa_res, fam_res, fab_res, cfab_res, cufab1_res, cufab2_res, cs_res, + ] { res?; } @@ -171,6 +206,8 @@ fn insert_fungible_asset_metadata_query( token_standard.eq(excluded(token_standard)), 
inserted_at.eq(excluded(inserted_at)), is_token_v2.eq(excluded(is_token_v2)), + supply_v2.eq(excluded(supply_v2)), + maximum_v2.eq(excluded(maximum_v2)), ) ), Some(" WHERE fungible_asset_metadata.last_transaction_version <= excluded.last_transaction_version "), @@ -189,11 +226,7 @@ fn insert_fungible_asset_balances_query( diesel::insert_into(schema::fungible_asset_balances::table) .values(items_to_insert) .on_conflict((transaction_version, write_set_change_index)) - .do_update() - .set(( - is_frozen.eq(excluded(is_frozen)), - inserted_at.eq(excluded(inserted_at)), - )), + .do_nothing(), None, ) } @@ -228,6 +261,81 @@ fn insert_current_fungible_asset_balances_query( ) } +fn insert_current_unified_fungible_asset_balances_v1_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::current_unified_fungible_asset_balances_to_be_renamed::dsl::*; + + ( + diesel::insert_into(schema::current_unified_fungible_asset_balances_to_be_renamed::table) + .values(items_to_insert) + .on_conflict(storage_id) + .do_update() + .set( + ( + owner_address.eq(excluded(owner_address)), + asset_type_v1.eq(excluded(asset_type_v1)), + is_frozen.eq(excluded(is_frozen)), + amount_v1.eq(excluded(amount_v1)), + last_transaction_timestamp_v1.eq(excluded(last_transaction_timestamp_v1)), + last_transaction_version_v1.eq(excluded(last_transaction_version_v1)), + inserted_at.eq(excluded(inserted_at)), + ) + ), + Some(" WHERE current_unified_fungible_asset_balances_to_be_renamed.last_transaction_version_v1 IS NULL \ + OR current_unified_fungible_asset_balances_to_be_renamed.last_transaction_version_v1 <= excluded.last_transaction_version_v1"), + ) +} + +fn insert_current_unified_fungible_asset_balances_v2_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::current_unified_fungible_asset_balances_to_be_renamed::dsl::*; + ( + diesel::insert_into(schema::current_unified_fungible_asset_balances_to_be_renamed::table) + .values(items_to_insert) + .on_conflict(storage_id) + .do_update() + .set( + ( + owner_address.eq(excluded(owner_address)), + asset_type_v2.eq(excluded(asset_type_v2)), + is_primary.eq(excluded(is_primary)), + is_frozen.eq(excluded(is_frozen)), + amount_v2.eq(excluded(amount_v2)), + last_transaction_timestamp_v2.eq(excluded(last_transaction_timestamp_v2)), + last_transaction_version_v2.eq(excluded(last_transaction_version_v2)), + inserted_at.eq(excluded(inserted_at)), + ) + ), + Some(" WHERE current_unified_fungible_asset_balances_to_be_renamed.last_transaction_version_v2 IS NULL \ + OR current_unified_fungible_asset_balances_to_be_renamed.last_transaction_version_v2 <= excluded.last_transaction_version_v2 "), + ) +} + +fn insert_coin_supply_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::coin_supply::dsl::*; + + ( + diesel::insert_into(schema::coin_supply::table) + .values(items_to_insert) + .on_conflict((transaction_version, coin_type_hash)) + .do_nothing(), + None, + ) +} + #[async_trait] impl ProcessorTrait for FungibleAssetProcessor { fn name(&self) -> &'static str { @@ -244,17 +352,21 @@ impl ProcessorTrait for FungibleAssetProcessor { let processing_start = std::time::Instant::now(); let last_transaction_timestamp = transactions.last().unwrap().timestamp.clone(); - let mut conn = self.get_conn().await; let ( fungible_asset_activities, 
fungible_asset_metadata, fungible_asset_balances, current_fungible_asset_balances, - ) = parse_v2_coin(&transactions, &mut conn).await; + current_unified_fungible_asset_balances, + coin_supply, + ) = parse_v2_coin(&transactions).await; let processing_duration_in_secs = processing_start.elapsed().as_secs_f64(); let db_insertion_start = std::time::Instant::now(); + let (coin_balance, fa_balance): (Vec<_>, Vec<_>) = current_unified_fungible_asset_balances + .into_iter() + .partition(|x| x.is_primary.is_none()); let tx_result = insert_to_db( self.get_pool(), self.name(), @@ -264,18 +376,22 @@ impl ProcessorTrait for FungibleAssetProcessor { &fungible_asset_metadata, &fungible_asset_balances, ¤t_fungible_asset_balances, + (&coin_balance, &fa_balance), + &coin_supply, &self.per_table_chunk_sizes, ) .await; let db_insertion_duration_in_secs = db_insertion_start.elapsed().as_secs_f64(); match tx_result { - Ok(_) => Ok(ProcessingResult { - start_version, - end_version, - processing_duration_in_secs, - db_insertion_duration_in_secs, - last_transaction_timestamp, - }), + Ok(_) => Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + processing_duration_in_secs, + db_insertion_duration_in_secs, + last_transaction_timestamp, + }, + )), Err(err) => { error!( start_version = start_version, @@ -289,7 +405,7 @@ impl ProcessorTrait for FungibleAssetProcessor { } } - fn connection_pool(&self) -> &PgDbPool { + fn connection_pool(&self) -> &ArcDbPool { &self.connection_pool } } @@ -297,15 +413,17 @@ impl ProcessorTrait for FungibleAssetProcessor { /// V2 coin is called fungible assets and this flow includes all data from V1 in coin_processor async fn parse_v2_coin( transactions: &[Transaction], - conn: &mut PgPoolConnection<'_>, ) -> ( Vec, Vec, Vec, Vec, + Vec, + Vec, ) { let mut fungible_asset_activities = vec![]; let mut fungible_asset_balances = vec![]; + let mut all_coin_supply = vec![]; let mut current_fungible_asset_balances: CurrentFungibleAssetMapping = AHashMap::new(); let mut fungible_asset_metadata: FungibleAssetMetadataMapping = AHashMap::new(); @@ -334,8 +452,10 @@ async fn parse_v2_coin( .as_ref() .expect("Transaction timestamp doesn't exist!") .seconds; + #[allow(deprecated)] let txn_timestamp = NaiveDateTime::from_timestamp_opt(txn_timestamp, 0).expect("Txn Timestamp is invalid!"); + let txn_epoch = txn.epoch as i64; let default = vec![]; let (events, user_request, entry_function_id_str) = match txn_data { @@ -367,18 +487,7 @@ async fn parse_v2_coin( standardize_address(&wr.address.to_string()), ObjectAggregatedData { object, - fungible_asset_metadata: None, - fungible_asset_store: None, - token: None, - // The following structs are unused in this processor - aptos_collection: None, - fixed_supply: None, - unlimited_supply: None, - concurrent_supply: None, - property_map: None, - transfer_events: vec![], - fungible_asset_supply: None, - token_identifier: None, + ..ObjectAggregatedData::default() }, ); } @@ -417,12 +526,53 @@ async fn parse_v2_coin( { aggregated_data.fungible_asset_store = Some(fungible_asset_store); } - if let Some(token) = - TokenV2::from_write_resource(write_resource, txn_version).unwrap() + if let Some(fungible_asset_supply) = + FungibleAssetSupply::from_write_resource(write_resource, txn_version) + .unwrap() + { + aggregated_data.fungible_asset_supply = Some(fungible_asset_supply); + } + if let Some(concurrent_fungible_asset_supply) = + ConcurrentFungibleAssetSupply::from_write_resource( + write_resource, + txn_version, 
+ ) + .unwrap() + { + aggregated_data.concurrent_fungible_asset_supply = + Some(concurrent_fungible_asset_supply); + } + if let Some(concurrent_fungible_asset_balance) = + ConcurrentFungibleAssetBalance::from_write_resource( + write_resource, + txn_version, + ) + .unwrap() + { + aggregated_data.concurrent_fungible_asset_balance = + Some(concurrent_fungible_asset_balance); + } + if let Some(untransferable) = + Untransferable::from_write_resource(write_resource, txn_version).unwrap() { - aggregated_data.token = Some(token); + aggregated_data.untransferable = Some(untransferable); } } + } else if let Change::DeleteResource(delete_resource) = wsc.change.as_ref().unwrap() { + if let Some((balance, current_balance, event_to_coin)) = + FungibleAssetBalance::get_v1_from_delete_resource( + delete_resource, + index as i64, + txn_version, + txn_timestamp, + ) + .unwrap() + { + fungible_asset_balances.push(balance); + current_fungible_asset_balances + .insert(current_balance.storage_id.clone(), current_balance.clone()); + event_to_v1_coin_type.extend(event_to_coin); + } } } @@ -473,7 +623,6 @@ async fn parse_v2_coin( index as i64, &entry_function_id_str, &fungible_asset_object_helper, - conn, ) .await .unwrap_or_else(|e| { @@ -490,61 +639,79 @@ async fn parse_v2_coin( // Loop to handle all the other changes for (index, wsc) in transaction_info.changes.iter().enumerate() { - if let Change::WriteResource(write_resource) = wsc.change.as_ref().unwrap() { - if let Some(fa_metadata) = FungibleAssetMetadataModel::get_v1_from_write_resource( - write_resource, - txn_version, - txn_timestamp, - ) - .unwrap_or_else(|e| { - tracing::error!( - transaction_version = txn_version, - index = index, - error = ?e, - "[Parser] error parsing fungible metadata v1"); - panic!("[Parser] error parsing fungible metadata v1"); - }) { - fungible_asset_metadata.insert(fa_metadata.asset_type.clone(), fa_metadata); - } - if let Some(fa_metadata) = FungibleAssetMetadataModel::get_v2_from_write_resource( - write_resource, - txn_version, - txn_timestamp, - &fungible_asset_object_helper, - ) - .unwrap_or_else(|e| { - tracing::error!( - transaction_version = txn_version, - index = index, - error = ?e, - "[Parser] error parsing fungible metadata v2"); - panic!("[Parser] error parsing fungible metadata v2"); - }) { - fungible_asset_metadata.insert(fa_metadata.asset_type.clone(), fa_metadata); - } - if let Some((balance, curr_balance)) = - FungibleAssetBalance::get_v2_from_write_resource( - write_resource, - index as i64, + match wsc.change.as_ref().unwrap() { + Change::WriteResource(write_resource) => { + if let Some(fa_metadata) = + FungibleAssetMetadataModel::get_v1_from_write_resource( + write_resource, + txn_version, + txn_timestamp, + ) + .unwrap_or_else(|e| { + tracing::error!( + transaction_version = txn_version, + index = index, + error = ?e, + "[Parser] error parsing fungible metadata v1"); + panic!("[Parser] error parsing fungible metadata v1"); + }) + { + fungible_asset_metadata.insert(fa_metadata.asset_type.clone(), fa_metadata); + } + if let Some(fa_metadata) = + FungibleAssetMetadataModel::get_v2_from_write_resource( + write_resource, + txn_version, + txn_timestamp, + &fungible_asset_object_helper, + ) + .unwrap_or_else(|e| { + tracing::error!( + transaction_version = txn_version, + index = index, + error = ?e, + "[Parser] error parsing fungible metadata v2"); + panic!("[Parser] error parsing fungible metadata v2"); + }) + { + fungible_asset_metadata.insert(fa_metadata.asset_type.clone(), fa_metadata); + } + if let 
Some((balance, curr_balance)) = + FungibleAssetBalance::get_v2_from_write_resource( + write_resource, + index as i64, + txn_version, + txn_timestamp, + &fungible_asset_object_helper, + ) + .await + .unwrap_or_else(|e| { + tracing::error!( + transaction_version = txn_version, + index = index, + error = ?e, + "[Parser] error parsing fungible balance v2"); + panic!("[Parser] error parsing fungible balance v2"); + }) + { + fungible_asset_balances.push(balance); + current_fungible_asset_balances + .insert(curr_balance.storage_id.clone(), curr_balance); + } + }, + Change::WriteTableItem(table_item) => { + if let Some(coin_supply) = CoinSupply::from_write_table_item( + table_item, txn_version, txn_timestamp, - &fungible_asset_object_helper, - conn, + txn_epoch, ) - .await - .unwrap_or_else(|e| { - tracing::error!( - transaction_version = txn_version, - index = index, - error = ?e, - "[Parser] error parsing fungible balance v2"); - panic!("[Parser] error parsing fungible balance v2"); - }) - { - fungible_asset_balances.push(balance); - current_fungible_asset_balances - .insert(curr_balance.storage_id.clone(), curr_balance); - } + .unwrap() + { + all_coin_supply.push(coin_supply); + } + }, + _ => {}, } } } @@ -557,14 +724,22 @@ async fn parse_v2_coin( let mut current_fungible_asset_balances = current_fungible_asset_balances .into_values() .collect::>(); + // Sort by PK fungible_asset_metadata.sort_by(|a, b| a.asset_type.cmp(&b.asset_type)); current_fungible_asset_balances.sort_by(|a, b| a.storage_id.cmp(&b.storage_id)); + // Process the unified balance + let current_unified_fungible_asset_balances = current_fungible_asset_balances + .iter() + .map(CurrentUnifiedFungibleAssetBalance::from) + .collect::>(); ( fungible_asset_activities, fungible_asset_metadata, fungible_asset_balances, current_fungible_asset_balances, + current_unified_fungible_asset_balances, + all_coin_supply, ) } diff --git a/rust/processor/src/processors/mod.rs b/rust/processor/src/processors/mod.rs index cb2349eb2..908a1a43d 100644 --- a/rust/processor/src/processors/mod.rs +++ b/rust/processor/src/processors/mod.rs @@ -4,18 +4,17 @@ // Note: For enum_dispatch to work nicely, it is easiest to have the trait and the enum // in the same file (ProcessorTrait and Processor). -// Note: For enum_dispatch to work nicely, it is easiest to have the trait and the enum -// in the same file (ProcessorTrait and Processor). 
- pub mod account_transactions_processor; pub mod ans_processor; pub mod coin_processor; pub mod default_processor; +pub mod event_stream_processor; pub mod events_processor; pub mod fungible_asset_processor; pub mod monitoring_processor; pub mod nft_metadata_processor; pub mod objects_processor; +pub mod parquet_default_processor; pub mod stake_processor; pub mod token_processor; pub mod token_v2_processor; @@ -27,23 +26,27 @@ use self::{ ans_processor::{AnsProcessor, AnsProcessorConfig}, coin_processor::CoinProcessor, default_processor::DefaultProcessor, + event_stream_processor::EventStreamProcessor, events_processor::EventsProcessor, fungible_asset_processor::FungibleAssetProcessor, monitoring_processor::MonitoringProcessor, nft_metadata_processor::{NftMetadataProcessor, NftMetadataProcessorConfig}, - objects_processor::ObjectsProcessor, - stake_processor::StakeProcessor, + objects_processor::{ObjectsProcessor, ObjectsProcessorConfig}, + parquet_default_processor::DefaultParquetProcessorConfig, + stake_processor::{StakeProcessor, StakeProcessorConfig}, token_processor::{TokenProcessor, TokenProcessorConfig}, - token_v2_processor::TokenV2Processor, + token_v2_processor::{TokenV2Processor, TokenV2ProcessorConfig}, transaction_metadata_processor::TransactionMetadataProcessor, user_transaction_processor::UserTransactionProcessor, }; use crate::{ - models::processor_status::ProcessorStatus, + db::common::models::processor_status::ProcessorStatus, + gap_detectors::ProcessingResult, + processors::parquet_default_processor::DefaultParquetProcessor, schema::processor_status, utils::{ counters::{GOT_CONNECTION_COUNT, UNABLE_TO_GET_CONNECTION_COUNT}, - database::{execute_with_better_error, PgDbPool, PgPoolConnection}, + database::{execute_with_better_error, ArcDbPool, DbPoolConnection}, util::parse_timestamp, }, }; @@ -55,7 +58,7 @@ use serde::{Deserialize, Serialize}; use std::fmt::Debug; #[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] -pub struct ProcessingResult { +pub struct DefaultProcessingResult { pub start_version: u64, pub end_version: u64, pub last_transaction_timestamp: Option, @@ -80,19 +83,19 @@ pub trait ProcessorTrait: Send + Sync + Debug { /// Gets a reference to the connection pool /// This is used by the `get_conn()` helper below - fn connection_pool(&self) -> &PgDbPool; + fn connection_pool(&self) -> &ArcDbPool; //* Below are helper methods that don't need to be implemented *// /// Gets an instance of the connection pool - fn get_pool(&self) -> PgDbPool { + fn get_pool(&self) -> ArcDbPool { let pool = self.connection_pool(); pool.clone() } /// Gets the connection. /// If it was unable to do so (default timeout: 30s), it will keep retrying until it can. 
- async fn get_conn(&self) -> PgPoolConnection { + async fn get_conn(&self) -> DbPoolConnection { let pool = self.connection_pool(); loop { match pool.get().await { @@ -183,16 +186,18 @@ pub enum ProcessorConfig { AnsProcessor(AnsProcessorConfig), CoinProcessor, DefaultProcessor, + EventStreamProcessor, EventsProcessor, FungibleAssetProcessor, MonitoringProcessor, NftMetadataProcessor(NftMetadataProcessorConfig), - ObjectsProcessor, - StakeProcessor, + ObjectsProcessor(ObjectsProcessorConfig), + StakeProcessor(StakeProcessorConfig), TokenProcessor(TokenProcessorConfig), - TokenV2Processor, + TokenV2Processor(TokenV2ProcessorConfig), TransactionMetadataProcessor, UserTransactionProcessor, + DefaultParquetProcessor(DefaultParquetProcessorConfig), } impl ProcessorConfig { @@ -201,6 +206,10 @@ impl ProcessorConfig { pub fn name(&self) -> &'static str { self.into() } + + pub fn is_parquet_processor(&self) -> bool { + matches!(self, ProcessorConfig::DefaultParquetProcessor(_)) + } } /// This enum contains all the processors defined in this crate. We use enum_dispatch @@ -225,6 +234,7 @@ pub enum Processor { AnsProcessor, CoinProcessor, DefaultProcessor, + EventStreamProcessor, EventsProcessor, FungibleAssetProcessor, MonitoringProcessor, @@ -235,6 +245,7 @@ pub enum Processor { TokenV2Processor, TransactionMetadataProcessor, UserTransactionProcessor, + DefaultParquetProcessor, } #[cfg(test)] diff --git a/rust/processor/src/processors/monitoring_processor.rs b/rust/processor/src/processors/monitoring_processor.rs index b82453886..c7e750f82 100644 --- a/rust/processor/src/processors/monitoring_processor.rs +++ b/rust/processor/src/processors/monitoring_processor.rs @@ -1,18 +1,18 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use super::{ProcessingResult, ProcessorName, ProcessorTrait}; -use crate::utils::database::PgDbPool; +use super::{DefaultProcessingResult, ProcessorName, ProcessorTrait}; +use crate::{gap_detectors::ProcessingResult, utils::database::ArcDbPool}; use aptos_protos::transaction::v1::Transaction; use async_trait::async_trait; use std::fmt::Debug; pub struct MonitoringProcessor { - connection_pool: PgDbPool, + connection_pool: ArcDbPool, } impl MonitoringProcessor { - pub fn new(connection_pool: PgDbPool) -> Self { + pub fn new(connection_pool: ArcDbPool) -> Self { Self { connection_pool } } } @@ -41,16 +41,18 @@ impl ProcessorTrait for MonitoringProcessor { end_version: u64, _: Option, ) -> anyhow::Result { - Ok(ProcessingResult { - start_version, - end_version, - processing_duration_in_secs: 0.0, - db_insertion_duration_in_secs: 0.0, - last_transaction_timestamp: transactions.last().unwrap().timestamp.clone(), - }) + Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + processing_duration_in_secs: 0.0, + db_insertion_duration_in_secs: 0.0, + last_transaction_timestamp: transactions.last().unwrap().timestamp.clone(), + }, + )) } - fn connection_pool(&self) -> &PgDbPool { + fn connection_pool(&self) -> &ArcDbPool { &self.connection_pool } } diff --git a/rust/processor/src/processors/nft_metadata_processor.rs b/rust/processor/src/processors/nft_metadata_processor.rs index a7b8cd328..4fcb9a922 100644 --- a/rust/processor/src/processors/nft_metadata_processor.rs +++ b/rust/processor/src/processors/nft_metadata_processor.rs @@ -1,9 +1,9 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use super::{ProcessingResult, ProcessorName, ProcessorTrait}; +use 
super::{DefaultProcessingResult, ProcessorName, ProcessorTrait}; use crate::{ - models::{ + db::common::models::{ object_models::v2_object_utils::{ ObjectAggregatedData, ObjectAggregatedDataMapping, ObjectWithMetadata, }, @@ -13,10 +13,12 @@ use crate::{ v2_token_datas::{CurrentTokenDataV2, CurrentTokenDataV2PK, TokenDataV2}, }, }, + gap_detectors::ProcessingResult, utils::{ - database::{PgDbPool, PgPoolConnection}, + database::{ArcDbPool, DbPoolConnection}, util::{parse_timestamp, remove_null_bytes, standardize_address}, }, + IndexerGrpcProcessorConfig, }; use ahash::AHashMap; use aptos_protos::transaction::v1::{write_set_change::Change, Transaction}; @@ -38,16 +40,20 @@ pub const CHUNK_SIZE: usize = 1000; pub struct NftMetadataProcessorConfig { pub pubsub_topic_name: String, pub google_application_credentials: Option, + #[serde(default = "IndexerGrpcProcessorConfig::default_query_retries")] + pub query_retries: u32, + #[serde(default = "IndexerGrpcProcessorConfig::default_query_retry_delay_ms")] + pub query_retry_delay_ms: u64, } pub struct NftMetadataProcessor { - connection_pool: PgDbPool, + connection_pool: ArcDbPool, chain_id: u8, config: NftMetadataProcessorConfig, } impl NftMetadataProcessor { - pub fn new(connection_pool: PgDbPool, config: NftMetadataProcessorConfig) -> Self { + pub fn new(connection_pool: ArcDbPool, config: NftMetadataProcessorConfig) -> Self { tracing::info!("init NftMetadataProcessor"); // Crate reads from authentication from file specified in @@ -96,6 +102,9 @@ impl ProcessorTrait for NftMetadataProcessor { let last_transaction_timestamp = transactions.last().unwrap().timestamp.clone(); let mut conn = self.get_conn().await; + let query_retries = self.config.query_retries; + let query_retry_delay_ms = self.config.query_retry_delay_ms; + let db_chain_id = db_chain_id.unwrap_or_else(|| { error!("[NFT Metadata Crawler] db_chain_id must not be null"); panic!(); @@ -114,8 +123,14 @@ impl ProcessorTrait for NftMetadataProcessor { let ordering_key = get_current_timestamp(); // Publish CurrentTokenDataV2 and CurrentCollectionV2 from transactions - let (token_datas, collections) = - parse_v2_token(&transactions, &table_handle_to_owner, &mut conn).await; + let (token_datas, collections) = parse_v2_token( + &transactions, + &table_handle_to_owner, + &mut conn, + query_retries, + query_retry_delay_ms, + ) + .await; let mut pubsub_messages: Vec = Vec::with_capacity(token_datas.len() + collections.len()); @@ -163,16 +178,18 @@ impl ProcessorTrait for NftMetadataProcessor { let db_insertion_duration_in_secs = db_insertion_start.elapsed().as_secs_f64(); - Ok(ProcessingResult { - start_version, - end_version, - processing_duration_in_secs, - db_insertion_duration_in_secs, - last_transaction_timestamp, - }) + Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + processing_duration_in_secs, + db_insertion_duration_in_secs, + last_transaction_timestamp, + }, + )) } - fn connection_pool(&self) -> &PgDbPool { + fn connection_pool(&self) -> &ArcDbPool { &self.connection_pool } } @@ -203,7 +220,9 @@ fn clean_collection_pubsub_message(cc: CurrentCollectionV2, db_chain_id: u64) -> async fn parse_v2_token( transactions: &[Transaction], table_handle_to_owner: &TableHandleToOwner, - conn: &mut PgPoolConnection<'_>, + conn: &mut DbPoolConnection<'_>, + query_retries: u32, + query_retry_delay_ms: u64, ) -> (Vec, Vec) { let mut current_token_datas_v2: AHashMap = AHashMap::new(); @@ -231,9 +250,12 @@ async fn parse_v2_token( 
unlimited_supply: None, property_map: None, transfer_events: vec![], + untransferable: None, token: None, fungible_asset_metadata: None, fungible_asset_supply: None, + concurrent_fungible_asset_supply: None, + concurrent_fungible_asset_balance: None, fungible_asset_store: None, token_identifier: None, }, @@ -266,6 +288,8 @@ async fn parse_v2_token( txn_timestamp, table_handle_to_owner, conn, + query_retries, + query_retry_delay_ms, ) .await .unwrap() diff --git a/rust/processor/src/processors/objects_processor.rs b/rust/processor/src/processors/objects_processor.rs index 269fd9bdd..9228692ab 100644 --- a/rust/processor/src/processors/objects_processor.rs +++ b/rust/processor/src/processors/objects_processor.rs @@ -1,17 +1,19 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use super::{ProcessingResult, ProcessorName, ProcessorTrait}; +use super::{DefaultProcessingResult, ProcessorName, ProcessorTrait}; use crate::{ - models::object_models::{ + db::common::models::object_models::{ v2_object_utils::{ObjectAggregatedData, ObjectAggregatedDataMapping, ObjectWithMetadata}, v2_objects::{CurrentObject, Object}, }, + gap_detectors::ProcessingResult, schema, utils::{ - database::{execute_in_chunks, get_config_table_chunk_size, PgDbPool}, + database::{execute_in_chunks, get_config_table_chunk_size, ArcDbPool}, util::standardize_address, }, + IndexerGrpcProcessorConfig, }; use ahash::AHashMap; use anyhow::bail; @@ -22,18 +24,33 @@ use diesel::{ query_builder::QueryFragment, ExpressionMethods, }; +use serde::{Deserialize, Serialize}; use std::fmt::Debug; use tracing::error; +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct ObjectsProcessorConfig { + #[serde(default = "IndexerGrpcProcessorConfig::default_query_retries")] + pub query_retries: u32, + #[serde(default = "IndexerGrpcProcessorConfig::default_query_retry_delay_ms")] + pub query_retry_delay_ms: u64, +} pub struct ObjectsProcessor { - connection_pool: PgDbPool, + connection_pool: ArcDbPool, + config: ObjectsProcessorConfig, per_table_chunk_sizes: AHashMap, } impl ObjectsProcessor { - pub fn new(connection_pool: PgDbPool, per_table_chunk_sizes: AHashMap) -> Self { + pub fn new( + connection_pool: ArcDbPool, + config: ObjectsProcessorConfig, + per_table_chunk_sizes: AHashMap, + ) -> Self { Self { connection_pool, + config, per_table_chunk_sizes, } } @@ -51,7 +68,7 @@ impl Debug for ObjectsProcessor { } async fn insert_to_db( - conn: PgDbPool, + conn: ArcDbPool, name: &'static str, start_version: u64, end_version: u64, @@ -122,6 +139,7 @@ fn insert_current_objects_query( last_transaction_version.eq(excluded(last_transaction_version)), is_deleted.eq(excluded(is_deleted)), inserted_at.eq(excluded(inserted_at)), + untransferrable.eq(excluded(untransferrable)), )), Some( " WHERE current_objects.last_transaction_version <= excluded.last_transaction_version ", @@ -146,6 +164,8 @@ impl ProcessorTrait for ObjectsProcessor { let last_transaction_timestamp = transactions.last().unwrap().timestamp.clone(); let mut conn = self.get_conn().await; + let query_retries = self.config.query_retries; + let query_retry_delay_ms = self.config.query_retry_delay_ms; // Moving object handling here because we need a single object // map through transactions for lookups @@ -186,7 +206,10 @@ impl ProcessorTrait for ObjectsProcessor { concurrent_supply: None, property_map: None, transfer_events: vec![], + untransferable: None, fungible_asset_supply: None, + concurrent_fungible_asset_supply: None, + 
concurrent_fungible_asset_balance: None, token_identifier: None, }); } @@ -220,6 +243,8 @@ impl ProcessorTrait for ObjectsProcessor { index, &all_current_objects, &mut conn, + query_retries, + query_retry_delay_ms, ) .await .unwrap() @@ -255,13 +280,15 @@ impl ProcessorTrait for ObjectsProcessor { let db_insertion_duration_in_secs = db_insertion_start.elapsed().as_secs_f64(); match tx_result { - Ok(_) => Ok(ProcessingResult { - start_version, - end_version, - processing_duration_in_secs, - db_insertion_duration_in_secs, - last_transaction_timestamp, - }), + Ok(_) => Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + processing_duration_in_secs, + db_insertion_duration_in_secs, + last_transaction_timestamp, + }, + )), Err(e) => { error!( start_version = start_version, @@ -275,7 +302,7 @@ impl ProcessorTrait for ObjectsProcessor { } } - fn connection_pool(&self) -> &PgDbPool { + fn connection_pool(&self) -> &ArcDbPool { &self.connection_pool } } diff --git a/rust/processor/src/processors/parquet_default_processor.rs b/rust/processor/src/processors/parquet_default_processor.rs new file mode 100644 index 000000000..0b8a30890 --- /dev/null +++ b/rust/processor/src/processors/parquet_default_processor.rs @@ -0,0 +1,271 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use super::{ProcessorName, ProcessorTrait}; +use crate::{ + bq_analytics::{ + generic_parquet_processor::ParquetDataGeneric, + parquet_handler::create_parquet_handler_loop, ParquetProcessingResult, + }, + db::common::models::default_models::{ + parquet_move_resources::MoveResource, + parquet_move_tables::{CurrentTableItem, TableItem, TableMetadata}, + parquet_transactions::{Transaction as ParquetTransaction, TransactionModel}, + parquet_write_set_changes::{WriteSetChangeDetail, WriteSetChangeModel}, + }, + gap_detectors::ProcessingResult, + utils::database::ArcDbPool, +}; +use ahash::AHashMap; +use anyhow::anyhow; +use aptos_protos::transaction::v1::Transaction; +use async_trait::async_trait; +use kanal::AsyncSender; +use serde::{Deserialize, Serialize}; +use std::fmt::{Debug, Formatter, Result}; + +const GOOGLE_APPLICATION_CREDENTIALS: &str = "GOOGLE_APPLICATION_CREDENTIALS"; + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct DefaultParquetProcessorConfig { + pub google_application_credentials: Option, + pub bucket_name: String, + pub parquet_handler_response_channel_size: usize, + pub max_buffer_size: usize, +} + +pub struct DefaultParquetProcessor { + connection_pool: ArcDbPool, + transaction_sender: AsyncSender>, + move_resource_sender: AsyncSender>, + wsc_sender: AsyncSender>, + ti_sender: AsyncSender>, +} + +// TODO: Since each table item has different size allocated, the pace of being backfilled to PQ varies a lot. +// Maybe we can have also have a way to configure different starting version for each table later. 
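// Editor's note — hypothetical sketch, not part of this change: one way to act on the
// TODO above would be an optional per-table starting-version override in
// DefaultParquetProcessorConfig, for example:
//
//     /// Hypothetical field: per-table starting versions, keyed by table name
//     /// ("transactions", "move_resources", "write_set_changes", "table_items").
//     pub starting_version_overrides: Option<AHashMap<String, u64>>,
//
// Each create_parquet_handler_loop call below could then consult this map when the
// handler for its table is created.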
+impl DefaultParquetProcessor { + pub fn new( + connection_pool: ArcDbPool, + config: DefaultParquetProcessorConfig, + new_gap_detector_sender: AsyncSender, + ) -> Self { + if let Some(credentials) = config.google_application_credentials.clone() { + std::env::set_var(GOOGLE_APPLICATION_CREDENTIALS, credentials); + } + + let transaction_sender = create_parquet_handler_loop::( + new_gap_detector_sender.clone(), + ProcessorName::DefaultParquetProcessor.into(), + config.bucket_name.clone(), + config.parquet_handler_response_channel_size, + config.max_buffer_size, + ); + + let move_resource_sender = create_parquet_handler_loop::( + new_gap_detector_sender.clone(), + ProcessorName::DefaultParquetProcessor.into(), + config.bucket_name.clone(), + config.parquet_handler_response_channel_size, + config.max_buffer_size, + ); + + let wsc_sender = create_parquet_handler_loop::( + new_gap_detector_sender.clone(), + ProcessorName::DefaultParquetProcessor.into(), + config.bucket_name.clone(), + config.parquet_handler_response_channel_size, + config.max_buffer_size, + ); + + let ti_sender = create_parquet_handler_loop::( + new_gap_detector_sender.clone(), + ProcessorName::DefaultParquetProcessor.into(), + config.bucket_name.clone(), + config.parquet_handler_response_channel_size, + config.max_buffer_size, + ); + + Self { + connection_pool, + transaction_sender, + move_resource_sender, + wsc_sender, + ti_sender, + } + } +} + +impl Debug for DefaultParquetProcessor { + fn fmt(&self, f: &mut Formatter<'_>) -> Result { + write!( + f, + "ParquetProcessor {{ capacity of t channel: {:?}, capacity of mr channel: {:?}, capacity of wsc channel: {:?}, capacity of ti channel: {:?} }}", + &self.transaction_sender.capacity(), + &self.move_resource_sender.capacity(), + &self.wsc_sender.capacity(), + &self.ti_sender.capacity(), + ) + } +} + +#[async_trait] +impl ProcessorTrait for DefaultParquetProcessor { + fn name(&self) -> &'static str { + ProcessorName::DefaultParquetProcessor.into() + } + + async fn process_transactions( + &self, + transactions: Vec, + start_version: u64, + end_version: u64, + _: Option, + ) -> anyhow::Result { + let last_transaction_timestamp = transactions.last().unwrap().timestamp.clone(); + + let ((mr, wsc, t, ti), transaction_version_to_struct_count) = + tokio::task::spawn_blocking(move || process_transactions(transactions)) + .await + .expect("Failed to spawn_blocking for TransactionModel::from_transactions"); + + let mr_parquet_data = ParquetDataGeneric { + data: mr, + last_transaction_timestamp: last_transaction_timestamp.clone(), + transaction_version_to_struct_count: transaction_version_to_struct_count.clone(), + first_txn_version: start_version, + last_txn_version: end_version, + }; + + self.move_resource_sender + .send(mr_parquet_data) + .await + .map_err(|e| anyhow!("Failed to send to parquet manager: {}", e))?; + + let wsc_parquet_data = ParquetDataGeneric { + data: wsc, + last_transaction_timestamp: last_transaction_timestamp.clone(), + transaction_version_to_struct_count: transaction_version_to_struct_count.clone(), + first_txn_version: start_version, + last_txn_version: end_version, + }; + self.wsc_sender + .send(wsc_parquet_data) + .await + .map_err(|e| anyhow!("Failed to send to parquet manager: {}", e))?; + + let t_parquet_data = ParquetDataGeneric { + data: t, + last_transaction_timestamp: last_transaction_timestamp.clone(), + transaction_version_to_struct_count: transaction_version_to_struct_count.clone(), + first_txn_version: start_version, + last_txn_version: end_version, + }; 
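Worth calling out in `process_transactions` above: the CPU-heavy parsing runs inside `tokio::task::spawn_blocking`, so only the resulting batches travel over the async channels and the executor threads stay free for I/O. A self-contained sketch of that offloading pattern, where `parse_batch` is a made-up stand-in for the processor's real parsing step:

use tokio::task;

// Stand-in for an expensive, synchronous parsing step.
fn parse_batch(batch: Vec<u64>) -> usize {
    batch.iter().map(|v| (*v as usize) * 2).sum()
}

#[tokio::main]
async fn main() {
    let batch: Vec<u64> = (0..1_000).collect();
    // spawn_blocking moves the closure onto the blocking thread pool;
    // awaiting the JoinHandle yields the closure's return value.
    let parsed = task::spawn_blocking(move || parse_batch(batch))
        .await
        .expect("blocking task should not panic");
    println!("parsed weight: {parsed}");
}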
+ self.transaction_sender + .send(t_parquet_data) + .await + .map_err(|e| anyhow!("Failed to send to parquet manager: {}", e))?; + + let ti_parquet_data = ParquetDataGeneric { + data: ti, + last_transaction_timestamp: last_transaction_timestamp.clone(), + transaction_version_to_struct_count: transaction_version_to_struct_count.clone(), + first_txn_version: start_version, + last_txn_version: end_version, + }; + + self.ti_sender + .send(ti_parquet_data) + .await + .map_err(|e| anyhow!("Failed to send to parquet manager: {}", e))?; + + Ok(ProcessingResult::ParquetProcessingResult( + ParquetProcessingResult { + start_version: start_version as i64, + end_version: end_version as i64, + last_transaction_timestamp: last_transaction_timestamp.clone(), + txn_version_to_struct_count: AHashMap::new(), + }, + )) + } + + fn connection_pool(&self) -> &ArcDbPool { + &self.connection_pool + } +} + +pub fn process_transactions( + transactions: Vec, +) -> ( + ( + Vec, + Vec, + Vec, + Vec, + ), + AHashMap, +) { + let mut transaction_version_to_struct_count: AHashMap = AHashMap::new(); + let (txns, _block_metadata_txns, write_set_changes, wsc_details) = + TransactionModel::from_transactions( + &transactions, + &mut transaction_version_to_struct_count, + ); + + let mut move_modules = vec![]; + let mut move_resources = vec![]; + let mut table_items = vec![]; + let mut current_table_items = AHashMap::new(); + let mut table_metadata: AHashMap = AHashMap::new(); + + for detail in wsc_details { + match detail { + WriteSetChangeDetail::Module(module) => { + move_modules.push(module.clone()); + // transaction_version_to_struct_count.entry(module.transaction_version).and_modify(|e| *e += 1); // TODO: uncomment in Tranche2 + }, + WriteSetChangeDetail::Resource(resource) => { + transaction_version_to_struct_count + .entry(resource.txn_version) + .and_modify(|e| *e += 1); + move_resources.push(resource); + }, + WriteSetChangeDetail::Table(item, current_item, metadata) => { + transaction_version_to_struct_count + .entry(item.txn_version) + .and_modify(|e| *e += 1); + table_items.push(item); + + current_table_items.insert( + ( + current_item.table_handle.clone(), + current_item.key_hash.clone(), + ), + current_item, + ); + // transaction_version_to_struct_count.entry(current_item.last_transaction_version).and_modify(|e| *e += 1); // TODO: uncomment in Tranche2 + + if let Some(meta) = metadata { + table_metadata.insert(meta.handle.clone(), meta); + // transaction_version_to_struct_count.entry(current_item.last_transaction_version).and_modify(|e| *e += 1); // TODO: uncomment in Tranche2 + } + }, + } + } + + // Getting list of values and sorting by pk in order to avoid postgres deadlock since we're doing multi threaded db writes + let mut current_table_items = current_table_items + .into_values() + .collect::>(); + let mut table_metadata = table_metadata.into_values().collect::>(); + // Sort by PK + current_table_items + .sort_by(|a, b| (&a.table_handle, &a.key_hash).cmp(&(&b.table_handle, &b.key_hash))); + table_metadata.sort_by(|a, b| a.handle.cmp(&b.handle)); + + ( + (move_resources, write_set_changes, txns, table_items), + transaction_version_to_struct_count, + ) +} diff --git a/rust/processor/src/processors/stake_processor.rs b/rust/processor/src/processors/stake_processor.rs index 774d3b05c..d623704d1 100644 --- a/rust/processor/src/processors/stake_processor.rs +++ b/rust/processor/src/processors/stake_processor.rs @@ -1,9 +1,9 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use 
super::{ProcessingResult, ProcessorName, ProcessorTrait}; +use super::{DefaultProcessingResult, ProcessorName, ProcessorTrait}; use crate::{ - models::stake_models::{ + db::common::models::stake_models::{ current_delegated_voter::CurrentDelegatedVoter, delegator_activities::DelegatedStakingActivity, delegator_balances::{ @@ -16,11 +16,13 @@ use crate::{ stake_utils::DelegationVoteGovernanceRecordsResource, staking_pool_voter::{CurrentStakingPoolVoter, StakingPoolVoterMap}, }, + gap_detectors::ProcessingResult, schema, utils::{ - database::{execute_in_chunks, get_config_table_chunk_size, PgDbPool}, + database::{execute_in_chunks, get_config_table_chunk_size, ArcDbPool}, util::{parse_timestamp, standardize_address}, }, + IndexerGrpcProcessorConfig, }; use ahash::AHashMap; use anyhow::bail; @@ -31,18 +33,34 @@ use diesel::{ query_builder::QueryFragment, ExpressionMethods, }; +use serde::{Deserialize, Serialize}; use std::fmt::Debug; use tracing::error; +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct StakeProcessorConfig { + #[serde(default = "IndexerGrpcProcessorConfig::default_query_retries")] + pub query_retries: u32, + #[serde(default = "IndexerGrpcProcessorConfig::default_query_retry_delay_ms")] + pub query_retry_delay_ms: u64, +} + pub struct StakeProcessor { - connection_pool: PgDbPool, + connection_pool: ArcDbPool, + config: StakeProcessorConfig, per_table_chunk_sizes: AHashMap, } impl StakeProcessor { - pub fn new(connection_pool: PgDbPool, per_table_chunk_sizes: AHashMap) -> Self { + pub fn new( + connection_pool: ArcDbPool, + config: StakeProcessorConfig, + per_table_chunk_sizes: AHashMap, + ) -> Self { Self { connection_pool, + config, per_table_chunk_sizes, } } @@ -60,7 +78,7 @@ impl Debug for StakeProcessor { } async fn insert_to_db( - conn: PgDbPool, + conn: ArcDbPool, name: &'static str, start_version: u64, end_version: u64, @@ -381,6 +399,8 @@ impl ProcessorTrait for StakeProcessor { let last_transaction_timestamp = transactions.last().unwrap().timestamp.clone(); let mut conn = self.get_conn().await; + let query_retries = self.config.query_retries; + let query_retry_delay_ms = self.config.query_retry_delay_ms; let mut all_current_stake_pool_voters: StakingPoolVoterMap = AHashMap::new(); let mut all_proposal_votes = vec![]; @@ -455,6 +475,8 @@ impl ProcessorTrait for StakeProcessor { txn, &active_pool_to_staking_pool, &mut conn, + query_retries, + query_retry_delay_ms, ) .await .unwrap(); @@ -470,6 +492,8 @@ impl ProcessorTrait for StakeProcessor { txn_timestamp, &all_vote_delegation_handle_to_pool_address, &mut conn, + query_retries, + query_retry_delay_ms, ) .await .unwrap(); @@ -489,6 +513,8 @@ impl ProcessorTrait for StakeProcessor { &active_pool_to_staking_pool, &all_current_delegated_voter, &mut conn, + query_retries, + query_retry_delay_ms, ) .await .unwrap() @@ -553,13 +579,15 @@ impl ProcessorTrait for StakeProcessor { .await; let db_insertion_duration_in_secs = db_insertion_start.elapsed().as_secs_f64(); match tx_result { - Ok(_) => Ok(ProcessingResult { - start_version, - end_version, - processing_duration_in_secs, - db_insertion_duration_in_secs, - last_transaction_timestamp, - }), + Ok(_) => Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + processing_duration_in_secs, + db_insertion_duration_in_secs, + last_transaction_timestamp, + }, + )), Err(e) => { error!( start_version = start_version, @@ -573,7 +601,7 @@ impl ProcessorTrait for StakeProcessor { } } - fn 
connection_pool(&self) -> &PgDbPool { + fn connection_pool(&self) -> &ArcDbPool { &self.connection_pool } } diff --git a/rust/processor/src/processors/token_processor.rs b/rust/processor/src/processors/token_processor.rs index c383d5ff5..cd5411b28 100644 --- a/rust/processor/src/processors/token_processor.rs +++ b/rust/processor/src/processors/token_processor.rs @@ -1,9 +1,9 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use super::{ProcessingResult, ProcessorName, ProcessorTrait}; +use super::{DefaultProcessingResult, ProcessorName, ProcessorTrait}; use crate::{ - models::token_models::{ + db::common::models::token_models::{ collection_datas::{CollectionData, CurrentCollectionData}, nft_points::NftPoints, token_activities::TokenActivity, @@ -15,8 +15,10 @@ use crate::{ TokenDataIdHash, }, }, + gap_detectors::ProcessingResult, schema, - utils::database::{execute_in_chunks, get_config_table_chunk_size, PgDbPool}, + utils::database::{execute_in_chunks, get_config_table_chunk_size, ArcDbPool}, + IndexerGrpcProcessorConfig, }; use ahash::AHashMap; use anyhow::bail; @@ -35,19 +37,23 @@ use tracing::error; #[serde(deny_unknown_fields)] pub struct TokenProcessorConfig { pub nft_points_contract: Option, + #[serde(default = "IndexerGrpcProcessorConfig::default_query_retries")] + pub query_retries: u32, + #[serde(default = "IndexerGrpcProcessorConfig::default_query_retry_delay_ms")] + pub query_retry_delay_ms: u64, } pub struct TokenProcessor { - connection_pool: PgDbPool, + connection_pool: ArcDbPool, config: TokenProcessorConfig, per_table_chunk_sizes: AHashMap, } impl TokenProcessor { pub fn new( - connection_pool: PgDbPool, - per_table_chunk_sizes: AHashMap, + connection_pool: ArcDbPool, config: TokenProcessorConfig, + per_table_chunk_sizes: AHashMap, ) -> Self { Self { connection_pool, @@ -69,7 +75,7 @@ impl Debug for TokenProcessor { } async fn insert_to_db( - conn: PgDbPool, + conn: ArcDbPool, name: &'static str, start_version: u64, end_version: u64, @@ -434,6 +440,8 @@ impl ProcessorTrait for TokenProcessor { let last_transaction_timestamp = transactions.last().unwrap().timestamp.clone(); let mut conn = self.get_conn().await; + let query_retries = self.config.query_retries; + let query_retry_delay_ms = self.config.query_retry_delay_ms; // First get all token related table metadata from the batch of transactions. This is in case // an earlier transaction has metadata (in resources) that's missing from a later transaction. 
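Several processors in this change (objects, stake, token, token_v2) now carry `query_retries` and `query_retry_delay_ms` and thread both values into their lookup helpers. A hedged sketch of the kind of retry loop those two knobs typically drive follows; the helper names and the fallible lookup are invented for illustration, not taken from the processor code:

use std::time::Duration;

// Hypothetical fallible lookup; in the processors this would be a Diesel query.
async fn lookup_owner(address: &str) -> anyhow::Result<String> {
    if address.is_empty() {
        anyhow::bail!("not found yet");
    }
    Ok(format!("owner-of-{address}"))
}

// Makes at most `query_retries` attempts, sleeping `query_retry_delay_ms` between them.
async fn lookup_with_retries(
    address: &str,
    query_retries: u32,
    query_retry_delay_ms: u64,
) -> anyhow::Result<String> {
    let mut attempt = 0u32;
    loop {
        match lookup_owner(address).await {
            Ok(owner) => return Ok(owner),
            Err(_) if attempt + 1 < query_retries => {
                attempt += 1;
                // Back off before asking again; the delay is configurable per processor.
                tokio::time::sleep(Duration::from_millis(query_retry_delay_ms)).await;
            },
            Err(e) => return Err(e),
        }
    }
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let owner = lookup_with_retries("0x1", 5, 100).await?;
    println!("{owner}");
    Ok(())
}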
@@ -474,7 +482,14 @@ impl ProcessorTrait for TokenProcessor { current_token_datas, current_collection_datas, current_token_claims, - ) = Token::from_transaction(txn, &table_handle_to_owner, &mut conn).await; + ) = Token::from_transaction( + txn, + &table_handle_to_owner, + &mut conn, + query_retries, + query_retry_delay_ms, + ) + .await; all_tokens.append(&mut tokens); all_token_ownerships.append(&mut token_ownerships); all_token_datas.append(&mut token_datas); @@ -567,13 +582,15 @@ impl ProcessorTrait for TokenProcessor { let db_insertion_duration_in_secs = db_insertion_start.elapsed().as_secs_f64(); match tx_result { - Ok(_) => Ok(ProcessingResult { - start_version, - end_version, - processing_duration_in_secs, - db_insertion_duration_in_secs, - last_transaction_timestamp, - }), + Ok(_) => Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + processing_duration_in_secs, + db_insertion_duration_in_secs, + last_transaction_timestamp, + }, + )), Err(e) => { error!( start_version = start_version, @@ -587,7 +604,7 @@ impl ProcessorTrait for TokenProcessor { } } - fn connection_pool(&self) -> &PgDbPool { + fn connection_pool(&self) -> &ArcDbPool { &self.connection_pool } } diff --git a/rust/processor/src/processors/token_v2_processor.rs b/rust/processor/src/processors/token_v2_processor.rs index 34931bdb7..32530f9bb 100644 --- a/rust/processor/src/processors/token_v2_processor.rs +++ b/rust/processor/src/processors/token_v2_processor.rs @@ -1,17 +1,16 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use super::{ProcessingResult, ProcessorName, ProcessorTrait}; +use super::{DefaultProcessingResult, ProcessorName, ProcessorTrait}; use crate::{ - models::{ - fungible_asset_models::v2_fungible_asset_utils::{ - FungibleAssetMetadata, FungibleAssetStore, FungibleAssetSupply, - }, + db::common::models::{ + fungible_asset_models::v2_fungible_asset_utils::FungibleAssetMetadata, object_models::v2_object_utils::{ - ObjectAggregatedData, ObjectAggregatedDataMapping, ObjectWithMetadata, + ObjectAggregatedData, ObjectAggregatedDataMapping, ObjectWithMetadata, Untransferable, }, token_models::tokens::{TableHandleToOwner, TableMetadataForToken}, token_v2_models::{ + v1_token_royalty::CurrentTokenRoyaltyV1, v2_collections::{CollectionV2, CurrentCollectionV2, CurrentCollectionV2PK}, v2_token_activities::TokenActivityV2, v2_token_datas::{CurrentTokenDataV2, CurrentTokenDataV2PK, TokenDataV2}, @@ -27,12 +26,14 @@ use crate::{ }, }, }, + gap_detectors::ProcessingResult, schema, utils::{ counters::PROCESSOR_UNKNOWN_TYPE_COUNT, - database::{execute_in_chunks, get_config_table_chunk_size, PgDbPool, PgPoolConnection}, + database::{execute_in_chunks, get_config_table_chunk_size, ArcDbPool, DbPoolConnection}, util::{get_entry_function_from_user_request, parse_timestamp, standardize_address}, }, + IndexerGrpcProcessorConfig, }; use ahash::{AHashMap, AHashSet}; use anyhow::bail; @@ -43,18 +44,34 @@ use diesel::{ query_builder::QueryFragment, ExpressionMethods, }; +use serde::{Deserialize, Serialize}; use std::fmt::Debug; use tracing::error; +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct TokenV2ProcessorConfig { + #[serde(default = "IndexerGrpcProcessorConfig::default_query_retries")] + pub query_retries: u32, + #[serde(default = "IndexerGrpcProcessorConfig::default_query_retry_delay_ms")] + pub query_retry_delay_ms: u64, +} + pub struct TokenV2Processor { - connection_pool: PgDbPool, + 
connection_pool: ArcDbPool, + config: TokenV2ProcessorConfig, per_table_chunk_sizes: AHashMap, } impl TokenV2Processor { - pub fn new(connection_pool: PgDbPool, per_table_chunk_sizes: AHashMap) -> Self { + pub fn new( + connection_pool: ArcDbPool, + config: TokenV2ProcessorConfig, + per_table_chunk_sizes: AHashMap, + ) -> Self { Self { connection_pool, + config, per_table_chunk_sizes, } } @@ -72,7 +89,7 @@ impl Debug for TokenV2Processor { } async fn insert_to_db( - conn: PgDbPool, + conn: ArcDbPool, name: &'static str, start_version: u64, end_version: u64, @@ -80,11 +97,17 @@ async fn insert_to_db( token_datas_v2: &[TokenDataV2], token_ownerships_v2: &[TokenOwnershipV2], current_collections_v2: &[CurrentCollectionV2], - current_token_datas_v2: &[CurrentTokenDataV2], - current_token_ownerships_v2: &[CurrentTokenOwnershipV2], - current_deleted_token_ownerships_v2: &[CurrentTokenOwnershipV2], + (current_token_datas_v2, current_deleted_token_datas_v2): ( + &[CurrentTokenDataV2], + &[CurrentTokenDataV2], + ), + (current_token_ownerships_v2, current_deleted_token_ownerships_v2): ( + &[CurrentTokenOwnershipV2], + &[CurrentTokenOwnershipV2], + ), token_activities_v2: &[TokenActivityV2], current_token_v2_metadata: &[CurrentTokenV2Metadata], + current_token_royalties_v1: &[CurrentTokenRoyaltyV1], per_table_chunk_sizes: &AHashMap, ) -> Result<(), diesel::result::Error> { tracing::trace!( @@ -133,6 +156,15 @@ async fn insert_to_db( per_table_chunk_sizes, ), ); + let cdtd_v2 = execute_in_chunks( + conn.clone(), + insert_current_deleted_token_datas_v2_query, + current_deleted_token_datas_v2, + get_config_table_chunk_size::( + "current_token_datas_v2", + per_table_chunk_sizes, + ), + ); let cto_v2 = execute_in_chunks( conn.clone(), insert_current_token_ownerships_v2_query, @@ -161,7 +193,7 @@ async fn insert_to_db( ), ); let ct_v2 = execute_in_chunks( - conn, + conn.clone(), insert_current_token_v2_metadatas_query, current_token_v2_metadata, get_config_table_chunk_size::( @@ -169,6 +201,15 @@ async fn insert_to_db( per_table_chunk_sizes, ), ); + let ctr_v1 = execute_in_chunks( + conn, + insert_current_token_royalties_v1_query, + current_token_royalties_v1, + get_config_table_chunk_size::( + "current_token_royalty_v1", + per_table_chunk_sizes, + ), + ); let ( coll_v2_res, @@ -176,11 +217,15 @@ async fn insert_to_db( to_v2_res, cc_v2_res, ctd_v2_res, + cdtd_v2_res, cto_v2_res, cdto_v2_res, ta_v2_res, ct_v2_res, - ) = tokio::join!(coll_v2, td_v2, to_v2, cc_v2, ctd_v2, cto_v2, cdto_v2, ta_v2, ct_v2,); + ctr_v1_res, + ) = tokio::join!( + coll_v2, td_v2, to_v2, cc_v2, ctd_v2, cdtd_v2, cto_v2, cdto_v2, ta_v2, ct_v2, ctr_v1 + ); for res in [ coll_v2_res, @@ -188,10 +233,12 @@ async fn insert_to_db( to_v2_res, cc_v2_res, ctd_v2_res, + cdtd_v2_res, cto_v2_res, cdto_v2_res, ta_v2_res, ct_v2_res, + ctr_v1_res, ] { res?; } @@ -227,7 +274,14 @@ fn insert_token_datas_v2_query( diesel::insert_into(schema::token_datas_v2::table) .values(items_to_insert) .on_conflict((transaction_version, write_set_change_index)) - .do_nothing(), + .do_update() + .set(( + maximum.eq(excluded(maximum)), + supply.eq(excluded(supply)), + is_fungible_v2.eq(excluded(is_fungible_v2)), + inserted_at.eq(excluded(inserted_at)), + decimals.eq(excluded(decimals)), + )), None, ) } @@ -244,7 +298,11 @@ fn insert_token_ownerships_v2_query( diesel::insert_into(schema::token_ownerships_v2::table) .values(items_to_insert) .on_conflict((transaction_version, write_set_change_index)) - .do_nothing(), + .do_update() + .set(( + 
is_fungible_v2.eq(excluded(is_fungible_v2)), + inserted_at.eq(excluded(inserted_at)), + )), None, ) } @@ -279,7 +337,7 @@ fn insert_current_collections_v2_query( inserted_at.eq(excluded(inserted_at)), )), Some(" WHERE current_collections_v2.last_transaction_version <= excluded.last_transaction_version "), - ) + ) } fn insert_current_token_datas_v2_query( @@ -310,6 +368,31 @@ fn insert_current_token_datas_v2_query( last_transaction_timestamp.eq(excluded(last_transaction_timestamp)), inserted_at.eq(excluded(inserted_at)), decimals.eq(excluded(decimals)), + // Intentionally not including is_deleted because it should always be true in this part + // and doesn't need to override + )), + Some(" WHERE current_token_datas_v2.last_transaction_version <= excluded.last_transaction_version "), + ) +} + +fn insert_current_deleted_token_datas_v2_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::current_token_datas_v2::dsl::*; + + ( + diesel::insert_into(schema::current_token_datas_v2::table) + .values(items_to_insert) + .on_conflict(token_data_id) + .do_update() + .set(( + last_transaction_version.eq(excluded(last_transaction_version)), + last_transaction_timestamp.eq(excluded(last_transaction_timestamp)), + inserted_at.eq(excluded(inserted_at)), + is_deleted_v2.eq(excluded(is_deleted_v2)), )), Some(" WHERE current_token_datas_v2.last_transaction_version <= excluded.last_transaction_version "), ) @@ -361,6 +444,7 @@ fn insert_current_deleted_token_ownerships_v2_query( amount.eq(excluded(amount)), last_transaction_version.eq(excluded(last_transaction_version)), last_transaction_timestamp.eq(excluded(last_transaction_timestamp)), + is_fungible_v2.eq(excluded(is_fungible_v2)), inserted_at.eq(excluded(inserted_at)), )), Some(" WHERE current_token_ownerships_v2.last_transaction_version <= excluded.last_transaction_version "), @@ -381,7 +465,7 @@ fn insert_token_activities_v2_query( .on_conflict((transaction_version, event_index)) .do_update() .set(( - entry_function_id_str.eq(excluded(entry_function_id_str)), + is_fungible_v2.eq(excluded(is_fungible_v2)), inserted_at.eq(excluded(inserted_at)), )), None, @@ -411,6 +495,30 @@ fn insert_current_token_v2_metadatas_query( ) } +fn insert_current_token_royalties_v1_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::current_token_royalty_v1::dsl::*; + + ( + diesel::insert_into(schema::current_token_royalty_v1::table) + .values(items_to_insert) + .on_conflict(token_data_id) + .do_update() + .set(( + payee_address.eq(excluded(payee_address)), + royalty_points_numerator.eq(excluded(royalty_points_numerator)), + royalty_points_denominator.eq(excluded(royalty_points_denominator)), + last_transaction_version.eq(excluded(last_transaction_version)), + last_transaction_timestamp.eq(excluded(last_transaction_timestamp)), + )), + Some(" WHERE current_token_royalty_v1.last_transaction_version <= excluded.last_transaction_version "), + ) +} + #[async_trait] impl ProcessorTrait for TokenV2Processor { fn name(&self) -> &'static str { @@ -434,6 +542,8 @@ impl ProcessorTrait for TokenV2Processor { let table_handle_to_owner = TableMetadataForToken::get_table_handle_to_owner_from_transactions(&transactions); + let query_retries = self.config.query_retries; + let query_retry_delay_ms = self.config.query_retry_delay_ms; // Token V2 processing which includes token v1 let ( collections_v2, @@ -441,11 
+551,20 @@ impl ProcessorTrait for TokenV2Processor { token_ownerships_v2, current_collections_v2, current_token_datas_v2, + current_deleted_token_datas_v2, current_token_ownerships_v2, current_deleted_token_ownerships_v2, token_activities_v2, current_token_v2_metadata, - ) = parse_v2_token(&transactions, &table_handle_to_owner, &mut conn).await; + current_token_royalties_v1, + ) = parse_v2_token( + &transactions, + &table_handle_to_owner, + &mut conn, + query_retries, + query_retry_delay_ms, + ) + .await; let processing_duration_in_secs = processing_start.elapsed().as_secs_f64(); let db_insertion_start = std::time::Instant::now(); @@ -459,24 +578,29 @@ impl ProcessorTrait for TokenV2Processor { &token_datas_v2, &token_ownerships_v2, ¤t_collections_v2, - ¤t_token_datas_v2, - ¤t_token_ownerships_v2, - ¤t_deleted_token_ownerships_v2, + (¤t_token_datas_v2, ¤t_deleted_token_datas_v2), + ( + ¤t_token_ownerships_v2, + ¤t_deleted_token_ownerships_v2, + ), &token_activities_v2, ¤t_token_v2_metadata, + ¤t_token_royalties_v1, &self.per_table_chunk_sizes, ) .await; let db_insertion_duration_in_secs = db_insertion_start.elapsed().as_secs_f64(); match tx_result { - Ok(_) => Ok(ProcessingResult { - start_version, - end_version, - processing_duration_in_secs, - db_insertion_duration_in_secs, - last_transaction_timestamp, - }), + Ok(_) => Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + processing_duration_in_secs, + db_insertion_duration_in_secs, + last_transaction_timestamp, + }, + )), Err(e) => { error!( start_version = start_version, @@ -490,7 +614,7 @@ impl ProcessorTrait for TokenV2Processor { } } - fn connection_pool(&self) -> &PgDbPool { + fn connection_pool(&self) -> &ArcDbPool { &self.connection_pool } } @@ -498,27 +622,34 @@ impl ProcessorTrait for TokenV2Processor { async fn parse_v2_token( transactions: &[Transaction], table_handle_to_owner: &TableHandleToOwner, - conn: &mut PgPoolConnection<'_>, + conn: &mut DbPoolConnection<'_>, + query_retries: u32, + query_retry_delay_ms: u64, ) -> ( Vec, Vec, Vec, Vec, Vec, + Vec, Vec, Vec, // deleted token ownerships Vec, Vec, + Vec, ) { // Token V2 and V1 combined let mut collections_v2 = vec![]; let mut token_datas_v2 = vec![]; let mut token_ownerships_v2 = vec![]; let mut token_activities_v2 = vec![]; + let mut current_collections_v2: AHashMap = AHashMap::new(); let mut current_token_datas_v2: AHashMap = AHashMap::new(); + let mut current_deleted_token_datas_v2: AHashMap = + AHashMap::new(); let mut current_token_ownerships_v2: AHashMap< CurrentTokenOwnershipV2PK, CurrentTokenOwnershipV2, @@ -533,6 +664,8 @@ async fn parse_v2_token( // Basically token properties let mut current_token_v2_metadata: AHashMap = AHashMap::new(); + let mut current_token_royalties_v1: AHashMap = + AHashMap::new(); // Code above is inefficient (multiple passthroughs) so I'm approaching TokenV2 with a cleaner code structure for txn in transactions { @@ -576,18 +709,8 @@ async fn parse_v2_token( token_v2_metadata_helper.insert( standardize_address(&wr.address.to_string()), ObjectAggregatedData { - aptos_collection: None, - fixed_supply: None, object, - unlimited_supply: None, - concurrent_supply: None, - property_map: None, - transfer_events: vec![], - token: None, - fungible_asset_metadata: None, - fungible_asset_supply: None, - fungible_asset_store: None, - token_identifier: None, + ..ObjectAggregatedData::default() }, ); } @@ -633,21 +756,16 @@ async fn parse_v2_token( { aggregated_data.fungible_asset_metadata = 
Some(fungible_asset_metadata); } - if let Some(fungible_asset_supply) = - FungibleAssetSupply::from_write_resource(wr, txn_version).unwrap() - { - aggregated_data.fungible_asset_supply = Some(fungible_asset_supply); - } - if let Some(fungible_asset_store) = - FungibleAssetStore::from_write_resource(wr, txn_version).unwrap() - { - aggregated_data.fungible_asset_store = Some(fungible_asset_store); - } if let Some(token_identifier) = TokenIdentifiers::from_write_resource(wr, txn_version).unwrap() { aggregated_data.token_identifier = Some(token_identifier); } + if let Some(untransferable) = + Untransferable::from_write_resource(wr, txn_version).unwrap() + { + aggregated_data.untransferable = Some(untransferable); + } } } } @@ -657,10 +775,15 @@ async fn parse_v2_token( // and burn / transfer events need to come before the next section for (index, event) in user_txn.events.iter().enumerate() { if let Some(burn_event) = Burn::from_event(event, txn_version).unwrap() { - tokens_burned.insert(burn_event.get_token_address(), Some(burn_event)); + tokens_burned.insert(burn_event.get_token_address(), burn_event); } - if let Some(burn_event) = BurnEvent::from_event(event, txn_version).unwrap() { - tokens_burned.insert(burn_event.get_token_address(), None); + if let Some(old_burn_event) = BurnEvent::from_event(event, txn_version).unwrap() { + let burn_event = Burn::new( + standardize_address(event.key.as_ref().unwrap().account_address.as_str()), + old_burn_event.get_token_address(), + "".to_string(), + ); + tokens_burned.insert(burn_event.get_token_address(), burn_event); } if let Some(mint_event) = MintEvent::from_event(event, txn_version).unwrap() { tokens_minted.insert(mint_event.get_token_address()); @@ -709,21 +832,6 @@ async fn parse_v2_token( { token_activities_v2.push(event); } - // handling all the token v2 events - if let Some(event) = TokenActivityV2::get_ft_v2_from_parsed_event( - event, - txn_version, - txn_timestamp, - index as i64, - &entry_function_id_str, - &token_v2_metadata_helper, - conn, - ) - .await - .unwrap() - { - token_activities_v2.push(event); - } } for (index, wsc) in transaction_info.changes.iter().enumerate() { @@ -738,6 +846,8 @@ async fn parse_v2_token( txn_timestamp, table_handle_to_owner, conn, + query_retries, + query_retry_delay_ms, ) .await .unwrap() @@ -763,6 +873,19 @@ async fn parse_v2_token( current_token_data, ); } + if let Some(current_token_royalty) = + CurrentTokenRoyaltyV1::get_v1_from_write_table_item( + table_item, + txn_version, + txn_timestamp, + ) + .unwrap() + { + current_token_royalties_v1.insert( + current_token_royalty.token_data_id.clone(), + current_token_royalty, + ); + } if let Some((token_ownership, current_token_ownership)) = TokenOwnershipV2::get_v1_from_write_table_item( table_item, @@ -888,6 +1011,22 @@ async fn parse_v2_token( ); } + // Add burned NFT handling for token datas (can probably be merged with below) + if let Some(deleted_token_data) = + TokenDataV2::get_burned_nft_v2_from_write_resource( + resource, + txn_version, + txn_timestamp, + &tokens_burned, + ) + .await + .unwrap() + { + current_deleted_token_datas_v2.insert( + deleted_token_data.token_data_id.clone(), + deleted_token_data, + ); + } // Add burned NFT handling if let Some((nft_ownership, current_nft_ownership)) = TokenOwnershipV2::get_burned_nft_v2_from_write_resource( @@ -895,8 +1034,14 @@ async fn parse_v2_token( txn_version, wsc_index, txn_timestamp, + &prior_nft_ownership, &tokens_burned, + &token_v2_metadata_helper, + conn, + query_retries, + query_retry_delay_ms, ) 
+ .await .unwrap() { token_ownerships_v2.push(nft_ownership); @@ -919,31 +1064,6 @@ async fn parse_v2_token( ); } - // Add fungible token handling - if let Some((ft_ownership, current_ft_ownership)) = - TokenOwnershipV2::get_ft_v2_from_write_resource( - resource, - txn_version, - wsc_index, - txn_timestamp, - &token_v2_metadata_helper, - conn, - ) - .await - .unwrap() - { - token_ownerships_v2.push(ft_ownership); - current_token_ownerships_v2.insert( - ( - current_ft_ownership.token_data_id.clone(), - current_ft_ownership.property_version_v1.clone(), - current_ft_ownership.owner_address.clone(), - current_ft_ownership.storage_id.clone(), - ), - current_ft_ownership, - ); - } - // Track token properties if let Some(token_metadata) = CurrentTokenV2Metadata::from_write_resource( resource, @@ -962,7 +1082,22 @@ async fn parse_v2_token( } }, Change::DeleteResource(resource) => { - // Add burned NFT handling + // Add burned NFT handling for token datas (can probably be merged with below) + if let Some(deleted_token_data) = + TokenDataV2::get_burned_nft_v2_from_delete_resource( + resource, + txn_version, + txn_timestamp, + &tokens_burned, + ) + .await + .unwrap() + { + current_deleted_token_datas_v2.insert( + deleted_token_data.token_data_id.clone(), + deleted_token_data, + ); + } if let Some((nft_ownership, current_nft_ownership)) = TokenOwnershipV2::get_burned_nft_v2_from_delete_resource( resource, @@ -972,6 +1107,8 @@ async fn parse_v2_token( &prior_nft_ownership, &tokens_burned, conn, + query_retries, + query_retry_delay_ms, ) .await .unwrap() @@ -1009,6 +1146,9 @@ async fn parse_v2_token( let mut current_token_datas_v2 = current_token_datas_v2 .into_values() .collect::>(); + let mut current_deleted_token_datas_v2 = current_deleted_token_datas_v2 + .into_values() + .collect::>(); let mut current_token_ownerships_v2 = current_token_ownerships_v2 .into_values() .collect::>(); @@ -1018,9 +1158,13 @@ async fn parse_v2_token( let mut current_deleted_token_ownerships_v2 = current_deleted_token_ownerships_v2 .into_values() .collect::>(); + let mut current_token_royalties_v1 = current_token_royalties_v1 + .into_values() + .collect::>(); // Sort by PK current_collections_v2.sort_by(|a, b| a.collection_id.cmp(&b.collection_id)); + current_deleted_token_datas_v2.sort_by(|a, b| a.token_data_id.cmp(&b.token_data_id)); current_token_datas_v2.sort_by(|a, b| a.token_data_id.cmp(&b.token_data_id)); current_token_ownerships_v2.sort_by(|a, b| { ( @@ -1053,6 +1197,7 @@ async fn parse_v2_token( &b.storage_id, )) }); + current_token_royalties_v1.sort(); ( collections_v2, @@ -1060,9 +1205,11 @@ async fn parse_v2_token( token_ownerships_v2, current_collections_v2, current_token_datas_v2, + current_deleted_token_datas_v2, current_token_ownerships_v2, current_deleted_token_ownerships_v2, token_activities_v2, current_token_v2_metadata, + current_token_royalties_v1, ) } diff --git a/rust/processor/src/processors/transaction_metadata_processor.rs b/rust/processor/src/processors/transaction_metadata_processor.rs index a3393b301..615dacd09 100644 --- a/rust/processor/src/processors/transaction_metadata_processor.rs +++ b/rust/processor/src/processors/transaction_metadata_processor.rs @@ -1,14 +1,15 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use super::{ProcessingResult, ProcessorName, ProcessorTrait}; +use super::{DefaultProcessingResult, ProcessorName, ProcessorTrait}; use crate::{ - models::transaction_metadata_model::{ + db::common::models::transaction_metadata_model::{ 
event_size_info::EventSize, transaction_size_info::TransactionSize, write_set_size_info::WriteSetSize, }, + gap_detectors::ProcessingResult, schema, - utils::database::{execute_in_chunks, get_config_table_chunk_size, PgDbPool}, + utils::database::{execute_in_chunks, get_config_table_chunk_size, ArcDbPool}, }; use ahash::AHashMap; use anyhow::bail; @@ -19,12 +20,12 @@ use std::fmt::Debug; use tracing::{error, warn}; pub struct TransactionMetadataProcessor { - connection_pool: PgDbPool, + connection_pool: ArcDbPool, per_table_chunk_sizes: AHashMap, } impl TransactionMetadataProcessor { - pub fn new(connection_pool: PgDbPool, per_table_chunk_sizes: AHashMap) -> Self { + pub fn new(connection_pool: ArcDbPool, per_table_chunk_sizes: AHashMap) -> Self { Self { connection_pool, per_table_chunk_sizes, @@ -44,7 +45,7 @@ impl Debug for TransactionMetadataProcessor { } async fn insert_to_db( - conn: PgDbPool, + conn: ArcDbPool, name: &'static str, start_version: u64, end_version: u64, @@ -198,13 +199,15 @@ impl ProcessorTrait for TransactionMetadataProcessor { .await; let db_insertion_duration_in_secs = db_insertion_start.elapsed().as_secs_f64(); match tx_result { - Ok(_) => Ok(ProcessingResult { - start_version, - end_version, - processing_duration_in_secs, - db_insertion_duration_in_secs, - last_transaction_timestamp: transactions.last().unwrap().timestamp.clone(), - }), + Ok(_) => Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + processing_duration_in_secs, + db_insertion_duration_in_secs, + last_transaction_timestamp: transactions.last().unwrap().timestamp.clone(), + }, + )), Err(e) => { error!( start_version = start_version, @@ -218,7 +221,7 @@ impl ProcessorTrait for TransactionMetadataProcessor { } } - fn connection_pool(&self) -> &PgDbPool { + fn connection_pool(&self) -> &ArcDbPool { &self.connection_pool } } diff --git a/rust/processor/src/processors/user_transaction_processor.rs b/rust/processor/src/processors/user_transaction_processor.rs index 5571eedb9..08416488e 100644 --- a/rust/processor/src/processors/user_transaction_processor.rs +++ b/rust/processor/src/processors/user_transaction_processor.rs @@ -1,15 +1,16 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use super::{ProcessingResult, ProcessorName, ProcessorTrait}; +use super::{DefaultProcessingResult, ProcessorName, ProcessorTrait}; use crate::{ - models::user_transactions_models::{ + db::common::models::user_transactions_models::{ signatures::Signature, user_transactions::UserTransactionModel, }, + gap_detectors::ProcessingResult, schema, utils::{ counters::PROCESSOR_UNKNOWN_TYPE_COUNT, - database::{execute_in_chunks, get_config_table_chunk_size, PgDbPool}, + database::{execute_in_chunks, get_config_table_chunk_size, ArcDbPool}, }, }; use ahash::AHashMap; @@ -25,12 +26,12 @@ use std::fmt::Debug; use tracing::error; pub struct UserTransactionProcessor { - connection_pool: PgDbPool, + connection_pool: ArcDbPool, per_table_chunk_sizes: AHashMap, } impl UserTransactionProcessor { - pub fn new(connection_pool: PgDbPool, per_table_chunk_sizes: AHashMap) -> Self { + pub fn new(connection_pool: ArcDbPool, per_table_chunk_sizes: AHashMap) -> Self { Self { connection_pool, per_table_chunk_sizes, @@ -50,7 +51,7 @@ impl Debug for UserTransactionProcessor { } async fn insert_to_db( - conn: PgDbPool, + conn: ArcDbPool, name: &'static str, start_version: u64, end_version: u64, @@ -191,13 +192,15 @@ impl ProcessorTrait for UserTransactionProcessor { .await; 
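The match just below is the same mechanical change applied to every processor in this diff: the flat processing-result struct is now wrapped in an enum variant so the gap detector can handle both regular and parquet results. A sketch of the shape this implies; it is inferred from how the processors construct results, and the real definitions in `gap_detectors` also carry a `last_transaction_timestamp` and, for parquet, per-version struct counts:

// Illustrative only: field sets are trimmed relative to the real types.
#[derive(Debug)]
pub struct DefaultProcessingResult {
    pub start_version: u64,
    pub end_version: u64,
    pub processing_duration_in_secs: f64,
    pub db_insertion_duration_in_secs: f64,
}

#[derive(Debug)]
pub struct ParquetProcessingResult {
    pub start_version: i64,
    pub end_version: i64,
}

#[derive(Debug)]
pub enum ProcessingResult {
    DefaultProcessingResult(DefaultProcessingResult),
    ParquetProcessingResult(ParquetProcessingResult),
}

// Downstream consumers match on the variant to read the processed range.
fn versions(result: &ProcessingResult) -> (i64, i64) {
    match result {
        ProcessingResult::DefaultProcessingResult(r) => (r.start_version as i64, r.end_version as i64),
        ProcessingResult::ParquetProcessingResult(r) => (r.start_version, r.end_version),
    }
}

fn main() {
    let r = ProcessingResult::DefaultProcessingResult(DefaultProcessingResult {
        start_version: 10,
        end_version: 42,
        processing_duration_in_secs: 0.5,
        db_insertion_duration_in_secs: 0.1,
    });
    println!("{:?}", versions(&r));
}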
let db_insertion_duration_in_secs = db_insertion_start.elapsed().as_secs_f64(); match tx_result { - Ok(_) => Ok(ProcessingResult { - start_version, - end_version, - processing_duration_in_secs, - db_insertion_duration_in_secs, - last_transaction_timestamp, - }), + Ok(_) => Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + processing_duration_in_secs, + db_insertion_duration_in_secs, + last_transaction_timestamp, + }, + )), Err(e) => { error!( start_version = start_version, @@ -211,7 +214,7 @@ impl ProcessorTrait for UserTransactionProcessor { } } - fn connection_pool(&self) -> &PgDbPool { + fn connection_pool(&self) -> &ArcDbPool { &self.connection_pool } } diff --git a/rust/processor/src/utils/counters.rs b/rust/processor/src/utils/counters.rs index ee9431a8d..5f83e183e 100644 --- a/rust/processor/src/utils/counters.rs +++ b/rust/processor/src/utils/counters.rs @@ -3,8 +3,9 @@ use once_cell::sync::Lazy; use prometheus::{ - register_gauge_vec, register_int_counter, register_int_counter_vec, register_int_gauge_vec, - GaugeVec, IntCounter, IntCounterVec, IntGaugeVec, + register_gauge, register_gauge_vec, register_int_counter, register_int_counter_vec, + register_int_gauge, register_int_gauge_vec, Gauge, GaugeVec, IntCounter, IntCounterVec, + IntGauge, IntGaugeVec, }; pub enum ProcessorStep { @@ -235,6 +236,16 @@ pub static PROCESSOR_DATA_GAP_COUNT: Lazy = Lazy::new(|| { .unwrap() }); +/// Data gap warnings for parquet +pub static PARQUET_PROCESSOR_DATA_GAP_COUNT: Lazy = Lazy::new(|| { + register_int_gauge_vec!( + "indexer_parquet_processor_data_gap_count", + "Data gap count", + &["processor_name"] + ) + .unwrap() +}); + /// GRPC latency. pub static GRPC_LATENCY_BY_PROCESSOR_IN_SECS: Lazy = Lazy::new(|| { register_gauge_vec!( @@ -254,3 +265,53 @@ pub static PROCESSOR_UNKNOWN_TYPE_COUNT: Lazy = Lazy::new(|| { ) .unwrap() }); + +/// Parquet struct size +pub static PARQUET_STRUCT_SIZE: Lazy = Lazy::new(|| { + register_int_gauge_vec!("indexer_parquet_struct_size", "Parquet struct size", &[ + "parquet_type" + ]) + .unwrap() +}); + +/// Parquet handler buffer size +pub static PARQUET_HANDLER_BUFFER_SIZE: Lazy = Lazy::new(|| { + register_int_gauge_vec!( + "indexer_parquet_handler_buffer_size", + "Parquet handler buffer size", + &["parquet_type"] // TODO: add something like task_index + ) + .unwrap() +}); + +/// Indexer gRPC to Processor 1 serve latency +pub static GRPC_TO_PROCESSOR_1_SERVE_LATENCY_IN_SECS: Lazy = Lazy::new(|| { + register_gauge!( + "indexer_grpc_to_processor_1_serve_latency_in_secs", + "Indexer gRPC to Processor 1 serve latency" + ) + .unwrap() +}); + +/// First value in cache +pub static FIRST_TRANSACTION_VERSION_IN_CACHE: Lazy = Lazy::new(|| { + register_int_gauge!( + "indexer_first_transaction_version_in_cache", + "First value in cache" + ) + .unwrap() +}); + +/// Last value in cache +pub static LAST_TRANSACTION_VERSION_IN_CACHE: Lazy = Lazy::new(|| { + register_int_gauge!( + "indexer_last_transaction_version_in_cache", + "Last value in cache" + ) + .unwrap() +}); + +/// Size of cache in bytes +pub static CACHE_SIZE_IN_BYTES: Lazy = Lazy::new(|| { + register_int_gauge!("indexer_cache_size_in_bytes", "Size of cache in bytes").unwrap() +}); diff --git a/rust/processor/src/utils/database.rs b/rust/processor/src/utils/database.rs index 9a8e4dac4..411ce46c9 100644 --- a/rust/processor/src/utils/database.rs +++ b/rust/processor/src/utils/database.rs @@ -7,28 +7,28 @@ use crate::utils::util::remove_null_bytes; use 
ahash::AHashMap; use diesel::{ - backend::Backend, query_builder::{AstPass, Query, QueryFragment}, ConnectionResult, QueryResult, }; use diesel_async::{ - pg::AsyncPgConnection, pooled_connection::{ bb8::{Pool, PooledConnection}, AsyncDieselConnectionManager, ManagerConfig, PoolError, }, - RunQueryDsl, + AsyncPgConnection, RunQueryDsl, }; use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness}; use futures_util::{future::BoxFuture, FutureExt}; -use std::{cmp::min, sync::Arc}; +use std::sync::Arc; + +pub type Backend = diesel::pg::Pg; pub type MyDbConnection = AsyncPgConnection; -pub type PgPool = Pool; -pub type PgDbPool = Arc; -pub type PgPoolConnection<'a> = PooledConnection<'a, MyDbConnection>; +pub type DbPool = Pool; +pub type ArcDbPool = Arc; +pub type DbPoolConnection<'a> = PooledConnection<'a, MyDbConnection>; -pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!(); +pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("src/db/postgres/migrations"); pub const DEFAULT_MAX_POOL_SIZE: u32 = 150; @@ -44,20 +44,6 @@ pub struct UpsertFilterLatestTransactionQuery { // the max is actually u16::MAX but we see that when the size is too big we get an overflow error so reducing it a bit pub const MAX_DIESEL_PARAM_SIZE: usize = (u16::MAX / 2) as usize; -/// This function returns boundaries of chunks in the form of (start_index, end_index) -pub fn get_chunks(num_items_to_insert: usize, chunk_size: usize) -> Vec<(usize, usize)> { - let mut chunk: (usize, usize) = (0, min(num_items_to_insert, chunk_size)); - let mut chunks = vec![chunk]; - while chunk.1 != num_items_to_insert { - chunk = ( - chunk.0 + chunk_size, - min(num_items_to_insert, chunk.1 + chunk_size), - ); - chunks.push(chunk); - } - chunks -} - /// This function will clean the data for postgres. Currently it has support for removing /// null bytes from strings but in the future we will add more functionality. 
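One behavioral note on utils/database.rs: the hand-rolled get_chunks boundary helper (deleted above, together with its unit test further down) is gone, and execute_in_chunks below now iterates with the standard library's slice::chunks, which hands out the same slices without any index bookkeeping. A tiny sketch of the replacement pattern, using made-up sizes rather than the real Diesel parameter limits:

fn main() {
    let items: Vec<u32> = (0..10).collect();
    let chunk_size = 4;

    // Old style: compute (start, end) index pairs, then slice manually.
    let mut boundaries = Vec::new();
    let mut start = 0usize;
    while start < items.len() {
        let end = usize::min(start + chunk_size, items.len());
        boundaries.push((start, end));
        start = end;
    }

    // New style: let slice::chunks produce the slices directly;
    // the final chunk is simply shorter, no special-casing required.
    for (chunk, (s, e)) in items.chunks(chunk_size).zip(&boundaries) {
        assert_eq!(chunk, &items[*s..*e]);
    }
    assert_eq!(boundaries, vec![(0, 4), (4, 8), (8, 10)]);
    println!("{} chunks", boundaries.len());
}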
pub fn clean_data_for_db serde::Deserialize<'de>>( @@ -120,13 +106,13 @@ fn parse_and_clean_db_url(url: &str) -> (String, Option) { pub async fn new_db_pool( database_url: &str, max_pool_size: Option, -) -> Result { +) -> Result { let (_url, cert_path) = parse_and_clean_db_url(database_url); let config = if cert_path.is_some() { - let mut config = ManagerConfig::::default(); + let mut config = ManagerConfig::::default(); config.custom_setup = Box::new(|conn| Box::pin(establish_connection(conn))); - AsyncDieselConnectionManager::::new_with_config(database_url, config) + AsyncDieselConnectionManager::::new_with_config(database_url, config) } else { AsyncDieselConnectionManager::::new(database_url) }; @@ -138,22 +124,20 @@ pub async fn new_db_pool( } pub async fn execute_in_chunks( - conn: PgDbPool, + conn: ArcDbPool, build_query: fn(Vec) -> (U, Option<&'static str>), items_to_insert: &[T], chunk_size: usize, ) -> Result<(), diesel::result::Error> where - U: QueryFragment + diesel::query_builder::QueryId + Send + 'static, + U: QueryFragment + diesel::query_builder::QueryId + Send + 'static, T: serde::Serialize + for<'de> serde::Deserialize<'de> + Clone + Send + 'static, { - let chunks = get_chunks(items_to_insert.len(), chunk_size); - - let tasks = chunks - .into_iter() - .map(|(start_ind, end_ind)| { - let items = items_to_insert[start_ind..end_ind].to_vec(); + let tasks = items_to_insert + .chunks(chunk_size) + .map(|chunk| { let conn = conn.clone(); + let items = chunk.to_vec(); tokio::spawn(async move { let (query, additional_where_clause) = build_query(items.clone()); execute_or_retry_cleaned(conn, build_query, items, query, additional_where_clause) @@ -173,14 +157,14 @@ where } pub async fn execute_with_better_error( - pool: PgDbPool, + pool: ArcDbPool, query: U, mut additional_where_clause: Option<&'static str>, ) -> QueryResult where - U: QueryFragment + diesel::query_builder::QueryId + Send, + U: QueryFragment + diesel::query_builder::QueryId + Send, { - let original_query = diesel::debug_query::(&query).to_string(); + let original_query = diesel::debug_query::(&query).to_string(); // This is needed because if we don't insert any row, then diesel makes a call like this // SELECT 1 FROM TABLE WHERE 1=0 if original_query.to_lowercase().contains("where") { @@ -190,7 +174,7 @@ where query, where_clause: additional_where_clause, }; - let debug_string = diesel::debug_query::(&final_query).to_string(); + let debug_string = diesel::debug_query::(&final_query).to_string(); tracing::debug!("Executing query: {:?}", debug_string); let conn = &mut pool.get().await.map_err(|e| { tracing::warn!("Error getting connection from pool: {:?}", e); @@ -225,9 +209,9 @@ pub async fn execute_with_better_error_conn( mut additional_where_clause: Option<&'static str>, ) -> QueryResult where - U: QueryFragment + diesel::query_builder::QueryId + Send, + U: QueryFragment + diesel::query_builder::QueryId + Send, { - let original_query = diesel::debug_query::(&query).to_string(); + let original_query = diesel::debug_query::(&query).to_string(); // This is needed because if we don't insert any row, then diesel makes a call like this // SELECT 1 FROM TABLE WHERE 1=0 if original_query.to_lowercase().contains("where") { @@ -237,7 +221,7 @@ where query, where_clause: additional_where_clause, }; - let debug_string = diesel::debug_query::(&final_query).to_string(); + let debug_string = diesel::debug_query::(&final_query).to_string(); tracing::debug!("Executing query: {:?}", debug_string); let res = 
final_query.execute(conn).await; if let Err(ref e) = res { @@ -247,14 +231,14 @@ where } async fn execute_or_retry_cleaned( - conn: PgDbPool, + conn: ArcDbPool, build_query: fn(Vec) -> (U, Option<&'static str>), items: Vec, query: U, additional_where_clause: Option<&'static str>, ) -> Result<(), diesel::result::Error> where - U: QueryFragment + diesel::query_builder::QueryId + Send, + U: QueryFragment + diesel::query_builder::QueryId + Send, T: serde::Serialize + for<'de> serde::Deserialize<'de> + Clone, { match execute_with_better_error(conn.clone(), query, additional_where_clause).await { @@ -275,7 +259,7 @@ where Ok(()) } -pub async fn run_pending_migrations(conn: &mut impl MigrationHarness) { +pub fn run_pending_migrations(conn: &mut impl MigrationHarness) { conn.run_pending_migrations(MIGRATIONS) .expect("[Parser] Migrations failed!"); } @@ -287,11 +271,11 @@ impl Query for UpsertFilterLatestTransactionQuery { //impl RunQueryDsl for UpsertFilterLatestTransactionQuery {} -impl QueryFragment for UpsertFilterLatestTransactionQuery +impl QueryFragment for UpsertFilterLatestTransactionQuery where - T: QueryFragment, + T: QueryFragment, { - fn walk_ast<'b>(&'b self, mut out: AstPass<'_, 'b, diesel::pg::Pg>) -> QueryResult<()> { + fn walk_ast<'b>(&'b self, mut out: AstPass<'_, 'b, Backend>) -> QueryResult<()> { self.query.walk_ast(out.reborrow())?; if let Some(w) = self.where_clause { out.push_sql(w); @@ -299,35 +283,3 @@ where Ok(()) } } - -#[cfg(test)] -mod test { - use super::*; - - #[tokio::test] - async fn test_get_chunks_logic() { - assert_eq!(get_chunks(10, 5), vec![(0, 10)]); - assert_eq!(get_chunks(65535, 1), vec![ - (0, 32767), - (32767, 65534), - (65534, 65535), - ]); - // 200,000 total items will take 6 buckets. Each bucket can only be 3276 size. 
- assert_eq!(get_chunks(10000, 20), vec![ - (0, 1638), - (1638, 3276), - (3276, 4914), - (4914, 6552), - (6552, 8190), - (8190, 9828), - (9828, 10000), - ]); - assert_eq!(get_chunks(65535, 2), vec![ - (0, 16383), - (16383, 32766), - (32766, 49149), - (49149, 65532), - (65532, 65535), - ]); - } -} diff --git a/rust/processor/src/utils/event_ordering.rs b/rust/processor/src/utils/event_ordering.rs new file mode 100644 index 000000000..ff6fa76a9 --- /dev/null +++ b/rust/processor/src/utils/event_ordering.rs @@ -0,0 +1,88 @@ +use super::{counters::CACHE_SIZE_IN_BYTES, stream::EventCacheKey}; +use crate::{ + models::events_models::events::{CachedEvent, EventOrder, EventStreamMessage}, + utils::counters::LAST_TRANSACTION_VERSION_IN_CACHE, +}; +use ahash::AHashMap; +use aptos_in_memory_cache::StreamableOrderedCache; +use kanal::AsyncReceiver; +use std::sync::Arc; +use tracing::error; + +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct TransactionEvents { + pub transaction_version: i64, + pub transaction_timestamp: chrono::NaiveDateTime, + pub events: Vec, +} + +impl Ord for TransactionEvents { + // Comparison must be reversed because BinaryHeap is a max-heap + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + other.transaction_version.cmp(&self.transaction_version) + } +} + +impl PartialOrd for TransactionEvents { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +pub struct EventOrdering + 'static> { + rx: AsyncReceiver>, + cache: Arc, +} + +impl + 'static> EventOrdering { + pub fn new(rx: AsyncReceiver>, cache: Arc) -> Self { + Self { rx, cache } + } + + pub async fn run(&self, starting_version: i64) { + let mut map = AHashMap::new(); + let rx = self.rx.clone(); + let mut next_transaction_version = starting_version; + + loop { + let batch_events = rx.recv().await.unwrap_or_else(|e| { + error!( + error = ?e, + "[Event Stream] Failed to receive message from channel" + ); + panic!(); + }); + + for events in batch_events { + map.insert(events.transaction_version, events); + } + + while let Some(transaction_events) = map.remove(&next_transaction_version) { + let transaction_timestamp = transaction_events.transaction_timestamp; + let num_events = transaction_events.events.len(); + if num_events == 0 { + // Add empty event if transaction doesn't have any events + self.cache.insert( + EventCacheKey::new(transaction_events.transaction_version, 0), + CachedEvent::empty(transaction_events.transaction_version), + ); + } else { + // Add all events to cache + for event in transaction_events.events { + self.cache.insert( + EventCacheKey::new(event.transaction_version, event.event_index), + CachedEvent::from_event_stream_message( + &EventStreamMessage::from_event_order(&event, transaction_timestamp), + num_events, + ), + ); + } + } + LAST_TRANSACTION_VERSION_IN_CACHE + .set(self.cache.last_key().unwrap().transaction_version); + CACHE_SIZE_IN_BYTES.set(self.cache.total_size() as i64); + next_transaction_version += 1; + } + } + } +} diff --git a/rust/processor/src/utils/filter.rs b/rust/processor/src/utils/filter.rs new file mode 100644 index 000000000..c20ebe07b --- /dev/null +++ b/rust/processor/src/utils/filter.rs @@ -0,0 +1,20 @@ +use dashmap::DashSet; + +#[derive(Clone, Debug, Default)] +pub struct EventFilter { + pub accounts: DashSet, + pub types: DashSet, +} + +impl EventFilter { + pub fn new() -> Self { + Self { + accounts: DashSet::new(), + types: DashSet::new(), + } + } + + pub fn is_empty(&self) -> bool { + self.accounts.is_empty() && self.types.is_empty() + } 
+} diff --git a/rust/processor/src/utils/filter_editor.rs b/rust/processor/src/utils/filter_editor.rs new file mode 100644 index 000000000..f08b12f0f --- /dev/null +++ b/rust/processor/src/utils/filter_editor.rs @@ -0,0 +1,75 @@ +// Copyright © Aptos Foundation + +use crate::utils::filter::EventFilter; +use futures::{stream::SplitStream, StreamExt}; +use std::sync::Arc; +use tokio::sync::Notify; +use tracing::{error, info}; +use warp::filters::ws::WebSocket; + +pub struct FilterEditor { + rx: SplitStream, + filter: Arc, + filter_edit_notify: Arc, +} + +impl FilterEditor { + pub fn new( + rx: SplitStream, + filter: Arc, + filter_edit_notify: Arc, + ) -> Self { + info!("Received WebSocket connection"); + Self { + rx, + filter, + filter_edit_notify, + } + } + + /// Maintains websocket connection and sends messages from channel + pub async fn run(&mut self) { + while let Some(Ok(msg)) = self.rx.next().await { + if let Ok(policy) = msg.to_str() { + let policy = policy.split(',').collect::>(); + match policy[0] { + "account" => match policy[1] { + "add" => { + self.filter.accounts.insert(policy[2].to_string()); + }, + "remove" => { + self.filter.accounts.remove(policy[2]); + }, + _ => { + error!("[Event Stream] Invalid filter command: {}", policy[1]); + }, + }, + "type" => match policy[1] { + "add" => { + self.filter.types.insert(policy[2].to_string()); + }, + "remove" => { + self.filter.types.remove(policy[2]); + }, + _ => { + error!("[Event Stream] Invalid filter command: {}", policy[1]); + }, + }, + _ => { + error!("[Event Stream] Invalid filter type: {}", policy[0]); + }, + } + self.filter_edit_notify.notify_waiters(); + } + } + } +} + +pub async fn spawn_filter_editor( + rx: SplitStream, + filter: Arc, + filter_edit_notify: Arc, +) { + let mut filter = FilterEditor::new(rx, filter, filter_edit_notify); + filter.run().await; +} diff --git a/rust/processor/src/utils/in_memory_cache.rs b/rust/processor/src/utils/in_memory_cache.rs new file mode 100644 index 000000000..f806af92d --- /dev/null +++ b/rust/processor/src/utils/in_memory_cache.rs @@ -0,0 +1,197 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::db::common::models::events_models::events::CachedEvents; +use aptos_in_memory_cache::{caches::sync_mutex::SyncMutexCache, Cache, SizedCache}; +use futures::{stream, Stream}; +use get_size::GetSize; +use std::sync::{ + atomic::{AtomicI64, Ordering}, + Arc, +}; +use tokio::{sync::Notify, task::JoinHandle}; + +#[derive(Debug, Clone)] +pub struct InMemoryCacheMetadata { + pub eviction_trigger_size_in_bytes: usize, + pub target_size_in_bytes: usize, + pub capacity: usize, +} + +// #[derive(Debug)] +pub struct InMemoryCache { + pub metadata: Arc, + pub cache: Arc>, + pub eviction_notify: Arc, + // Metadata for stream + pub head: Arc, + pub tail: Arc, + pub watermark: Arc, + pub stream_notify: Arc, +} + +impl Cache for InMemoryCache { + fn get(&self, key: &i64) -> Option> { + let key = &(*key as usize); + self.cache.get(key).and_then(|entry| { + if entry.key == *key { + return Some(entry.value.clone()); + } + None + }) + } + + fn insert(&self, key: i64, value: CachedEvents) { + let size_in_bytes = value.get_size(); + self.cache + .insert_with_size(key as usize, Arc::new(value), size_in_bytes); + + // Fill pointers if cache was empty + if self.head.load(Ordering::Relaxed) == -1 { + self.head.store(key, Ordering::Relaxed); + } + + if self.tail.load(Ordering::Relaxed) == -1 { + self.tail.store(key, Ordering::Relaxed); + } + + if 
self.watermark.load(Ordering::Relaxed) == -1 { + self.watermark.store(key, Ordering::Relaxed); + } + + // Since pointers have been filled, the unwraps below are safe + // Update watermark to highest seen transaction version + if key > self.watermark.load(Ordering::Relaxed) { + self.watermark.store(key, Ordering::Relaxed); + } + + // Update tail to the latest consecutive transaction version + loop { + let tail = self.tail.load(Ordering::Relaxed); + let next_tail = self.get(&(tail + 1)); + + // If the next transaction does not exist or is not consecutive, break + // Unwrap ok because next_tail is not None + if next_tail.is_none() || next_tail.unwrap().transaction_version != tail + 1 { + break; + } + + // Update tail and notify stream + self.tail.store(tail + 1, Ordering::Relaxed); + self.stream_notify.notify_one(); + } + + // Notify eviction task if cache size exceeds trigger size + if self.cache.total_size() >= self.metadata.eviction_trigger_size_in_bytes { + self.eviction_notify.notify_one(); + } + } + + fn total_size(&self) -> usize { + self.cache.total_size() as usize + } +} + +impl InMemoryCache { + pub fn with_capacity( + eviction_trigger_size_in_bytes: usize, + target_size_in_bytes: usize, + capacity: usize, + ) -> Arc { + let c = SyncMutexCache::with_capacity(capacity); + let metadata = Arc::new(InMemoryCacheMetadata { + eviction_trigger_size_in_bytes, + target_size_in_bytes, + capacity: c.capacity(), + }); + let cache = Arc::new(c); + let eviction_notify = Arc::new(Notify::new()); + + let out = Arc::new(Self { + metadata, + cache, + eviction_notify, + stream_notify: Arc::new(Notify::new()), + head: Arc::new(AtomicI64::new(-1)), + tail: Arc::new(AtomicI64::new(-1)), + watermark: Arc::new(AtomicI64::new(-1)), + }); + + spawn_eviction_task(out.clone()); + + out + } + + /// Returns a stream of values in the cache starting from the given (transaction_version, event_index). + /// If the stream falls behind, the stream will return None for the next value (indicating that it should be reset). 
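As context for get_stream below: it builds its stream with futures::stream::unfold, where the carried state is the next transaction version to serve and each step either yields cached events or waits on a Notify. The following is a minimal, self-contained sketch of the unfold pattern itself; the counter and the formatted payload are stand-ins for the real cache lookup:

use futures::{stream, StreamExt};

#[tokio::main]
async fn main() {
    // State = next version to emit; returning None ends the stream.
    let mut versions = Box::pin(stream::unfold(0i64, |version| async move {
        if version < 5 {
            // In the real cache this is where events are fetched or awaited.
            Some((format!("events@{version}"), version + 1))
        } else {
            None
        }
    }));

    while let Some(batch) = versions.next().await {
        println!("{batch}");
    }
}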
+ pub fn get_stream(
+ &self,
+ starting_key: Option<i64>,
+ ) -> impl Stream<Item = Arc<CachedEvents>> + '_ {
+ // Start from the starting key if provided, otherwise start from the last key
+ let initial_state = starting_key.unwrap_or(self.tail.load(Ordering::Relaxed));
+
+ Box::pin(stream::unfold(initial_state, move |state| {
+ async move {
+ // If the current key is -1, the cache is empty
+ // Wait until a new value is inserted before assigning it
+ let mut current_transaction_version = state;
+ if current_transaction_version == -1 {
+ self.eviction_notify.notified().await;
+ // The tail should be populated after the first insert, so read it again here
+ current_transaction_version = self.tail.load(Ordering::Relaxed);
+ }
+
+ let last_transaction_version = self.tail.load(Ordering::Relaxed);
+
+ // Stream is ahead of cache
+ // If the last value in the cache has already been streamed, wait until the next value is inserted and return it
+ if current_transaction_version > last_transaction_version {
+ // Wait until the next value is inserted
+ loop {
+ self.stream_notify.notified().await;
+ if let Some(cached_events) = self.get(&current_transaction_version) {
+ return Some((cached_events, current_transaction_version + 1));
+ }
+ }
+ }
+ // Stream is in cache bounds
+ // If the next value to stream is in the cache, return it
+ else if let Some(cached_events) = self.get(&current_transaction_version) {
+ return Some((cached_events, current_transaction_version + 1));
+ }
+
+ // If we get here, the stream is behind the cache
+ // Stop the stream
+ None
+ }
+ }))
+ }
+}
+
+/// Perform cache eviction on a separate task.
+fn spawn_eviction_task(cache: Arc<InMemoryCache>) -> JoinHandle<()> {
+ tokio::spawn(async move {
+ loop {
+ cache.eviction_notify.notified().await;
+ // Evict entries until the cache size is below the target size
+ while cache.total_size() > cache.metadata.target_size_in_bytes {
+ // Unwrap ok because eviction_notify is only notified after head is populated
+ let eviction_key = cache.head.load(Ordering::Relaxed) as usize;
+ if let Some(value) = cache.cache.evict(&eviction_key) {
+ if value.key > eviction_key {
+ cache.cache.insert_with_size(
+ value.key,
+ value.value.clone(),
+ value.size_in_bytes,
+ );
+ break;
+ }
+ }
+
+ // Update head
+ cache.head.store(eviction_key as i64, Ordering::Relaxed);
+ }
+ }
+ })
+} diff --git a/rust/processor/src/utils/mod.rs b/rust/processor/src/utils/mod.rs index 4f13167fe..aa0c5044f 100644 --- a/rust/processor/src/utils/mod.rs +++ b/rust/processor/src/utils/mod.rs @@ -3,4 +3,8 @@ pub mod counters; pub mod database; +pub mod filter; +pub mod filter_editor; +pub mod in_memory_cache; +pub mod stream; pub mod util; diff --git a/rust/processor/src/utils/stream.rs b/rust/processor/src/utils/stream.rs new file mode 100644 index 000000000..a291b3a16 --- /dev/null +++ b/rust/processor/src/utils/stream.rs @@ -0,0 +1,128 @@ +// Copyright © Aptos Foundation
+
+use super::in_memory_cache::InMemoryCache;
+use crate::{
+ db::common::models::events_models::events::CachedEvents,
+ utils::{counters::GRPC_TO_PROCESSOR_1_SERVE_LATENCY_IN_SECS, filter::EventFilter},
+};
+use futures::{stream::SplitSink, SinkExt, StreamExt};
+use std::{fmt::Debug, sync::Arc};
+use tokio::sync::Notify;
+use tracing::{info, warn};
+use warp::filters::ws::{Message, WebSocket};
+
+pub struct Stream {
+ tx: SplitSink<WebSocket, Message>,
+ filter: Arc<EventFilter>,
+ cache: Arc<InMemoryCache>,
+ filter_edit_notify: Arc<Notify>,
+}
+
+impl Stream {
+ pub fn new(
+ tx: SplitSink<WebSocket, Message>,
+ filter: Arc<EventFilter>,
+ cache: Arc<InMemoryCache>,
+ filter_edit_notify: Arc<Notify>,
+ ) -> Self {
+ info!("Received WebSocket
connection"); + Self { + tx, + filter, + cache, + filter_edit_notify, + } + } + + /// Maintains websocket connection and sends messages from channel + pub async fn run(&mut self, starting_event: Option) { + let cache = self.cache.clone(); + let mut stream = Box::pin(cache.get_stream(starting_event)); + while let Some(cached_events) = stream.next().await { + if self.filter.is_empty() { + self.filter_edit_notify.notified().await; + } + + if let Err(e) = self.send_events(cached_events).await { + warn!( + error = ?e, + "Error sending events to WebSocket" + ); + break; + } + } + + if let Err(e) = self.tx.send(Message::text("Stream ended")).await { + warn!("Error sending error message: {:?}", e); + } + + if let Err(e) = self.tx.send(Message::close()).await { + warn!("Error sending close message: {:?}", e); + } + } + + async fn send_events(&mut self, cached_events: Arc) -> anyhow::Result<()> { + for event in cached_events.events.clone() { + if self.filter.accounts.contains(&event.account_address) + || self.filter.types.contains(&event.type_) + { + GRPC_TO_PROCESSOR_1_SERVE_LATENCY_IN_SECS.set({ + use chrono::TimeZone; + let transaction_timestamp = + chrono::Utc.from_utc_datetime(&event.transaction_timestamp); + let transaction_timestamp = std::time::SystemTime::from(transaction_timestamp); + std::time::SystemTime::now() + .duration_since(transaction_timestamp) + .unwrap_or_default() + .as_secs_f64() + }); + let msg = serde_json::to_string(&event).unwrap_or_default(); + info!( + account_address = event.account_address, + transaction_version = event.transaction_version, + event_index = event.event_index, + event = msg, + "Sending event through WebSocket" + ); + + if let Err(e) = self.tx.send(Message::text(msg)).await { + warn!( + error = ?e, + "[Event Stream] Failed to send message to WebSocket" + ); + return Err(anyhow::anyhow!( + "Failed to send message to WebSocket: {}", + e + )); + } + } + } + Ok(()) + } +} + +pub async fn spawn_stream( + tx: SplitSink, + filter: Arc, + cache: Arc, + starting_event: Option, + filter_edit_notify: Arc, +) { + let mut stream = Stream::new(tx, filter, cache, filter_edit_notify); + stream.run(starting_event).await; +} + +#[derive(Debug, Clone, Eq, PartialEq, Hash)] +pub struct EventCacheKey { + pub transaction_version: i64, + pub event_index: i64, +} + +impl EventCacheKey { + pub fn new(transaction_version: i64, event_index: i64) -> Self { + Self { + transaction_version, + event_index, + } + } +} diff --git a/rust/processor/src/utils/util.rs b/rust/processor/src/utils/util.rs index 14f750322..e00688586 100644 --- a/rust/processor/src/utils/util.rs +++ b/rust/processor/src/utils/util.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - models::property_map::{PropertyMap, TokenObjectPropertyMap}, + db::common::models::property_map::{PropertyMap, TokenObjectPropertyMap}, utils::counters::PROCESSOR_UNKNOWN_TYPE_COUNT, }; use aptos_protos::{ @@ -15,16 +15,29 @@ use aptos_protos::{ util::timestamp::Timestamp, }; use bigdecimal::{BigDecimal, Signed, ToPrimitive, Zero}; +use lazy_static::lazy_static; use serde::{Deserialize, Deserializer, Serialize}; use serde_json::Value; use sha2::Digest; use std::str::FromStr; +use tiny_keccak::{Hasher, Sha3}; // 9999-12-31 23:59:59, this is the max supported by Google BigQuery pub const MAX_TIMESTAMP_SECS: i64 = 253_402_300_799; // Max length of entry function id string to ensure that db doesn't explode pub const MAX_ENTRY_FUNCTION_LENGTH: usize = 1000; +pub const APTOS_COIN_TYPE_STR: &str = 
"0x1::aptos_coin::AptosCoin"; + +lazy_static! { + pub static ref APT_METADATA_ADDRESS_RAW: [u8; 32] = { + let mut addr = [0u8; 32]; + addr[31] = 10u8; + addr + }; + pub static ref APT_METADATA_ADDRESS_HEX: String = + format!("0x{}", hex::encode(*APT_METADATA_ADDRESS_RAW)); +} // Supporting structs to get clean payload without escaped strings #[derive(Debug, Deserialize, Serialize)] pub struct EntryFunctionPayloadClean { @@ -65,6 +78,14 @@ pub fn hash_str(val: &str) -> String { hex::encode(sha2::Sha256::digest(val.as_bytes())) } +pub fn sha3_256(buffer: &[u8]) -> [u8; 32] { + let mut output = [0; 32]; + let mut sha3 = Sha3::v256(); + sha3.update(buffer); + sha3.finalize(&mut output); + output +} + pub fn truncate_str(val: &str, max_chars: usize) -> String { let mut trunc = val.to_string(); trunc.truncate(max_chars); @@ -89,24 +110,28 @@ pub fn ensure_not_negative(val: BigDecimal) -> BigDecimal { pub fn get_entry_function_from_user_request( user_request: &UserTransactionRequest, ) -> Option { - let entry_function_id_str: String = match &user_request.payload.as_ref().unwrap().payload { - Some(PayloadType::EntryFunctionPayload(payload)) => payload.entry_function_id_str.clone(), - Some(PayloadType::MultisigPayload(payload)) => { - if let Some(payload) = payload.transaction_payload.as_ref() { - match payload.payload.as_ref().unwrap() { - MultisigPayloadType::EntryFunctionPayload(payload) => { - Some(payload.entry_function_id_str.clone()) - }, - }; - } - return None; + let entry_function_id_str: Option = match &user_request.payload { + Some(txn_payload) => match &txn_payload.payload { + Some(PayloadType::EntryFunctionPayload(payload)) => { + Some(payload.entry_function_id_str.clone()) + }, + Some(PayloadType::MultisigPayload(payload)) => { + if let Some(payload) = payload.transaction_payload.as_ref() { + match payload.payload.as_ref().unwrap() { + MultisigPayloadType::EntryFunctionPayload(payload) => { + Some(payload.entry_function_id_str.clone()) + }, + } + } else { + None + } + }, + _ => return None, }, - _ => return None, + None => return None, }; - Some(truncate_str( - &entry_function_id_str, - MAX_ENTRY_FUNCTION_LENGTH, - )) + + entry_function_id_str.map(|s| truncate_str(&s, MAX_ENTRY_FUNCTION_LENGTH)) } pub fn get_payload_type(payload: &TransactionPayload) -> String { @@ -251,11 +276,13 @@ pub fn parse_timestamp(ts: &Timestamp, version: i64) -> chrono::NaiveDateTime { } else { ts.clone() }; + #[allow(deprecated)] chrono::NaiveDateTime::from_timestamp_opt(final_ts.seconds, final_ts.nanos as u32) .unwrap_or_else(|| panic!("Could not parse timestamp {:?} for version {}", ts, version)) } pub fn parse_timestamp_secs(ts: u64, version: i64) -> chrono::NaiveDateTime { + #[allow(deprecated)] chrono::NaiveDateTime::from_timestamp_opt( std::cmp::min(ts, MAX_TIMESTAMP_SECS as u64) as i64, 0, @@ -328,7 +355,7 @@ where D: Deserializer<'de>, { let s = ::deserialize(deserializer)?; - Ok(convert_hex(s.clone()).unwrap_or(s)) + Ok(String::from_utf8(hex_to_raw_bytes(&s).unwrap()).unwrap_or(s)) } /// Convert the bcs serialized vector to its original string format @@ -388,10 +415,9 @@ pub fn convert_bcs_token_object_propertymap(s: Value) -> Option { } } -/// Convert the vector that is directly generated from b"xxx" -pub fn convert_hex(val: String) -> Option { - let decoded = hex::decode(val.strip_prefix("0x").unwrap_or(&*val)).ok()?; - String::from_utf8(decoded).ok() +/// Convert from hex string to raw byte string +pub fn hex_to_raw_bytes(val: &str) -> anyhow::Result> { + 
Ok(hex::decode(val.strip_prefix("0x").unwrap_or(val))?) } /// Deserialize from string to type T @@ -437,7 +463,7 @@ pub fn get_name_from_unnested_move_type(move_type: &str) -> &str { /* COMMON STRUCTS */ #[derive(Serialize, Deserialize, Debug, Clone)] -pub struct AggregatorU64 { +pub struct Aggregator { #[serde(deserialize_with = "deserialize_from_string")] pub value: BigDecimal, #[serde(deserialize_with = "deserialize_from_string")] @@ -445,7 +471,7 @@ pub struct AggregatorU64 { } #[derive(Serialize, Deserialize, Debug, Clone)] -pub struct AggregatorSnapshotU64 { +pub struct AggregatorSnapshot { #[serde(deserialize_with = "deserialize_from_string")] pub value: BigDecimal, } @@ -490,14 +516,14 @@ mod tests { }, 1, ); - assert_eq!(ts.timestamp(), 1649560602); + assert_eq!(ts.and_utc().timestamp(), 1649560602); assert_eq!(ts.year(), 2022); let ts2 = parse_timestamp_secs(600000000000000, 2); assert_eq!(ts2.year(), 9999); let ts3 = parse_timestamp_secs(1659386386, 2); - assert_eq!(ts3.timestamp(), 1659386386); + assert_eq!(ts3.and_utc().timestamp(), 1659386386); } #[test] diff --git a/rust/processor/src/worker.rs b/rust/processor/src/worker.rs index b3305221b..7841c0d87 100644 --- a/rust/processor/src/worker.rs +++ b/rust/processor/src/worker.rs @@ -3,17 +3,20 @@ use crate::{ config::IndexerGrpcHttp2Config, + db::common::models::{ledger_info::LedgerInfo, processor_status::ProcessorStatusQuery}, + gap_detectors::{create_gap_detector_status_tracker_loop, ProcessingResult}, grpc_stream::TransactionsPBResponse, - models::{ledger_info::LedgerInfo, processor_status::ProcessorStatusQuery}, processors::{ account_transactions_processor::AccountTransactionsProcessor, ans_processor::AnsProcessor, coin_processor::CoinProcessor, default_processor::DefaultProcessor, - events_processor::EventsProcessor, fungible_asset_processor::FungibleAssetProcessor, + event_stream_processor::EventStreamProcessor, events_processor::EventsProcessor, + fungible_asset_processor::FungibleAssetProcessor, monitoring_processor::MonitoringProcessor, nft_metadata_processor::NftMetadataProcessor, - objects_processor::ObjectsProcessor, stake_processor::StakeProcessor, - token_processor::TokenProcessor, token_v2_processor::TokenV2Processor, + objects_processor::ObjectsProcessor, parquet_default_processor::DefaultParquetProcessor, + stake_processor::StakeProcessor, token_processor::TokenProcessor, + token_v2_processor::TokenV2Processor, transaction_metadata_processor::TransactionMetadataProcessor, - user_transaction_processor::UserTransactionProcessor, ProcessingResult, Processor, + user_transaction_processor::UserTransactionProcessor, DefaultProcessingResult, Processor, ProcessorConfig, ProcessorTrait, }, schema::ledger_infos, @@ -28,28 +31,70 @@ use crate::{ SINGLE_BATCH_DB_INSERTION_TIME_IN_SECS, SINGLE_BATCH_PARSING_TIME_IN_SECS, SINGLE_BATCH_PROCESSING_TIME_IN_SECS, TRANSACTION_UNIX_TIMESTAMP, }, - database::{execute_with_better_error_conn, new_db_pool, run_pending_migrations, PgDbPool}, + database::{ + execute_with_better_error_conn, new_db_pool, run_pending_migrations, ArcDbPool, + }, + filter::EventFilter, + filter_editor::spawn_filter_editor, + in_memory_cache::InMemoryCache, + stream::spawn_stream, util::{time_diff_since_pb_timestamp_in_secs, timestamp_to_iso, timestamp_to_unixtime}, }, }; use ahash::AHashMap; use anyhow::{Context, Result}; use aptos_moving_average::MovingAverage; -use diesel::Connection; -use tokio::task::JoinHandle; +use bitflags::bitflags; +use futures::StreamExt; +use kanal::AsyncSender; +use 
std::{collections::HashSet, sync::Arc}; +use tokio::{sync::Notify, task::JoinHandle}; use tracing::{debug, error, info}; use url::Url; +use warp::Filter; // this is how large the fetch queue should be. Each bucket should have a max of 80MB or so, so a batch // of 50 means that we could potentially have at least 4.8GB of data in memory at any given time and that we should provision // machines accordingly. -pub const BUFFER_SIZE: usize = 100; -// Consumer thread will wait X seconds before panicking if it doesn't receive any data -pub const CONSUMER_THREAD_TIMEOUT_IN_SECS: u64 = 60 * 5; + +// TODO: Make this configurable +pub const BUFFER_SIZE: usize = 300; pub const PROCESSOR_SERVICE_TYPE: &str = "processor"; +bitflags! { + #[derive(Debug, Clone, Copy)] + pub struct TableFlags: u64 { + const TRANSACTIONS = 1; + const WRITE_SET_CHANGES = 2; + const MOVE_RESOURCES = 4; + const TABLE_ITEMS = 8; + } +} + +/// Handles WebSocket connection from /filter endpoint +async fn handle_websocket( + websocket: warp::ws::WebSocket, + query_params: AHashMap, + cache: Arc, +) { + let (tx, rx) = websocket.split(); + let filter = Arc::new(EventFilter::new()); + + let start: Option = query_params.get("start").map(|s| s.parse::().unwrap()); + + let filter_edit = filter.clone(); + let filter_edit_notify = Arc::new(Notify::new()); + + let filter_edit_notify_clone = filter_edit_notify.clone(); + tokio::spawn( + async move { spawn_filter_editor(rx, filter_edit, filter_edit_notify_clone).await }, + ); + + spawn_stream(tx, filter.clone(), cache.clone(), start, filter_edit_notify).await; +} + pub struct Worker { - pub db_pool: PgDbPool, + pub db_pool: ArcDbPool, pub processor_config: ProcessorConfig, pub postgres_connection_string: String, pub indexer_grpc_data_service_address: Url, @@ -59,14 +104,18 @@ pub struct Worker { pub ending_version: Option, pub number_concurrent_processing_tasks: usize, pub gap_detection_batch_size: u64, + pub parquet_gap_detection_batch_size: u64, pub grpc_chain_id: Option, pub pb_channel_txn_chunk_size: usize, pub per_table_chunk_sizes: AHashMap, pub enable_verbose_logging: Option, pub transaction_filter: TransactionFilter, + pub grpc_response_item_timeout_in_secs: u64, + pub deprecated_tables: TableFlags, } impl Worker { + #[allow(clippy::too_many_arguments)] pub async fn new( processor_config: ProcessorConfig, postgres_connection_string: String, @@ -78,11 +127,14 @@ impl Worker { number_concurrent_processing_tasks: Option, db_pool_size: Option, gap_detection_batch_size: u64, + parquet_gap_detection_batch_size: u64, // The number of transactions per protobuf batch pb_channel_txn_chunk_size: usize, per_table_chunk_sizes: AHashMap, enable_verbose_logging: Option, transaction_filter: TransactionFilter, + grpc_response_item_timeout_in_secs: u64, + deprecated_tables: HashSet, ) -> Result { let processor_name = processor_config.name(); info!(processor_name = processor_name, "[Parser] Kicking off"); @@ -101,6 +153,14 @@ impl Worker { "[Parser] Finish creating the connection pool" ); let number_concurrent_processing_tasks = number_concurrent_processing_tasks.unwrap_or(10); + + let mut deprecated_tables_flags = TableFlags::empty(); + for table in deprecated_tables.iter() { + if let Some(flags) = TableFlags::from_name(table) { + deprecated_tables_flags |= flags; + } + } + Ok(Self { db_pool: conn_pool, processor_config, @@ -112,11 +172,14 @@ impl Worker { auth_token, number_concurrent_processing_tasks, gap_detection_batch_size, + parquet_gap_detection_batch_size, grpc_chain_id: None, 
pb_channel_txn_chunk_size, per_table_chunk_sizes, enable_verbose_logging, transaction_filter, + grpc_response_item_timeout_in_secs, + deprecated_tables: deprecated_tables_flags, }) } @@ -167,6 +230,8 @@ impl Worker { "[Parser] Building processor", ); + let cache = InMemoryCache::with_capacity(3_300_000_000, 3_000_000_000, 1_000_000); + let concurrent_tasks = self.number_concurrent_processing_tasks; // get the chain id @@ -174,6 +239,7 @@ impl Worker { self.indexer_grpc_data_service_address.clone(), self.grpc_http2_config.grpc_http2_ping_interval_in_secs(), self.grpc_http2_config.grpc_http2_ping_timeout_in_secs(), + self.grpc_http2_config.grpc_connection_timeout_secs(), self.auth_token.clone(), processor_name.to_string(), ) @@ -190,6 +256,8 @@ impl Worker { self.grpc_http2_config.grpc_http2_ping_interval_in_secs(); let indexer_grpc_http2_ping_timeout = self.grpc_http2_config.grpc_http2_ping_timeout_in_secs(); + let indexer_grpc_reconnection_timeout_secs = + self.grpc_http2_config.grpc_connection_timeout_secs(); let pb_channel_txn_chunk_size = self.pb_channel_txn_chunk_size; // Create a transaction fetcher thread that will continuously fetch transactions from the GRPC stream @@ -199,6 +267,8 @@ impl Worker { let request_ending_version = self.ending_version; let auth_token = self.auth_token.clone(); let transaction_filter = self.transaction_filter.clone(); + let grpc_response_item_timeout = + std::time::Duration::from_secs(self.grpc_response_item_timeout_in_secs); let fetcher_task = tokio::spawn(async move { info!( processor_name = processor_name, @@ -213,6 +283,8 @@ impl Worker { indexer_grpc_data_service_address.clone(), indexer_grpc_http2_ping_interval, indexer_grpc_http2_ping_timeout, + indexer_grpc_reconnection_timeout_secs, + grpc_response_item_timeout, starting_version, request_ending_version, auth_token.clone(), @@ -223,17 +295,49 @@ impl Worker { .await }); + println!("1"); + // Create a gap detector task that will panic if there is a gap in the processing let (gap_detector_sender, gap_detector_receiver) = kanal::bounded_async::(BUFFER_SIZE); - let gap_detection_batch_size = self.gap_detection_batch_size; - let processor = build_processor( - &self.processor_config, - self.per_table_chunk_sizes.clone(), - self.db_pool.clone(), - ); + let (processor, gap_detection_batch_size, gap_detector_sender) = + if self.processor_config.is_parquet_processor() { + let processor = build_processor( + &self.processor_config, + self.per_table_chunk_sizes.clone(), + self.deprecated_tables, + self.db_pool.clone(), + Some(gap_detector_sender.clone()), + cache.clone(), + ); + let gap_detection_batch_size: u64 = self.parquet_gap_detection_batch_size; + + ( + processor, + gap_detection_batch_size, + Some(gap_detector_sender), + ) + } else { + let processor = build_processor( + &self.processor_config, + self.per_table_chunk_sizes.clone(), + self.deprecated_tables, + self.db_pool.clone(), + None, + cache.clone(), + ); + let gap_detection_batch_size = self.gap_detection_batch_size; + + ( + processor, + gap_detection_batch_size, + Some(gap_detector_sender), + ) + }; + println!("2"); + tokio::spawn(async move { - crate::gap_detector::create_gap_detector_status_tracker_loop( + create_gap_detector_status_tracker_loop( gap_detector_receiver, processor, starting_version, @@ -242,6 +346,25 @@ impl Worker { .await; }); + println!("3"); + if self.processor_config.name() == "event_stream_processor" { + println!("4"); + // Create web server + let cache_arc = cache.clone(); + let cache_ws = warp::any().map(move || 
cache_arc.clone()); + tokio::spawn(async move { + let ws_route = warp::path("stream") + .and(warp::ws()) + .and(warp::query::>()) + .and(cache_ws) + .map(|ws: warp::ws::Ws, query_params, cache| { + ws.on_upgrade(move |socket| handle_websocket(socket, query_params, cache)) + }); + + warp::serve(ws_route).run(([0, 0, 0, 0], 12345)).await; + }); + } + // This is the consumer side of the channel. These are the major states: // 1. We're backfilling so we should expect many concurrent threads to process transactions // 2. We're caught up so we should expect a single thread to process transactions @@ -260,7 +383,12 @@ impl Worker { let mut processor_tasks = vec![fetcher_task]; for task_index in 0..concurrent_tasks { let join_handle = self - .launch_processor_task(task_index, receiver.clone(), gap_detector_sender.clone()) + .launch_processor_task( + task_index, + receiver.clone(), + gap_detector_sender.clone(), + cache.clone(), + ) .await; processor_tasks.push(join_handle); } @@ -283,7 +411,8 @@ impl Worker { &self, task_index: usize, receiver: kanal::AsyncReceiver, - gap_detector_sender: kanal::AsyncSender, + gap_detector_sender: Option>, + cache: Arc, ) -> JoinHandle<()> { let processor_name = self.processor_config.name(); let stream_address = self.indexer_grpc_data_service_address.to_string(); @@ -294,7 +423,10 @@ impl Worker { let processor = build_processor( &self.processor_config, self.per_table_chunk_sizes.clone(), + self.deprecated_tables, self.db_pool.clone(), + gap_detector_sender.clone(), + cache.clone(), ); let concurrent_tasks = self.number_concurrent_processing_tasks; @@ -311,197 +443,274 @@ impl Worker { loop { let txn_channel_fetch_latency = std::time::Instant::now(); - - let transactions_pb = fetch_transactions( + match fetch_transactions( processor_name, &stream_address, receiver_clone.clone(), task_index, ) - .await; - - let size_in_bytes = transactions_pb.size_in_bytes as f64; - let first_txn_version = transactions_pb - .transactions - .first() - .map(|t| t.version) - .unwrap_or_default(); - let batch_first_txn_version = transactions_pb.start_version; - let last_txn_version = transactions_pb - .transactions - .last() - .map(|t| t.version) - .unwrap_or_default(); - let batch_last_txn_version = transactions_pb.end_version; - let start_txn_timestamp = transactions_pb.start_txn_timestamp.clone(); - let end_txn_timestamp = transactions_pb.end_txn_timestamp.clone(); - - let start_txn_timestamp_unix = start_txn_timestamp - .as_ref() - .map(timestamp_to_unixtime) - .unwrap_or_default(); - let start_txn_timestamp_iso = start_txn_timestamp - .as_ref() - .map(timestamp_to_iso) - .unwrap_or_default(); - let end_txn_timestamp_iso = end_txn_timestamp - .as_ref() - .map(timestamp_to_iso) - .unwrap_or_default(); - - let txn_channel_fetch_latency_sec = - txn_channel_fetch_latency.elapsed().as_secs_f64(); - - debug!( - processor_name = processor_name, - service_type = PROCESSOR_SERVICE_TYPE, - start_version = batch_first_txn_version, - end_version = batch_last_txn_version, - num_of_transactions = - (batch_last_txn_version - batch_first_txn_version) as i64 + 1, - size_in_bytes, - task_index, - duration_in_secs = txn_channel_fetch_latency_sec, - tps = (batch_last_txn_version as f64 - batch_first_txn_version as f64) - / txn_channel_fetch_latency_sec, - bytes_per_sec = size_in_bytes / txn_channel_fetch_latency_sec, - "[Parser][T#{}] Successfully fetched transactions from channel.", - task_index - ); - - // Ensure chain_id has not changed - if transactions_pb.chain_id != chain_id { - error!( - 
processor_name = processor_name, - stream_address = stream_address.as_str(), - chain_id = transactions_pb.chain_id, - existing_id = chain_id, - task_index, - "[Parser][T#{}] Stream somehow changed chain id!", - task_index - ); - panic!( - "[Parser][T#{}] Stream somehow changed chain id!", - task_index - ); - } - - let processing_time = std::time::Instant::now(); - - let res = do_processor( - transactions_pb, - &processor, - chain_id, - processor_name, - &auth_token, - false, // enable_verbose_logging - ) - .await; + .await + { + // Fetched transactions from channel + Ok(transactions_pb) => { + let size_in_bytes = transactions_pb.size_in_bytes as f64; + let first_txn_version = transactions_pb + .transactions + .first() + .map(|t| t.version) + .unwrap_or_default(); + let batch_first_txn_version = transactions_pb.start_version; + let last_txn_version = transactions_pb + .transactions + .last() + .map(|t| t.version) + .unwrap_or_default(); + let batch_last_txn_version = transactions_pb.end_version; + let start_txn_timestamp = transactions_pb.start_txn_timestamp.clone(); + let end_txn_timestamp = transactions_pb.end_txn_timestamp.clone(); + + let start_txn_timestamp_unix = start_txn_timestamp + .as_ref() + .map(timestamp_to_unixtime) + .unwrap_or_default(); + let start_txn_timestamp_iso = start_txn_timestamp + .as_ref() + .map(timestamp_to_iso) + .unwrap_or_default(); + let end_txn_timestamp_iso = end_txn_timestamp + .as_ref() + .map(timestamp_to_iso) + .unwrap_or_default(); + + let txn_channel_fetch_latency_sec = + txn_channel_fetch_latency.elapsed().as_secs_f64(); + + debug!( + processor_name = processor_name, + service_type = PROCESSOR_SERVICE_TYPE, + start_version = batch_first_txn_version, + end_version = batch_last_txn_version, + num_of_transactions = + (batch_last_txn_version - batch_first_txn_version) as i64 + 1, + size_in_bytes, + task_index, + duration_in_secs = txn_channel_fetch_latency_sec, + tps = (batch_last_txn_version as f64 - batch_first_txn_version as f64) + / txn_channel_fetch_latency_sec, + bytes_per_sec = size_in_bytes / txn_channel_fetch_latency_sec, + "[Parser][T#{}] Successfully fetched transactions from channel.", + task_index + ); - let processing_result = match res { - Ok(versions) => { - PROCESSOR_SUCCESSES_COUNT - .with_label_values(&[processor_name]) - .inc(); - versions + // Ensure chain_id has not changed + if transactions_pb.chain_id != chain_id { + error!( + processor_name = processor_name, + stream_address = stream_address.as_str(), + chain_id = transactions_pb.chain_id, + existing_id = chain_id, + task_index, + "[Parser][T#{}] Stream somehow changed chain id!", + task_index + ); + panic!( + "[Parser][T#{}] Stream somehow changed chain id!", + task_index + ); + } + + let processing_time = std::time::Instant::now(); + + let res = do_processor( + transactions_pb, + &processor, + chain_id, + processor_name, + &auth_token, + false, // enable_verbose_logging + ) + .await; + + let processing_result = match res { + Ok(versions) => { + PROCESSOR_SUCCESSES_COUNT + .with_label_values(&[processor_name]) + .inc(); + versions + }, + Err(e) => { + error!( + processor_name = processor_name, + stream_address = stream_address.as_str(), + error = ?e, + task_index, + "[Parser][T#{}] Error processing transactions", task_index + ); + PROCESSOR_ERRORS_COUNT + .with_label_values(&[processor_name]) + .inc(); + panic!( + "[Parser][T#{}] Error processing '{:}' transactions: {:?}", + task_index, processor_name, e + ); + }, + }; + + match processing_result { + 
ProcessingResult::DefaultProcessingResult(processing_result) => { + let processing_time = processing_time.elapsed().as_secs_f64(); + + // We've processed things: do some data and metrics + ma.tick_now((last_txn_version - first_txn_version) + 1); + let tps = ma.avg().ceil() as u64; + + let num_processed = (last_txn_version - first_txn_version) + 1; + + debug!( + processor_name = processor_name, + service_type = PROCESSOR_SERVICE_TYPE, + first_txn_version, + batch_first_txn_version, + last_txn_version, + batch_last_txn_version, + start_txn_timestamp_iso, + end_txn_timestamp_iso, + num_of_transactions = num_processed, + concurrent_tasks, + task_index, + size_in_bytes, + processing_duration_in_secs = + processing_result.processing_duration_in_secs, + db_insertion_duration_in_secs = + processing_result.db_insertion_duration_in_secs, + duration_in_secs = processing_time, + tps = tps, + bytes_per_sec = size_in_bytes / processing_time, + step = &step, + "{}", + label, + ); + + // TODO: For these three, do an atomic thing, or ideally move to an async metrics collector! + GRPC_LATENCY_BY_PROCESSOR_IN_SECS + .with_label_values(&[processor_name, &task_index_str]) + .set(time_diff_since_pb_timestamp_in_secs( + end_txn_timestamp.as_ref().unwrap(), + )); + LATEST_PROCESSED_VERSION + .with_label_values(&[ + processor_name, + step, + label, + &task_index_str, + ]) + .set(last_txn_version as i64); + TRANSACTION_UNIX_TIMESTAMP + .with_label_values(&[ + processor_name, + step, + label, + &task_index_str, + ]) + .set(start_txn_timestamp_unix); + + // Single batch metrics + PROCESSED_BYTES_COUNT + .with_label_values(&[ + processor_name, + step, + label, + &task_index_str, + ]) + .inc_by(size_in_bytes as u64); + NUM_TRANSACTIONS_PROCESSED_COUNT + .with_label_values(&[ + processor_name, + step, + label, + &task_index_str, + ]) + .inc_by(num_processed); + + SINGLE_BATCH_PROCESSING_TIME_IN_SECS + .with_label_values(&[processor_name, &task_index_str]) + .set(processing_time); + SINGLE_BATCH_PARSING_TIME_IN_SECS + .with_label_values(&[processor_name, &task_index_str]) + .set(processing_result.processing_duration_in_secs); + SINGLE_BATCH_DB_INSERTION_TIME_IN_SECS + .with_label_values(&[processor_name, &task_index_str]) + .set(processing_result.db_insertion_duration_in_secs); + + gap_detector_sender + .as_ref() + .unwrap() + .send(ProcessingResult::DefaultProcessingResult( + processing_result, + )) + .await + .expect("[Parser] Failed to send versions to gap detector"); + }, + ProcessingResult::ParquetProcessingResult(_) => { + debug!("parquet processing result doesn't need to be handled here"); + }, + } }, + // Could not fetch transactions from channel. This happens when there are + // no more transactions to fetch and the channel is closed. 
Err(e) => { error!( processor_name = processor_name, stream_address = stream_address.as_str(), error = ?e, task_index, - "[Parser][T#{}] Error processing transactions", task_index - ); - PROCESSOR_ERRORS_COUNT - .with_label_values(&[processor_name]) - .inc(); - panic!( - "[Parser][T#{}] Error processing '{:}' transactions: {:?}", - task_index, processor_name, e + "[Parser][T#{}] Consumer thread exiting fetching loop", task_index ); + break; }, - }; - - let processing_time = processing_time.elapsed().as_secs_f64(); - - // We've processed things: do some data and metrics - - ma.tick_now((last_txn_version - first_txn_version) + 1); - let tps = (ma.avg() * 1000.0) as u64; - - let num_processed = (last_txn_version - first_txn_version) + 1; - - debug!( - processor_name = processor_name, - service_type = PROCESSOR_SERVICE_TYPE, - first_txn_version, - batch_first_txn_version, - last_txn_version, - batch_last_txn_version, - start_txn_timestamp_iso, - end_txn_timestamp_iso, - num_of_transactions = num_processed, - concurrent_tasks, - task_index, - size_in_bytes, - processing_duration_in_secs = processing_result.processing_duration_in_secs, - db_insertion_duration_in_secs = processing_result.db_insertion_duration_in_secs, - duration_in_secs = processing_time, - tps = tps, - bytes_per_sec = size_in_bytes / processing_time, - step = &step, - "{}", - label, - ); - - // TODO: For these three, do an atomic thing, or ideally move to an async metrics collector! - GRPC_LATENCY_BY_PROCESSOR_IN_SECS - .with_label_values(&[processor_name, &task_index_str]) - .set(time_diff_since_pb_timestamp_in_secs( - end_txn_timestamp.as_ref().unwrap(), - )); - LATEST_PROCESSED_VERSION - .with_label_values(&[processor_name, step, label, &task_index_str]) - .set(last_txn_version as i64); - TRANSACTION_UNIX_TIMESTAMP - .with_label_values(&[processor_name, step, label, &task_index_str]) - .set(start_txn_timestamp_unix); - - // Single batch metrics - PROCESSED_BYTES_COUNT - .with_label_values(&[processor_name, step, label, &task_index_str]) - .inc_by(size_in_bytes as u64); - NUM_TRANSACTIONS_PROCESSED_COUNT - .with_label_values(&[processor_name, step, label, &task_index_str]) - .inc_by(num_processed); - - SINGLE_BATCH_PROCESSING_TIME_IN_SECS - .with_label_values(&[processor_name, &task_index_str]) - .set(processing_time); - SINGLE_BATCH_PARSING_TIME_IN_SECS - .with_label_values(&[processor_name, &task_index_str]) - .set(processing_result.processing_duration_in_secs); - SINGLE_BATCH_DB_INSERTION_TIME_IN_SECS - .with_label_values(&[processor_name, &task_index_str]) - .set(processing_result.db_insertion_duration_in_secs); - - // Send the result to the gap detector - gap_detector_sender - .send(processing_result) - .await - .expect("[Parser] Failed to send versions to gap detector"); + } } }) } + // For the normal processor build we just use standard Diesel with the postgres + // feature enabled (which uses libpq under the hood, hence why we named the feature + // this way). + #[cfg(feature = "libpq")] async fn run_migrations(&self) { + use crate::diesel::Connection; use diesel::pg::PgConnection; - println!("Running migrations: {:?}", self.postgres_connection_string); + info!("Running migrations: {:?}", self.postgres_connection_string); let mut conn = PgConnection::establish(&self.postgres_connection_string).expect("migrations failed!"); - run_pending_migrations(&mut conn).await; + run_pending_migrations(&mut conn); + } + + // If the libpq feature isn't enabled, we use diesel async instead. 
This is used by + // the CLI for the local testnet, where we cannot tolerate the libpq dependency. + #[cfg(not(feature = "libpq"))] + async fn run_migrations(&self) { + use diesel_async::async_connection_wrapper::AsyncConnectionWrapper; + + info!("Running migrations: {:?}", self.postgres_connection_string); + let conn = self + .db_pool + // We need to use this since AsyncConnectionWrapper doesn't know how to + // work with a pooled connection. + .dedicated_connection() + .await + .expect("[Parser] Failed to get connection"); + // We use spawn_blocking since run_pending_migrations is a blocking function. + tokio::task::spawn_blocking(move || { + // This lets us use the connection like a normal diesel connection. See more: + // https://docs.rs/diesel-async/latest/diesel_async/async_connection_wrapper/type.AsyncConnectionWrapper.html + let mut conn: AsyncConnectionWrapper = + AsyncConnectionWrapper::from(conn); + run_pending_migrations(&mut conn); + }) + .await + .expect("[Parser] Failed to run migrations"); } /// Gets the start version for the processor. If not found, start from 0. @@ -565,7 +774,7 @@ async fn fetch_transactions( stream_address: &str, receiver: kanal::AsyncReceiver, task_index: usize, -) -> TransactionsPBResponse { +) -> Result { let pb_channel_fetch_time = std::time::Instant::now(); let txn_pb_res = receiver.recv().await; // Track how much time this task spent waiting for a pb bundle @@ -574,19 +783,19 @@ async fn fetch_transactions( .set(pb_channel_fetch_time.elapsed().as_secs_f64()); match txn_pb_res { - Ok(txn_pb) => txn_pb, + Ok(txn_pb) => Ok(txn_pb), Err(_e) => { error!( processor_name = processor_name, service_type = PROCESSOR_SERVICE_TYPE, stream_address = stream_address, - "[Parser][T#{}] Consumer thread timed out waiting for transactions", + "[Parser][T#{}] Consumer thread receiver channel closed.", task_index ); - panic!( - "[Parser][T#{}] Consumer thread timed out waiting for transactions", + Err(anyhow::anyhow!( + "[Parser][T#{}] Consumer thread receiver channel closed.", task_index - ); + )) }, } } @@ -605,13 +814,15 @@ pub async fn do_processor( // Fake this as it's possible we have filtered out all of the txns in this batch if transactions_pb.transactions.is_empty() { - return Ok(ProcessingResult { - start_version, - end_version, - processing_duration_in_secs: 0.0, - db_insertion_duration_in_secs: 0.0, - last_transaction_timestamp: transactions_pb.end_txn_timestamp, - }); + return Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + processing_duration_in_secs: 0.0, + db_insertion_duration_in_secs: 0.0, + last_transaction_timestamp: transactions_pb.end_txn_timestamp, + }, + )); } let txn_time = transactions_pb.start_txn_timestamp; @@ -662,7 +873,10 @@ pub async fn do_processor( pub fn build_processor( config: &ProcessorConfig, per_table_chunk_sizes: AHashMap, - db_pool: PgDbPool, + deprecated_tables: TableFlags, + db_pool: ArcDbPool, + gap_detector_sender: Option>, // Parquet only + cache: Arc, ) -> Processor { match config { ProcessorConfig::AccountTransactionsProcessor => Processor::from( @@ -670,18 +884,23 @@ pub fn build_processor( ), ProcessorConfig::AnsProcessor(config) => Processor::from(AnsProcessor::new( db_pool, - per_table_chunk_sizes, config.clone(), + per_table_chunk_sizes, )), ProcessorConfig::CoinProcessor => { Processor::from(CoinProcessor::new(db_pool, per_table_chunk_sizes)) }, - ProcessorConfig::DefaultProcessor => { - Processor::from(DefaultProcessor::new(db_pool, 
per_table_chunk_sizes)) - }, + ProcessorConfig::DefaultProcessor => Processor::from(DefaultProcessor::new( + db_pool, + per_table_chunk_sizes, + deprecated_tables, + )), ProcessorConfig::EventsProcessor => { Processor::from(EventsProcessor::new(db_pool, per_table_chunk_sizes)) }, + ProcessorConfig::EventStreamProcessor => { + Processor::from(EventStreamProcessor::new(db_pool, cache)) + }, ProcessorConfig::FungibleAssetProcessor => { Processor::from(FungibleAssetProcessor::new(db_pool, per_table_chunk_sizes)) }, @@ -689,25 +908,38 @@ pub fn build_processor( ProcessorConfig::NftMetadataProcessor(config) => { Processor::from(NftMetadataProcessor::new(db_pool, config.clone())) }, - ProcessorConfig::ObjectsProcessor => { - Processor::from(ObjectsProcessor::new(db_pool, per_table_chunk_sizes)) - }, - ProcessorConfig::StakeProcessor => { - Processor::from(StakeProcessor::new(db_pool, per_table_chunk_sizes)) - }, + ProcessorConfig::ObjectsProcessor(config) => Processor::from(ObjectsProcessor::new( + db_pool, + config.clone(), + per_table_chunk_sizes, + )), + ProcessorConfig::StakeProcessor(config) => Processor::from(StakeProcessor::new( + db_pool, + config.clone(), + per_table_chunk_sizes, + )), ProcessorConfig::TokenProcessor(config) => Processor::from(TokenProcessor::new( db_pool, + config.clone(), per_table_chunk_sizes, + )), + ProcessorConfig::TokenV2Processor(config) => Processor::from(TokenV2Processor::new( + db_pool, config.clone(), + per_table_chunk_sizes, )), - ProcessorConfig::TokenV2Processor => { - Processor::from(TokenV2Processor::new(db_pool, per_table_chunk_sizes)) - }, ProcessorConfig::TransactionMetadataProcessor => Processor::from( TransactionMetadataProcessor::new(db_pool, per_table_chunk_sizes), ), ProcessorConfig::UserTransactionProcessor => Processor::from( UserTransactionProcessor::new(db_pool, per_table_chunk_sizes), ), + ProcessorConfig::DefaultParquetProcessor(config) => { + Processor::from(DefaultParquetProcessor::new( + db_pool, + config.clone(), + gap_detector_sender.expect("Parquet processor requires a gap detector sender"), + )) + }, } } diff --git a/rust/scripts/check_banned_deps.sh b/rust/scripts/check_banned_deps.sh new file mode 100755 index 000000000..4a69baf43 --- /dev/null +++ b/rust/scripts/check_banned_deps.sh @@ -0,0 +1,52 @@ +#!/bin/sh + +# This script checks if the crate depends on external deps that it shouldn't. We run +# this in CI to make sure we don't accidentally reintroduce deps that would make the +# crate unusable for the CLI. +# +# While it would be more reliable to actually build the crate and check what libraries +# it links to, e.g. with otool, it is much cheaper to use cargo tree. As far as I can +# tell the entire Rust ecosystem makes use of these `x-sys` libraries to depend on +# external dynamically linked libraries. +# +# We can almost use cargo deny but it doesn't support checking specific build paths. We +# don't care if openssl-sys for example is used at build time (which it is, indirectly +# by shadow-rs), only at run time. See more here: +# https://github.com/EmbarkStudios/cargo-deny/issues/563 +# +# It assumes cargo and friends are available. +# +# Run this from the rust/ directory. + +cd "$(dirname "$0")" +cd .. + +declare -a deps=("pq-sys" "openssl-sys") + +for dep in "${deps[@]}"; do + echo "Checking for banned dependency $dep..." + + # Check for deps. As you can see, we only check for MacOS right now. 
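+ # For context: the `cargo tree -i "$dep"` invocation below should exit non-zero when the dep is
+ # not in the dependency graph for the given target, which is what the exit-status check relies on.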
+ # We specify --no-default-features because we only care about these banned deps + # for the local testnet use case, in which case it opts out of the default + # features. + out=`cargo tree --no-default-features -e features,no-build,no-dev --target aarch64-apple-darwin -p processor -i "$dep"` + + # If the exit status was non-zero, great, the dep couldn't be found. + if [ $? -ne 0 ]; then + continue + fi + + # If the exit status was zero we have to check the output to see if the dep is in + # use. If it is in the output, it is in use. + if [[ $out != *"$dep"* ]]; then + continue + fi + + echo "Banned dependency $dep found!" + exit 1 +done + +echo +echo "None of the banned dependencies are in use, great!" +exit 0 diff --git a/rust/scripts/rust_lint.sh b/rust/scripts/rust_lint.sh index 2db0379e1..5767e338e 100755 --- a/rust/scripts/rust_lint.sh +++ b/rust/scripts/rust_lint.sh @@ -5,7 +5,7 @@ # # The best way to do this however is to run scripts/dev_setup.sh # -# If you want to run this from anywhere in aptos-core, try adding this wrapepr +# If you want to run this from anywhere in aptos-core, try adding this wrapper # script to your path: # https://gist.github.com/banool/e6a2b85e2fff067d3a215cbfaf808032 diff --git a/rust/server-framework/Cargo.toml b/rust/server-framework/Cargo.toml index 0ec2f4609..8412ea436 100644 --- a/rust/server-framework/Cargo.toml +++ b/rust/server-framework/Cargo.toml @@ -16,7 +16,6 @@ anyhow = { workspace = true } async-trait = { workspace = true } backtrace = { workspace = true } clap = { workspace = true } -futures = { workspace = true } prometheus = { workspace = true } serde = { workspace = true } serde_yaml = { workspace = true } @@ -26,3 +25,6 @@ toml = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } warp = { workspace = true } + +[target.'cfg(target_os = "linux")'.dependencies] +aptos-system-utils = { workspace = true } diff --git a/rust/server-framework/src/lib.rs b/rust/server-framework/src/lib.rs index 333f4ac88..a81361fc9 100644 --- a/rust/server-framework/src/lib.rs +++ b/rust/server-framework/src/lib.rs @@ -1,10 +1,14 @@ // Copyright © Aptos Foundation -use anyhow::{bail, Context, Ok, Result}; +use anyhow::{Context, Result}; +#[cfg(target_os = "linux")] +use aptos_system_utils::profiling::start_cpu_profiling; use backtrace::Backtrace; use clap::Parser; use prometheus::{Encoder, TextEncoder}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; +#[cfg(target_os = "linux")] +use std::convert::Infallible; use std::{fs::File, io::Read, panic::PanicInfo, path::PathBuf, process}; use tokio::runtime::Handle; use tracing::error; @@ -42,17 +46,15 @@ where // Start liveness and readiness probes. let task_handler = handle.spawn(async move { register_probes_and_metrics_handler(health_port).await; - Ok(()) + anyhow::Ok(()) }); let main_task_handler = handle.spawn(async move { config.run().await }); tokio::select! 
{ - _ = task_handler => { - error!("Probes and metrics handler unexpectedly exited"); - bail!("Probes and metrics handler unexpectedly exited"); + res = task_handler => { + res.expect("Probes and metrics handler unexpectedly exited") }, - _ = main_task_handler => { - error!("Main task unexpectedly exited"); - bail!("Main task unexpectedly exited"); + res = main_task_handler => { + res.expect("Main task handler unexpectedly exited") }, } } @@ -164,9 +166,42 @@ async fn register_probes_and_metrics_handler(port: u16) { .header("Content-Type", "text/plain") .body(encode_buffer) }); - warp::serve(readiness.or(metrics_endpoint)) - .run(([0, 0, 0, 0], port)) - .await; + + if cfg!(target_os = "linux") { + #[cfg(target_os = "linux")] + let profilez = warp::path("profilez").and_then(|| async move { + // TODO(grao): Consider make the parameters configurable. + Ok::<_, Infallible>(match start_cpu_profiling(10, 99, false).await { + Ok(body) => { + let response = Response::builder() + .header("Content-Length", body.len()) + .header("Content-Disposition", "inline") + .header("Content-Type", "image/svg+xml") + .body(body); + + match response { + Ok(res) => warp::reply::with_status(res, warp::http::StatusCode::OK), + Err(e) => warp::reply::with_status( + Response::new(format!("Profiling failed: {e:?}.").as_bytes().to_vec()), + warp::http::StatusCode::INTERNAL_SERVER_ERROR, + ), + } + }, + Err(e) => warp::reply::with_status( + Response::new(format!("Profiling failed: {e:?}.").as_bytes().to_vec()), + warp::http::StatusCode::INTERNAL_SERVER_ERROR, + ), + }) + }); + #[cfg(target_os = "linux")] + warp::serve(readiness.or(metrics_endpoint).or(profilez)) + .run(([0, 0, 0, 0], port)) + .await; + } else { + warp::serve(readiness.or(metrics_endpoint)) + .run(([0, 0, 0, 0], port)) + .await; + } } #[cfg(test)] diff --git a/scripts/release-processor-images.mjs b/scripts/release-processor-images.mjs index 9378e508a..3013678da 100755 --- a/scripts/release-processor-images.mjs +++ b/scripts/release-processor-images.mjs @@ -22,8 +22,8 @@ // 3. gcloud auth configure-docker us-west1-docker.pkg.dev // 4. 
gcloud auth login --update-adc // -// Once you have all prerequisites fulfilled, you can run this script via: -// GIT_SHA=${{ github.sha }} GCP_DOCKER_ARTIFACT_PROCESSOR_REPO_US="${{ secrets.GCP_DOCKER_ARTIFACT_REPO }}" ./docker/release-processor-images.mjs --language=rust --wait-for-image-seconds=1800 +// Once you have all prerequisites fulfilled, you can run this script via (note the GH context and vars): +// GIT_SHA=${{ github.sha }} GCP_DOCKER_ARTIFACT_REPO="${{ vars.GCP_DOCKER_ARTIFACT_REPO }}" ./docker/release-processor-images.mjs --language=rust --wait-for-image-seconds=1800 import { dirname } from "node:path"; @@ -36,7 +36,7 @@ chdir(dirname(process.argv[1]) + "/.."); await import("zx/globals"); -const REQUIRED_ARGS = ["LANGUAGE", "GIT_SHA", "GCP_DOCKER_ARTIFACT_PROCESSOR_REPO_US"]; +const REQUIRED_ARGS = ["LANGUAGE", "GIT_SHA", "GCP_DOCKER_ARTIFACT_REPO"]; const OPTIONAL_ARGS = ["VERSION_TAG", "WAIT_FOR_IMAGE_SECONDS"]; const parsedArgs = {}; @@ -88,13 +88,13 @@ function getImage(language) { return {sourceImage, targetImage}; } -const GCP_ARTIFACT_PROCESSOR_REPO_US = parsedArgs.GCP_DOCKER_ARTIFACT_PROCESSOR_REPO_US; +const GCP_DOCKER_ARTIFACT_REPO = parsedArgs.GCP_DOCKER_ARTIFACT_REPO; const DOCKERHUB = "docker.io/aptoslabs"; const {sourceImage, targetImage} = getImage(parsedArgs.LANGUAGE); console.info(chalk.yellow(`INFO: Target image: ${targetImage}`)); -const imageSource = `${GCP_ARTIFACT_PROCESSOR_REPO_US}/${sourceImage}:${parsedArgs.GIT_SHA}`; +const imageSource = `${GCP_DOCKER_ARTIFACT_REPO}/${sourceImage}:${parsedArgs.GIT_SHA}`; const imageGitShaTarget = `${DOCKERHUB}/${targetImage}:${parsedArgs.GIT_SHA}`; console.info(chalk.green(`INFO: Waiting for ${imageSource} to become available in the source repo`)); diff --git a/test_move_script/Move.toml b/test_move_script/Move.toml index 213daa04e..81175f294 100644 --- a/test_move_script/Move.toml +++ b/test_move_script/Move.toml @@ -2,6 +2,9 @@ name = 'run_script' version = '1.0.0' +[addresses] +test_addr = "_" + [dependencies.AptosFramework] git = 'https://github.com/aptos-labs/aptos-core.git' rev = 'mainnet' @@ -20,4 +23,4 @@ subdir = 'aptos-move/framework/move-stdlib' [dependencies.AptosToken] git = 'https://github.com/aptos-labs/aptos-core.git' rev = 'mainnet' -subdir = 'aptos-move/framework/aptos-token' \ No newline at end of file +subdir = 'aptos-move/framework/aptos-token' diff --git a/test_move_script/README.md b/test_move_script/README.md index 9df477b00..136fdf56c 100644 --- a/test_move_script/README.md +++ b/test_move_script/README.md @@ -1,14 +1,18 @@ ## Purpose + Sometimes we might need to create move transactions that simulate edge cases. For example, we noticed that mint + burn the same token in a single transaction creates problems, and to repro we'd have to submit a blockchain transaction. This is an example of how to create a script. Checkout how scripts work in: https://stackoverflow.com/questions/74627977/how-do-i-execute-a-move-script-with-the-aptos-cli. -This script attempts to get the signer of the resource account and deploy code to the resource account from the admin account. +This script attempts to get the signer of the resource account and deploy code to the resource account from the admin account. ## How to run this code? 
-`aptos move compile && aptos move run-script --compiled-script-path build/run_script/bytecode_scripts/main.mv --profile blah` - * If you haven't created a profile before run `aptos init --profile blah` + +`aptos move compile && aptos move publish --named-addresses test_addr=default && aptos move run-script --compiled-script-path build/run_script/bytecode_scripts/main.mv --profile blah` + +- If you haven't created a profile before run `aptos init --profile blah` ## Notes - * This is meant to be a template. You should build your own script. - * In `Move.toml`, you can change the revision for the frameworks to test new code, from `main` for example. \ No newline at end of file + +- This is meant to be a template. You should build your own script. +- In `Move.toml`, you can change the revision for the frameworks to test new code, from `main` for example. diff --git a/test_move_script/sources/modules/managed_fungible_asset.move b/test_move_script/sources/modules/managed_fungible_asset.move new file mode 100644 index 000000000..ef9fac8bd --- /dev/null +++ b/test_move_script/sources/modules/managed_fungible_asset.move @@ -0,0 +1,419 @@ +/// This module provides a managed fungible asset that allows the owner of the metadata object to +/// mint, transfer and burn fungible assets. +/// +/// The functionalities offered by this module are: +/// 1. Mint fungible assets to fungible stores as the owner of metadata object. +/// 2. Transfer fungible assets as the owner of metadata object ignoring `frozen` field between fungible stores. +/// 3. Burn fungible assets from fungible stores as the owner of metadata object. +/// 4. Withdraw the merged fungible assets from fungible stores as the owner of metadata object. +/// 5. Deposit fungible assets to fungible stores. +module test_addr::managed_fungible_asset { + use aptos_framework::fungible_asset::{Self, MintRef, TransferRef, BurnRef, Metadata, FungibleStore, FungibleAsset}; + use aptos_framework::object::{Self, Object, ConstructorRef}; + use aptos_framework::primary_fungible_store; + use std::error; + use std::signer; + use std::string::String; + use std::option; + + /// Only fungible asset metadata owner can make changes. + const ERR_NOT_OWNER: u64 = 1; + /// The length of ref_flags is not 3. + const ERR_INVALID_REF_FLAGS_LENGTH: u64 = 2; + /// The lengths of two vector do not equal. + const ERR_VECTORS_LENGTH_MISMATCH: u64 = 3; + /// MintRef error. + const ERR_MINT_REF: u64 = 4; + /// TransferRef error. + const ERR_TRANSFER_REF: u64 = 5; + /// BurnRef error. + const ERR_BURN_REF: u64 = 6; + + #[resource_group_member(group = aptos_framework::object::ObjectGroup)] + /// Hold refs to control the minting, transfer and burning of fungible assets. + struct ManagingRefs has key { + mint_ref: Option, + transfer_ref: Option, + burn_ref: Option, + } + + /// Initialize metadata object and store the refs specified by `ref_flags`. 
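+ // Illustrative note (example values, not required by the module): ref_flags = vector[true, true, true]
+ // keeps the MintRef, TransferRef and BurnRef below, and a maximum_supply of 0 means no supply cap.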
+ public fun initialize( + constructor_ref: &ConstructorRef, + maximum_supply: u128, + name: String, + symbol: String, + decimals: u8, + icon_uri: String, + project_uri: String, + ref_flags: vector, + ) { + assert!(vector::length(&ref_flags) == 3, error::invalid_argument(ERR_INVALID_REF_FLAGS_LENGTH)); + let supply = if (maximum_supply != 0) { + option::some(maximum_supply) + } else { + option::none() + }; + primary_fungible_store::create_primary_store_enabled_fungible_asset( + constructor_ref, + supply, + name, + symbol, + decimals, + icon_uri, + project_uri, + ); + + // Optionally create mint/burn/transfer refs to allow creator to manage the fungible asset. + let mint_ref = if (*vector::borrow(&ref_flags, 0)) { + option::some(fungible_asset::generate_mint_ref(constructor_ref)) + } else { + option::none() + }; + let transfer_ref = if (*vector::borrow(&ref_flags, 1)) { + option::some(fungible_asset::generate_transfer_ref(constructor_ref)) + } else { + option::none() + }; + let burn_ref = if (*vector::borrow(&ref_flags, 2)) { + option::some(fungible_asset::generate_burn_ref(constructor_ref)) + } else { + option::none() + }; + let metadata_object_signer = object::generate_signer(constructor_ref); + move_to( + &metadata_object_signer, + ManagingRefs { mint_ref, transfer_ref, burn_ref } + ) + } + + /// Mint as the owner of metadata object to the primary fungible stores of the accounts with amounts of FAs. + public entry fun mint_to_primary_stores( + admin: &signer, + asset: Object, + to: vector
, + amounts: vector + ) acquires ManagingRefs { + let receiver_primary_stores = vector::map( + to, + |addr| primary_fungible_store::ensure_primary_store_exists(addr, asset) + ); + mint(admin, asset, receiver_primary_stores, amounts); + } + + + /// Mint as the owner of metadata object to multiple fungible stores with amounts of FAs. + public entry fun mint( + admin: &signer, + asset: Object, + stores: vector>, + amounts: vector, + ) acquires ManagingRefs { + let length = vector::length(&stores); + assert!(length == vector::length(&amounts), error::invalid_argument(ERR_VECTORS_LENGTH_MISMATCH)); + let mint_ref = authorized_borrow_mint_ref(admin, asset); + let i = 0; + while (i < length) { + fungible_asset::mint_to(mint_ref, *vector::borrow(&stores, i), *vector::borrow(&amounts, i)); + i = i + 1; + } + } + + /// Transfer as the owner of metadata object ignoring `frozen` field from primary stores to primary stores of + /// accounts. + public entry fun transfer_between_primary_stores( + admin: &signer, + asset: Object, + from: vector
<address>, + to: vector<address>
, + amounts: vector<u64> + ) acquires ManagingRefs { + let sender_primary_stores = vector::map( + from, + |addr| primary_fungible_store::primary_store(addr, asset) + ); + let receiver_primary_stores = vector::map( + to, + |addr| primary_fungible_store::ensure_primary_store_exists(addr, asset) + ); + transfer(admin, asset, sender_primary_stores, receiver_primary_stores, amounts); + } + + /// Transfer as the owner of metadata object ignoring `frozen` field between fungible stores. + public entry fun transfer( + admin: &signer, + asset: Object<Metadata>, + sender_stores: vector<Object<FungibleStore>>, + receiver_stores: vector<Object<FungibleStore>>, + amounts: vector<u64>, + ) acquires ManagingRefs { + let length = vector::length(&sender_stores); + assert!(length == vector::length(&receiver_stores), error::invalid_argument(ERR_VECTORS_LENGTH_MISMATCH)); + assert!(length == vector::length(&amounts), error::invalid_argument(ERR_VECTORS_LENGTH_MISMATCH)); + let transfer_ref = authorized_borrow_transfer_ref(admin, asset); + let i = 0; + while (i < length) { + fungible_asset::transfer_with_ref( + transfer_ref, + *vector::borrow(&sender_stores, i), + *vector::borrow(&receiver_stores, i), + *vector::borrow(&amounts, i) + ); + i = i + 1; + } + } + + /// Burn fungible assets as the owner of metadata object from the primary stores of accounts. + public entry fun burn_from_primary_stores( + admin: &signer, + asset: Object<Metadata>, + from: vector<address>
, + amounts: vector<u64> + ) acquires ManagingRefs { + let primary_stores = vector::map( + from, + |addr| primary_fungible_store::primary_store(addr, asset) + ); + burn(admin, asset, primary_stores, amounts); + } + + /// Burn fungible assets as the owner of metadata object from fungible stores. + public entry fun burn( + admin: &signer, + asset: Object<Metadata>, + stores: vector<Object<FungibleStore>>, + amounts: vector<u64> + ) acquires ManagingRefs { + let length = vector::length(&stores); + assert!(length == vector::length(&amounts), error::invalid_argument(ERR_VECTORS_LENGTH_MISMATCH)); + let burn_ref = authorized_borrow_burn_ref(admin, asset); + let i = 0; + while (i < length) { + fungible_asset::burn_from(burn_ref, *vector::borrow(&stores, i), *vector::borrow(&amounts, i)); + i = i + 1; + }; + } + + + /// Freeze/unfreeze the primary stores of accounts so they cannot transfer or receive fungible assets. + public entry fun set_primary_stores_frozen_status( + admin: &signer, + asset: Object<Metadata>, + accounts: vector<address>
, + frozen: bool + ) acquires ManagingRefs { + let primary_stores = vector::map(accounts, |acct| { + primary_fungible_store::ensure_primary_store_exists(acct, asset) + }); + set_frozen_status(admin, asset, primary_stores, frozen); + } + + /// Freeze/unfreeze the fungible stores so they cannot transfer or receive fungible assets. + public entry fun set_frozen_status( + admin: &signer, + asset: Object, + stores: vector>, + frozen: bool + ) acquires ManagingRefs { + let transfer_ref = authorized_borrow_transfer_ref(admin, asset); + vector::for_each(stores, |store| { + fungible_asset::set_frozen_flag(transfer_ref, store, frozen); + }); + } + + /// Withdraw as the owner of metadata object ignoring `frozen` field from primary fungible stores of accounts. + public fun withdraw_from_primary_stores( + admin: &signer, + asset: Object, + from: vector
, + amounts: vector + ): FungibleAsset acquires ManagingRefs { + let primary_stores = vector::map( + from, + |addr| primary_fungible_store::primary_store(addr, asset) + ); + withdraw(admin, asset, primary_stores, amounts) + } + + /// Withdraw as the owner of metadata object ignoring `frozen` field from fungible stores. + /// return a fungible asset `fa` where `fa.amount = sum(amounts)`. + public fun withdraw( + admin: &signer, + asset: Object, + stores: vector>, + amounts: vector + ): FungibleAsset acquires ManagingRefs { + let length = vector::length(&stores); + assert!(length == vector::length(&amounts), error::invalid_argument(ERR_VECTORS_LENGTH_MISMATCH)); + let transfer_ref = authorized_borrow_transfer_ref(admin, asset); + let i = 0; + let sum = fungible_asset::zero(asset); + while (i < length) { + let fa = fungible_asset::withdraw_with_ref( + transfer_ref, + *vector::borrow(&stores, i), + *vector::borrow(&amounts, i) + ); + fungible_asset::merge(&mut sum, fa); + i = i + 1; + }; + sum + } + + /// Deposit as the owner of metadata object ignoring `frozen` field to primary fungible stores of accounts from a + /// single source of fungible asset. + public fun deposit_to_primary_stores( + admin: &signer, + fa: &mut FungibleAsset, + from: vector
, + amounts: vector, + ) acquires ManagingRefs { + let primary_stores = vector::map( + from, + |addr| primary_fungible_store::ensure_primary_store_exists(addr, fungible_asset::asset_metadata(fa)) + ); + deposit(admin, fa, primary_stores, amounts); + } + + /// Deposit as the owner of metadata object ignoring `frozen` field from fungible stores. The amount left in `fa` + /// is `fa.amount - sum(amounts)`. + public fun deposit( + admin: &signer, + fa: &mut FungibleAsset, + stores: vector>, + amounts: vector + ) acquires ManagingRefs { + let length = vector::length(&stores); + assert!(length == vector::length(&amounts), error::invalid_argument(ERR_VECTORS_LENGTH_MISMATCH)); + let transfer_ref = authorized_borrow_transfer_ref(admin, fungible_asset::asset_metadata(fa)); + let i = 0; + while (i < length) { + let split_fa = fungible_asset::extract(fa, *vector::borrow(&amounts, i)); + fungible_asset::deposit_with_ref( + transfer_ref, + *vector::borrow(&stores, i), + split_fa, + ); + i = i + 1; + }; + } + + /// Borrow the immutable reference of the refs of `metadata`. + /// This validates that the signer is the metadata object's owner. + inline fun authorized_borrow_refs( + owner: &signer, + asset: Object, + ): &ManagingRefs acquires ManagingRefs { + assert!(object::is_owner(asset, signer::address_of(owner)), error::permission_denied(ERR_NOT_OWNER)); + borrow_global(object::object_address(&asset)) + } + + /// Check the existence and borrow `MintRef`. + inline fun authorized_borrow_mint_ref( + owner: &signer, + asset: Object, + ): &MintRef acquires ManagingRefs { + let refs = authorized_borrow_refs(owner, asset); + assert!(option::is_some(&refs.mint_ref), error::not_found(ERR_MINT_REF)); + option::borrow(&refs.mint_ref) + } + + /// Check the existence and borrow `TransferRef`. + inline fun authorized_borrow_transfer_ref( + owner: &signer, + asset: Object, + ): &TransferRef acquires ManagingRefs { + let refs = authorized_borrow_refs(owner, asset); + assert!(option::is_some(&refs.transfer_ref), error::not_found(ERR_TRANSFER_REF)); + option::borrow(&refs.transfer_ref) + } + + /// Check the existence and borrow `BurnRef`. 
+    inline fun authorized_borrow_burn_ref(
+        owner: &signer,
+        asset: Object<Metadata>,
+    ): &BurnRef acquires ManagingRefs {
+        let refs = authorized_borrow_refs(owner, asset);
+        assert!(option::is_some(&refs.burn_ref), error::not_found(ERR_BURN_REF));
+        option::borrow(&refs.burn_ref)
+    }
+
+    #[test_only]
+    use aptos_framework::object::object_from_constructor_ref;
+    #[test_only]
+    use std::string::utf8;
+    use std::vector;
+    use std::option::Option;
+
+    #[test_only]
+    fun create_test_mfa(creator: &signer): Object<Metadata> {
+        let constructor_ref = &object::create_named_object(creator, b"APT");
+        initialize(
+            constructor_ref,
+            0,
+            utf8(b"Aptos Token"), /* name */
+            utf8(b"APT"), /* symbol */
+            8, /* decimals */
+            utf8(b"http://example.com/favicon.ico"), /* icon */
+            utf8(b"http://example.com"), /* project */
+            vector[true, true, true]
+        );
+        object_from_constructor_ref<Metadata>(constructor_ref)
+    }
+
+    #[test(creator = @example_addr)]
+    fun test_basic_flow(
+        creator: &signer,
+    ) acquires ManagingRefs {
+        let metadata = create_test_mfa(creator);
+        let creator_address = signer::address_of(creator);
+        let aaron_address = @0xface;
+
+        mint_to_primary_stores(creator, metadata, vector[creator_address, aaron_address], vector[100, 50]);
+        assert!(primary_fungible_store::balance(creator_address, metadata) == 100, 1);
+        assert!(primary_fungible_store::balance(aaron_address, metadata) == 50, 2);
+
+        set_primary_stores_frozen_status(creator, metadata, vector[creator_address, aaron_address], true);
+        assert!(primary_fungible_store::is_frozen(creator_address, metadata), 3);
+        assert!(primary_fungible_store::is_frozen(aaron_address, metadata), 4);
+
+        transfer_between_primary_stores(
+            creator,
+            metadata,
+            vector[creator_address, aaron_address],
+            vector[aaron_address, creator_address],
+            vector[10, 5]
+        );
+        assert!(primary_fungible_store::balance(creator_address, metadata) == 95, 5);
+        assert!(primary_fungible_store::balance(aaron_address, metadata) == 55, 6);
+
+        set_primary_stores_frozen_status(creator, metadata, vector[creator_address, aaron_address], false);
+        assert!(!primary_fungible_store::is_frozen(creator_address, metadata), 7);
+        assert!(!primary_fungible_store::is_frozen(aaron_address, metadata), 8);
+
+        let fa = withdraw_from_primary_stores(
+            creator,
+            metadata,
+            vector[creator_address, aaron_address],
+            vector[25, 15]
+        );
+        assert!(fungible_asset::amount(&fa) == 40, 9);
+        deposit_to_primary_stores(creator, &mut fa, vector[creator_address, aaron_address], vector[30, 10]);
+        fungible_asset::destroy_zero(fa);
+
+        burn_from_primary_stores(creator, metadata, vector[creator_address, aaron_address], vector[100, 50]);
+        assert!(primary_fungible_store::balance(creator_address, metadata) == 0, 10);
+        assert!(primary_fungible_store::balance(aaron_address, metadata) == 0, 11);
+    }
+
+    #[test(creator = @example_addr, aaron = @0xface)]
+    #[expected_failure(abort_code = 0x50001, location = Self)]
+    fun test_permission_denied(
+        creator: &signer,
+        aaron: &signer
+    ) acquires ManagingRefs {
+        let metadata = create_test_mfa(creator);
+        let creator_address = signer::address_of(creator);
+        mint_to_primary_stores(aaron, metadata, vector[creator_address], vector[100]);
+    }
+}
\ No newline at end of file
diff --git a/test_move_script/sources/run_script.move b/test_move_script/sources/run_script.move
index c4e566f30..0b8b585c3 100644
--- a/test_move_script/sources/run_script.move
+++ b/test_move_script/sources/run_script.move
@@ -13,30 +13,30 @@ script {
     const S: vector<u8> = b"TEST2";
 
-    fun main(deployer: &signer) {
-        collection::create_unlimited_collection(
-            deployer,
-            utf8(S),
-            utf8(S),
-            option::none(),
-            utf8(S),
-        );
-        let constructor_ref = token::create_named_token(
-            deployer,
-            utf8(S),
-            utf8(S),
-            utf8(S),
-            option::none(),
-            utf8(S),
-        );
-        let transfer_ref = object::generate_transfer_ref(&constructor_ref);
-        let linear_transfer_ref = object::generate_linear_transfer_ref(&transfer_ref);
-        object::transfer_with_ref(linear_transfer_ref, @0xcafe);
-        let linear_transfer_ref = object::generate_linear_transfer_ref(&transfer_ref);
-        object::transfer_with_ref(linear_transfer_ref, @0xcafe2);
-        // Disabling transfer ref after transferring
-        object::disable_ungated_transfer(&transfer_ref);
-        let burn_ref = token::generate_burn_ref(&constructor_ref);
-        token::burn(burn_ref);
-    }
-}
\ No newline at end of file
+    fun main(deployer: &signer) {
+        collection::create_unlimited_collection(
+            deployer,
+            utf8(S),
+            utf8(S),
+            option::none(),
+            utf8(S),
+        );
+        let constructor_ref = token::create_named_token(
+            deployer,
+            utf8(S),
+            utf8(S),
+            utf8(S),
+            option::none(),
+            utf8(S),
+        );
+        let transfer_ref = object::generate_transfer_ref(&constructor_ref);
+        let linear_transfer_ref = object::generate_linear_transfer_ref(&transfer_ref);
+        object::transfer_with_ref(linear_transfer_ref, @0xcafe);
+        let linear_transfer_ref = object::generate_linear_transfer_ref(&transfer_ref);
+        object::transfer_with_ref(linear_transfer_ref, @0xcafe2);
+        // Disabling transfer ref after transferring
+        object::disable_ungated_transfer(&transfer_ref);
+        let burn_ref = token::generate_burn_ref(&constructor_ref);
+        token::burn(burn_ref);
+    }
+}
diff --git a/test_move_script/sources/scripts/burn_fungible_token.move b/test_move_script/sources/scripts/burn_fungible_token.move
new file mode 100644
index 000000000..63efa0b75
--- /dev/null
+++ b/test_move_script/sources/scripts/burn_fungible_token.move
@@ -0,0 +1,23 @@
+script {
+    use std::signer;
+    use std::string::utf8;
+
+    use aptos_framework::fungible_asset::{Metadata};
+    use aptos_framework::object::{Self};
+    use aptos_token_objects::token;
+    use test_addr::managed_fungible_asset::{Self};
+
+    const FT: vector<u8> = b"FT2";
+
+    fun burn_ft(deployer: &signer) {
+        let deployer_address = signer::address_of(deployer);
+        let token_address = token::create_token_address(&deployer_address, &utf8(FT), &utf8(FT));
+        let metadata = object::address_to_object<Metadata>(token_address);
+        managed_fungible_asset::burn_from_primary_stores(
+            deployer,
+            metadata,
+            vector[deployer_address],
+            vector[100],
+        );
+    }
+}
\ No newline at end of file
diff --git a/test_move_script/sources/scripts/fungible_token.move b/test_move_script/sources/scripts/fungible_token.move
new file mode 100644
index 000000000..6eae04136
--- /dev/null
+++ b/test_move_script/sources/scripts/fungible_token.move
@@ -0,0 +1,52 @@
+script {
+    use std::signer;
+    use std::string::utf8;
+    use std::option;
+
+    use aptos_framework::fungible_asset::{Metadata};
+    use aptos_framework::object::object_from_constructor_ref;
+    use aptos_token_objects::collection;
+    use aptos_token_objects::token;
+    use test_addr::managed_fungible_asset::{Self};
+
+    const FT: vector<u8> = b"FT2";
+
+    fun create_ft(deployer: &signer) {
+        // Create token part
+        collection::create_unlimited_collection(
+            deployer,
+            utf8(FT),
+            utf8(FT),
+            option::none(),
+            utf8(FT),
+        );
+        let constructor_ref = &token::create_named_token(
+            deployer,
+            utf8(FT),
+            utf8(FT),
+            utf8(FT),
+            option::none(),
+            utf8(FT),
+        );
+
+        // Create fungible asset part
+        managed_fungible_asset::initialize(
+            constructor_ref,
+            0, /* maximum_supply */
+            utf8(FT),
+            utf8(FT),
+            8, /* decimals */
+            utf8(b"http://example.com/favicon.ico"), /* icon */
+            utf8(b"http://example.com"), /* project */
+            vector[true, true, true], /* ref_flags */
+        );
+        let metadata = object_from_constructor_ref<Metadata>(constructor_ref);
+        let deployer_addr = signer::address_of(deployer);
+        managed_fungible_asset::mint_to_primary_stores(
+            deployer,
+            metadata,
+            vector[deployer_addr, @0xcafe],
+            vector[200000000, 100000000],
+        );
+    }
+}
\ No newline at end of file
diff --git a/test_move_script/sources/scripts/transfer_fungible_token.move b/test_move_script/sources/scripts/transfer_fungible_token.move
new file mode 100644
index 000000000..7e1c508a4
--- /dev/null
+++ b/test_move_script/sources/scripts/transfer_fungible_token.move
@@ -0,0 +1,24 @@
+script {
+    use std::signer;
+    use std::string::utf8;
+
+    use aptos_framework::fungible_asset::{Metadata};
+    use aptos_framework::object::{Self};
+    use aptos_token_objects::token;
+    use test_addr::managed_fungible_asset::{Self};
+
+    const FT: vector<u8> = b"FT2";
+
+    fun transfer_ft(deployer: &signer) {
+        let deployer_address = signer::address_of(deployer);
+        let token_address = token::create_token_address(&deployer_address, &utf8(FT), &utf8(FT));
+        let metadata = object::address_to_object<Metadata>(token_address);
+        managed_fungible_asset::transfer_between_primary_stores(
+            deployer,
+            metadata,
+            vector[deployer_address],
+            vector[@0xdead],
+            vector[100000000],
+        );
+    }
+}
\ No newline at end of file
diff --git a/typescript/README.md b/typescript/README.md
index 65034f1d7..f097b4085 100644
--- a/typescript/README.md
+++ b/typescript/README.md
@@ -1,5 +1,9 @@
 # Custom Processors: Typescript
 
+> [!WARNING]
+> The Typescript implementation is known to get stuck when there is a lot of data to process. The issue is with the gRPC client, and we haven't had a chance to optimize it yet. Please proceed with caution.
+
+
 ## Directory Guide
 - `examples`: Contains example processors that you can use as a starting point for your own custom processor.
 - `sdk`: Contains the custom processor SDK. This package provides a variety of helpful code for writing your own custom processor, such as for connecting to the Transaction Stream Service, creating tables in the database, and keeping track of the last processed transaction.