-
Notifications
You must be signed in to change notification settings - Fork 80
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Migrate Events Processor to Use New Version Tracker Impl #560
Changes from 4 commits
7a3651e
fe0f28d
0a66385
ba8df18
f4ee554
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,2 @@ | ||
-- Reverts the `up.sql` migration by removing the backfill tracking table.
DROP TABLE IF EXISTS backfill_processor_status;
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,11 @@ | ||
-- Tracks progress of backfill runs, one row per backfill alias.
CREATE TABLE backfill_processor_status (
    -- Unique name identifying this backfill run (primary key).
    backfill_alias VARCHAR(50) NOT NULL,
    -- Current state of the run, e.g. "in_progress" or "complete".
    backfill_status VARCHAR(50) NOT NULL,
    -- Highest transaction version successfully processed so far.
    last_success_version BIGINT NOT NULL,
    -- When this row was last written; defaults to insertion time.
    last_updated TIMESTAMP NOT NULL DEFAULT NOW(),
    -- Timestamp of the last processed transaction, if known.
    last_transaction_timestamp TIMESTAMP NULL,
    -- Inclusive version range this backfill covers.
    backfill_start_version BIGINT NOT NULL,
    backfill_end_version BIGINT NOT NULL,
    PRIMARY KEY (backfill_alias)
);
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,86 @@ | ||
// Copyright © Aptos Foundation | ||
// SPDX-License-Identifier: Apache-2.0 | ||
|
||
#![allow(clippy::extra_unused_lifetimes)] | ||
|
||
use crate::utils::database::DbPoolConnection; | ||
use diesel::{ | ||
deserialize, | ||
deserialize::{FromSql, FromSqlRow}, | ||
expression::AsExpression, | ||
pg::{Pg, PgValue}, | ||
serialize, | ||
serialize::{IsNull, Output, ToSql}, | ||
sql_types::Text, | ||
AsChangeset, ExpressionMethods, Insertable, OptionalExtension, QueryDsl, Queryable, | ||
}; | ||
use diesel_async::RunQueryDsl; | ||
use processor::schema::backfill_processor_status; | ||
use std::io::Write; | ||
|
||
#[derive(Debug, PartialEq, FromSqlRow, AsExpression, Eq)] | ||
#[diesel(sql_type = Text)] | ||
pub enum BackfillStatus { | ||
// #[diesel(rename = "in_progress")] | ||
InProgress, | ||
// #[diesel(rename = "complete")] | ||
Complete, | ||
} | ||
|
||
impl ToSql<Text, Pg> for BackfillStatus { | ||
fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result { | ||
match *self { | ||
BackfillStatus::InProgress => out.write_all(b"in_progress")?, | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. nit: should we have the bytes in a constant var? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. done |
||
BackfillStatus::Complete => out.write_all(b"complete")?, | ||
} | ||
Ok(IsNull::No) | ||
} | ||
} | ||
|
||
impl FromSql<Text, Pg> for BackfillStatus { | ||
fn from_sql(bytes: PgValue<'_>) -> deserialize::Result<Self> { | ||
match bytes.as_bytes() { | ||
b"in_progress" => Ok(BackfillStatus::InProgress), | ||
b"complete" => Ok(BackfillStatus::Complete), | ||
_ => Err("Unrecognized enum variant".into()), | ||
} | ||
} | ||
} | ||
|
||
#[derive(AsChangeset, Debug, Insertable)] | ||
#[diesel(table_name = backfill_processor_status)] | ||
/// Only tracking the latest version successfully processed | ||
pub struct BackfillProcessorStatus { | ||
pub backfill_alias: String, | ||
pub backfill_status: BackfillStatus, | ||
pub last_success_version: i64, | ||
pub last_transaction_timestamp: Option<chrono::NaiveDateTime>, | ||
pub backfill_start_version: i64, | ||
pub backfill_end_version: i64, | ||
} | ||
|
||
#[derive(AsChangeset, Debug, Queryable)] | ||
#[diesel(table_name = backfill_processor_status)] | ||
/// Only tracking the latest version successfully processed | ||
pub struct BackfillProcessorStatusQuery { | ||
pub backfill_alias: String, | ||
pub backfill_status: BackfillStatus, | ||
pub last_success_version: i64, | ||
pub last_updated: chrono::NaiveDateTime, | ||
pub last_transaction_timestamp: Option<chrono::NaiveDateTime>, | ||
pub backfill_start_version: i64, | ||
pub backfill_end_version: i64, | ||
} | ||
|
||
impl BackfillProcessorStatusQuery { | ||
pub async fn get_by_processor( | ||
backfill_alias: &str, | ||
conn: &mut DbPoolConnection<'_>, | ||
) -> diesel::QueryResult<Option<Self>> { | ||
backfill_processor_status::table | ||
.filter(backfill_processor_status::backfill_alias.eq(backfill_alias)) | ||
.first::<Self>(conn) | ||
.await | ||
.optional() | ||
} | ||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,2 +1,3 @@ | ||
pub mod backfill_processor_status; | ||
pub mod events_models; | ||
pub mod processor_status; |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -4,9 +4,7 @@ use crate::{ | |
processor_config::ProcessorConfig, | ||
}, | ||
steps::{ | ||
common::latest_processed_version_tracker::{ | ||
LatestVersionProcessedTracker, UPDATE_PROCESSOR_STATUS_SECS, | ||
}, | ||
common::get_processor_status_saver, | ||
events_processor::{EventsExtractor, EventsStorer}, | ||
}, | ||
utils::{ | ||
|
@@ -19,7 +17,10 @@ use anyhow::Result; | |
use aptos_indexer_processor_sdk::{ | ||
aptos_indexer_transaction_stream::{TransactionStream, TransactionStreamConfig}, | ||
builder::ProcessorBuilder, | ||
common_steps::{OrderByVersionStep, TransactionStreamStep}, | ||
common_steps::{ | ||
OrderByVersionStep, TransactionStreamStep, VersionTrackerStep, | ||
DEFAULT_UPDATE_PROCESSOR_STATUS_SECS, | ||
}, | ||
traits::{processor_trait::ProcessorTrait, IntoRunnableStep}, | ||
}; | ||
use std::time::Duration; | ||
|
@@ -62,8 +63,6 @@ impl ProcessorTrait for EventsProcessor { | |
} | ||
|
||
async fn run_processor(&self) -> Result<()> { | ||
let processor_name = self.config.processor_config.name(); | ||
|
||
// Run migrations | ||
match self.config.db_config { | ||
DbConfig::PostgresConfig(ref postgres_config) => { | ||
|
@@ -106,10 +105,12 @@ impl ProcessorTrait for EventsProcessor { | |
let events_storer = EventsStorer::new(self.db_pool.clone(), processor_config); | ||
let order_step = OrderByVersionStep::new( | ||
starting_version, | ||
Duration::from_secs(UPDATE_PROCESSOR_STATUS_SECS), | ||
Duration::from_secs(DEFAULT_UPDATE_PROCESSOR_STATUS_SECS), | ||
); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Could we actually remove the There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. removed. |
||
let version_tracker = VersionTrackerStep::new( | ||
get_processor_status_saver(self.db_pool.clone(), self.config.clone()), | ||
DEFAULT_UPDATE_PROCESSOR_STATUS_SECS, | ||
); | ||
let version_tracker = | ||
LatestVersionProcessedTracker::new(self.db_pool.clone(), processor_name.to_string()); | ||
|
||
// Connect processor steps together | ||
let (_, buffer_receiver) = ProcessorBuilder::new_with_inputless_first_step( | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1 +1,4 @@ | ||
pub mod latest_processed_version_tracker; | ||
pub mod processor_status_saver; | ||
|
||
pub use processor_status_saver::get_processor_status_saver; |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This needed to be changed around; otherwise, whenever the migrations are re-run, the ordering of the generated struct's constructor changes, causing some issues.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
To add a bit of context: this reverts a change I made manually. This ordering should always stay the same unless we change the SQL file.