From 56f8a8bba91a274eff0b3e39f236a0aa564b3dcf Mon Sep 17 00:00:00 2001
From: alnoki <43892045+alnoki@users.noreply.github.com>
Date: Fri, 6 Oct 2023 10:13:03 -0700
Subject: [PATCH] [ECO-616] Add assorted PR tweaks

---
 src/docker/compose.dss.yaml                  |  3 +-
 src/rust/aggregator/README.md                |  3 +-
 src/rust/aggregator/src/data/user_history.rs | 33 ++++++++++++++++---
 .../2023-09-04-113144_postgrest/up.sql       |  2 +-
 4 files changed, 33 insertions(+), 8 deletions(-)

diff --git a/src/docker/compose.dss.yaml b/src/docker/compose.dss.yaml
index 3bd76373c..d965ee51f 100644
--- a/src/docker/compose.dss.yaml
+++ b/src/docker/compose.dss.yaml
@@ -37,7 +37,8 @@ services:
       - postgres
     environment:
       - PGWS_DB_URI=postgres://econia:econia@postgres/econia
-      - PGWS_JWT_SECRET=conjunctivodacryocystorhinostomy
+      # This has to be at least 32 characters long.
+      - PGWS_JWT_SECRET=econia_0000000000000000000000000
       - PGWS_CHECK_LISTENER_INTERVAL=1000
       - PGWS_LISTEN_CHANNEL=econiaws
     ports:
diff --git a/src/rust/aggregator/README.md b/src/rust/aggregator/README.md
index 2331f0974..422387b58 100644
--- a/src/rust/aggregator/README.md
+++ b/src/rust/aggregator/README.md
@@ -49,4 +49,5 @@ For example, a 1-minute candle stick data flow would follow the following princi
 
 # SQLX
 
-The aggregator uses SQLX. In order for the requests to be checked and the crate to be compiled when the database is offline, you have to run `cargo sqlx prepare --workspace` from the Rust root (`src/rust`) when updating or creating a request.
+The aggregator uses [SQLx](https://github.com/launchbadge/sqlx/blob/main/README.md).
+In order for queries to be checked and the crate to be compiled when the database is offline, run `cargo sqlx prepare --workspace` from the Rust root (`src/rust`) whenever you update or create a query.
diff --git a/src/rust/aggregator/src/data/user_history.rs b/src/rust/aggregator/src/data/user_history.rs
index 32721a97b..2aabf8616 100644
--- a/src/rust/aggregator/src/data/user_history.rs
+++ b/src/rust/aggregator/src/data/user_history.rs
@@ -1,10 +1,13 @@
 use anyhow::anyhow;
-use bigdecimal::{BigDecimal, Zero, num_bigint::ToBigInt};
+use bigdecimal::{num_bigint::ToBigInt, BigDecimal, Zero};
 use chrono::{DateTime, Duration, Utc};
 use sqlx::{PgConnection, PgPool, Postgres, Transaction};
 
 use super::{Data, DataAggregationError, DataAggregationResult};
 
+/// Number of bits to shift when encoding transaction version.
+const SHIFT_TXN_VERSION: u8 = 64;
+
 #[derive(sqlx::Type, Debug)]
 #[sqlx(type_name = "order_status", rename_all = "lowercase")]
 pub enum OrderStatus {
@@ -137,8 +140,19 @@ impl Data for UserHistory {
         .await
         .map_err(|e| DataAggregationError::ProcessingError(anyhow!(e)))?;
         for x in &limit_events {
-            let txn = x.txn_version.to_bigint().ok_or(DataAggregationError::ProcessingError(anyhow!("txn_version not integer")))? << 64;
-            let event = x.event_idx.to_bigint().ok_or(DataAggregationError::ProcessingError(anyhow!("event_idx not integer")))?;
+            let txn = x
+                .txn_version
+                .to_bigint()
+                .ok_or(DataAggregationError::ProcessingError(anyhow!(
+                    "txn_version not integer"
+                )))?
+                << SHIFT_TXN_VERSION;
+            let event = x
+                .event_idx
+                .to_bigint()
+                .ok_or(DataAggregationError::ProcessingError(anyhow!(
+                    "event_idx not integer"
+                )))?;
             let txn_event: BigDecimal = BigDecimal::from(txn & event);
             sqlx::query!(
                 r#"
@@ -425,8 +439,17 @@ async fn aggregate_change<'a>(
         (record.order_type, record.remaining_size);
     // If its a limit order and needs reordering
    if matches!(order_type, OrderType::Limit) && &original_size < new_size {
-        let txn = txn_version.to_bigint().ok_or(DataAggregationError::ProcessingError(anyhow!("txn_version not integer")))? << 64;
-        let event = event_idx.to_bigint().ok_or(DataAggregationError::ProcessingError(anyhow!("event_idx not integer")))?;
+        let txn = txn_version
+            .to_bigint()
+            .ok_or(DataAggregationError::ProcessingError(anyhow!(
+                "txn_version not integer"
+            )))?
+            << SHIFT_TXN_VERSION;
+        let event = event_idx
+            .to_bigint()
+            .ok_or(DataAggregationError::ProcessingError(anyhow!(
+                "event_idx not integer"
+            )))?;
         let txn_event: BigDecimal = BigDecimal::from(txn & event);
         sqlx::query!(
             r#"
diff --git a/src/rust/dbv2/migrations/2023-09-04-113144_postgrest/up.sql b/src/rust/dbv2/migrations/2023-09-04-113144_postgrest/up.sql
index 13f09df63..0ef11c49c 100644
--- a/src/rust/dbv2/migrations/2023-09-04-113144_postgrest/up.sql
+++ b/src/rust/dbv2/migrations/2023-09-04-113144_postgrest/up.sql
@@ -25,5 +25,5 @@ CREATE EXTENSION pgjwt CASCADE;
 
 
 CREATE FUNCTION api.jwt (json) RETURNS TEXT AS $$
-    SELECT sign((CONCAT(CONCAT('{"mode": "r","channels": ', $1->>'channels'::text),'}'))::json, 'conjunctivodacryocystorhinostomy')
+    SELECT sign((CONCAT(CONCAT('{"mode": "r","channels": ', $1->>'channels'::text),'}'))::json, 'econia_0000000000000000000000000')
 $$ LANGUAGE SQL;
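
Aside on the `SHIFT_TXN_VERSION` constant introduced above: a minimal standalone sketch, not taken from the patch, of how a 64-bit-shifted transaction version and an event index can be packed into one key that sorts chronologically. The `txn_event_key` helper, the `main` driver, and the bitwise OR used to merge the two fields are illustrative assumptions, not Econia code.

```rust
// Illustrative sketch only: pack (txn_version, event_idx) into a single
// sortable key. The 64-bit shift mirrors SHIFT_TXN_VERSION in
// user_history.rs; the bitwise OR merging the fields is an assumption.
use bigdecimal::{num_bigint::BigInt, BigDecimal};

const SHIFT_TXN_VERSION: u8 = 64;

/// Hypothetical helper: the high 64 bits hold the transaction version and
/// the low 64 bits hold the event index, so ordering by the key matches
/// ordering by (version, index).
fn txn_event_key(txn_version: u64, event_idx: u64) -> BigDecimal {
    let key = (BigInt::from(txn_version) << SHIFT_TXN_VERSION) | BigInt::from(event_idx);
    BigDecimal::from(key)
}

fn main() {
    // A later event in the same transaction sorts later.
    assert!(txn_event_key(10, 3) < txn_event_key(10, 4));
    // Any event in a later transaction sorts after every event in an earlier one.
    assert!(txn_event_key(10, u64::MAX) < txn_event_key(11, 0));
    println!("{}", txn_event_key(10, 3)); // 184467440737095516163
}
```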
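
Aside on the shared secret: the compose comment above notes that `PGWS_JWT_SECRET` must be at least 32 characters, and the `api.jwt` migration signs read-only channel claims with that same secret via pgjwt. Below is a hedged sketch of minting a token of the same shape from Rust, assuming the `jsonwebtoken` and `serde_json` crates (neither appears in the patch) and a placeholder channel name.

```rust
// Illustrative sketch only: sign {"mode": "r", "channels": [...]} with the
// shared secret from compose.dss.yaml, roughly what api.jwt does via pgjwt.
// "example_channel" is a placeholder, not a real Econia channel name.
use jsonwebtoken::{encode, EncodingKey, Header};

fn main() -> Result<(), jsonwebtoken::errors::Error> {
    // Same value as PGWS_JWT_SECRET; HS256 secrets should be at least 32
    // bytes, which is why the compose file requires 32+ characters.
    let secret = "econia_0000000000000000000000000";
    let claims = serde_json::json!({
        "mode": "r",
        "channels": ["example_channel"],
    });
    let token = encode(
        &Header::default(), // HS256, matching pgjwt's default algorithm
        &claims,
        &EncodingKey::from_secret(secret.as_bytes()),
    )?;
    println!("{token}");
    Ok(())
}
```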