diff --git a/database/bindings/EventType.ts b/database/bindings/EventType.ts new file mode 100644 index 00000000..a43e90ee --- /dev/null +++ b/database/bindings/EventType.ts @@ -0,0 +1,3 @@ +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. + +export type EventType = "AppConnect" | "AppDisconnect" | "ClientConnectInit" | "ClientConnectResolve" | "ClientDisconnect" | "SignMessage" | "SignTransaction" | "SignAndSendTransaction" | "ChangeWallet" | "ChangeNetwork"; \ No newline at end of file diff --git a/database/migrations/0001_types.sql b/database/migrations/0001_types.sql index 8cb9b479..c4e61961 100644 --- a/database/migrations/0001_types.sql +++ b/database/migrations/0001_types.sql @@ -30,4 +30,17 @@ CREATE TYPE geo_location AS ( city TEXT, lat FLOAT8, lon FLOAT8 +); + +CREATE TYPE event_type_enum AS ENUM ( + 'AppConnect', + 'AppDisconnect', + 'ClientConnectInit', + 'ClientConnectResolve', + 'ClientDisconnect', + 'SignMessage', + 'SignTransaction', + 'SignAndSendTransaction', + 'ChangeWallet', + 'ChangeNetwork' ); \ No newline at end of file diff --git a/database/migrations/0011_events_index.sql b/database/migrations/0011_events_index.sql new file mode 100644 index 00000000..d0d6c3c3 --- /dev/null +++ b/database/migrations/0011_events_index.sql @@ -0,0 +1,6 @@ +CREATE TABLE events( + event_id SERIAL PRIMARY KEY, + app_id TEXT NOT NULL, + event_type event_type_enum NOT NULL, + creation_timestamp TIMESTAMPTZ NOT NULL DEFAULT NOW() +); \ No newline at end of file diff --git a/database/migrations/0011_requests.sql b/database/migrations/0011_requests.sql deleted file mode 100644 index a8b2fa97..00000000 --- a/database/migrations/0011_requests.sql +++ /dev/null @@ -1,10 +0,0 @@ -CREATE TABLE requests( - app_id TEXT NOT NULL, - session_id TEXT NOT NULL, - request_id TEXT NOT NULL, - request_type TEXT NOT NULL, - client_profile_id TEXT, - request_status request_status_enum NOT NULL, - network TEXT NOT 
NULL, - creation_timestamp TIMESTAMPTZ NOT NULL -); \ No newline at end of file diff --git a/database/migrations/0012_events_tables.sql b/database/migrations/0012_events_tables.sql new file mode 100644 index 00000000..be363263 --- /dev/null +++ b/database/migrations/0012_events_tables.sql @@ -0,0 +1,76 @@ +CREATE TABLE event_app_connect( + event_id BIGINT PRIMARY KEY REFERENCES events(event_id), + session_id TEXT NOT NULL, + device_metadata TEXT NOT NULL, + lang TEXT NOT NULL, + timezone TEXT NOT NULL, + new_session BOOLEAN NOT NULL +); + +CREATE TABLE event_app_disconnect( + event_id BIGINT PRIMARY KEY REFERENCES events(event_id), + session_id TEXT NOT NULL +); + +CREATE TABLE event_client_connect( + event_id BIGINT PRIMARY KEY REFERENCES events(event_id), + client_id TEXT NOT NULL, + session_id TEXT NOT NULL, + addresses TEXT[], + wallet_name TEXT NOT NULL, + wallet_type TEXT NOT NULL, + session_type session_type_enum NOT NULL, + success BOOLEAN NOT NULL +); + +CREATE TABLE event_client_disconnect( + event_id BIGINT PRIMARY KEY REFERENCES events(event_id), + disconnected_session_id TEXT NOT NULL +); + +CREATE TABLE event_sign_message( + event_id BIGINT PRIMARY KEY REFERENCES events(event_id), + session_id TEXT NOT NULL, + request_id TEXT NOT NULL, + request_status request_status_enum NOT NULL, + network TEXT NOT NULL +); + +CREATE TABLE event_sign_transaction( + event_id BIGINT PRIMARY KEY REFERENCES events(event_id), + session_id TEXT NOT NULL, + request_id TEXT NOT NULL, + request_status request_status_enum NOT NULL, + network TEXT NOT NULL, + tx_hash TEXT +); + +CREATE TABLE event_sign_and_send_transaction( + event_id BIGINT PRIMARY KEY REFERENCES events(event_id), + session_id TEXT NOT NULL, + request_id TEXT NOT NULL, + request_status request_status_enum NOT NULL, + network TEXT NOT NULL, + tx_hash TEXT +); + +CREATE TABLE event_change_wallet( + event_id BIGINT PRIMARY KEY REFERENCES events(event_id), + session_id TEXT NOT NULL, + request_id TEXT NOT NULL, + 
request_status request_status_enum NOT NULL, + network TEXT NOT NULL, + wallet_name TEXT NOT NULL, + wallet_type TEXT NOT NULL, + old_wallet_address TEXT NOT NULL, + new_wallet_address TEXT NOT NULL +); + +CREATE TABLE event_change_network( + event_id BIGINT PRIMARY KEY REFERENCES events(event_id), + session_id TEXT NOT NULL, + request_id TEXT NOT NULL, + request_status request_status_enum NOT NULL, + old_network TEXT NOT NULL, + new_network TEXT NOT NULL +); \ No newline at end of file diff --git a/database/migrations/0012_create_hypertables.sql b/database/migrations/0013_create_hypertables.sql similarity index 66% rename from database/migrations/0012_create_hypertables.sql rename to database/migrations/0013_create_hypertables.sql index b3171cdc..3c39653e 100644 --- a/database/migrations/0012_create_hypertables.sql +++ b/database/migrations/0013_create_hypertables.sql @@ -1,8 +1,8 @@ SELECT create_hypertable('connection_events', 'connected_at'); -SELECT - create_hypertable('requests', 'creation_timestamp'); +-- SELECT +-- create_hypertable('requests', 'creation_timestamp'); SELECT create_hypertable('sessions', 'session_open_timestamp'); diff --git a/database/migrations/0013_requests_stats.sql b/database/migrations/0013_requests_stats.sql deleted file mode 100644 index 97de7ff8..00000000 --- a/database/migrations/0013_requests_stats.sql +++ /dev/null @@ -1,96 +0,0 @@ ------------------ Hourly requests stats per app ----------------- ---- View -CREATE MATERIALIZED VIEW hourly_requests_stats_per_app WITH (timescaledb.continuous) AS -SELECT - app_id, - time_bucket('1 hour' :: interval, creation_timestamp) AS hourly_bucket, - COUNT(*) AS hourly_request_count, - COUNT(*) FILTER ( - WHERE - request_status = 'Completed' - ) :: FLOAT / NULLIF( - COUNT(*) FILTER ( - WHERE - request_status IN ('Completed', 'Rejected', 'TimedOut') - ), - 0 - ) AS hourly_success_rate -FROM - requests -GROUP BY - app_id, - hourly_bucket WITH NO DATA; - ---- Refresh policy -SELECT - 
add_continuous_aggregate_policy( - 'hourly_requests_stats_per_app', - start_offset => INTERVAL '3 h', - end_offset => INTERVAL '1 h', - schedule_interval => INTERVAL '1 h' - ); - ---- Real time aggregation -ALTER MATERIALIZED VIEW hourly_requests_stats_per_app -set - (timescaledb.materialized_only = false); - - - ------------------ Daily requests stats per app ----------------- ---- View -CREATE MATERIALIZED VIEW daily_requests_stats_per_app WITH (timescaledb.continuous) AS -SELECT - app_id, - time_bucket('1 day' :: interval, hourly_bucket) AS daily_bucket, - SUM(hourly_request_count) :: BIGINT AS daily_request_count, - SUM(hourly_request_count * hourly_success_rate) :: FLOAT / SUM(hourly_request_count) AS daily_success_rate -FROM - hourly_requests_stats_per_app -GROUP BY - app_id, - daily_bucket WITH NO DATA; - ---- Refresh policy -SELECT - add_continuous_aggregate_policy( - 'daily_requests_stats_per_app', - start_offset => INTERVAL '3 d', - end_offset => INTERVAL '1 h', - schedule_interval => INTERVAL '12 h' - ); - ---- Real time aggregation -ALTER MATERIALIZED VIEW daily_requests_stats_per_app -set - (timescaledb.materialized_only = false); - - - ------------------ Monthly requests per app ----------------- ---- View -CREATE MATERIALIZED VIEW monthly_requests_stats_per_app WITH (timescaledb.continuous) AS -SELECT - app_id, - time_bucket('1 month' :: interval, daily_bucket) AS monthly_bucket, - SUM(daily_request_count) :: BIGINT AS monthly_request_count, - SUM(daily_request_count * daily_success_rate) :: FLOAT / SUM(daily_request_count) AS monthly_success_rate -FROM - daily_requests_stats_per_app -GROUP BY - app_id, - monthly_bucket WITH NO DATA; - ---- Refresh policy -SELECT - add_continuous_aggregate_policy( - 'monthly_requests_stats_per_app', - start_offset => INTERVAL '3 month', - end_offset => INTERVAL '1 h', - schedule_interval => INTERVAL '1 month' - ); - ---- Real time aggregation -ALTER MATERIALIZED VIEW monthly_requests_stats_per_app -set - 
(timescaledb.materialized_only = false); \ No newline at end of file diff --git a/database/migrations/0014_requests_stats.sql b/database/migrations/0014_requests_stats.sql new file mode 100644 index 00000000..974a33bd --- /dev/null +++ b/database/migrations/0014_requests_stats.sql @@ -0,0 +1,96 @@ +-- ----------------- Hourly requests stats per app ----------------- +-- --- View +-- CREATE MATERIALIZED VIEW hourly_requests_stats_per_app WITH (timescaledb.continuous) AS +-- SELECT +-- app_id, +-- time_bucket('1 hour' :: interval, creation_timestamp) AS hourly_bucket, +-- COUNT(*) AS hourly_request_count, +-- COUNT(*) FILTER ( +-- WHERE +-- request_status = 'Completed' +-- ) :: FLOAT / NULLIF( +-- COUNT(*) FILTER ( +-- WHERE +-- request_status IN ('Completed', 'Rejected', 'TimedOut') +-- ), +-- 0 +-- ) AS hourly_success_rate +-- FROM +-- requests +-- GROUP BY +-- app_id, +-- hourly_bucket WITH NO DATA; + +-- --- Refresh policy +-- SELECT +-- add_continuous_aggregate_policy( +-- 'hourly_requests_stats_per_app', +-- start_offset => INTERVAL '3 h', +-- end_offset => INTERVAL '1 h', +-- schedule_interval => INTERVAL '1 h' +-- ); + +-- --- Real time aggregation +-- ALTER MATERIALIZED VIEW hourly_requests_stats_per_app +-- set +-- (timescaledb.materialized_only = false); + + + +-- ----------------- Daily requests stats per app ----------------- +-- --- View +-- CREATE MATERIALIZED VIEW daily_requests_stats_per_app WITH (timescaledb.continuous) AS +-- SELECT +-- app_id, +-- time_bucket('1 day' :: interval, hourly_bucket) AS daily_bucket, +-- SUM(hourly_request_count) :: BIGINT AS daily_request_count, +-- SUM(hourly_request_count * hourly_success_rate) :: FLOAT / SUM(hourly_request_count) AS daily_success_rate +-- FROM +-- hourly_requests_stats_per_app +-- GROUP BY +-- app_id, +-- daily_bucket WITH NO DATA; + +-- --- Refresh policy +-- SELECT +-- add_continuous_aggregate_policy( +-- 'daily_requests_stats_per_app', +-- start_offset => INTERVAL '3 d', +-- end_offset => 
INTERVAL '1 h', +-- schedule_interval => INTERVAL '12 h' +-- ); + +-- --- Real time aggregation +-- ALTER MATERIALIZED VIEW daily_requests_stats_per_app +-- set +-- (timescaledb.materialized_only = false); + + + +-- ----------------- Monthly requests per app ----------------- +-- --- View +-- CREATE MATERIALIZED VIEW monthly_requests_stats_per_app WITH (timescaledb.continuous) AS +-- SELECT +-- app_id, +-- time_bucket('1 month' :: interval, daily_bucket) AS monthly_bucket, +-- SUM(daily_request_count) :: BIGINT AS monthly_request_count, +-- SUM(daily_request_count * daily_success_rate) :: FLOAT / SUM(daily_request_count) AS monthly_success_rate +-- FROM +-- daily_requests_stats_per_app +-- GROUP BY +-- app_id, +-- monthly_bucket WITH NO DATA; + +-- --- Refresh policy +-- SELECT +-- add_continuous_aggregate_policy( +-- 'monthly_requests_stats_per_app', +-- start_offset => INTERVAL '3 month', +-- end_offset => INTERVAL '1 h', +-- schedule_interval => INTERVAL '1 month' +-- ); + +-- --- Real time aggregation +-- ALTER MATERIALIZED VIEW monthly_requests_stats_per_app +-- set +-- (timescaledb.materialized_only = false); \ No newline at end of file diff --git a/database/migrations/0014_session_stats.sql b/database/migrations/0016_session_stats.sql similarity index 100% rename from database/migrations/0014_session_stats.sql rename to database/migrations/0016_session_stats.sql diff --git a/database/src/aggregated_views_queries/requests_stats.rs b/database/src/aggregated_views_queries/requests_stats.rs index a929f08d..5cbf2313 100644 --- a/database/src/aggregated_views_queries/requests_stats.rs +++ b/database/src/aggregated_views_queries/requests_stats.rs @@ -1,409 +1,409 @@ -use crate::{ - db::Db, - structs::{db_error::DbError, filter_requests::RequestsStats, time_filters::TimeFilter}, - tables::utils::{format_view_keys, format_view_name}, -}; - -pub const REQUESTS_STATS_BASE_VIEW_NAME: &str = "requests_stats_per_app"; -pub const REQUESTS_STATS_BASE_KEYS: [(&'static str, 
bool); 4] = [ - ("app_id", false), - ("bucket", true), - ("request_count", true), - ("success_rate", true), -]; - -impl Db { - pub async fn get_requests_stats_by_app_id( - &self, - app_id: &str, - filter: TimeFilter, - ) -> Result, DbError> { - let start_date = filter.to_date(); - let bucket_size = filter.bucket_size(); - - // Correctly selecting the view based on the bucket_size - let prefix = match bucket_size { - "1 hour" => "hourly", - "1 day" => "daily", - "1 month" => "monthly", - _ => return Err(DbError::DatabaseError("Invalid bucket size".to_string())), - }; - - let formatted_keys = format_view_keys(prefix, &REQUESTS_STATS_BASE_KEYS); - let formatted_view_name = format_view_name(prefix, REQUESTS_STATS_BASE_VIEW_NAME); - let filter_key = REQUESTS_STATS_BASE_KEYS[1].0; - let filter = format!("{prefix}_{filter_key}"); - - let query = format!( - "SELECT {formatted_keys} - FROM {formatted_view_name} - WHERE app_id = $1 AND {filter} >= $2 - ORDER BY {filter} DESC", - ); - - sqlx::query_as::<_, RequestsStats>(&query) - .bind(app_id) - .bind(start_date) - .fetch_all(&self.connection_pool) - .await - .map_err(|e| e.into()) - } -} - -#[cfg(feature = "cloud_db_tests")] -#[cfg(test)] -mod test { - - use super::*; - use crate::{ - structs::{ - consts::DAY_IN_SECONDS, request_status::RequestStatus, request_type::RequestType, - }, - tables::{ - registered_app::table_struct::DbRegisteredApp, requests::table_struct::Request, - sessions::table_struct::DbNcSession, utils::to_microsecond_precision, - }, - }; - use sqlx::types::chrono::{DateTime, Utc}; - use std::{sync::Arc, time::Duration}; - use tokio::task; - - #[tokio::test] - async fn test_requests_count() { - let db = super::Db::connect_to_the_pool().await; - db.truncate_all_tables().await.unwrap(); - - // Create test team instance - let team_id = "test_team_id".to_string(); - let app_id = "test_app_id".to_string(); - - db.setup_test_team(&team_id, &app_id, Utc::now()) - .await - .unwrap(); - - // Create session - let 
session = DbNcSession { - session_id: "test_session_id".to_string(), - app_id: app_id.to_string(), - app_metadata: "test_app_metadata".to_string(), - persistent: false, - network: "test_network".to_string(), - client_data: None, - session_open_timestamp: DateTime::from(Utc::now()), - session_close_timestamp: None, - }; - - db.handle_new_session(&session, None, &"127.0.0.1".to_string()) - .await - .unwrap(); - - let result = db.get_sessions_by_app_id(&app_id).await.unwrap(); - assert_eq!(result.len(), 1); - - let db_arc = Arc::new(db); - let mut tasks = Vec::new(); - - for i in 0..33 { - let db_clone = db_arc.clone(); // Clone the db connection or pool if needed - let app_id = app_id.clone(); - tasks.push(task::spawn(async move { - for j in 0..100 - i { - let creation_time: DateTime = Utc::now() - - Duration::from_secs(i as u64 * DAY_IN_SECONDS as u64) - - Duration::from_millis((j + 1) as u64 * 100); - - let request = Request { - request_id: format!("test_request_id_{}_{}", i, j), - app_id: app_id.to_string(), - session_id: "test_session_id".to_string(), - network: "test_network".to_string(), - creation_timestamp: creation_time, - request_status: RequestStatus::Pending, - request_type: RequestType::SignAndSendTransaction, - }; - - if let Err(e) = db_clone.save_request(&request).await { - eprintln!("Failed to save request: {}", e); - } - } - })); - } - - // Await all tasks to complete - for task in tasks { - task.await.unwrap(); - } - - // We need to refresh manually the views - db_arc - .refresh_continuous_aggregates(vec![ - "hourly_requests_stats_per_app".to_string(), - "daily_requests_stats_per_app".to_string(), - "monthly_requests_stats_per_app".to_string(), - ]) - .await - .unwrap(); - - let result = db_arc - .get_requests_stats_by_app_id(&app_id, TimeFilter::Last24Hours) - .await - .unwrap(); - - assert_eq!(result.len(), 2); - assert_eq!(result[0].request_count, 100); - assert_eq!(result[1].request_count, 99); - - let result = db_arc - 
.get_requests_stats_by_app_id(&app_id, TimeFilter::Last7Days) - .await - .unwrap(); - - assert_eq!(result.len(), 8); - assert_eq!(result[0].request_count, 100); - assert_eq!(result[7].request_count, 93); - - let result = db_arc - .get_requests_stats_by_app_id(&app_id, TimeFilter::Last30Days) - .await - .unwrap(); - - assert_eq!(result.len(), 31); - assert_eq!(result[0].request_count, 100); - assert_eq!(result[30].request_count, 70); - } - - #[tokio::test] - async fn test_requests_success_rate() { - let db = super::Db::connect_to_the_pool().await; - db.truncate_all_tables().await.unwrap(); - - // Create test team instance - let team_id = "test_team_id".to_string(); - let app_id = "test_app_id".to_string(); - - db.setup_test_team(&team_id, &app_id, Utc::now()) - .await - .unwrap(); - - // Create session - let session = DbNcSession { - session_id: "test_session_id".to_string(), - app_id: "test_app_id".to_string(), - app_metadata: "test_app_metadata".to_string(), - persistent: false, - network: "test_network".to_string(), - client_data: None, - session_open_timestamp: to_microsecond_precision(&Utc::now()), - session_close_timestamp: None, - }; - - db.handle_new_session(&session, None, &"127.0.0.1".to_string()) - .await - .unwrap(); - - let result = db.get_sessions_by_app_id(&app_id).await.unwrap(); - assert_eq!(result.len(), 1); - // assert_eq!(session, result[0]); - - let db_arc = Arc::new(db); - let mut tasks = Vec::new(); - - for i in 0..33 { - let db_clone = db_arc.clone(); // Clone the db connection or pool if needed - let app_id = app_id.clone(); - tasks.push(task::spawn(async move { - for j in 0..100 - i { - let creation_time: DateTime = Utc::now() - - Duration::from_secs(i as u64 * DAY_IN_SECONDS as u64) - - Duration::from_millis((j + 1) as u64 * 100); - - let status = if j % 3 == 0 { - RequestStatus::Completed - } else if j % 3 == 1 { - RequestStatus::Completed - } else { - RequestStatus::Rejected - }; - - let request = Request { - request_id: 
format!("test_request_id_{}_{}", i, j), - app_id: app_id.to_string(), - session_id: "test_session_id".to_string(), - network: "test_network".to_string(), - creation_timestamp: to_microsecond_precision(&creation_time), - request_status: status, - request_type: RequestType::SignAndSendTransaction, - }; - if let Err(e) = db_clone.save_request(&request).await { - eprintln!("Failed to save request: {}", e); - } - } - })); - } - - // Await all tasks to complete - for task in tasks { - task.await.unwrap(); - } - - // We need to refresh manually the views - db_arc - .refresh_continuous_aggregates(vec![ - "hourly_requests_stats_per_app".to_string(), - "daily_requests_stats_per_app".to_string(), - "monthly_requests_stats_per_app".to_string(), - ]) - .await - .unwrap(); - - // Check the success rate on every time filter - let result = db_arc - .get_requests_stats_by_app_id(&app_id, TimeFilter::Last24Hours) - .await - .unwrap(); - - assert_eq!(result.len(), 2); - assert_eq!( - (result[0].success_rate.unwrap() * 100.0).ceil() / 100.0, - 0.67 as f64 - ); - assert_eq!( - (result[1].success_rate.unwrap() * 100.0).ceil() / 100.0, - 0.67 as f64 - ); - - let result = db_arc - .get_requests_stats_by_app_id(&app_id, TimeFilter::Last7Days) - .await - .unwrap(); - - assert_eq!(result.len(), 8); - assert_eq!( - (result[0].success_rate.unwrap() * 100.0).ceil() / 100.0, - 0.67 as f64 - ); - assert_eq!( - (result[7].success_rate.unwrap() * 100.0).ceil() / 100.0, - 0.67 as f64 - ); - - let result = db_arc - .get_requests_stats_by_app_id(&app_id, TimeFilter::Last30Days) - .await - .unwrap(); - - assert_eq!(result.len(), 31); - assert_eq!( - (result[0].success_rate.unwrap() * 100.0).ceil() / 100.0, - 0.67 as f64 - ); - assert_eq!( - (result[30].success_rate.unwrap() * 100.0).ceil() / 100.0, - 0.68 as f64 - ); - - // Test missing success due to all requests having pending status - // Add new app to have a "clean" state - let second_app_id = "test_app_id2".to_string(); - let app = DbRegisteredApp 
{ - team_id: team_id.clone(), - app_id: second_app_id.to_string(), - app_name: "test_app_name".to_string(), - whitelisted_domains: vec!["test_domain".to_string()], - ack_public_keys: vec!["test_key".to_string()], - registration_timestamp: to_microsecond_precision(&Utc::now()), - }; - db_arc.register_new_app(&app).await.unwrap(); - - let result = db_arc - .get_registered_app_by_app_id(&second_app_id) - .await - .unwrap(); - assert_eq!(app, result.unwrap()); - - // Create session - let session = DbNcSession { - session_id: "test_session_id".to_string(), - app_id: second_app_id.to_string(), - app_metadata: "test_app_metadata".to_string(), - persistent: false, - network: "test_network".to_string(), - client_data: None, - session_open_timestamp: to_microsecond_precision(&Utc::now()), - session_close_timestamp: None, - }; - - db_arc - .handle_new_session(&session, None, &"127.0.0.1".to_string()) - .await - .unwrap(); - - let mut tasks = Vec::new(); - for i in 0..10 { - let db_clone = db_arc.clone(); // Clone the db connection or pool if needed - let app_id = second_app_id.clone(); - tasks.push(task::spawn(async move { - for j in 0..11 - i { - let creation_time: DateTime = Utc::now() - - Duration::from_secs(i as u64 * DAY_IN_SECONDS as u64) - - Duration::from_millis((j + 1) as u64 * 100); - - let request = Request { - request_id: format!("test_request_id_{}_{}", i, j), - app_id: app_id.to_string(), - session_id: "test_session_id".to_string(), - network: "test_network".to_string(), - creation_timestamp: to_microsecond_precision(&creation_time), - request_status: RequestStatus::Pending, - request_type: RequestType::SignAndSendTransaction, - }; - if let Err(e) = db_clone.save_request(&request).await { - eprintln!("Failed to save request: {}", e); - } - } - })); - } - - // Await all tasks to complete - for task in tasks { - task.await.unwrap(); - } - - // We need to refresh manually the views - db_arc - .refresh_continuous_aggregates(vec![ - 
"hourly_requests_stats_per_app".to_string(), - "daily_requests_stats_per_app".to_string(), - "monthly_requests_stats_per_app".to_string(), - ]) - .await - .unwrap(); - - let result = db_arc - .get_requests_stats_by_app_id(&second_app_id, TimeFilter::Last24Hours) - .await - .unwrap(); - - assert_eq!(result.len(), 2); - assert!(result[0].success_rate.is_none()); - assert!(result[1].success_rate.is_none()); - - let result = db_arc - .get_requests_stats_by_app_id(&second_app_id, TimeFilter::Last7Days) - .await - .unwrap(); - - assert_eq!(result.len(), 8); - assert!(result[0].success_rate.is_none()); - assert!(result[7].success_rate.is_none()); - - let result = db_arc - .get_requests_stats_by_app_id(&second_app_id, TimeFilter::Last30Days) - .await - .unwrap(); - - assert_eq!(result.len(), 10); - assert!(result[0].success_rate.is_none()); - assert!(result[9].success_rate.is_none()); - } -} +// use crate::{ +// db::Db, +// structs::{db_error::DbError, filter_requests::RequestsStats, time_filters::TimeFilter}, +// tables::utils::{format_view_keys, format_view_name}, +// }; + +// pub const REQUESTS_STATS_BASE_VIEW_NAME: &str = "requests_stats_per_app"; +// pub const REQUESTS_STATS_BASE_KEYS: [(&'static str, bool); 4] = [ +// ("app_id", false), +// ("bucket", true), +// ("request_count", true), +// ("success_rate", true), +// ]; + +// impl Db { +// pub async fn get_requests_stats_by_app_id( +// &self, +// app_id: &str, +// filter: TimeFilter, +// ) -> Result, DbError> { +// let start_date = filter.to_date(); +// let bucket_size = filter.bucket_size(); + +// // Correctly selecting the view based on the bucket_size +// let prefix = match bucket_size { +// "1 hour" => "hourly", +// "1 day" => "daily", +// "1 month" => "monthly", +// _ => return Err(DbError::DatabaseError("Invalid bucket size".to_string())), +// }; + +// let formatted_keys = format_view_keys(prefix, &REQUESTS_STATS_BASE_KEYS); +// let formatted_view_name = format_view_name(prefix, REQUESTS_STATS_BASE_VIEW_NAME); 
+// let filter_key = REQUESTS_STATS_BASE_KEYS[1].0; +// let filter = format!("{prefix}_{filter_key}"); + +// let query = format!( +// "SELECT {formatted_keys} +// FROM {formatted_view_name} +// WHERE app_id = $1 AND {filter} >= $2 +// ORDER BY {filter} DESC", +// ); + +// sqlx::query_as::<_, RequestsStats>(&query) +// .bind(app_id) +// .bind(start_date) +// .fetch_all(&self.connection_pool) +// .await +// .map_err(|e| e.into()) +// } +// } + +// #[cfg(feature = "cloud_db_tests")] +// #[cfg(test)] +// mod test { + +// use super::*; +// use crate::{ +// structs::{ +// consts::DAY_IN_SECONDS, request_status::RequestStatus, request_type::RequestType, +// }, +// tables::{ +// registered_app::table_struct::DbRegisteredApp, requests::table_struct::Request, +// sessions::table_struct::DbNcSession, utils::to_microsecond_precision, +// }, +// }; +// use sqlx::types::chrono::{DateTime, Utc}; +// use std::{sync::Arc, time::Duration}; +// use tokio::task; + +// #[tokio::test] +// async fn test_requests_count() { +// let db = super::Db::connect_to_the_pool().await; +// db.truncate_all_tables().await.unwrap(); + +// // Create test team instance +// let team_id = "test_team_id".to_string(); +// let app_id = "test_app_id".to_string(); + +// db.setup_test_team(&team_id, &app_id, Utc::now()) +// .await +// .unwrap(); + +// // Create session +// let session = DbNcSession { +// session_id: "test_session_id".to_string(), +// app_id: app_id.to_string(), +// app_metadata: "test_app_metadata".to_string(), +// persistent: false, +// network: "test_network".to_string(), +// client_data: None, +// session_open_timestamp: DateTime::from(Utc::now()), +// session_close_timestamp: None, +// }; + +// db.handle_new_session(&session, None, &"127.0.0.1".to_string()) +// .await +// .unwrap(); + +// let result = db.get_sessions_by_app_id(&app_id).await.unwrap(); +// assert_eq!(result.len(), 1); + +// let db_arc = Arc::new(db); +// let mut tasks = Vec::new(); + +// for i in 0..33 { +// let db_clone = 
db_arc.clone(); // Clone the db connection or pool if needed +// let app_id = app_id.clone(); +// tasks.push(task::spawn(async move { +// for j in 0..100 - i { +// let creation_time: DateTime = Utc::now() +// - Duration::from_secs(i as u64 * DAY_IN_SECONDS as u64) +// - Duration::from_millis((j + 1) as u64 * 100); + +// let request = Request { +// request_id: format!("test_request_id_{}_{}", i, j), +// app_id: app_id.to_string(), +// session_id: "test_session_id".to_string(), +// network: "test_network".to_string(), +// creation_timestamp: creation_time, +// request_status: RequestStatus::Pending, +// request_type: RequestType::SignAndSendTransaction, +// }; + +// if let Err(e) = db_clone.save_request(&request).await { +// eprintln!("Failed to save request: {}", e); +// } +// } +// })); +// } + +// // Await all tasks to complete +// for task in tasks { +// task.await.unwrap(); +// } + +// // We need to refresh manually the views +// db_arc +// .refresh_continuous_aggregates(vec![ +// "hourly_requests_stats_per_app".to_string(), +// "daily_requests_stats_per_app".to_string(), +// "monthly_requests_stats_per_app".to_string(), +// ]) +// .await +// .unwrap(); + +// let result = db_arc +// .get_requests_stats_by_app_id(&app_id, TimeFilter::Last24Hours) +// .await +// .unwrap(); + +// assert_eq!(result.len(), 2); +// assert_eq!(result[0].request_count, 100); +// assert_eq!(result[1].request_count, 99); + +// let result = db_arc +// .get_requests_stats_by_app_id(&app_id, TimeFilter::Last7Days) +// .await +// .unwrap(); + +// assert_eq!(result.len(), 8); +// assert_eq!(result[0].request_count, 100); +// assert_eq!(result[7].request_count, 93); + +// let result = db_arc +// .get_requests_stats_by_app_id(&app_id, TimeFilter::Last30Days) +// .await +// .unwrap(); + +// assert_eq!(result.len(), 31); +// assert_eq!(result[0].request_count, 100); +// assert_eq!(result[30].request_count, 70); +// } + +// #[tokio::test] +// async fn test_requests_success_rate() { +// let db = 
super::Db::connect_to_the_pool().await; +// db.truncate_all_tables().await.unwrap(); + +// // Create test team instance +// let team_id = "test_team_id".to_string(); +// let app_id = "test_app_id".to_string(); + +// db.setup_test_team(&team_id, &app_id, Utc::now()) +// .await +// .unwrap(); + +// // Create session +// let session = DbNcSession { +// session_id: "test_session_id".to_string(), +// app_id: "test_app_id".to_string(), +// app_metadata: "test_app_metadata".to_string(), +// persistent: false, +// network: "test_network".to_string(), +// client_data: None, +// session_open_timestamp: to_microsecond_precision(&Utc::now()), +// session_close_timestamp: None, +// }; + +// db.handle_new_session(&session, None, &"127.0.0.1".to_string()) +// .await +// .unwrap(); + +// let result = db.get_sessions_by_app_id(&app_id).await.unwrap(); +// assert_eq!(result.len(), 1); +// // assert_eq!(session, result[0]); + +// let db_arc = Arc::new(db); +// let mut tasks = Vec::new(); + +// for i in 0..33 { +// let db_clone = db_arc.clone(); // Clone the db connection or pool if needed +// let app_id = app_id.clone(); +// tasks.push(task::spawn(async move { +// for j in 0..100 - i { +// let creation_time: DateTime = Utc::now() +// - Duration::from_secs(i as u64 * DAY_IN_SECONDS as u64) +// - Duration::from_millis((j + 1) as u64 * 100); + +// let status = if j % 3 == 0 { +// RequestStatus::Completed +// } else if j % 3 == 1 { +// RequestStatus::Completed +// } else { +// RequestStatus::Rejected +// }; + +// let request = Request { +// request_id: format!("test_request_id_{}_{}", i, j), +// app_id: app_id.to_string(), +// session_id: "test_session_id".to_string(), +// network: "test_network".to_string(), +// creation_timestamp: to_microsecond_precision(&creation_time), +// request_status: status, +// request_type: RequestType::SignAndSendTransaction, +// }; +// if let Err(e) = db_clone.save_request(&request).await { +// eprintln!("Failed to save request: {}", e); +// } +// } +// 
})); +// } + +// // Await all tasks to complete +// for task in tasks { +// task.await.unwrap(); +// } + +// // We need to refresh manually the views +// db_arc +// .refresh_continuous_aggregates(vec![ +// "hourly_requests_stats_per_app".to_string(), +// "daily_requests_stats_per_app".to_string(), +// "monthly_requests_stats_per_app".to_string(), +// ]) +// .await +// .unwrap(); + +// // Check the success rate on every time filter +// let result = db_arc +// .get_requests_stats_by_app_id(&app_id, TimeFilter::Last24Hours) +// .await +// .unwrap(); + +// assert_eq!(result.len(), 2); +// assert_eq!( +// (result[0].success_rate.unwrap() * 100.0).ceil() / 100.0, +// 0.67 as f64 +// ); +// assert_eq!( +// (result[1].success_rate.unwrap() * 100.0).ceil() / 100.0, +// 0.67 as f64 +// ); + +// let result = db_arc +// .get_requests_stats_by_app_id(&app_id, TimeFilter::Last7Days) +// .await +// .unwrap(); + +// assert_eq!(result.len(), 8); +// assert_eq!( +// (result[0].success_rate.unwrap() * 100.0).ceil() / 100.0, +// 0.67 as f64 +// ); +// assert_eq!( +// (result[7].success_rate.unwrap() * 100.0).ceil() / 100.0, +// 0.67 as f64 +// ); + +// let result = db_arc +// .get_requests_stats_by_app_id(&app_id, TimeFilter::Last30Days) +// .await +// .unwrap(); + +// assert_eq!(result.len(), 31); +// assert_eq!( +// (result[0].success_rate.unwrap() * 100.0).ceil() / 100.0, +// 0.67 as f64 +// ); +// assert_eq!( +// (result[30].success_rate.unwrap() * 100.0).ceil() / 100.0, +// 0.68 as f64 +// ); + +// // Test missing success due to all requests having pending status +// // Add new app to have a "clean" state +// let second_app_id = "test_app_id2".to_string(); +// let app = DbRegisteredApp { +// team_id: team_id.clone(), +// app_id: second_app_id.to_string(), +// app_name: "test_app_name".to_string(), +// whitelisted_domains: vec!["test_domain".to_string()], +// ack_public_keys: vec!["test_key".to_string()], +// registration_timestamp: to_microsecond_precision(&Utc::now()), +// }; 
+// db_arc.register_new_app(&app).await.unwrap(); + +// let result = db_arc +// .get_registered_app_by_app_id(&second_app_id) +// .await +// .unwrap(); +// assert_eq!(app, result.unwrap()); + +// // Create session +// let session = DbNcSession { +// session_id: "test_session_id".to_string(), +// app_id: second_app_id.to_string(), +// app_metadata: "test_app_metadata".to_string(), +// persistent: false, +// network: "test_network".to_string(), +// client_data: None, +// session_open_timestamp: to_microsecond_precision(&Utc::now()), +// session_close_timestamp: None, +// }; + +// db_arc +// .handle_new_session(&session, None, &"127.0.0.1".to_string()) +// .await +// .unwrap(); + +// let mut tasks = Vec::new(); +// for i in 0..10 { +// let db_clone = db_arc.clone(); // Clone the db connection or pool if needed +// let app_id = second_app_id.clone(); +// tasks.push(task::spawn(async move { +// for j in 0..11 - i { +// let creation_time: DateTime = Utc::now() +// - Duration::from_secs(i as u64 * DAY_IN_SECONDS as u64) +// - Duration::from_millis((j + 1) as u64 * 100); + +// let request = Request { +// request_id: format!("test_request_id_{}_{}", i, j), +// app_id: app_id.to_string(), +// session_id: "test_session_id".to_string(), +// network: "test_network".to_string(), +// creation_timestamp: to_microsecond_precision(&creation_time), +// request_status: RequestStatus::Pending, +// request_type: RequestType::SignAndSendTransaction, +// }; +// if let Err(e) = db_clone.save_request(&request).await { +// eprintln!("Failed to save request: {}", e); +// } +// } +// })); +// } + +// // Await all tasks to complete +// for task in tasks { +// task.await.unwrap(); +// } + +// // We need to refresh manually the views +// db_arc +// .refresh_continuous_aggregates(vec![ +// "hourly_requests_stats_per_app".to_string(), +// "daily_requests_stats_per_app".to_string(), +// "monthly_requests_stats_per_app".to_string(), +// ]) +// .await +// .unwrap(); + +// let result = db_arc +// 
.get_requests_stats_by_app_id(&second_app_id, TimeFilter::Last24Hours) +// .await +// .unwrap(); + +// assert_eq!(result.len(), 2); +// assert!(result[0].success_rate.is_none()); +// assert!(result[1].success_rate.is_none()); + +// let result = db_arc +// .get_requests_stats_by_app_id(&second_app_id, TimeFilter::Last7Days) +// .await +// .unwrap(); + +// assert_eq!(result.len(), 8); +// assert!(result[0].success_rate.is_none()); +// assert!(result[7].success_rate.is_none()); + +// let result = db_arc +// .get_requests_stats_by_app_id(&second_app_id, TimeFilter::Last30Days) +// .await +// .unwrap(); + +// assert_eq!(result.len(), 10); +// assert!(result[0].success_rate.is_none()); +// assert!(result[9].success_rate.is_none()); +// } +// } diff --git a/database/src/structs/event_type.rs b/database/src/structs/event_type.rs new file mode 100644 index 00000000..eeb341b0 --- /dev/null +++ b/database/src/structs/event_type.rs @@ -0,0 +1,19 @@ +use serde::{Deserialize, Serialize}; +use sqlx::Type; +use ts_rs::TS; + +#[derive(Clone, Debug, Eq, PartialEq, Type, Serialize, Deserialize, TS)] +#[ts(export)] +#[sqlx(type_name = "event_type_enum")] +pub enum EventType { + AppConnect, + AppDisconnect, + ClientConnectInit, + ClientConnectResolve, + ClientDisconnect, + SignMessage, + SignTransaction, + SignAndSendTransaction, + ChangeWallet, + ChangeNetwork, +} diff --git a/database/src/structs/mod.rs b/database/src/structs/mod.rs index 43e6a05c..0923cb0f 100644 --- a/database/src/structs/mod.rs +++ b/database/src/structs/mod.rs @@ -2,6 +2,7 @@ pub mod client_data; pub mod consts; pub mod db_error; pub mod entity_type; +pub mod event_type; pub mod filter_requests; pub mod geo_location; pub mod privilege_level; diff --git a/database/src/tables/events/app_connect/mod.rs b/database/src/tables/events/app_connect/mod.rs new file mode 100644 index 00000000..6027081c --- /dev/null +++ b/database/src/tables/events/app_connect/mod.rs @@ -0,0 +1,2 @@ +pub mod table_struct; +pub mod update; diff --git 
a/database/src/tables/events/app_connect/table_struct.rs b/database/src/tables/events/app_connect/table_struct.rs new file mode 100644 index 00000000..346b7b8c --- /dev/null +++ b/database/src/tables/events/app_connect/table_struct.rs @@ -0,0 +1,28 @@ +use sqlx::{postgres::PgRow, FromRow, Row}; + +pub const EVENT_APP_CONNECT_TABLE_NAME: &str = "event_app_connect"; +pub const EVENT_APP_CONNECT_KEYS: &str = + "event_id, session_id, device_metadata, lang, timezone, new_session"; + +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct AppConnectEvent { + pub event_id: String, + pub new_session: bool, + pub session_id: String, + pub device_metadata: String, + pub lang: String, + pub timezone: String, +} + +impl FromRow<'_, PgRow> for AppConnectEvent { + fn from_row(row: &sqlx::postgres::PgRow) -> std::result::Result { + Ok(AppConnectEvent { + event_id: row.get("event_id"), + new_session: row.get("new_session"), + session_id: row.get("session_id"), + device_metadata: row.get("device_metadata"), + lang: row.get("lang"), + timezone: row.get("timezone"), + }) + } +} diff --git a/database/src/tables/events/app_connect/update.rs b/database/src/tables/events/app_connect/update.rs new file mode 100644 index 00000000..036ffe94 --- /dev/null +++ b/database/src/tables/events/app_connect/update.rs @@ -0,0 +1,35 @@ +use crate::tables::events::app_connect::table_struct::EVENT_APP_CONNECT_KEYS; +use crate::tables::events::app_connect::table_struct::EVENT_APP_CONNECT_TABLE_NAME; +use crate::{db::Db, structs::db_error::DbError}; +use sqlx::Transaction; +use sqlx::{query, Postgres}; + +impl Db { + pub async fn create_new_event_app_connect( + &self, + tx: &mut Transaction<'_, Postgres>, + session_id: &String, + device_metadata: &String, + lang: &String, + timezone: &String, + new_session: bool, + ) -> Result<(), DbError> { + let query_body = format!( + "INSERT INTO {EVENT_APP_CONNECT_TABLE_NAME} ({EVENT_APP_CONNECT_KEYS}) VALUES (DEFAULT, $1, $2, $3, $4, $5)" + ); + + let query_result = 
query(&query_body) + .bind(session_id) + .bind(device_metadata) + .bind(lang) + .bind(timezone) + .bind(new_session) + .execute(&mut **tx) + .await; + + match query_result { + Ok(_) => Ok(()), + Err(e) => Err(e).map_err(|e| e.into()), + } + } +} diff --git a/database/src/tables/events/app_disconnect/mod.rs b/database/src/tables/events/app_disconnect/mod.rs new file mode 100644 index 00000000..6027081c --- /dev/null +++ b/database/src/tables/events/app_disconnect/mod.rs @@ -0,0 +1,2 @@ +pub mod table_struct; +pub mod update; diff --git a/database/src/tables/events/app_disconnect/table_struct.rs b/database/src/tables/events/app_disconnect/table_struct.rs new file mode 100644 index 00000000..8efac398 --- /dev/null +++ b/database/src/tables/events/app_disconnect/table_struct.rs @@ -0,0 +1,20 @@ +use sqlx::{postgres::PgRow, FromRow, Row}; + +pub const EVENT_APP_DISCONNECT_TABLE_NAME: &str = "event_app_disconnect"; +pub const EVENT_APP_DISCONNECT_KEYS: &str = + "event_id, session_id"; + +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct AppDisconnectEvent { + pub event_id: String, + pub session_id: String, +} + +impl FromRow<'_, PgRow> for AppDisconnectEvent { + fn from_row(row: &sqlx::postgres::PgRow) -> std::result::Result { + Ok(AppDisconnectEvent { + event_id: row.get("event_id"), + session_id: row.get("session_id"), + }) + } +} diff --git a/database/src/tables/events/app_disconnect/update.rs b/database/src/tables/events/app_disconnect/update.rs new file mode 100644 index 00000000..de33ccbd --- /dev/null +++ b/database/src/tables/events/app_disconnect/update.rs @@ -0,0 +1,25 @@ +use crate::tables::events::app_disconnect::table_struct::{ + EVENT_APP_DISCONNECT_KEYS, EVENT_APP_DISCONNECT_TABLE_NAME, +}; +use crate::{db::Db, structs::db_error::DbError}; +use sqlx::Transaction; +use sqlx::{query, Postgres}; + +impl Db { + pub async fn create_new_event_app_disconnect( + &self, + tx: &mut Transaction<'_, Postgres>, + 
session_id: &String, + ) -> Result<(), DbError> { + let query_body = format!( + "INSERT INTO {EVENT_APP_DISCONNECT_TABLE_NAME} ({EVENT_APP_DISCONNECT_KEYS}) VALUES (DEFAULT, $1)" + ); + + let query_result = query(&query_body).bind(session_id).execute(&mut **tx).await; + + match query_result { + Ok(_) => Ok(()), + Err(e) => Err(e).map_err(|e| e.into()), + } + } +} diff --git a/database/src/tables/events/events_index/mod.rs b/database/src/tables/events/events_index/mod.rs new file mode 100644 index 00000000..6027081c --- /dev/null +++ b/database/src/tables/events/events_index/mod.rs @@ -0,0 +1,2 @@ +pub mod table_struct; +pub mod update; diff --git a/database/src/tables/events/events_index/table_struct.rs b/database/src/tables/events/events_index/table_struct.rs new file mode 100644 index 00000000..aafcec97 --- /dev/null +++ b/database/src/tables/events/events_index/table_struct.rs @@ -0,0 +1,26 @@ +use crate::structs::event_type::EventType; +use chrono::{DateTime, Utc}; +use sqlx::types::chrono; +use sqlx::{postgres::PgRow, FromRow, Row}; + +pub const EVENTS_TABLE_NAME: &str = "events"; +pub const EVENTS_KEYS: &str = "event_id, app_id, event_type, creation_timestamp"; + +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct Event { + pub event_id: String, + pub app_id: String, + pub event_type: EventType, + pub creation_timestamp: DateTime, +} + +impl FromRow<'_, PgRow> for Event { + fn from_row(row: &sqlx::postgres::PgRow) -> std::result::Result { + Ok(Event { + event_id: row.get("event_id"), + app_id: row.get("app_id"), + event_type: row.get("event_type"), + creation_timestamp: row.get("creation_timestamp"), + }) + } +} diff --git a/database/src/tables/events/events_index/update.rs b/database/src/tables/events/events_index/update.rs new file mode 100644 index 00000000..9ed47a7e --- /dev/null +++ b/database/src/tables/events/events_index/update.rs @@ -0,0 +1,31 @@ +use crate::{ + db::Db, + structs::{db_error::DbError, event_type::EventType}, + 
tables::events::events_index::table_struct::{EVENTS_KEYS, EVENTS_TABLE_NAME}, +}; +use sqlx::Transaction; +use sqlx::{query, Postgres}; + +impl Db { + pub async fn create_new_event_entry( + &self, + tx: &mut Transaction<'_, Postgres>, + app_id: &String, + event_type: &EventType, + ) -> Result<(), DbError> { + let query_body = format!( + "INSERT INTO {EVENTS_TABLE_NAME} ({EVENTS_KEYS}) VALUES (DEFAULT, $1, $2, DEFAULT)" + ); + + let query_result = query(&query_body) + .bind(app_id) + .bind(event_type) + .execute(&mut **tx) + .await; + + match query_result { + Ok(_) => Ok(()), + Err(e) => Err(e).map_err(|e| e.into()), + } + } +} diff --git a/database/src/tables/events/mod.rs b/database/src/tables/events/mod.rs new file mode 100644 index 00000000..dd01467d --- /dev/null +++ b/database/src/tables/events/mod.rs @@ -0,0 +1,3 @@ +pub mod app_connect; +pub mod app_disconnect; +pub mod events_index; diff --git a/database/src/tables/mod.rs b/database/src/tables/mod.rs index 35555585..ea8e84f6 100644 --- a/database/src/tables/mod.rs +++ b/database/src/tables/mod.rs @@ -1,10 +1,11 @@ pub mod client_profiles; pub mod connection_events; +pub mod events; pub mod grafana_users; pub mod ip_addresses; pub mod public_keys; pub mod registered_app; -pub mod requests; +// pub mod requests; pub mod session_public_keys; pub mod sessions; pub mod team; diff --git a/database/src/tables/registered_app/select.rs b/database/src/tables/registered_app/select.rs index 40c8b4c1..6735b1f0 100644 --- a/database/src/tables/registered_app/select.rs +++ b/database/src/tables/registered_app/select.rs @@ -1,7 +1,5 @@ use super::table_struct::{DbRegisteredApp, REGISTERED_APPS_TABLE_NAME}; -use crate::structs::db_error::DbError; -use crate::tables::requests::table_struct::REQUESTS_TABLE_NAME; -use crate::{db::Db, tables::requests::table_struct::Request}; +use crate::{db::Db, structs::db_error::DbError}; use sqlx::query_as; impl Db { @@ -19,22 +17,6 @@ impl Db { .map_err(|e| e.into()); } - pub async fn 
get_requests_by_app_id(&self, app_id: &String) -> Result, DbError> { - let query = format!( - "SELECT r.* FROM {REQUESTS_TABLE_NAME} r - INNER JOIN sessions s ON r.session_id = s.session_id - WHERE s.app_id = $1 - ORDER BY r.creation_timestamp DESC" - ); - let typed_query = query_as::<_, Request>(&query); - - return typed_query - .bind(&app_id) - .fetch_all(&self.connection_pool) - .await - .map_err(|e| e.into()); - } - pub async fn get_registered_app_by_app_name_and_team_id( &self, app_name: &String, diff --git a/database/src/tables/sessions/select.rs b/database/src/tables/sessions/select.rs index 7401e87f..b3f7257d 100644 --- a/database/src/tables/sessions/select.rs +++ b/database/src/tables/sessions/select.rs @@ -1,7 +1,6 @@ use super::table_struct::{DbNcSession, SESSIONS_TABLE_NAME}; use crate::db::Db; use crate::structs::db_error::DbError; -use crate::tables::requests::table_struct::{Request, REQUESTS_TABLE_NAME}; use sqlx::query_as; impl Db { @@ -32,15 +31,4 @@ impl Db { .await .map_err(|e| e.into()); } - - pub async fn get_session_requests(&self, session_id: &String) -> Result, DbError> { - let query = format!("SELECT * FROM {REQUESTS_TABLE_NAME} WHERE session_id = $1"); - let typed_query = query_as::<_, Request>(&query); - - return typed_query - .bind(&session_id) - .fetch_all(&self.connection_pool) - .await - .map_err(|e| e.into()); - } } diff --git a/database/src/tables/sessions/update.rs b/database/src/tables/sessions/update.rs index d351fd64..1d7aac3c 100644 --- a/database/src/tables/sessions/update.rs +++ b/database/src/tables/sessions/update.rs @@ -240,7 +240,7 @@ mod tests { use super::*; use crate::{ structs::{request_status::RequestStatus, request_type::RequestType}, - tables::{requests::table_struct::Request, utils::get_date_time}, + tables::utils::get_date_time, }; #[tokio::test] @@ -298,38 +298,38 @@ mod tests { .unwrap(); assert_eq!(session.session_close_timestamp, get_date_time(15)); - // Create a few requests for the session - let request = 
Request { - request_id: "test_request_id".to_string(), - request_type: RequestType::SignAndSendTransaction, - app_id: "test_app_id".to_string(), - session_id: "test_session_id".to_string(), - request_status: RequestStatus::Pending, - network: "test_network".to_string(), - creation_timestamp: get_date_time(12).unwrap(), - }; - - let second_request = Request { - request_id: "test_request_id2".to_string(), - request_type: RequestType::SignAndSendTransaction, - session_id: "test_session_id".to_string(), - app_id: "test_app_id".to_string(), - request_status: RequestStatus::Pending, - network: "test_network".to_string(), - creation_timestamp: get_date_time(13).unwrap(), - }; - - db.save_request(&request).await.unwrap(); - db.save_request(&second_request).await.unwrap(); - - // Get all requests by session_id - let requests = db - .get_requests_by_session_id(&request.session_id) - .await - .unwrap(); - - assert_eq!(requests.len(), 2); - assert_eq!(request, requests[1]); - assert_eq!(second_request, requests[0]); + // // Create a few requests for the session + // let request = Request { + // request_id: "test_request_id".to_string(), + // request_type: RequestType::SignAndSendTransaction, + // app_id: "test_app_id".to_string(), + // session_id: "test_session_id".to_string(), + // request_status: RequestStatus::Pending, + // network: "test_network".to_string(), + // creation_timestamp: get_date_time(12).unwrap(), + // }; + + // let second_request = Request { + // request_id: "test_request_id2".to_string(), + // request_type: RequestType::SignAndSendTransaction, + // session_id: "test_session_id".to_string(), + // app_id: "test_app_id".to_string(), + // request_status: RequestStatus::Pending, + // network: "test_network".to_string(), + // creation_timestamp: get_date_time(13).unwrap(), + // }; + + // db.save_request(&request).await.unwrap(); + // db.save_request(&second_request).await.unwrap(); + + // // Get all requests by session_id + // let requests = db + // 
.get_requests_by_session_id(&request.session_id) + // .await + // .unwrap(); + + // assert_eq!(requests.len(), 2); + // assert_eq!(request, requests[1]); + // assert_eq!(second_request, requests[0]); } } diff --git a/sdk/pnpm-lock.yaml b/sdk/pnpm-lock.yaml index ad81d383..4ba19f08 100644 --- a/sdk/pnpm-lock.yaml +++ b/sdk/pnpm-lock.yaml @@ -139,7 +139,7 @@ importers: version: 0.8.0 '@nightlylabs/nightly-connect-polkadot': specifier: 0.0.15 - version: link:../../packages/polkadot + version: 0.0.15 '@nightlylabs/nightly-connect-solana': specifier: 0.0.29 version: link:../../packages/solana @@ -154,7 +154,7 @@ importers: version: link:../../packages/selector-base '@nightlylabs/wallet-selector-polkadot': specifier: 0.2.3 - version: link:../../packages/selector-polkadot + version: 0.2.3(@polkadot/util@12.5.1) '@nightlylabs/wallet-selector-solana': specifier: 0.3.0 version: link:../../packages/selector-solana @@ -617,7 +617,7 @@ importers: packages/selector-polkadot: dependencies: '@nightlylabs/nightly-connect-polkadot': - specifier: ^0.0.15 + specifier: ^0.0.16 version: link:../polkadot '@nightlylabs/wallet-selector-base': specifier: ^0.4.0 @@ -5261,6 +5261,24 @@ packages: - utf-8-validate dev: false + /@nightlylabs/nightly-connect-polkadot@0.0.15: + resolution: {integrity: sha512-WCsumvHwhPipbxPQoswKCwHykwJ48Dffwb9hCf7zjCgEysIBCnA6Dzj/2G80drLqYYpS285nMa8z+3NaXVu2dA==} + dependencies: + '@nightlylabs/nightly-connect-base': 0.0.27 + '@polkadot/api': 10.10.1 + '@polkadot/extension-inject': 0.46.5(@polkadot/api@10.10.1)(@polkadot/util@12.5.1) + '@polkadot/types': 10.10.1 + '@polkadot/util': 12.5.1 + '@polkadot/util-crypto': 12.5.1(@polkadot/util@12.5.1) + eventemitter3: 5.0.1 + uuid: 9.0.0 + transitivePeerDependencies: + - bufferutil + - encoding + - supports-color + - utf-8-validate + dev: false + /@nightlylabs/nightly-connect-solana@0.0.28: resolution: {integrity: sha512-8PBkmuXzWZNPqu6SGT2tsGK4DgD3yswQsUVb3L+GgFGCdQI7eUqyHd2ofWFWzEgj4a1XuixA29ZcSyw20ajgzw==} 
dependencies: @@ -5333,6 +5351,21 @@ packages: - utf-8-validate dev: false + /@nightlylabs/wallet-selector-base@0.4.0: + resolution: {integrity: sha512-NCreMsiNzd5Rvu76cNTwtrCrB5iCBV+NtPZbhyUWfagbxROS1AJsZuIt+Af+AVjHFDS/7MQS37SJi1EX6XCp8Q==} + dependencies: + '@nightlylabs/nightly-connect-base': 0.0.27 + '@nightlylabs/wallet-selector-modal': 0.2.1 + '@wallet-standard/core': 1.0.3 + isomorphic-localstorage: 1.0.2 + transitivePeerDependencies: + - bufferutil + - encoding + - supports-color + - ts-node + - utf-8-validate + dev: false + /@nightlylabs/wallet-selector-modal@0.1.2: resolution: {integrity: sha512-vxy9S2dEf3NARW6LDq2ZKpWMlk5JJFIuwUfSxkuJlgUg2OVSlnDS7vdho3h4DmluRU5GM9vVhaXUGHAVp5sDQg==} dependencies: @@ -5347,6 +5380,38 @@ packages: - ts-node dev: false + /@nightlylabs/wallet-selector-modal@0.2.1: + resolution: {integrity: sha512-jJghrmUtKwHiSaH0c4Tc8befpqGP23AjTFsQ/Eucpa7uz90lFZ9FHmw+bZZKabqYgI0j+uyViiyfwaPcVPKjlQ==} + dependencies: + '@nightlylabs/qr-code': 2.0.4 + autoprefixer: 10.4.14(postcss@8.4.24) + lit: 2.7.2 + postcss: 8.4.24 + postcss-lit: 1.1.0(postcss@8.4.24) + tailwindcss: 3.3.2 + transitivePeerDependencies: + - supports-color + - ts-node + dev: false + + /@nightlylabs/wallet-selector-polkadot@0.2.3(@polkadot/util@12.5.1): + resolution: {integrity: sha512-hlMHdd67ye4Ov7+5mSqlwJiSjAblH+ZJ9WRq2hHwDKnjJRvgxVDX/vG+Nn3Ir/bBHfNurrGmBFyzVixaiV6akw==} + dependencies: + '@nightlylabs/nightly-connect-polkadot': 0.0.15 + '@nightlylabs/wallet-selector-base': 0.4.0 + '@polkadot/api': 10.10.1 + '@polkadot/extension-inject': 0.46.5(@polkadot/api@10.10.1)(@polkadot/util@12.5.1) + '@wallet-standard/core': 1.0.3 + eventemitter3: 5.0.1 + transitivePeerDependencies: + - '@polkadot/util' + - bufferutil + - encoding + - supports-color + - ts-node + - utf-8-validate + dev: false + /@nightlylabs/wallet-selector-solana@0.2.6(bs58@4.0.1)(react@18.2.0): resolution: {integrity: sha512-cVTKk+c6tGv4GeSQMlUaZ2si4A6ySKj41emkGJ8OtuwmtzwUym4Xuh3chXZYgGrMQgvPrX5+erIR4oq2GmGIPg==} 
dependencies: diff --git a/server/bindings/AppDisconnectEvent.ts b/server/bindings/AppDisconnectEvent.ts index 7f1adc38..45063c6a 100644 --- a/server/bindings/AppDisconnectEvent.ts +++ b/server/bindings/AppDisconnectEvent.ts @@ -1,3 +1,3 @@ // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. -export interface AppDisconnectEvent { connectionId: string, sessionId: string, } \ No newline at end of file +export interface AppDisconnectEvent { sessionId: string, } \ No newline at end of file diff --git a/server/bindings/ChangeNetworkEvent.ts b/server/bindings/ChangeNetworkEvent.ts new file mode 100644 index 00000000..2f6b7099 --- /dev/null +++ b/server/bindings/ChangeNetworkEvent.ts @@ -0,0 +1,3 @@ +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. + +export interface ChangeNetworkEvent { sessionId: string, requestId: string, network: string, } \ No newline at end of file diff --git a/server/bindings/ChangeWalletEvent.ts b/server/bindings/ChangeWalletEvent.ts new file mode 100644 index 00000000..b9a9dc59 --- /dev/null +++ b/server/bindings/ChangeWalletEvent.ts @@ -0,0 +1,3 @@ +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. + +export interface ChangeWalletEvent { sessionId: string, requestId: string, network: string, walletName: string, walletType: string, } \ No newline at end of file diff --git a/server/bindings/EventData.ts b/server/bindings/EventData.ts index 27d34310..b766bc63 100644 --- a/server/bindings/EventData.ts +++ b/server/bindings/EventData.ts @@ -1,10 +1,16 @@ // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. 
import type { AppConnectEvent } from "./AppConnectEvent"; import type { AppDisconnectEvent } from "./AppDisconnectEvent"; +import type { ChangeNetworkEvent } from "./ChangeNetworkEvent"; +import type { ChangeWalletEvent } from "./ChangeWalletEvent"; import type { ClientConnectInitEvent } from "./ClientConnectInitEvent"; import type { ClientConnectResolveEvent } from "./ClientConnectResolveEvent"; import type { ClientDisconnectEvent } from "./ClientDisconnectEvent"; -import type { NewRequestEvent } from "./NewRequestEvent"; -import type { RequestResolvedEvent } from "./RequestResolvedEvent"; +import type { SignAndSendTransactionEvent } from "./SignAndSendTransactionEvent"; +import type { SignAndSendTransactionResolveEvent } from "./SignAndSendTransactionResolveEvent"; +import type { SignMessageEvent } from "./SignMessageEvent"; +import type { SignMessageResolveEvent } from "./SignMessageResolveEvent"; +import type { SignTransactionEvent } from "./SignTransactionEvent"; +import type { SignTransactionResolveEvent } from "./SignTransactionResolveEvent"; -export type EventData = { type: "AppConnect" } & AppConnectEvent | { type: "AppDisconnect" } & AppDisconnectEvent | { type: "ClientConnectInit" } & ClientConnectInitEvent | { type: "ClientConnectResolve" } & ClientConnectResolveEvent | { type: "ClientDisconnect" } & ClientDisconnectEvent | { type: "NewRequest" } & NewRequestEvent | { type: "RequestResolved" } & RequestResolvedEvent; \ No newline at end of file +export type EventData = { type: "AppConnect" } & AppConnectEvent | { type: "AppDisconnect" } & AppDisconnectEvent | { type: "ClientConnectInit" } & ClientConnectInitEvent | { type: "ClientConnectResolve" } & ClientConnectResolveEvent | { type: "ClientDisconnect" } & ClientDisconnectEvent | { type: "SignMessage" } & SignMessageEvent | { type: "SignMessageResolve" } & SignMessageResolveEvent | { type: "SignTransaction" } & SignTransactionEvent | { type: "SignTransactionResolve" } & SignTransactionResolveEvent | { 
type: "SignAndSendTransaction" } & SignAndSendTransactionEvent | { type: "SignAndSendTransactionResolve" } & SignAndSendTransactionResolveEvent | { type: "ChangeNetwork" } & ChangeNetworkEvent | { type: "ChangeWallet" } & ChangeWalletEvent; \ No newline at end of file diff --git a/server/bindings/SignAndSendTransactionEvent.ts b/server/bindings/SignAndSendTransactionEvent.ts new file mode 100644 index 00000000..e713f047 --- /dev/null +++ b/server/bindings/SignAndSendTransactionEvent.ts @@ -0,0 +1,3 @@ +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. + +export interface SignAndSendTransactionEvent { sessionId: string, requestId: string, network: string, } \ No newline at end of file diff --git a/server/bindings/SignAndSendTransactionResolveEvent.ts b/server/bindings/SignAndSendTransactionResolveEvent.ts new file mode 100644 index 00000000..343baa00 --- /dev/null +++ b/server/bindings/SignAndSendTransactionResolveEvent.ts @@ -0,0 +1,4 @@ +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. +import type { RequestFail } from "./RequestFail"; + +export interface SignAndSendTransactionResolveEvent { sessionId: string, requestId: string, network: string, txHash: string | null, failureReason?: RequestFail, } \ No newline at end of file diff --git a/server/bindings/SignMessageEvent.ts b/server/bindings/SignMessageEvent.ts new file mode 100644 index 00000000..78195313 --- /dev/null +++ b/server/bindings/SignMessageEvent.ts @@ -0,0 +1,3 @@ +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. 
+ +export interface SignMessageEvent { sessionId: string, requestId: string, network: string, } \ No newline at end of file diff --git a/server/bindings/SignMessageResolveEvent.ts b/server/bindings/SignMessageResolveEvent.ts new file mode 100644 index 00000000..3c3eb68f --- /dev/null +++ b/server/bindings/SignMessageResolveEvent.ts @@ -0,0 +1,4 @@ +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. +import type { RequestFail } from "./RequestFail"; + +export interface SignMessageResolveEvent { sessionId: string, requestId: string, failureReason?: RequestFail, } \ No newline at end of file diff --git a/server/bindings/SignTransactionEvent.ts b/server/bindings/SignTransactionEvent.ts new file mode 100644 index 00000000..143f726f --- /dev/null +++ b/server/bindings/SignTransactionEvent.ts @@ -0,0 +1,3 @@ +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. + +export interface SignTransactionEvent { sessionId: string, requestId: string, network: string, } \ No newline at end of file diff --git a/server/bindings/SignTransactionResolveEvent.ts b/server/bindings/SignTransactionResolveEvent.ts new file mode 100644 index 00000000..7c01b17d --- /dev/null +++ b/server/bindings/SignTransactionResolveEvent.ts @@ -0,0 +1,4 @@ +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. 
+import type { RequestFail } from "./RequestFail"; + +export interface SignTransactionResolveEvent { sessionId: string, requestId: string, txHash: string | null, failureReason?: RequestFail, } \ No newline at end of file diff --git a/server/src/http/cloud/events/events_handler.rs b/server/src/http/cloud/events/events_handler.rs index c9c3e8b2..5a95137a 100644 --- a/server/src/http/cloud/events/events_handler.rs +++ b/server/src/http/cloud/events/events_handler.rs @@ -6,8 +6,6 @@ use super::{ process_event_client_connect_init::process_event_client_connect_init, process_event_client_connect_resolve::process_event_client_connect_resolve, process_event_client_disconnect::process_event_client_disconnect, - process_event_new_request::process_event_new_request, - process_event_request_resolved::process_event_request_resolved, }, }; use crate::{ @@ -56,11 +54,13 @@ pub async fn process_event( EventData::ClientDisconnect(event) => { process_event_client_disconnect(event, &event_payload.app_id, ip, db_connection).await; } - EventData::NewRequest(event) => { - process_event_new_request(event, &event_payload.app_id, ip, db_connection).await; - } - EventData::RequestResolved(event) => { - process_event_request_resolved(event, &event_payload.app_id, ip, db_connection).await; - } + EventData::SignMessage(_) => todo!(), + EventData::SignMessageResolve(_) => todo!(), + EventData::SignTransaction(_) => todo!(), + EventData::SignTransactionResolve(_) => todo!(), + EventData::SignAndSendTransaction(_) => todo!(), + EventData::SignAndSendTransactionResolve(_) => todo!(), + EventData::ChangeNetwork(_) => todo!(), + EventData::ChangeWallet(_) => todo!(), } } diff --git a/server/src/http/cloud/events/processors/mod.rs b/server/src/http/cloud/events/processors/mod.rs index 828710a6..7d8de239 100644 --- a/server/src/http/cloud/events/processors/mod.rs +++ b/server/src/http/cloud/events/processors/mod.rs @@ -3,5 +3,3 @@ pub mod process_event_app_disconnect; pub mod 
process_event_client_connect_init; pub mod process_event_client_connect_resolve; pub mod process_event_client_disconnect; -pub mod process_event_new_request; -pub mod process_event_request_resolved; diff --git a/server/src/http/cloud/events/processors/process_event_client_connect_resolve.rs b/server/src/http/cloud/events/processors/process_event_client_connect_resolve.rs index d13ae1b0..faa557da 100644 --- a/server/src/http/cloud/events/processors/process_event_client_connect_resolve.rs +++ b/server/src/http/cloud/events/processors/process_event_client_connect_resolve.rs @@ -9,24 +9,24 @@ pub async fn process_event_client_connect_resolve( ip: SocketAddr, db: &Arc, ) { - // Check if connection attempt by client was successful, if not then there is nothing to do - if event.success { - if let Err(err) = db - .connect_user_to_the_session( - &event.client_id, - &event.wallet_name, - &event.wallet_type, - &get_current_datetime(), - &event.public_keys, - &app_id.clone(), - &event.session_id, - ) - .await - { - error!( - "Failed to process user successful connect, app_id: [{}], ip: [{}], event: [{:?}], err: [{}]", - app_id, ip, event, err - ); - } - } + // // Check if connection attempt by client was successful, if not then there is nothing to do + // if event.success { + // if let Err(err) = db + // .connect_user_to_the_session( + // &event.client_id, + // &event.wallet_name, + // &event.wallet_type, + // &get_current_datetime(), + // &event.public_keys, + // &app_id.clone(), + // &event.session_id, + // ) + // .await + // { + // error!( + // "Failed to process user successful connect, app_id: [{}], ip: [{}], event: [{:?}], err: [{}]", + // app_id, ip, event, err + // ); + // } + // } } diff --git a/server/src/http/cloud/events/processors/process_event_new_request.rs b/server/src/http/cloud/events/processors/process_event_new_request.rs deleted file mode 100644 index 4f9eb262..00000000 --- a/server/src/http/cloud/events/processors/process_event_new_request.rs +++ 
/dev/null @@ -1,33 +0,0 @@ -use crate::structs::cloud::cloud_events::event_types::new_request::NewRequestEvent; -use database::{ - db::Db, - structs::request_status::RequestStatus, - tables::{requests::table_struct::Request, utils::get_current_datetime}, -}; -use log::error; -use std::{net::SocketAddr, sync::Arc}; - -pub async fn process_event_new_request( - event: &NewRequestEvent, - app_id: &String, - ip: SocketAddr, - db: &Arc, -) { - // Save new request - let request = Request { - request_id: event.request_id.clone(), - session_id: event.session_id.clone(), - app_id: app_id.clone(), - request_type: event.request_type.clone(), - request_status: RequestStatus::Pending, - network: event.network.clone(), - creation_timestamp: get_current_datetime(), - }; - - if let Err(err) = db.save_request(&request).await { - error!( - "Failed to save new request, app_id: [{}], ip: [{}], event: [{:?}], err: [{}]", - app_id, ip, event, err - ); - } -} diff --git a/server/src/http/cloud/events/processors/process_event_request_resolved.rs b/server/src/http/cloud/events/processors/process_event_request_resolved.rs deleted file mode 100644 index 0e36dd69..00000000 --- a/server/src/http/cloud/events/processors/process_event_request_resolved.rs +++ /dev/null @@ -1,28 +0,0 @@ -use crate::structs::cloud::cloud_events::event_types::request_resolved_event::RequestResolvedEvent; -use database::{db::Db, structs::request_status::RequestStatus}; -use log::error; -use std::{net::SocketAddr, sync::Arc}; - -pub async fn process_event_request_resolved( - event: &RequestResolvedEvent, - app_id: &String, - ip: SocketAddr, - db: &Arc, -) { - // Generate new request status based on event data - let new_status: RequestStatus = match event.failure_reason.clone() { - Some(reason) => reason.into(), - None => RequestStatus::Completed, - }; - - // Update request status in database - if let Err(err) = db - .update_request_status(&event.request_id, &new_status) - .await - { - error!( - "Failed to save new 
request, app_id: [{}], ip: [{}], event: [{:?}], err: [{}]", - app_id, ip, event, err - ); - } -} diff --git a/server/src/structs/cloud/cloud_events/event_types/app_disconnect_event.rs b/server/src/structs/cloud/cloud_events/event_types/app_disconnect_event.rs index 422ba617..8cde235d 100644 --- a/server/src/structs/cloud/cloud_events/event_types/app_disconnect_event.rs +++ b/server/src/structs/cloud/cloud_events/event_types/app_disconnect_event.rs @@ -5,6 +5,5 @@ use ts_rs::TS; #[ts(export)] #[serde(rename_all = "camelCase")] pub struct AppDisconnectEvent { - pub connection_id: String, pub session_id: String, } diff --git a/server/src/structs/cloud/cloud_events/event_types/change_network_event.rs b/server/src/structs/cloud/cloud_events/event_types/change_network_event.rs new file mode 100644 index 00000000..3c1c75b4 --- /dev/null +++ b/server/src/structs/cloud/cloud_events/event_types/change_network_event.rs @@ -0,0 +1,12 @@ +use serde::{Deserialize, Serialize}; +use ts_rs::TS; + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, TS)] +#[ts(export)] +#[serde(rename_all = "camelCase")] +pub struct ChangeNetworkEvent { + pub session_id: String, + pub request_id: String, + pub old_network: String, + pub new_network: String, +} diff --git a/server/src/structs/cloud/cloud_events/event_types/change_network_resolve_event.rs b/server/src/structs/cloud/cloud_events/event_types/change_network_resolve_event.rs new file mode 100644 index 00000000..2bab7ec3 --- /dev/null +++ b/server/src/structs/cloud/cloud_events/event_types/change_network_resolve_event.rs @@ -0,0 +1,13 @@ +use database::structs::request_status::RequestFail; +use serde::{Deserialize, Serialize}; +use ts_rs::TS; + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, TS)] +#[ts(export)] +#[serde(rename_all = "camelCase")] +pub struct ChangeNetworkResolveEvent { + pub session_id: String, + pub request_id: String, + #[ts(optional)] + pub failure_reason: Option, +} diff --git 
a/server/src/structs/cloud/cloud_events/event_types/change_wallet_event.rs b/server/src/structs/cloud/cloud_events/event_types/change_wallet_event.rs new file mode 100644 index 00000000..21eee5f4 --- /dev/null +++ b/server/src/structs/cloud/cloud_events/event_types/change_wallet_event.rs @@ -0,0 +1,14 @@ +use serde::{Deserialize, Serialize}; +use ts_rs::TS; + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, TS)] +#[ts(export)] +#[serde(rename_all = "camelCase")] +pub struct ChangeWalletEvent { + pub session_id: String, + pub request_id: String, + pub network: String, + pub wallet_name: String, + pub wallet_type: String, + pub old_wallet_address: String, +} diff --git a/server/src/structs/cloud/cloud_events/event_types/change_wallet_resolve_event.rs b/server/src/structs/cloud/cloud_events/event_types/change_wallet_resolve_event.rs new file mode 100644 index 00000000..ceed6280 --- /dev/null +++ b/server/src/structs/cloud/cloud_events/event_types/change_wallet_resolve_event.rs @@ -0,0 +1,15 @@ +use database::structs::request_status::RequestFail; +use serde::{Deserialize, Serialize}; +use ts_rs::TS; + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, TS)] +#[ts(export)] +#[serde(rename_all = "camelCase")] +pub struct ChangeWalletResolveEvent { + pub session_id: String, + pub request_id: String, + #[ts(optional)] + pub new_wallet_address: Option<String>, + #[ts(optional)] + pub failure_reason: Option<RequestFail>, +} diff --git a/server/src/structs/cloud/cloud_events/event_types/client_connect_init_event.rs b/server/src/structs/cloud/cloud_events/event_types/client_connect_init_event.rs index 50713327..3bc100d7 100644 --- a/server/src/structs/cloud/cloud_events/event_types/client_connect_init_event.rs +++ b/server/src/structs/cloud/cloud_events/event_types/client_connect_init_event.rs @@ -7,6 +7,8 @@ use ts_rs::TS; #[serde(rename_all = "camelCase")] pub struct ClientConnectInitEvent { pub client_id: String, - pub session_type: SessionType, pub session_id: String,
+ pub wallet_name: String, + pub wallet_type: String, + pub session_type: SessionType, } diff --git a/server/src/structs/cloud/cloud_events/event_types/client_connect_resolve_event.rs b/server/src/structs/cloud/cloud_events/event_types/client_connect_resolve_event.rs index a72a84f9..b567b764 100644 --- a/server/src/structs/cloud/cloud_events/event_types/client_connect_resolve_event.rs +++ b/server/src/structs/cloud/cloud_events/event_types/client_connect_resolve_event.rs @@ -1,4 +1,3 @@ -use database::structs::session_type::SessionType; use serde::{Deserialize, Serialize}; use ts_rs::TS; @@ -8,9 +7,6 @@ use ts_rs::TS; pub struct ClientConnectResolveEvent { pub client_id: String, pub session_id: String, - pub public_keys: Vec<String>, - pub wallet_name: String, - pub wallet_type: String, - pub session_type: SessionType, + pub addresses: Vec<String>, pub success: bool, } diff --git a/server/src/structs/cloud/cloud_events/event_types/mod.rs b/server/src/structs/cloud/cloud_events/event_types/mod.rs index 0beecd9b..472a7a8d 100644 --- a/server/src/structs/cloud/cloud_events/event_types/mod.rs +++ b/server/src/structs/cloud/cloud_events/event_types/mod.rs @@ -1,7 +1,15 @@ pub mod app_connect_event; pub mod app_disconnect_event; +pub mod change_network_event; +pub mod change_network_resolve_event; +pub mod change_wallet_event; +pub mod change_wallet_resolve_event; pub mod client_connect_init_event; pub mod client_connect_resolve_event; pub mod client_disconnect_event; -pub mod new_request; -pub mod request_resolved_event; +pub mod sign_and_send_transaction_event; +pub mod sign_and_send_transaction_resolve_event; +pub mod sign_message_event; +pub mod sign_message_resolve_event; +pub mod sign_transaction_event; +pub mod sign_transaction_resolve_event; diff --git a/server/src/structs/cloud/cloud_events/event_types/new_request.rs b/server/src/structs/cloud/cloud_events/event_types/sign_and_send_transaction_event.rs similarity index 68% rename from
server/src/structs/cloud/cloud_events/event_types/new_request.rs rename to server/src/structs/cloud/cloud_events/event_types/sign_and_send_transaction_event.rs index 461463e0..98d8340e 100644 --- a/server/src/structs/cloud/cloud_events/event_types/new_request.rs +++ b/server/src/structs/cloud/cloud_events/event_types/sign_and_send_transaction_event.rs @@ -1,13 +1,11 @@ -use database::structs::request_type::RequestType; use serde::{Deserialize, Serialize}; use ts_rs::TS; #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, TS)] #[ts(export)] #[serde(rename_all = "camelCase")] -pub struct NewRequestEvent { +pub struct SignAndSendTransactionEvent { pub session_id: String, pub request_id: String, - pub request_type: RequestType, pub network: String, } diff --git a/server/src/structs/cloud/cloud_events/event_types/sign_and_send_transaction_resolve_event.rs b/server/src/structs/cloud/cloud_events/event_types/sign_and_send_transaction_resolve_event.rs new file mode 100644 index 00000000..4e9ab5f6 --- /dev/null +++ b/server/src/structs/cloud/cloud_events/event_types/sign_and_send_transaction_resolve_event.rs @@ -0,0 +1,15 @@ +use database::structs::request_status::RequestFail; +use serde::{Deserialize, Serialize}; +use ts_rs::TS; + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, TS)] +#[ts(export)] +#[serde(rename_all = "camelCase")] +pub struct SignAndSendTransactionResolveEvent { + pub session_id: String, + pub request_id: String, + pub network: String, + pub tx_hash: Option<String>, + #[ts(optional)] + pub failure_reason: Option<RequestFail>, +} diff --git a/server/src/structs/cloud/cloud_events/event_types/sign_message_event.rs b/server/src/structs/cloud/cloud_events/event_types/sign_message_event.rs new file mode 100644 index 00000000..7f05d279 --- /dev/null +++ b/server/src/structs/cloud/cloud_events/event_types/sign_message_event.rs @@ -0,0 +1,11 @@ +use serde::{Deserialize, Serialize}; +use ts_rs::TS; + +#[derive(Debug, Clone, PartialEq, Eq, Serialize,
Deserialize, TS)] +#[ts(export)] +#[serde(rename_all = "camelCase")] +pub struct SignMessageEvent { + pub session_id: String, + pub request_id: String, + pub network: String, +} diff --git a/server/src/structs/cloud/cloud_events/event_types/request_resolved_event.rs b/server/src/structs/cloud/cloud_events/event_types/sign_message_resolve_event.rs similarity index 90% rename from server/src/structs/cloud/cloud_events/event_types/request_resolved_event.rs rename to server/src/structs/cloud/cloud_events/event_types/sign_message_resolve_event.rs index fb10e5b9..465bfd83 100644 --- a/server/src/structs/cloud/cloud_events/event_types/request_resolved_event.rs +++ b/server/src/structs/cloud/cloud_events/event_types/sign_message_resolve_event.rs @@ -5,7 +5,7 @@ use ts_rs::TS; #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, TS)] #[ts(export)] #[serde(rename_all = "camelCase")] -pub struct RequestResolvedEvent { +pub struct SignMessageResolveEvent { pub session_id: String, pub request_id: String, #[ts(optional)] diff --git a/server/src/structs/cloud/cloud_events/event_types/sign_transaction_event.rs b/server/src/structs/cloud/cloud_events/event_types/sign_transaction_event.rs new file mode 100644 index 00000000..8059dd39 --- /dev/null +++ b/server/src/structs/cloud/cloud_events/event_types/sign_transaction_event.rs @@ -0,0 +1,11 @@ +use serde::{Deserialize, Serialize}; +use ts_rs::TS; + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, TS)] +#[ts(export)] +#[serde(rename_all = "camelCase")] +pub struct SignTransactionEvent { + pub session_id: String, + pub request_id: String, + pub network: String, +} diff --git a/server/src/structs/cloud/cloud_events/event_types/sign_transaction_resolve_event.rs b/server/src/structs/cloud/cloud_events/event_types/sign_transaction_resolve_event.rs new file mode 100644 index 00000000..0403bad1 --- /dev/null +++ b/server/src/structs/cloud/cloud_events/event_types/sign_transaction_resolve_event.rs @@ -0,0 +1,14 @@ 
+use database::structs::request_status::RequestFail; +use serde::{Deserialize, Serialize}; +use ts_rs::TS; + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, TS)] +#[ts(export)] +#[serde(rename_all = "camelCase")] +pub struct SignTransactionResolveEvent { + pub session_id: String, + pub request_id: String, + pub tx_hash: Option<String>, + #[ts(optional)] + pub failure_reason: Option<RequestFail>, +} diff --git a/server/src/structs/cloud/cloud_events/events.rs b/server/src/structs/cloud/cloud_events/events.rs index 7b9a8247..2e26e8e1 100644 --- a/server/src/structs/cloud/cloud_events/events.rs +++ b/server/src/structs/cloud/cloud_events/events.rs @@ -1,9 +1,14 @@ use super::event_types::{ app_connect_event::AppConnectEvent, app_disconnect_event::AppDisconnectEvent, + change_network_event::ChangeNetworkEvent, change_wallet_event::ChangeWalletEvent, client_connect_init_event::ClientConnectInitEvent, client_connect_resolve_event::ClientConnectResolveEvent, - client_disconnect_event::ClientDisconnectEvent, new_request::NewRequestEvent, - request_resolved_event::RequestResolvedEvent, + client_disconnect_event::ClientDisconnectEvent, + sign_and_send_transaction_event::SignAndSendTransactionEvent, + sign_and_send_transaction_resolve_event::SignAndSendTransactionResolveEvent, + sign_message_event::SignMessageEvent, sign_message_resolve_event::SignMessageResolveEvent, + sign_transaction_event::SignTransactionEvent, + sign_transaction_resolve_event::SignTransactionResolveEvent, }; use serde::{Deserialize, Serialize}; use ts_rs::TS; @@ -17,6 +22,12 @@ pub enum EventData { ClientConnectInit(ClientConnectInitEvent), ClientConnectResolve(ClientConnectResolveEvent), ClientDisconnect(ClientDisconnectEvent), - NewRequest(NewRequestEvent), - RequestResolved(RequestResolvedEvent), + SignMessage(SignMessageEvent), + SignMessageResolve(SignMessageResolveEvent), + SignTransaction(SignTransactionEvent), + SignTransactionResolve(SignTransactionResolveEvent), +
SignAndSendTransaction(SignAndSendTransactionEvent), + SignAndSendTransactionResolve(SignAndSendTransactionResolveEvent), + ChangeNetwork(ChangeNetworkEvent), + ChangeWallet(ChangeWalletEvent), }