Skip to content

Commit

Permalink
Merge branch 'main' into use-uniswapv3-liquidity
Browse files Browse the repository at this point in the history
  • Loading branch information
sunce86 authored Aug 5, 2022
2 parents 93205e2 + bc85274 commit 46983c5
Show file tree
Hide file tree
Showing 55 changed files with 1,621 additions and 1,126 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/deploy.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ jobs:
- uses: cowprotocol/autodeploy-action@v1
if: ${{ github.ref == 'refs/heads/main' }}
with:
pods: dfusion-v2-api-rinkeby,dfusion-v2-solver-rinkeby,dfusion-v2-api-goerli,dfusion-v2-solver-goerli,dfusion-v2-api-mainnet,dfusion-v2-solver-mainnet,dfusion-v2-api-xdai,dfusion-v2-solver-xdai,dfusion-v2-solver-shadow,dfusion-v2-alerter-mainnet
pods: dfusion-v2-autopilot-rinkeby,dfusion-v2-api-rinkeby,dfusion-v2-solver-rinkeby,dfusion-v2-autopilot-goerli,dfusion-v2-api-goerli,dfusion-v2-solver-goerli,dfusion-v2-autopilot-mainnet,dfusion-v2-api-mainnet,dfusion-v2-solver-mainnet,dfusion-v2-autopilot-xdai,dfusion-v2-api-xdai,dfusion-v2-solver-xdai,dfusion-v2-solver-shadow,dfusion-v2-alerter-mainnet
tag: ${{ secrets.AUTODEPLOY_TAG }}
url: ${{ secrets.AUTODEPLOY_URL }}
token: ${{ secrets.AUTODEPLOY_TOKEN }}
10 changes: 10 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

23 changes: 17 additions & 6 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -129,12 +129,12 @@ flyway -user=$USER -password="" -locations="filesystem:database/sql/" -url=jdbc:
In order to run the `e2e` tests you have to have a testnet running locally.
Due to the RPC calls the services issue, `Ganache` is incompatible, so we will use `hardhat`.

1. Install [npm](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm)
2. Install hardhat with `npm install --save-dev hardhat`
1. Install [npm](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm)
2. Install hardhat with `npm install --save-dev hardhat`
3. Create `hardhat.config.js` in the directory you installed `hardhat` in with following content:
```javascript
module.exports = {
networks: {
networks: {
hardhat: {
initialBaseFeePerGas: 0,
accounts: {
Expand All @@ -157,6 +157,20 @@ Note that the `node-url` is sensitive data. The `orderbook` and `solver` executa

To avoid confusion during your tests, always double check that the token and account addresses you use actually correspond to the network of the `node-url` you are running the executables with.

### Autopilot

To see all supported command line arguments run `cargo run --bin autopilot -- --help`.

Run an `autopilot` with:

```sh
cargo run --bin autopilot -- \
--skip-event-sync \
--node-url <YOUR_NODE_URL>
```

`--skip-event-sync` will skip some work to speed up the initialization process.

### Orderbook

To see all supported command line arguments run `cargo run --bin orderbook -- --help`.
Expand All @@ -166,12 +180,9 @@ Run an `orderbook` on `localhost:8080` with:
```sh
cargo run --bin orderbook -- \
--skip-trace-api true \
--skip-event-sync \
--node-url <YOUR_NODE_URL>
```

`--skip-event-sync` will skip some work to speed up the initialization process.

`--skip-trace-api true` will make the orderbook compatible with more ethereum nodes. If your node supports `trace_callMany` you can drop this argument.

Note: Current version of the code does not compile under Windows OS. Context and workaround are [here](https://github.com/cowprotocol/services/issues/226).
Expand Down
6 changes: 6 additions & 0 deletions crates/autopilot/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -16,10 +16,16 @@ path = "src/main.rs"
anyhow = "1.0"
async-trait = "0.1"
clap = { version = "3.1", features = ["derive", "env"] }
contracts = { path = "../contracts" }
database = { path = "../database" }
ethcontract = { version = "0.17.0", default-features = false }
global-metrics = { path = "../global-metrics" }
number_conversions = { path = "../number_conversions" }
prometheus = "0.13"
prometheus-metric-storage = { git = "https://github.com/cowprotocol/prometheus-metric-storage" , tag = "v0.4.0" }
shared= { path = "../shared" }
sqlx = { version = "0.6", default-features = false, features = ["runtime-tokio-native-tls"] }
tokio = { version = "1.15", features = ["macros", "rt-multi-thread", "sync", "time", "signal"] }
tracing = "0.1"
url = "2.2"
web3 = { version = "0.18", default-features = false }
42 changes: 41 additions & 1 deletion crates/autopilot/src/arguments.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
use std::net::SocketAddr;
use shared::arguments::duration_from_seconds;
use std::{net::SocketAddr, time::Duration};
use tracing::level_filters::LevelFilter;
use url::Url;

#[derive(clap::Parser)]
pub struct Arguments {
Expand All @@ -11,13 +13,51 @@ pub struct Arguments {

#[clap(long, env, default_value = "0.0.0.0:9589")]
pub metrics_address: SocketAddr,

/// Url of the Postgres database. By default connects to locally running postgres.
#[clap(long, env, default_value = "postgresql://")]
pub db_url: Url,

/// The Ethereum node URL to connect to.
#[clap(long, env, default_value = "http://localhost:8545")]
pub node_url: Url,

/// Timeout in seconds for all http requests.
#[clap(
long,
default_value = "10",
parse(try_from_str = duration_from_seconds),
)]
pub http_timeout: Duration,

/// Skip syncing past events (useful for local deployments)
#[clap(long)]
pub skip_event_sync: bool,

/// How often in seconds we poll the node to check if the current block has changed.
#[clap(
long,
env,
default_value = "5",
parse(try_from_str = duration_from_seconds),
)]
pub block_stream_poll_interval_seconds: Duration,
}

impl std::fmt::Display for Arguments {
    /// Renders every argument for startup logging. `db_url` is printed as
    /// `SECRET` because the connection string may embed credentials.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "log_filter: {}\n\
             log_stderr_threshold: {}\n\
             metrics_address: {}\n\
             db_url: SECRET\n\
             node_url: {}\n\
             http_timeout: {:?}\n\
             skip_event_sync: {}\n\
             block_stream_poll_interval_seconds: {:?}\n",
            self.log_filter,
            self.log_stderr_threshold,
            self.metrics_address,
            self.node_url,
            self.http_timeout,
            self.skip_event_sync,
            self.block_stream_poll_interval_seconds,
        )
    }
}
76 changes: 76 additions & 0 deletions crates/autopilot/src/database.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,76 @@
mod events;
mod quotes;

use sqlx::{PgConnection, PgPool};
use std::time::Duration;

/// Thin wrapper around a `sqlx` Postgres connection pool.
#[derive(Clone)]
pub struct Postgres(pub PgPool);

impl Postgres {
    /// Connects to the Postgres database at `url`.
    pub async fn new(url: &str) -> sqlx::Result<Self> {
        Ok(Self(PgPool::connect(url).await?))
    }

    /// Refreshes the per-table row count gauge for every table known to the
    /// `database` crate.
    pub async fn update_table_rows_metric(&self) -> sqlx::Result<()> {
        let metrics = Metrics::get();
        // Acquire a single connection up front instead of checking one out of
        // the pool again on every loop iteration.
        let mut ex = self.0.acquire().await?;
        for &table in database::ALL_TABLES {
            let count = count_rows_in_table(&mut ex, table).await?;
            metrics.table_rows.with_label_values(&[table]).set(count);
        }
        Ok(())
    }
}

/// Counts the rows currently stored in `table`.
///
/// NOTE(review): the table name is spliced into the SQL string directly, so
/// callers must only pass trusted identifiers (e.g. entries of
/// `database::ALL_TABLES`), never user input.
async fn count_rows_in_table(ex: &mut PgConnection, table: &str) -> sqlx::Result<i64> {
    let statement = format!("SELECT COUNT(*) FROM {};", table);
    let count = sqlx::query_scalar(&statement).fetch_one(ex).await?;
    Ok(count)
}

/// Prometheus metrics owned by the autopilot database layer.
#[derive(prometheus_metric_storage::MetricStorage)]
struct Metrics {
    /// Number of rows in db tables.
    #[metric(labels("table"))]
    table_rows: prometheus::IntGaugeVec,

    /// Timing of db queries.
    #[metric(name = "autopilot_database_queries", labels("type"))]
    database_queries: prometheus::HistogramVec,
}

impl Metrics {
    /// Returns the process-wide metrics instance from the global registry.
    ///
    /// Panics if registration fails, which would indicate a programming
    /// error (e.g. a duplicate metric name) rather than a runtime condition.
    fn get() -> &'static Self {
        Self::instance(global_metrics::get_metric_storage_registry()).unwrap()
    }
}

pub async fn database_metrics(db: Postgres) -> ! {
loop {
if let Err(err) = db.update_table_rows_metric().await {
tracing::error!(?err, "failed to update table rows metric");
}
tokio::time::sleep(Duration::from_secs(60)).await;
}
}

#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    #[ignore] // requires a locally running Postgres instance; run manually
    async fn postgres_count_rows_in_table_() {
        let db = Postgres::new("postgresql://").await.unwrap();
        // Work inside a transaction that is never committed so the database
        // is left unchanged when the test ends.
        let mut ex = db.0.begin().await.unwrap();
        database::clear_DANGER_(&mut ex).await.unwrap();

        // Freshly cleared table must report zero rows.
        let count = count_rows_in_table(&mut ex, "orders").await.unwrap();
        assert_eq!(count, 0);
        database::orders::insert_order(&mut ex, &Default::default())
            .await
            .unwrap();
        // Exactly one row after a single insert.
        let count = count_rows_in_table(&mut ex, "orders").await.unwrap();
        assert_eq!(count, 1);
    }
}
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
use super::Postgres;
use crate::conversions::u256_to_big_decimal;
use anyhow::{anyhow, Context, Result};
use contracts::gpv2_settlement::{
event_data::{
Expand All @@ -14,6 +13,7 @@ use database::{
OrderUid,
};
use ethcontract::{Event as EthContractEvent, EventMetadata};
use number_conversions::u256_to_big_decimal;
use shared::event_handling::EventStoring;
use std::convert::TryInto;

Expand Down Expand Up @@ -47,7 +47,7 @@ impl EventStoring<ContractEvent> for Postgres {
.with_label_values(&["last_event_block"])
.start_timer();

let mut con = self.pool.acquire().await?;
let mut con = self.0.acquire().await?;
let block_number = database::events::last_block(&mut con)
.await
.context("block_number_of_most_recent_event failed")?;
Expand All @@ -61,7 +61,7 @@ impl EventStoring<ContractEvent> for Postgres {
.start_timer();

let events = contract_to_db_events(events)?;
let mut transaction = self.pool.begin().await?;
let mut transaction = self.0.begin().await?;
database::events::append(&mut transaction, &events)
.await
.context("append_events")?;
Expand All @@ -80,7 +80,7 @@ impl EventStoring<ContractEvent> for Postgres {
.start_timer();

let events = contract_to_db_events(events)?;
let mut transaction = self.pool.begin().await?;
let mut transaction = self.0.begin().await?;
database::events::delete(&mut transaction, range.start().to_u64() as i64)
.await
.context("delete_events failed")?;
Expand Down
26 changes: 26 additions & 0 deletions crates/autopilot/src/database/quotes.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
use super::Postgres;
use anyhow::{Context, Result};
use shared::maintenance::Maintaining;
use sqlx::types::chrono::{DateTime, Utc};

impl Postgres {
    /// Removes quotes whose expiry has passed relative to `max_expiry`; the
    /// exact comparison is performed by `database::quotes::remove_expired_quotes`.
    pub async fn remove_expired_quotes(&self, max_expiry: DateTime<Utc>) -> Result<()> {
        // Record the query duration in the shared histogram.
        let _timer = super::Metrics::get()
            .database_queries
            .with_label_values(&["remove_expired_quotes"])
            .start_timer();

        let mut connection = self.0.acquire().await?;
        database::quotes::remove_expired_quotes(&mut connection, max_expiry).await?;
        Ok(())
    }
}

#[async_trait::async_trait]
impl Maintaining for Postgres {
    /// Periodic maintenance hook: drops all quotes that expired before now.
    async fn run_maintenance(&self) -> Result<()> {
        self.remove_expired_quotes(Utc::now())
            .await
            // Fixed copy-pasted context ("fee measurement maintenance error")
            // that described an unrelated task.
            .context("remove expired quotes failed")
    }
}
File renamed without changes.
Loading

0 comments on commit 46983c5

Please sign in to comment.