diff --git a/Cargo.lock b/Cargo.lock index d3a31050b..520f6e7d5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -56,6 +56,16 @@ dependencies = [ "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "async-trait" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "atty" version = "0.2.13" @@ -98,6 +108,11 @@ dependencies = [ "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "base64" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "bitflags" version = "1.2.1" @@ -133,6 +148,15 @@ dependencies = [ "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "buf_redux" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "safemem 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "bumpalo" version = "2.6.0" @@ -159,6 +183,14 @@ dependencies = [ "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "bytes" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "c2-chacha" version = "0.2.3" @@ -254,7 +286,7 @@ dependencies = [ "failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "idna 0.1.5 
(registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "publicsuffix 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "publicsuffix 1.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", @@ -428,7 +460,6 @@ name = "error-chain" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "backtrace 0.3.40 (registry+https://github.com/rust-lang/crates.io-index)", "version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -459,7 +490,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "flate2" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", @@ -510,6 +541,34 @@ name = "futures" version = "0.1.29" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "futures" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures-channel 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-executor 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-io 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-sink 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-task 0.3.1 
(registry+https://github.com/rust-lang/crates.io-index)", + "futures-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "futures-channel" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures-core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-sink 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "futures-core" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "futures-cpupool" version = "0.1.8" @@ -519,13 +578,77 @@ dependencies = [ "num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "futures-executor" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures-core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-task 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "futures-io" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "futures-macro" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro-hack 0.5.11 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "futures-retry" -version = "0.3.3" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+dependencies = [ + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-project 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "futures-sink" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "futures-task" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "futures-util" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-channel 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-io 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-macro 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-sink 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-task 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro-hack 0.5.11 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro-nested 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "futures01" +version = "0.1.29" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.11 
(registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -563,6 +686,24 @@ dependencies = [ "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "h2" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-sink 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "indexmap 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-util 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "hdrhistogram" version = "6.3.4" @@ -574,26 +715,25 @@ dependencies = [ [[package]] name = "headers" -version = "0.2.3" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "headers-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.1.19 
(registry+https://github.com/rust-lang/crates.io-index)", - "mime 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "headers-core 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", "sha-1 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "headers-core" -version = "0.1.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.1.19 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -619,6 +759,16 @@ dependencies = [ "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "http" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "http-body" version = "0.1.0" @@ -630,6 +780,15 @@ dependencies = [ "tokio-buf 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "http-body" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.0 
(registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "httparse" version = "1.3.4" @@ -664,6 +823,29 @@ dependencies = [ "want 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "hyper" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-channel 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "h2 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "http-body 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-project 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", + "tower-service 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "want 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "hyper-rustls" version = "0.17.1" @@ -692,6 +874,17 @@ dependencies = [ "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "hyper-tls" +version = "0.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "hyper 0.13.1 (registry+https://github.com/rust-lang/crates.io-index)", + "native-tls 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-tls 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "idna" version = "0.1.5" @@ -718,11 +911,11 @@ version = "0.3.0" dependencies = [ "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", "http 0.1.19 (registry+https://github.com/rust-lang/crates.io-index)", - "reqwest 0.9.22 (registry+https://github.com/rust-lang/crates.io-index)", + "reqwest 0.9.24 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", "thiserror 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", - "tungstenite 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tungstenite 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)", "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -733,10 +926,11 @@ dependencies = [ "approx 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", "chrono 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", "config 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 
(registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "interledger 0.6.0", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -748,20 +942,20 @@ dependencies = [ "num-bigint 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "redis 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)", - "reqwest 0.9.22 (registry+https://github.com/rust-lang/crates.io-index)", + "redis 0.13.1-alpha.0 (git+https://github.com/mitsuhiko/redis-rs)", + "reqwest 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", "secrecy 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-retry 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "tracing 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "tracing-futures 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tracing-futures 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "tracing-subscriber 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "uuid 0.8.1 
(registry+https://github.com/rust-lang/crates.io-index)", - "warp 0.1.20 (registry+https://github.com/rust-lang/crates.io-index)", + "warp 0.2.0-alpha.0 (git+https://github.com/seanmonstar/warp.git)", "yup-oauth2 3.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -814,10 +1008,12 @@ dependencies = [ name = "interledger-api" version = "0.3.0" dependencies = [ + "async-trait 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-retry 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.1.19 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-retry 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "interledger-btp 0.4.0", "interledger-ccp 0.3.0", "interledger-http 0.4.0", @@ -831,24 +1027,27 @@ dependencies = [ "interledger-stream 0.4.0", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "reqwest 0.9.22 (registry+https://github.com/rust-lang/crates.io-index)", + "mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", + "reqwest 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", "secrecy 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", "serde_path_to_error 0.1.2 
(registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", - "warp 0.1.20 (registry+https://github.com/rust-lang/crates.io-index)", + "warp 0.2.0-alpha.0 (git+https://github.com/seanmonstar/warp.git)", ] [[package]] name = "interledger-btp" version = "0.4.0" dependencies = [ + "async-trait 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "chrono 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "interledger-packet 0.4.0", "interledger-service 0.4.0", @@ -857,27 +1056,27 @@ dependencies = [ "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", "num-bigint 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-project 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "secrecy 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", - "stream-cancel 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.8 
(registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-tungstenite 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tungstenite 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", + "stream-cancel 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-tungstenite 0.10.0 (git+https://github.com/snapview/tokio-tungstenite)", + "tungstenite 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)", "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", - "warp 0.1.20 (registry+https://github.com/rust-lang/crates.io-index)", + "warp 0.2.0-alpha.0 (git+https://github.com/seanmonstar/warp.git)", ] [[package]] name = "interledger-ccp" version = "0.3.0" dependencies = [ + "async-trait 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "interledger-packet 0.4.0", "interledger-service 0.4.0", @@ -886,8 +1085,8 @@ dependencies = [ "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", 
"tokio-executor 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -895,37 +1094,43 @@ dependencies = [ name = "interledger-http" version = "0.4.0" dependencies = [ + "async-trait 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", "chrono 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.1.19 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "interledger-packet 0.4.0", "interledger-service 0.4.0", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "mime 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", + "mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "reqwest 0.9.22 (registry+https://github.com/rust-lang/crates.io-index)", + "reqwest 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", "secrecy 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", "serde_path_to_error 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.9 
(registry+https://github.com/rust-lang/crates.io-index)", "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", - "warp 0.1.20 (registry+https://github.com/rust-lang/crates.io-index)", + "warp 0.2.0-alpha.0 (git+https://github.com/seanmonstar/warp.git)", ] [[package]] name = "interledger-ildcp" version = "0.4.0" dependencies = [ + "async-trait 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "interledger-packet 0.4.0", "interledger-service 0.4.0", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", + "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -934,6 +1139,7 @@ version = "0.4.0" dependencies = [ "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", "chrono 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", "criterion 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -948,12 +1154,14 @@ dependencies = [ name = "interledger-router" version = "0.4.0" dependencies = [ + "async-trait 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", 
"futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "interledger-packet 0.4.0", "interledger-service 0.4.0", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -961,14 +1169,15 @@ dependencies = [ name = "interledger-service" version = "0.4.0" dependencies = [ + "async-trait 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "interledger-packet 0.4.0", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", - "tracing-futures 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tracing-futures 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "unicase 2.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "unicode-normalization 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -978,22 +1187,22 @@ dependencies = [ name = "interledger-service-util" version = "0.4.0" dependencies = [ + "async-trait 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.3.2 
(registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "chrono 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "interledger-packet 0.4.0", "interledger-service 0.4.0", "interledger-settlement 0.3.0", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "reqwest 0.9.22 (registry+https://github.com/rust-lang/crates.io-index)", + "reqwest 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", "secrecy 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1001,11 +1210,13 @@ dependencies = [ name = "interledger-settlement" version = "0.3.0" dependencies = [ + "async-trait 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "http 
0.1.19 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.12.35 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.13.1 (registry+https://github.com/rust-lang/crates.io-index)", "interledger-http 0.4.0", "interledger-packet 0.4.0", "interledger-service 0.4.0", @@ -1017,16 +1228,16 @@ dependencies = [ "num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "redis 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)", - "reqwest 0.9.22 (registry+https://github.com/rust-lang/crates.io-index)", + "redis 0.13.1-alpha.0 (git+https://github.com/mitsuhiko/redis-rs)", + "reqwest 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-retry 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", - "warp 0.1.20 (registry+https://github.com/rust-lang/crates.io-index)", + "warp 0.2.0-alpha.0 (git+https://github.com/seanmonstar/warp.git)", ] [[package]] @@ -1036,25 +1247,28 @@ dependencies = [ "base64 0.10.1 
(registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.12.35 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.13.1 (registry+https://github.com/rust-lang/crates.io-index)", "interledger-packet 0.4.0", "interledger-service 0.4.0", "interledger-stream 0.4.0", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "reqwest 0.9.22 (registry+https://github.com/rust-lang/crates.io-index)", + "reqwest 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "interledger-store" version = "0.4.0" dependencies = [ + "async-trait 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.1.19 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures01 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "interledger-api 0.3.0", "interledger-btp 0.4.0", "interledger-ccp 0.3.0", @@ -1072,14 +1286,13 @@ dependencies = [ 
"os_type 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "redis 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)", + "redis 0.13.1-alpha.0 (git+https://github.com/mitsuhiko/redis-rs)", "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", + "runtime 0.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "secrecy 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "zeroize 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1089,13 +1302,14 @@ dependencies = [ name = "interledger-stream" version = "0.4.0" dependencies = [ + "async-trait 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "chrono 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", "csv 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "failure 0.1.6 
(registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "interledger-ildcp 0.4.0", "interledger-packet 0.4.0", @@ -1106,7 +1320,7 @@ dependencies = [ "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1270,15 +1484,34 @@ dependencies = [ [[package]] name = "mime" -version = "0.3.14" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "mime" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "mime_guess" +version = "1.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "phf 0.7.24 (registry+https://github.com/rust-lang/crates.io-index)", + "phf_codegen 0.7.24 (registry+https://github.com/rust-lang/crates.io-index)", + "unicase 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "mime_guess" version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "mime 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", + 
"mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", "unicase 2.5.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1292,9 +1525,10 @@ dependencies = [ [[package]] name = "mio" -version = "0.6.19" +version = "0.6.21" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1314,7 +1548,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.19 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1344,6 +1578,23 @@ dependencies = [ "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "multipart" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "buf_redux 0.8.4 (registry+https://github.com/rust-lang/crates.io-index)", + "httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "mime_guess 1.8.7 (registry+https://github.com/rust-lang/crates.io-index)", + "quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", + 
"safemem 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "twoway 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "native-tls" version = "0.2.3" @@ -1505,6 +1756,69 @@ name = "percent-encoding" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "phf" +version = "0.7.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "phf_shared 0.7.24 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "phf_codegen" +version = "0.7.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "phf_generator 0.7.24 (registry+https://github.com/rust-lang/crates.io-index)", + "phf_shared 0.7.24 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "phf_generator" +version = "0.7.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "phf_shared 0.7.24 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "phf_shared" +version = "0.7.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "siphasher 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "unicase 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "pin-project" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "pin-project-internal 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "pin-project-internal" +version = "0.4.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "pin-project-lite" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "pin-utils" +version = "0.1.0-alpha.4" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "pkg-config" version = "0.3.16" @@ -1515,6 +1829,21 @@ name = "ppv-lite86" version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "proc-macro-hack" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "proc-macro-nested" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "proc-macro2" version = "1.0.6" @@ -1525,7 +1854,7 @@ dependencies = [ [[package]] name = "publicsuffix" -version = "1.5.3" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "error-chain 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1751,22 +2080,20 @@ dependencies = [ [[package]] name = "redis" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" +version = "0.13.1-alpha.0" +source = "git+https://github.com/mitsuhiko/redis-rs#df4cbaffc044028a1c6afc1bc248a013a83f3cc7" dependencies = [ - "bytes 0.4.12 
(registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", "combine 3.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "dtoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-executor 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", "percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-project-lite 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "sha1 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-sync 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-tcp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-uds 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-util 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1811,7 +2138,7 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.9.22" +version = "0.9.24" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1819,13 +2146,13 @@ dependencies = [ "cookie 
0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", "cookie_store 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "encoding_rs 0.8.20 (registry+https://github.com/rust-lang/crates.io-index)", - "flate2 1.0.12 (registry+https://github.com/rust-lang/crates.io-index)", + "flate2 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "http 0.1.19 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.12.35 (registry+https://github.com/rust-lang/crates.io-index)", "hyper-tls 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "mime 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", + "mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", "mime_guess 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "native-tls 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1842,6 +2169,41 @@ dependencies = [ "winreg 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "reqwest" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "encoding_rs 0.8.20 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + 
"http-body 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.13.1 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper-tls 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "js-sys 0.3.30 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", + "mime_guess 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "native-tls 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-project-lite 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_urlencoded 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-tls 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen 0.2.53 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-futures 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "web-sys 0.3.30 (registry+https://github.com/rust-lang/crates.io-index)", + "winreg 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "ring" version = "0.16.9" @@ -1856,6 +2218,11 @@ dependencies = [ "winapi 0.3.8 
(registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "runtime" +version = "0.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "rustc-demangle" version = "0.1.16" @@ -1886,6 +2253,11 @@ name = "ryu" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "safemem" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "same-file" version = "1.0.5" @@ -2046,6 +2418,11 @@ name = "sha1" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "siphasher" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "sized-chunks" version = "0.1.3" @@ -2081,10 +2458,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "stream-cancel" -version = "0.4.4" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-project 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2189,7 +2569,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.19 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", 
"num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-current-thread 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2205,6 +2585,26 @@ dependencies = [ "tokio-uds 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "tokio" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", + "mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-project-lite 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-macros 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "tokio-buf" version = "0.1.1" @@ -2234,17 +2634,6 @@ dependencies = [ "tokio-executor 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "tokio-dns-unofficial" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "futures 0.1.29 
(registry+https://github.com/rust-lang/crates.io-index)", - "futures-cpupool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "tokio-executor" version = "0.1.8" @@ -2274,6 +2663,15 @@ dependencies = [ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "tokio-macros" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "tokio-reactor" version = "0.1.10" @@ -2283,7 +2681,7 @@ dependencies = [ "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.19 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2332,7 +2730,7 @@ dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.19 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", 
"tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-reactor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2366,27 +2764,25 @@ dependencies = [ [[package]] name = "tokio-tls" -version = "0.2.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "native-tls 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-tungstenite" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" +version = "0.10.0" +source = "git+https://github.com/snapview/tokio-tungstenite#308d9680c0e59dd1e8651659a775c05df937934e" dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "native-tls 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-dns-unofficial 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-tcp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-tls 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tungstenite 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-project 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.9 
(registry+https://github.com/rust-lang/crates.io-index)", + "tokio-tls 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tungstenite 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2397,7 +2793,7 @@ dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.19 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-reactor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2413,13 +2809,26 @@ dependencies = [ "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.19 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", "mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-reactor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "tokio-util" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-core 0.3.1 
(registry+https://github.com/rust-lang/crates.io-index)", + "futures-sink 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-project-lite 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "toml" version = "0.4.10" @@ -2428,6 +2837,11 @@ dependencies = [ "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "tower-service" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "tracing" version = "0.1.10" @@ -2460,10 +2874,12 @@ dependencies = [ [[package]] name = "tracing-futures" -version = "0.1.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-task 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-project 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", "tracing 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2509,10 +2925,10 @@ dependencies = [ [[package]] name = "tungstenite" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 
(registry+https://github.com/rust-lang/crates.io-index)", "http 0.1.19 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2520,17 +2936,33 @@ dependencies = [ "input_buffer 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "native-tls 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "sha-1 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "utf-8 0.7.5 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "twoway" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "typenum" version = "1.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "unicase" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "unicase" version = "2.5.1" @@ -2672,26 +3104,37 @@ dependencies = [ ] [[package]] -name = "warp" -version = "0.1.20" +name = "want" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "headers 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.1.19 
(registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.12.35 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "mime 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", + "try-lock 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "warp" +version = "0.2.0-alpha.0" +source = "git+https://github.com/seanmonstar/warp.git#71aedeab4a935efa65866d6e635ead3a8ac74d94" +dependencies = [ + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "headers 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.13.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", "mime_guess 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "multipart 0.16.1 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-project 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "scoped-tls 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", "serde_urlencoded 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - 
"tokio-threadpool 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", - "tungstenite 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", + "tower-service 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tungstenite 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)", "urlencoding 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2706,6 +3149,8 @@ version = "0.2.53" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", "wasm-bindgen-macro 0.2.53 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2723,6 +3168,17 @@ dependencies = [ "wasm-bindgen-shared 0.2.53 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "js-sys 0.3.30 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen 0.2.53 (registry+https://github.com/rust-lang/crates.io-index)", + "web-sys 0.3.30 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "wasm-bindgen-macro" version = "0.2.53" @@ -2903,19 +3359,23 @@ dependencies = [ "checksum arrayvec 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)" = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9" "checksum ascii 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)" = 
"eab1c04a571841102f5345a8fc0f6bb3d31c315dec879b5c6e42e40ce7ffa34e" "checksum assert-json-diff 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9881d306dee755eccf052d652b774a6b2861e86b4772f555262130e58e4f81d2" +"checksum async-trait 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)" = "c8df72488e87761e772f14ae0c2480396810e51b2c2ade912f97f0f7e5b95e3c" "checksum atty 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)" = "1803c647a3ec87095e7ae7acfca019e98de5ec9a7d01343f611cf3152ed71a90" "checksum autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2" "checksum backtrace 0.3.40 (registry+https://github.com/rust-lang/crates.io-index)" = "924c76597f0d9ca25d762c25a4d369d51267536465dc5064bdf0eb073ed477ea" "checksum backtrace-sys 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)" = "5d6575f128516de27e3ce99689419835fce9643a9b215a14d2b5b685be018491" "checksum base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e" +"checksum base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" "checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" "checksum block-buffer 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" "checksum block-padding 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "6d4dc3af3ee2e12f3e5d224e5e1e3d73668abbeb69e566d361f7d5563a4fdf09" "checksum bstr 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8d6c2c5b58ab920a4f5aeaaca34b4488074e8cc7596af94e6f8c6ff247c60245" 
+"checksum buf_redux 0.8.4 (registry+https://github.com/rust-lang/crates.io-index)" = "b953a6887648bb07a535631f2bc00fbdb2a2216f135552cb3f534ed136b9c07f" "checksum bumpalo 2.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ad807f2fc2bf185eeb98ff3a901bd46dc5ad58163d0fa4577ba0d25674d71708" "checksum byte-tools 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" "checksum byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a7c3dd8985a7111efc5c80b44e23ecdd8c007de8ade3b96595387e812b957cf5" "checksum bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)" = "206fdffcfa2df7cbe15601ef46c813fce0965eb3286db6b56c583b814b51c81c" +"checksum bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "10004c15deb332055f7a4a208190aed362cf9a7c2f6ab70a305fba50e1105f38" "checksum c2-chacha 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "214238caa1bf3a496ec3392968969cab8549f96ff30652c9e56885329315f6bb" "checksum cast 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "926013f2860c46252efceabb19f4a6b308197505082c609025aa6706c011d427" "checksum cc 1.0.46 (registry+https://github.com/rust-lang/crates.io-index)" = "0213d356d3c4ea2c18c40b037c3be23cd639825c18f25ee670ac7813beeef99c" @@ -2949,7 +3409,7 @@ dependencies = [ "checksum failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "f8273f13c977665c5db7eb2b99ae520952fe5ac831ae4cd09d80c4c7042b5ed9" "checksum failure_derive 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0bc225b78e0391e4b8683440bf2e63c2deeeb2ce5189eab46e2b68c6d3725d08" "checksum fake-simd 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" -"checksum flate2 1.0.12 
(registry+https://github.com/rust-lang/crates.io-index)" = "ad3c5233c9a940c8719031b423d7e6c16af66e031cb0420b0896f5245bf181d3" +"checksum flate2 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)" = "6bd6d6f4752952feb71363cffc9ebac9411b75b87c6ab6058c40c8900cf43c0f" "checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3" "checksum foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" "checksum foreign-types-shared 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" @@ -2957,22 +3417,37 @@ dependencies = [ "checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" "checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" "checksum futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)" = "1b980f2816d6ee8673b6517b52cb0e808a180efc92e5c19d02cdda79066703ef" +"checksum futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b6f16056ecbb57525ff698bb955162d0cd03bee84e6241c27ff75c08d8ca5987" +"checksum futures-channel 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "fcae98ca17d102fd8a3603727b9259fcf7fa4239b603d2142926189bc8999b86" +"checksum futures-core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "79564c427afefab1dfb3298535b21eda083ef7935b4f0ecbfcb121f0aec10866" "checksum futures-cpupool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "ab90cde24b3319636588d0c35fe03b1333857621051837ed769faefb4c2162e4" -"checksum futures-retry 
0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d79b1e875b3ab07ef294b5bd43ae509d0ed1be990389003ea5fcdecf2e62ec96" +"checksum futures-executor 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1e274736563f686a837a0568b478bdabfeaec2dca794b5649b04e2fe1627c231" +"checksum futures-io 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e676577d229e70952ab25f3945795ba5b16d63ca794ca9d2c860e5595d20b5ff" +"checksum futures-macro 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "52e7c56c15537adb4f76d0b7a76ad131cb4d2f4f32d3b0bcabcbe1c7c5e87764" +"checksum futures-retry 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cc9a95ec273db7b9d07559e25f9cd75074fee2f437f1e502b0c3b610d129d554" +"checksum futures-sink 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "171be33efae63c2d59e6dbba34186fe0d6394fb378069a76dfd80fdcffd43c16" +"checksum futures-task 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0bae52d6b29cf440e298856fec3965ee6fa71b06aa7495178615953fd669e5f9" +"checksum futures-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c0d66274fb76985d3c62c886d1da7ac4c0903a8c9f754e8fe0f35a6a6cc39e76" +"checksum futures01 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)" = "7ef8cbbf52909170053540c6c05a62433ddb60662dabee714e2a882caa864f22" "checksum generic-array 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" "checksum getrandom 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "e7db7ca94ed4cd01190ceee0d8a8052f08a247aa1b469a7f68c6a3b71afcf407" "checksum h2 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)" = "a5b34c246847f938a410a03c5458c7fee2274436675e76d8b903c08efc29c462" +"checksum h2 0.2.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "b9433d71e471c1736fd5a61b671fc0b148d7a2992f666c958d03cd8feb3b88d1" "checksum hdrhistogram 6.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "08d331ebcdbca4acbefe5da8c3299b2e246f198a8294cc5163354e743398b89d" -"checksum headers 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "882ca7d8722f33ce2c2db44f95425d6267ed59ca96ce02acbe58320054ceb642" -"checksum headers-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "967131279aaa9f7c20c7205b45a391638a83ab118e6509b2d0ccbe08de044237" +"checksum headers 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c9836ffd533e1fb207cfdb2e357079addbd17ef5c68eea5afe2eece40555b905" +"checksum headers-core 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" "checksum heck 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" "checksum hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "023b39be39e3a2da62a94feb433e91e8bcd37676fbc8bea371daf52b7a769a3e" "checksum http 0.1.19 (registry+https://github.com/rust-lang/crates.io-index)" = "d7e06e336150b178206af098a055e3621e8336027e2b4d126bda0bc64824baaf" +"checksum http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b708cc7f06493459026f53b9a61a7a121a5d1ec6238dee58ea4941132b30156b" "checksum http-body 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6741c859c1b2463a423a1dbce98d418e6c3c3fc720fb0d45528657320920292d" +"checksum http-body 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" "checksum httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = 
"cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" "checksum hyper 0.12.35 (registry+https://github.com/rust-lang/crates.io-index)" = "9dbe6ed1438e1f8ad955a4701e9a944938e9519f6888d12d8558b645e247d5f6" +"checksum hyper 0.13.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8bf49cfb32edee45d890537d9057d1b02ed55f53b7b6a30bae83a38c9231749e" "checksum hyper-rustls 0.17.1 (registry+https://github.com/rust-lang/crates.io-index)" = "719d85c7df4a7f309a77d145340a063ea929dcb2e025bae46a80345cffec2952" "checksum hyper-tls 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3a800d6aa50af4b5850b2b0f659625ce9504df908e9733b635720483be26174f" +"checksum hyper-tls 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ab58a31960b2f78c5c24cf255216789863754438a1e48849a956846f899e762e" "checksum idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "38f09e0f0b1fb55fdee1f17470ad800da77af5186a1a76c026b679358b7e844e" "checksum idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" "checksum im 12.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "de38d1511a0ce7677538acb1e31b5df605147c458e061b2cdb89858afb1cd182" @@ -2998,13 +3473,16 @@ dependencies = [ "checksum metrics-observer-prometheus 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "90707830292a6223c046773caaa7c1121aa71dca75d156a4672cba8654eb8d2d" "checksum metrics-runtime 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2863204c75646de78a93d961562cebca30608508250da9f98bd0754c6e7a5b4" "checksum metrics-util 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b3024c8bfd2e14b14ca48c38a47d01472401e46492d1b9a45c2b68821d6ede88" -"checksum mime 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)" = 
"dd1d63acd1b78403cc0c325605908475dd9b9a3acbf65ed8bcab97e27014afcf" +"checksum mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ba626b8a6de5da682e1caa06bdb42a335aee5a84db8e5046a3e8ab17ba0a3ae0" +"checksum mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)" = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +"checksum mime_guess 1.8.7 (registry+https://github.com/rust-lang/crates.io-index)" = "0d977de9ee851a0b16e932979515c0f3da82403183879811bc97d50bd9cc50f7" "checksum mime_guess 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1a0ed03949aef72dbdf3116a383d7b38b4768e6f960528cd6a6044aa9ed68599" "checksum miniz_oxide 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6f3f74f726ae935c3f514300cc6773a0c9492abc5e972d42ba0c0ebb88757625" -"checksum mio 0.6.19 (registry+https://github.com/rust-lang/crates.io-index)" = "83f51996a3ed004ef184e16818edc51fadffe8e7ca68be67f9dee67d84d0ff23" +"checksum mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)" = "302dec22bcf6bae6dfb69c647187f4b4d0fb6f535521f7bc022430ce8e12008f" "checksum mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)" = "966257a94e196b11bb43aca423754d87429960a768de9414f3691d6957abf125" "checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" "checksum mockito 0.21.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aee38c301104cc75a6628a4360be706fbdf84290c15a120b7e54eca5881c3450" +"checksum multipart 0.16.1 (registry+https://github.com/rust-lang/crates.io-index)" = "136eed74cadb9edd2651ffba732b19a450316b680e4f48d6c79e905799e19d01" "checksum native-tls 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4b2df1a4c22fd44a62147fd8f13dd0f95c9d8ca7b2610299b2a2f9cf8964274e" "checksum net2 0.2.33 
(registry+https://github.com/rust-lang/crates.io-index)" = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88" "checksum nodrop 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" @@ -3023,10 +3501,20 @@ dependencies = [ "checksum parking_lot_core 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b" "checksum percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" "checksum percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +"checksum phf 0.7.24 (registry+https://github.com/rust-lang/crates.io-index)" = "b3da44b85f8e8dfaec21adae67f95d93244b2ecf6ad2a692320598dcc8e6dd18" +"checksum phf_codegen 0.7.24 (registry+https://github.com/rust-lang/crates.io-index)" = "b03e85129e324ad4166b06b2c7491ae27fe3ec353af72e72cd1654c7225d517e" +"checksum phf_generator 0.7.24 (registry+https://github.com/rust-lang/crates.io-index)" = "09364cc93c159b8b06b1f4dd8a4398984503483891b0c26b867cf431fb132662" +"checksum phf_shared 0.7.24 (registry+https://github.com/rust-lang/crates.io-index)" = "234f71a15de2288bcb7e3b6515828d22af7ec8598ee6d24c3b526fa0a80b67a0" +"checksum pin-project 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "94b90146c7216e4cb534069fb91366de4ea0ea353105ee45ed297e2d1619e469" +"checksum pin-project-internal 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "44ca92f893f0656d3cba8158dd0f2b99b94de256a4a54e870bd6922fcc6c8355" +"checksum pin-project-lite 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f0af6cbca0e6e3ce8692ee19fb8d734b641899e07b68eb73e9bbbd32f1703991" +"checksum 
pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)" = "5894c618ce612a3fa23881b152b608bafb8c56cfc22f434a3ba3120b40f7b587" "checksum pkg-config 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)" = "72d5370d90f49f70bd033c3d75e87fc529fbfff9d6f7cccef07d6170079d91ea" "checksum ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b" +"checksum proc-macro-hack 0.5.11 (registry+https://github.com/rust-lang/crates.io-index)" = "ecd45702f76d6d3c75a80564378ae228a85f0b59d2f3ed43c91b4a69eb2ebfc5" +"checksum proc-macro-nested 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "369a6ed065f249a159e06c45752c780bda2fb53c995718f9e484d08daa9eb42e" "checksum proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "9c9e470a8dc4aeae2dee2f335e8f533e2d4b347e1434e5671afc49b054592f27" -"checksum publicsuffix 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "9bf259a81de2b2eb9850ec990ec78e6a25319715584fd7652b9b26f96fcb1510" +"checksum publicsuffix 1.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3bbaa49075179162b49acac1c6aa45fb4dafb5f13cf6794276d77bc7fd95757b" "checksum quanta 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f4f7a1905379198075914bc93d32a5465c40474f90a078bb13439cb00c547bcc" "checksum quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9274b940887ce9addde99c4eee6b5c44cc494b182b97e73dc8ffdcb3397fd3f0" "checksum quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe" @@ -3050,18 +3538,21 @@ dependencies = [ "checksum rayon 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "83a27732a533a1be0a0035a111fe76db89ad312f6f0347004c220c57f209a123" "checksum 
rayon-core 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "98dcf634205083b17d0861252431eb2acbfb698ab7478a2d20de07954f47ec7b" "checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" -"checksum redis 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9bb1079bc5692c03e1479bb7086ac870ac2aaf7115b5e72d314c01368f1a747e" +"checksum redis 0.13.1-alpha.0 (git+https://github.com/mitsuhiko/redis-rs)" = "" "checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" "checksum regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dc220bd33bdce8f093101afe22a037b8eb0e5af33592e6a9caafff0d4cb81cbd" "checksum regex-automata 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "92b73c2a1770c255c240eaa4ee600df1704a38dc3feaa6e949e7fcd4f8dc09f9" "checksum regex-syntax 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)" = "11a7e20d1cce64ef2fed88b66d347f88bd9babb82845b2b858f3edbf59a4f716" "checksum remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e" -"checksum reqwest 0.9.22 (registry+https://github.com/rust-lang/crates.io-index)" = "2c2064233e442ce85c77231ebd67d9eca395207dec2127fe0bbedde4bd29a650" +"checksum reqwest 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c0e798e19e258bf6c30a304622e3e9ac820e483b06a1857a026e1f109b113fe4" +"checksum reqwest 0.9.24 (registry+https://github.com/rust-lang/crates.io-index)" = "f88643aea3c1343c804950d7bf983bd2067f5ab59db6d613a08e05572f2714ab" "checksum ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)" = 
"6747f8da1f2b1fabbee1aaa4eb8a11abf9adef0bf58a41cee45db5d59cecdfac" +"checksum runtime 0.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "485bae53d3b7af75e4e8c4881ba084d0f5a88913c8cb7a3572522aea706bca32" "checksum rustc-demangle 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" "checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" "checksum rustls 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b25a18b1bf7387f0145e7f8324e700805aade3842dd3db2e74e4cdeb4677c09e" "checksum ryu 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bfa8506c1de11c9c4e4c38863ccbe02a305c8188e85a05a784c9e11e1c3910c8" +"checksum safemem 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" "checksum same-file 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "585e8ddcedc187886a30fa705c47985c3fa88d06624095856b36ca0b82ff4421" "checksum schannel 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = "87f550b06b6cba9c8b8be3ee73f391990116bf527450d2556e9b9ce263b9a021" "checksum scoped-tls 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" @@ -3081,13 +3572,14 @@ dependencies = [ "checksum serde_urlencoded 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9ec5d77e2d4c73717816afac02670d5c4f534ea95ed430442cad02e7a6e32c97" "checksum sha-1 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "23962131a91661d643c98940b20fcaffe62d776a823247be80a48fcb8b6fce68" "checksum sha1 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" +"checksum siphasher 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0b8de496cf83d4ed58b6be86c3a275b8602f6ffe98d3024a869e124147a9a3ac" "checksum sized-chunks 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "9d3e7f23bad2d6694e0f46f5e470ec27eb07b8f3e8b309a4b0dc17501928b9f2" "checksum slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" "checksum smallvec 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)" = "cefaa50e76a6f10b86f36e640eb1739eafbd4084865067778463913e43a77ff3" "checksum sourcefile 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "4bf77cb82ba8453b42b6ae1d692e4cdc92f9a47beaf89a847c8be83f4e328ad3" "checksum spin 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" "checksum stable_deref_trait 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dba1a27d3efae4351c8051072d619e3ade2820635c3958d826bfea39d59b54c8" -"checksum stream-cancel 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "9d62fea0968935ec8eedcf671b2738bf49c58e133db968097c301d32e32eaedf" +"checksum stream-cancel 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de4f382c018868d33660134df97baf0fa8eeb1035d57bed05e6ca338189238f7" "checksum string 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d24114bfcceb867ca7f71a0d3fe45d45619ec47a6fbfa98cb14e14250bfa5d6d" "checksum syn 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = "0e7bedb3320d0f3035594b0b723c8a28d7d336a3eda3881db79e61d676fb644c" "checksum synstructure 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3f085a5855930c0441ca1288cf044ea4aecf4f43a91668abdb870b4ba546a203" @@ 
-3099,13 +3591,14 @@ dependencies = [ "checksum time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f" "checksum tinytemplate 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4574b75faccaacddb9b284faecdf0b544b80b6b294f3d062d325c5726a209c20" "checksum tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)" = "5a09c0b5bb588872ab2f09afa13ee6e9dac11e10a0ec9e8e3ba39a5a5d530af6" +"checksum tokio 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)" = "ffa2fdcfa937b20cb3c822a635ceecd5fc1a27a6a474527e5516aa24b8c8820a" "checksum tokio-buf 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8fb220f46c53859a4b7ec083e41dec9778ff0b1851c0942b211edb89e0ccdc46" "checksum tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5c501eceaf96f0e1793cf26beb63da3d11c738c4a943fdf3746d81d64684c39f" "checksum tokio-current-thread 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "d16217cad7f1b840c5a97dfb3c43b0c871fef423a6e8d2118c604e843662a443" -"checksum tokio-dns-unofficial 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bb9bf62ca2c53bf2f2faec3e48a98b6d8c9577c27011cb0203a4beacdc8ab328" "checksum tokio-executor 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "0f27ee0e6db01c5f0b2973824547ce7e637b2ed79b891a9677b0de9bd532b6ac" "checksum tokio-fs 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "3fe6dc22b08d6993916647d108a1a7d15b9cd29c4f4496c62b92c45b5041b7af" "checksum tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "5090db468dad16e1a7a54c8c67280c5e4b544f3d3e018f0b913b400261f85926" +"checksum tokio-macros 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "50a61f268a3db2acee8dcab514efc813dc6dbe8a00e86076f935f94304b59a7a" "checksum 
tokio-reactor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "c56391be9805bc80163151c0b9e5164ee64f4b0200962c346fea12773158f22d" "checksum tokio-retry 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9c03755b956458582182941061def32b8123a26c98b08fc6ddcf49ae89d18f33" "checksum tokio-rustls 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1df2fa53ac211c136832f530ccb081af9af891af22d685a9493e232c7a359bc2" @@ -3113,21 +3606,25 @@ dependencies = [ "checksum tokio-tcp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1d14b10654be682ac43efee27401d792507e30fd8d26389e1da3b185de2e4119" "checksum tokio-threadpool 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = "2bd2c6a3885302581f4401c82af70d792bb9df1700e7437b0aeb4ada94d5388c" "checksum tokio-timer 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "f2106812d500ed25a4f38235b9cae8f78a09edf43203e16e59c3b769a342a60e" -"checksum tokio-tls 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "354b8cd83825b3c20217a9dc174d6a0c67441a2fae5c41bcb1ea6679f6ae0f7c" -"checksum tokio-tungstenite 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "38f95da5281a1a52e72fa3657e571279bcc2b163ba2897ed8eaa34ef97f24fda" +"checksum tokio-tls 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7bde02a3a5291395f59b06ec6945a3077602fac2b07eeeaf0dee2122f3619828" +"checksum tokio-tungstenite 0.10.0 (git+https://github.com/snapview/tokio-tungstenite)" = "" "checksum tokio-udp 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f02298505547f73e60f568359ef0d016d5acd6e830ab9bc7c4a5b3403440121b" "checksum tokio-uds 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "037ffc3ba0e12a0ab4aca92e5234e0dedeb48fddf6ccd260f1f150a36a9f2445" +"checksum tokio-util 0.2.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "571da51182ec208780505a32528fc5512a8fe1443ab960b3f2f3ef093cd16930" "checksum toml 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)" = "758664fc71a3a69038656bee8b6be6477d2a6c315a6b81f7081f591bffa4111f" +"checksum tower-service 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" "checksum tracing 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "ff4e4f59e752cb3beb5b61c6d5e11191c7946231ba84faec2902c9efdd8691c5" "checksum tracing-attributes 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "a4263b12c3d3c403274493eb805966093b53214124796552d674ca1dd5d27c2b" "checksum tracing-core 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "bc913647c520c959b6d21e35ed8fa6984971deca9f0a2fcb8c51207e0c56af1d" -"checksum tracing-futures 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "85a260ae9e7fdf7402955e761af889eb56203284295c0482b1fe33641cb948f0" +"checksum tracing-futures 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "107ae59580d2a1d994b6b965b16fe94c969fe86d3f7fd2572a1ee243bcaf7f09" "checksum tracing-log 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5e0f8c7178e13481ff6765bd169b33e8d554c5d2bbede5e32c356194be02b9b9" "checksum tracing-subscriber 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "192ca16595cdd0661ce319e8eede9c975f227cdaabc4faaefdc256f43d852e45" "checksum try-lock 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" "checksum try_from 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "283d3b89e1368717881a9d51dad843cc435380d8109c9e47d38780a324698d8b" -"checksum tungstenite 0.9.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "577caf571708961603baf59d2e148d12931e0da2e4bb6c5b471dd4a524fef3aa" +"checksum tungstenite 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)" = "8a0c2bd5aeb7dcd2bb32e472c8872759308495e5eccc942e929a513cd8d36110" +"checksum twoway 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "59b11b2b5241ba34be09c3cc85a36e56e48f9888862e19cedf23336d35316ed1" "checksum typenum 1.11.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6d2783fe2d6b8c1101136184eb41be8b1ad379e4657050b8aaff0c79ee7575f9" +"checksum unicase 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7f4765f83163b74f957c797ad9253caf97f103fb064d3999aea9568d09fc8a33" "checksum unicase 2.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2e2e6bd1e59e56598518beb94fd6db628ded570326f0a98c679a304bd9f00150" "checksum unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" "checksum unicode-normalization 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "141339a08b982d942be2ca06ff8b076563cbe223d1befd5450716790d44e2426" @@ -3148,10 +3645,12 @@ dependencies = [ "checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" "checksum walkdir 2.2.9 (registry+https://github.com/rust-lang/crates.io-index)" = "9658c94fa8b940eab2250bd5a457f9c48b748420d71293b165c8cdbe2f55f71e" "checksum want 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b6395efa4784b027708f7451087e647ec73cc74f5d9bc2e418404248d679a230" -"checksum warp 0.1.20 (registry+https://github.com/rust-lang/crates.io-index)" = "3921463c44f680d24f1273ea55efd985f31206a22a02dee207a2ec72684285ca" +"checksum want 0.3.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +"checksum warp 0.2.0-alpha.0 (git+https://github.com/seanmonstar/warp.git)" = "" "checksum wasi 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b89c3ce4ce14bdc6fb6beaf9ec7928ca331de5df7e5ea278375642a2f478570d" "checksum wasm-bindgen 0.2.53 (registry+https://github.com/rust-lang/crates.io-index)" = "4c29d57d5c3b3bc53bbe35c5a4f4a0df994d870b7d3cb0ad1c2065e21822ae41" "checksum wasm-bindgen-backend 0.2.53 (registry+https://github.com/rust-lang/crates.io-index)" = "aa2868fa93e5bf36a9364d1277a0f97392748a8217d9aa0ec3f1cdbdf7ad1a60" +"checksum wasm-bindgen-futures 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "9a7db7b3a0ec2120b67321367eda2f8f4f65ff2482c7560dc7c7a9b35aab8d06" "checksum wasm-bindgen-macro 0.2.53 (registry+https://github.com/rust-lang/crates.io-index)" = "41e80594782a241bf3d92ee5d1247b8fb496250a8a2ff1e136942d433fbbce14" "checksum wasm-bindgen-macro-support 0.2.53 (registry+https://github.com/rust-lang/crates.io-index)" = "74b9950355b1d92ca09de0984bdd4de7edda5e8af12daf0c052a0a075e8c9157" "checksum wasm-bindgen-shared 0.2.53 (registry+https://github.com/rust-lang/crates.io-index)" = "7493fe67ad99672ef3de3e6ba513fb03db276358c8cc9588ce5a008c6e48ad68" diff --git a/crates/ilp-node/Cargo.toml b/crates/ilp-node/Cargo.toml index 695170d77..00da6dd11 100644 --- a/crates/ilp-node/Cargo.toml +++ b/crates/ilp-node/Cargo.toml @@ -24,9 +24,10 @@ required-features = ["redis"] [dependencies] bytes = { version = "0.4.12", default-features = false } +bytes05 = { package = "bytes", version = "0.5", default-features = false } clap = { version = "2.33.0", default-features = false } config = { version = "0.9.3", default-features = false, features = ["json", "toml", "yaml"] } -futures = { version = "0.1.29", default-features = false } +futures = { 
version = "0.3.1", default-features = false, features = ["compat"] } hex = { version = "0.4.0", default-features = false } interledger = { path = "../interledger", version = "^0.6.0", default-features = false, features = ["node"] } lazy_static = { version = "1.4.0", default-features = false } @@ -34,16 +35,18 @@ metrics = { version = "0.12.0", default-features = false, features = ["std"] } metrics-core = { version = "0.5.1", default-features = false } metrics-runtime = { version = "0.12.0", default-features = false, features = ["metrics-observer-prometheus"] } num-bigint = { version = "0.2.3", default-features = false, features = ["std"] } -redis_crate = { package = "redis", version = "0.13.0", default-features = false, features = ["executor"], optional = true } +# redis_crate = { package = "redis", version = "0.13.0", default-features = false, features = ["executor"], optional = true } +redis_crate = { package = "redis", git = "https://github.com/mitsuhiko/redis-rs", optional = true, features = ["tokio-rt-core"] } ring = { version = "0.16.9", default-features = false } serde = { version = "1.0.101", default-features = false } -tokio = { version = "0.1.22", default-features = false } +tokio = { version = "0.2.8", features = ["rt-core", "macros", "time"] } tracing = { version = "0.1.9", default-features = true, features = ["log"] } -tracing-futures = { version = "0.1.1", default-features = true, features = ["tokio", "futures-01"] } +tracing-futures = { version = "0.2", default-features = true, features = ["tokio", "futures-03"] } tracing-subscriber = { version = "0.1.6", default-features = true, features = ["tracing-log"] } url = { version = "2.1.0", default-features = false } libc = { version = "0.2.62", default-features = false } -warp = { version = "0.1.20", default-features = false, features = ["websocket"] } +# warp = { version = "0.1.20", default-features = false, features = ["websocket"] } +warp = { git = "https://github.com/seanmonstar/warp", 
default-features = false } secrecy = { version = "0.5.1", default-features = false, features = ["alloc", "serde"] } uuid = { version = "0.8.1", default-features = false} @@ -51,7 +54,7 @@ uuid = { version = "0.8.1", default-features = false} base64 = { version = "0.10.1", default-features = false, optional = true } chrono = { version = "0.4.9", default-features = false, features = [], optional = true} parking_lot = { version = "0.9.0", default-features = false, optional = true } -reqwest = { version = "0.9.22", default-features = false, features = ["default-tls"], optional = true } +reqwest = { version = "0.10.0", default-features = false, features = ["default-tls", "json"], optional = true } serde_json = { version = "1.0.41", default-features = false, optional = true } yup-oauth2 = { version = "3.1.1", default-features = false, optional = true } @@ -60,7 +63,7 @@ approx = { version = "0.3.2", default-features = false } base64 = { version = "0.10.1", default-features = false } net2 = { version = "0.2.33", default-features = false } rand = { version = "0.7.2", default-features = false } -reqwest = { version = "0.9.22", default-features = false, features = ["default-tls"] } +reqwest = { version = "0.10.0", default-features = false, features = ["default-tls", "json"] } serde_json = { version = "1.0.41", default-features = false } tokio-retry = { version = "0.2.0", default-features = false } diff --git a/crates/ilp-node/src/google_pubsub.rs b/crates/ilp-node/src/google_pubsub.rs index 3f667a49e..f96094b4a 100644 --- a/crates/ilp-node/src/google_pubsub.rs +++ b/crates/ilp-node/src/google_pubsub.rs @@ -1,15 +1,18 @@ +#[cfg(feature = "google_pubsub")] use base64; use chrono::Utc; use futures::{ + compat::Future01CompatExt, future::{ok, Either}, - Future, + Future, TryFutureExt, }; use interledger::{ + ccp::CcpRoutingAccount, packet::Address, - service::{Account, BoxedIlpFuture, OutgoingRequest, OutgoingService, Username}, + service::{Account, IlpResult, OutgoingRequest, 
OutgoingService, Username}, }; use parking_lot::Mutex; -use reqwest::r#async::Client; +use reqwest::Client; use serde::{Deserialize, Serialize}; use std::{collections::HashMap, sync::Arc}; use tokio::spawn; @@ -58,16 +61,17 @@ struct PacketRecord { timestamp: String, } +use std::pin::Pin; /// Create an Interledger service wrapper that publishes records /// of fulfilled packets to Google Cloud PubSub. /// /// This is an experimental feature that may be removed in the future. -pub fn create_google_pubsub_wrapper< +pub async fn create_google_pubsub_wrapper< A: Account + 'static, O: OutgoingService + Clone + Send + 'static, >( config: Option, -) -> impl Fn(OutgoingRequest, O) -> BoxedIlpFuture + Clone { +) -> impl Fn(OutgoingRequest, O) -> Pin>> + Clone { // If Google credentials were passed in, create an HTTP client and // OAuth2 client that will automatically fetch and cache access tokens let utilities = if let Some(config) = config { @@ -91,10 +95,16 @@ pub fn create_google_pubsub_wrapper< None }; - move |request: OutgoingRequest, mut next: O| -> BoxedIlpFuture { + move |request: OutgoingRequest, mut next: O| -> Pin>> { + let mut next_clone = next.clone(); + let mut next_clone2 = next.clone(); match &utilities { // Just pass the request on if no Google Pubsub details were configured - None => Box::new(next.send_request(request)), + // Due to using async_trait this becomes a Box::pin! 
+ None => Box::pin(async move { + let fulfill = next_clone.send_request(request).await?; + Ok(fulfill) + }), Some((client, api_endpoint, token_fetcher)) => { let prev_hop_account = request.from.username().clone(); let prev_hop_asset_code = request.from.asset_code().to_string(); @@ -108,78 +118,80 @@ pub fn create_google_pubsub_wrapper< let client = client.clone(); let api_endpoint = api_endpoint.clone(); let token_fetcher = token_fetcher.clone(); - - Box::new(next.send_request(request).map(move |fulfill| { + Box::pin(async move { // Only fulfilled packets are published for now + let fulfill = next_clone2.send_request(request).await?; let fulfillment = base64::encode(fulfill.fulfillment()); - let get_token_future = token_fetcher.lock() + let get_token_future = token_fetcher + .lock() .token(TOKEN_SCOPES) + .compat() + .map_ok(|token: yup_oauth2::Token| token.access_token) .map_err(|err| { error!("Error fetching OAuth token for Google PubSub: {:?}", err) }); + // Spawn a task to submit the packet to PubSub so we // don't block returning the fulfillment // Note this means that if there is a problem submitting the // packet record to PubSub, it will only log an error - spawn( - get_token_future - .and_then(move |token| { - let record = PacketRecord { - prev_hop_account, - prev_hop_asset_code, - prev_hop_asset_scale, - prev_hop_amount, - next_hop_account, - next_hop_asset_code, - next_hop_asset_scale, - next_hop_amount, - destination_ilp_address, - fulfillment, - timestamp: Utc::now().to_rfc3339(), - }; - let data = base64::encode(&serde_json::to_string(&record).unwrap()); + spawn(async move { + let token = get_token_future.await?; + + let record = PacketRecord { + prev_hop_account, + prev_hop_asset_code, + prev_hop_asset_scale, + prev_hop_amount, + next_hop_account, + next_hop_asset_code, + next_hop_asset_scale, + next_hop_amount, + destination_ilp_address, + fulfillment, + timestamp: Utc::now().to_rfc3339(), + }; + let data = 
base64::encode(&serde_json::to_string(&record).unwrap()); + + let res = client + .post(api_endpoint.as_str()) + .bearer_auth(token) + .json(&PubsubRequest { + messages: vec![PubsubMessage { + // TODO should there be an ID? + message_id: None, + data: Some(data), + attributes: None, + publish_time: None, + }], + }) + .send() + .map_err(|err| { + error!("Error sending packet details to Google PubSub: {:?}", err) + }) + .await?; + + if res.status().is_success() { + return Ok(()); + } else { + let status = res.status(); + let body = res + .text() + .map_err(|err| error!("Error getting response body: {:?}", err)) + .await?; + error!( + %status, + "Error sending packet details to Google PubSub: {}", + body + ); + } + + Ok::<(), ()>(()) + }); - client - .post(api_endpoint.as_str()) - .bearer_auth(token.access_token) - .json(&PubsubRequest { - messages: vec![PubsubMessage { - // TODO should there be an ID? - message_id: None, - data: Some(data), - attributes: None, - publish_time: None, - }], - }) - .send() - .map_err(|err| { - error!( - "Error sending packet details to Google PubSub: {:?}", - err - ) - }) - .and_then(|mut res| { - if res.status().is_success() { - Either::A(ok(())) - } else { - let status = res.status(); - Either::B(res.text() - .map_err(|err| error!("Error getting response body: {:?}", err)) - .and_then(move |body| { - error!( - %status, - "Error sending packet details to Google PubSub: {}", - body - ); - Ok(()) - })) - } - }) - }), - ); - fulfill - })) + Ok(fulfill) + }) } } } diff --git a/crates/ilp-node/src/lib.rs b/crates/ilp-node/src/lib.rs index 1086e4991..85434c41f 100644 --- a/crates/ilp-node/src/lib.rs +++ b/crates/ilp-node/src/lib.rs @@ -1,15 +1,12 @@ -#![type_length_limit = "1152909"] +#![type_length_limit = "6000000"] -mod metrics; +// mod metrics; mod node; -mod trace; +// mod trace; -#[cfg(feature = "google-pubsub")] -mod google_pubsub; +// #[cfg(feature = "google-pubsub")] +// mod google_pubsub; #[cfg(feature = "redis")] mod redis_store; 
pub use node::*; -#[allow(deprecated)] -#[cfg(feature = "redis")] -pub use redis_store::insert_account_with_redis_store; diff --git a/crates/ilp-node/src/main.rs b/crates/ilp-node/src/main.rs index eb4921fa4..3534a8ee4 100644 --- a/crates/ilp-node/src/main.rs +++ b/crates/ilp-node/src/main.rs @@ -1,4 +1,4 @@ -#![type_length_limit = "1152909"] +#![type_length_limit = "6000000"] mod metrics; mod node; @@ -24,7 +24,8 @@ use tracing_subscriber::{ fmt::{time::ChronoUtc, Subscriber}, }; -pub fn main() { +#[tokio::main] +async fn main() { Subscriber::builder() .with_timer(ChronoUtc::rfc3339()) .with_env_filter(EnvFilter::from_default_env()) @@ -143,7 +144,9 @@ pub fn main() { } let matches = app.clone().get_matches(); merge_args(&mut config, &matches); - config.try_into::().unwrap().run(); + + let node = config.try_into::().unwrap(); + node.serve().await; } // returns (subcommand paths, config path) diff --git a/crates/ilp-node/src/metrics.rs b/crates/ilp-node/src/metrics.rs index 35ba411ab..e12d10f63 100644 --- a/crates/ilp-node/src/metrics.rs +++ b/crates/ilp-node/src/metrics.rs @@ -2,15 +2,17 @@ use futures::Future; use interledger::{ ccp::CcpRoutingAccount, packet::{Fulfill, Reject}, - service::{Account, IncomingRequest, IncomingService, OutgoingRequest, OutgoingService}, + service::{ + Account, IlpResult, IncomingRequest, IncomingService, OutgoingRequest, OutgoingService, + }, }; use metrics::{self, labels, recorder, Key}; use std::time::Instant; -pub fn incoming_metrics( +pub async fn incoming_metrics( request: IncomingRequest, mut next: impl IncomingService, -) -> impl Future { +) -> IlpResult { let labels = labels!( "from_asset_code" => request.from.asset_code().to_string(), "from_routing_relation" => request.from.routing_relation().to_string(), @@ -21,30 +23,30 @@ pub fn incoming_metrics( ); let start_time = Instant::now(); - next.handle_request(request).then(move |result| { - if result.is_ok() { - recorder().increment_counter( - 
Key::from_name_and_labels("requests.incoming.fulfill", labels.clone()), - 1, - ); - } else { - recorder().increment_counter( - Key::from_name_and_labels("requests.incoming.reject", labels.clone()), - 1, - ); - } - recorder().record_histogram( - Key::from_name_and_labels("requests.incoming.duration", labels), - (Instant::now() - start_time).as_nanos() as u64, + let result = next.handle_request(request).await; + if result.is_ok() { + recorder().increment_counter( + Key::from_name_and_labels("requests.incoming.fulfill", labels.clone()), + 1, ); - result - }) + } else { + recorder().increment_counter( + Key::from_name_and_labels("requests.incoming.reject", labels.clone()), + 1, + ); + } + + recorder().record_histogram( + Key::from_name_and_labels("requests.incoming.duration", labels), + (Instant::now() - start_time).as_nanos() as u64, + ); + result } -pub fn outgoing_metrics( +pub async fn outgoing_metrics( request: OutgoingRequest, mut next: impl OutgoingService, -) -> impl Future { +) -> IlpResult { let labels = labels!( "from_asset_code" => request.from.asset_code().to_string(), "to_asset_code" => request.to.asset_code().to_string(), @@ -60,23 +62,23 @@ pub fn outgoing_metrics( ); let start_time = Instant::now(); - next.send_request(request).then(move |result| { - if result.is_ok() { - recorder().increment_counter( - Key::from_name_and_labels("requests.outgoing.fulfill", labels.clone()), - 1, - ); - } else { - recorder().increment_counter( - Key::from_name_and_labels("requests.outgoing.reject", labels.clone()), - 1, - ); - } - - recorder().record_histogram( - Key::from_name_and_labels("requests.outgoing.duration", labels.clone()), - (Instant::now() - start_time).as_nanos() as u64, + let result = next.send_request(request).await; + if result.is_ok() { + recorder().increment_counter( + Key::from_name_and_labels("requests.outgoing.fulfill", labels.clone()), + 1, ); - result - }) + } else { + recorder().increment_counter( + 
Key::from_name_and_labels("requests.outgoing.reject", labels.clone()), + 1, + ); + } + + recorder().record_histogram( + Key::from_name_and_labels("requests.outgoing.duration", labels.clone()), + (Instant::now() - start_time).as_nanos() as u64, + ); + + result } diff --git a/crates/ilp-node/src/node.rs b/crates/ilp-node/src/node.rs index c8275c0aa..47ae34609 100644 --- a/crates/ilp-node/src/node.rs +++ b/crates/ilp-node/src/node.rs @@ -1,9 +1,9 @@ -use crate::metrics::{incoming_metrics, outgoing_metrics}; -use crate::trace::{trace_forwarding, trace_incoming, trace_outgoing}; +// use crate::metrics::{incoming_metrics, outgoing_metrics}; +// use crate::trace::{trace_forwarding, trace_incoming, trace_outgoing}; use bytes::Bytes; use futures::{ future::{err, Either}, - Future, + Future, TryFutureExt, }; use hex::FromHex; use interledger::{ @@ -52,8 +52,8 @@ use warp::{ Filter, }; -#[cfg(feature = "google-pubsub")] -use crate::google_pubsub::{create_google_pubsub_wrapper, PubsubConfig}; +// #[cfg(feature = "google-pubsub")] +// use crate::google_pubsub::{create_google_pubsub_wrapper, PubsubConfig}; #[cfg(feature = "redis")] use crate::redis_store::*; #[cfg(feature = "balance-tracking")] @@ -228,8 +228,8 @@ pub struct InterledgerNode { /// If this configuration is not provided, the node will not collect metrics. #[serde(default)] pub prometheus: Option, - #[cfg(feature = "google-pubsub")] - pub google_pubsub: Option, + // #[cfg(feature = "google-pubsub")] + // pub google_pubsub: Option, } impl InterledgerNode { @@ -239,19 +239,21 @@ impl InterledgerNode { /// also run the Prometheus metrics server on the given address. 
// TODO when a BTP connection is made, insert a outgoing HTTP entry into the Store to tell other // connector instances to forward packets for that account to us - pub fn serve(self) -> impl Future { - if self.prometheus.is_some() { - Either::A( - self.serve_prometheus() - .join(self.serve_node()) - .and_then(|_| Ok(())), - ) - } else { - Either::B(self.serve_node()) - } + pub async fn serve(self) -> Result<(), ()> { + // if self.prometheus.is_some() { + // let res = + // futures::future::join(self.clone().serve_prometheus(), self.serve_node()).await; + // if res.0.is_ok() || res.1.is_ok() { + // Ok(()) + // } else { + // Err(()) + // } + // } else { + self.serve_node().await + // } } - fn serve_node(self) -> Box + Send + 'static> { + async fn serve_node(self) -> Result<(), ()> { let ilp_address = if let Some(address) = &self.ilp_address { address.clone() } else { @@ -266,26 +268,22 @@ impl InterledgerNode { "The string '{}' could not be parsed as a URL: {}", &self.database_url, e ); - return Box::new(err(())); + return Err(()); } }; match database_url.scheme() { #[cfg(feature = "redis")] - "redis" | "redis+unix" => Box::new(serve_redis_node(self, ilp_address)), + "redis" | "redis+unix" => serve_redis_node(self, ilp_address).await, other => { error!("unsupported data source scheme: {}", other); - Box::new(err(())) + Err(()) } } } #[allow(clippy::cognitive_complexity)] - pub(crate) fn chain_services( - self, - store: S, - ilp_address: Address, - ) -> impl Future + pub(crate) async fn chain_services(self, store: S, ilp_address: Address) -> Result<(), ()> where S: NodeStore + BtpStore @@ -324,260 +322,260 @@ impl InterledgerNode { let exchange_rate_poll_interval = self.exchange_rate.poll_interval; let exchange_rate_poll_failure_tolerance = self.exchange_rate.poll_failure_tolerance; let exchange_rate_spread = self.exchange_rate.spread; - #[cfg(feature = "google-pubsub")] - let google_pubsub = self.google_pubsub.clone(); - - store.clone().get_btp_outgoing_accounts() - 
.map_err(|_| error!(target: "interledger-node", "Error getting accounts")) - .and_then(move |btp_accounts| { - let outgoing_service = - outgoing_service_fn(move |request: OutgoingRequest| { - // Don't log anything for failed route updates sent to child accounts - // because there's a good chance they'll be offline - if request.prepare.destination().scheme() != "peer" - || request.to.routing_relation() != RoutingRelation::Child { - error!(target: "interledger-node", "No route found for outgoing request"); - } - Err(RejectBuilder { - code: ErrorCode::F02_UNREACHABLE, - message: &format!( - // TODO we might not want to expose the internal account ID in the error - "No outgoing route for account: {} (ILP address of the Prepare packet: {})", - request.to.id(), - request.prepare.destination(), - ) - .as_bytes(), - triggered_by: Some(&ilp_address_clone), - data: &[], - } - .build()) - }); - - // Connect to all of the accounts that have outgoing ilp_over_btp_urls configured - // but don't fail if we are unable to connect - // TODO try reconnecting to those accounts later - connect_client(ilp_address_clone2.clone(), btp_accounts, false, outgoing_service) - .and_then( - move |btp_client_service| { - let btp_server_service = BtpOutgoingService::new(ilp_address_clone2, btp_client_service.clone()); - let btp_server_service_clone = btp_server_service.clone(); - let btp = btp_client_service.clone(); - - // The BTP service is both an Incoming and Outgoing one so we pass it first as the Outgoing - // service to others like the router and then call handle_incoming on it to set up the incoming handler - let outgoing_service = btp_server_service.clone(); - let outgoing_service = HttpClientService::new( - store.clone(), - outgoing_service, - ); - - let outgoing_service = outgoing_service.wrap(outgoing_metrics); - - // Note: the expiry shortener must come after the Validator so that the expiry duration - // is shortened before we check whether there is enough time left - let 
outgoing_service = ValidatorService::outgoing( - store.clone(), - outgoing_service - ); - let outgoing_service = - ExpiryShortenerService::new(outgoing_service); - let outgoing_service = StreamReceiverService::new( - secret_seed.clone(), - store.clone(), - outgoing_service, - ); - #[cfg(feature = "balance-tracking")] - let outgoing_service = BalanceService::new( - store.clone(), - outgoing_service, - ); - let outgoing_service = ExchangeRateService::new( - exchange_rate_spread, - store.clone(), - outgoing_service, - ); - - #[cfg(feature = "google-pubsub")] - let outgoing_service = outgoing_service.wrap(create_google_pubsub_wrapper(google_pubsub)); - - // Set up the Router and Routing Manager - let incoming_service = Router::new( - store.clone(), - // Add tracing to add the outgoing request details to the incoming span - outgoing_service.clone().wrap(trace_forwarding), - ); - - // Add tracing to track the outgoing request details - let outgoing_service = outgoing_service.wrap(trace_outgoing).in_current_span(); - - let mut ccp_builder = CcpRouteManagerBuilder::new( - ilp_address.clone(), - store.clone(), - outgoing_service.clone(), - incoming_service, - ); - ccp_builder.ilp_address(ilp_address.clone()); - if let Some(ms) = route_broadcast_interval { - ccp_builder.broadcast_interval(ms); - } - let incoming_service = ccp_builder.to_service(); - let incoming_service = EchoService::new(store.clone(), incoming_service); - let incoming_service = SettlementMessageService::new(incoming_service); - let incoming_service = IldcpService::new(incoming_service); - let incoming_service = - MaxPacketAmountService::new( - store.clone(), - incoming_service - ); - let incoming_service = - ValidatorService::incoming(store.clone(), incoming_service); - let incoming_service = RateLimitService::new( - store.clone(), - incoming_service, - ); - - // Add tracing to track the incoming request details - let incoming_service = incoming_service.wrap(trace_incoming).in_current_span(); - - let 
incoming_service = incoming_service.wrap(incoming_metrics); - - // Handle incoming packets sent via BTP - btp_server_service.handle_incoming(incoming_service.clone().wrap(|request, mut next| { - let btp = debug_span!(target: "interledger-node", "btp"); - let _btp_scope = btp.enter(); - next.handle_request(request).in_current_span() - }).in_current_span()); - btp_client_service.handle_incoming(incoming_service.clone().wrap(|request, mut next| { - let btp = debug_span!(target: "interledger-node", "btp"); - let _btp_scope = btp.enter(); - next.handle_request(request).in_current_span() - }).in_current_span()); - - // Node HTTP API - let mut api = NodeApi::new( - secret_seed, - admin_auth_token, - store.clone(), - incoming_service.clone().wrap(|request, mut next| { - let api = debug_span!(target: "interledger-node", "api"); - let _api_scope = api.enter(); - next.handle_request(request).in_current_span() - }).in_current_span(), - outgoing_service.clone(), - btp.clone(), - ); - if let Some(username) = default_spsp_account { - api.default_spsp_account(username); + // #[cfg(feature = "google-pubsub")] + // let google_pubsub = self.google_pubsub.clone(); + + let btp_accounts = store + .get_btp_outgoing_accounts() + .map_err(|_| error!(target: "interledger-node", "Error getting accounts")) + .await?; + + let outgoing_service = outgoing_service_fn(move |request: OutgoingRequest| { + // Don't log anything for failed route updates sent to child accounts + // because there's a good chance they'll be offline + if request.prepare.destination().scheme() != "peer" + || request.to.routing_relation() != RoutingRelation::Child + { + error!(target: "interledger-node", "No route found for outgoing request"); + } + Err(RejectBuilder { + code: ErrorCode::F02_UNREACHABLE, + message: &format!( + // TODO we might not want to expose the internal account ID in the error + "No outgoing route for account: {} (ILP address of the Prepare packet: {})", + request.to.id(), + 
request.prepare.destination(), + ) + .as_bytes(), + triggered_by: Some(&ilp_address_clone), + data: &[], + } + .build()) + }); + + // Connect to all of the accounts that have outgoing ilp_over_btp_urls configured + // but don't fail if we are unable to connect + // TODO try reconnecting to those accounts later + let btp_client_service = connect_client( + ilp_address_clone2.clone(), + btp_accounts, + false, + outgoing_service, + ) + .await?; + let btp_server_service = + BtpOutgoingService::new(ilp_address_clone2, btp_client_service.clone()); + let btp_server_service_clone = btp_server_service.clone(); + let btp = btp_client_service.clone(); + + // The BTP service is both an Incoming and Outgoing one so we pass it first as the Outgoing + // service to others like the router and then call handle_incoming on it to set up the incoming handler + let outgoing_service = btp_server_service.clone(); + let outgoing_service = HttpClientService::new(store.clone(), outgoing_service); + + // let outgoing_service = outgoing_service.wrap(outgoing_metrics); + + // Note: the expiry shortener must come after the Validator so that the expiry duration + // is shortened before we check whether there is enough time left + let outgoing_service = ValidatorService::outgoing(store.clone(), outgoing_service); + let outgoing_service = ExpiryShortenerService::new(outgoing_service); + let outgoing_service = + StreamReceiverService::new(secret_seed.clone(), store.clone(), outgoing_service); + #[cfg(feature = "balance-tracking")] + let outgoing_service = BalanceService::new(store.clone(), outgoing_service); + let outgoing_service = + ExchangeRateService::new(exchange_rate_spread, store.clone(), outgoing_service); + + // TODO: Why does this cause problems? 
+ // #[cfg(feature = "google-pubsub")] + // let outgoing_service = outgoing_service.wrap(create_google_pubsub_wrapper(google_pubsub)); + + // Set up the Router and Routing Manager + let incoming_service = Router::new( + store.clone(), + // Add tracing to add the outgoing request details to the incoming span + outgoing_service.clone(), // .wrap(trace_forwarding), + ); + + // Add tracing to track the outgoing request details + // let outgoing_service = outgoing_service.wrap(trace_outgoing).in_current_span(); + + let mut ccp_builder = CcpRouteManagerBuilder::new( + ilp_address.clone(), + store.clone(), + outgoing_service.clone(), + incoming_service, + ); + ccp_builder.ilp_address(ilp_address.clone()); + if let Some(ms) = route_broadcast_interval { + ccp_builder.broadcast_interval(ms); + } + + let incoming_service = ccp_builder.to_service(); + let incoming_service = EchoService::new(store.clone(), incoming_service); + let incoming_service = SettlementMessageService::new(incoming_service); + let incoming_service = IldcpService::new(incoming_service); + let incoming_service = MaxPacketAmountService::new(store.clone(), incoming_service); + let incoming_service = ValidatorService::incoming(store.clone(), incoming_service); + let incoming_service = RateLimitService::new(store.clone(), incoming_service); + + // Add tracing to track the incoming request details + // TODO: Re-enable metrics + // let incoming_service = incoming_service.wrap(trace_incoming).in_current_span(); + // let incoming_service = incoming_service.wrap(incoming_metrics); + + // Handle incoming packets sent via BTP + btp_server_service + .handle_incoming( + incoming_service + .clone() + .wrap(|request, mut next| { + async move { + let btp = debug_span!(target: "interledger-node", "btp"); + let _btp_scope = btp.enter(); + next.handle_request(request).in_current_span().await + } + }) + .in_current_span(), + ) + .await; + + btp_client_service + .handle_incoming( + incoming_service + .clone() + .wrap(|request, 
mut next| { + async move { + let btp = debug_span!(target: "interledger-node", "btp"); + let _btp_scope = btp.enter(); + next.handle_request(request).in_current_span().await + } + }) + .in_current_span(), + ) + .await; + + // Node HTTP API + let mut api = NodeApi::new( + secret_seed, + admin_auth_token, + store.clone(), + incoming_service + .clone() + .wrap(|request, mut next| { + async move { + let api = debug_span!(target: "interledger-node", "api"); + let _api_scope = api.enter(); + next.handle_request(request).in_current_span().await } - api.node_version(env!("CARGO_PKG_VERSION").to_string()); - // add an API of ILP over HTTP and add rejection handler - let api = api.into_warp_filter() - .or(IlpOverHttpServer::new(incoming_service.clone().wrap(|request, mut next| { + }) + .in_current_span(), + outgoing_service.clone(), + btp.clone(), // btp client service! + ); + if let Some(username) = default_spsp_account { + api.default_spsp_account(username); + } + api.node_version(env!("CARGO_PKG_VERSION").to_string()); + + // add an API of ILP over HTTP and add rejection handler + let api = api + .into_warp_filter() + .or(IlpOverHttpServer::new( + incoming_service + .clone() + .wrap(|request, mut next| { + async move { let http = debug_span!(target: "interledger-node", "http"); let _http_scope = http.enter(); - next.handle_request(request).in_current_span() - }).in_current_span(), store.clone()).as_filter()) - .or(btp_service_as_filter(btp_server_service_clone, store.clone())) - .recover(default_rejection_handler) - .with(warp::log("interledger-api")).boxed(); - - info!(target: "interledger-node", "Interledger.rs node HTTP API listening on: {}", http_bind_address); - spawn(warp::serve(api).bind(http_bind_address)); - - // Settlement API - let settlement_api = create_settlements_filter( - store.clone(), - outgoing_service.clone(), - ); - info!(target: "interledger-node", "Settlement API listening on: {}", settlement_api_bind_address); - 
spawn(warp::serve(settlement_api).bind(settlement_api_bind_address)); - - // Exchange Rate Polling - if let Some(provider) = exchange_rate_provider { - let exchange_rate_fetcher = ExchangeRateFetcher::new(provider, exchange_rate_poll_failure_tolerance, store.clone()); - exchange_rate_fetcher.spawn_interval(Duration::from_millis(exchange_rate_poll_interval)); - } else { - debug!(target: "interledger-node", "Not using exchange rate provider. Rates must be set via the HTTP API"); - } - - Ok(()) - }, + next.handle_request(request).in_current_span().await + } + }) + .in_current_span(), + store.clone(), ) - }) - .in_current_span() - } - - /// Starts a Prometheus metrics server that will listen on the configured address. - /// - /// # Errors - /// This will fail if another Prometheus server is already running in this - /// process or on the configured port. - #[allow(clippy::cognitive_complexity)] - fn serve_prometheus(&self) -> impl Future { - Box::new(if let Some(ref prometheus) = self.prometheus { - // Set up the metrics collector - let receiver = metrics_runtime::Builder::default() - .histogram( - Duration::from_millis(prometheus.histogram_window), - Duration::from_millis(prometheus.histogram_granularity), - ) - .build() - .expect("Failed to create metrics Receiver"); - let controller = receiver.controller(); - // Try installing the global recorder - match metrics::set_boxed_recorder(Box::new(receiver)) { - Ok(_) => { - let observer = - Arc::new(metrics_runtime::observers::PrometheusBuilder::default()); - - let filter = warp::get2().and(warp::path::end()).map(move || { - let mut observer = observer.build(); - controller.observe(&mut observer); - let prometheus_response = observer.drain(); - Response::builder() - .status(StatusCode::OK) - .header("Content-Type", "text/plain; version=0.0.4") - .body(prometheus_response) - }); - - info!(target: "interledger-node", - "Prometheus metrics server listening on: {}", - prometheus.bind_address - ); - Either::A( - 
warp::serve(filter) - .bind(prometheus.bind_address) - .map_err(|_| { - error!(target: "interledger-node", "Error binding Prometheus server to the configured address") - }), - ) - } - Err(e) => { - error!(target: "interledger-node", "Error installing global metrics recorder (this is likely caused by trying to run two nodes with Prometheus metrics in the same process): {:?}", e); - Either::B(err(())) - } - } + .as_filter()) + .or(btp_service_as_filter( + btp_server_service_clone, + store.clone(), + )) + .recover(default_rejection_handler) + .with(warp::log("interledger-api")) + .boxed(); + + info!(target: "interledger-node", "Interledger.rs node HTTP API listening on: {}", http_bind_address); + spawn(warp::serve(api).bind(http_bind_address)); + + // Settlement API + let settlement_api = create_settlements_filter(store.clone(), outgoing_service.clone()); + info!(target: "interledger-node", "Settlement API listening on: {}", settlement_api_bind_address); + spawn(warp::serve(settlement_api).bind(settlement_api_bind_address)); + + // Exchange Rate Polling + if let Some(provider) = exchange_rate_provider { + let exchange_rate_fetcher = ExchangeRateFetcher::new( + provider, + exchange_rate_poll_failure_tolerance, + store.clone(), + ); + // This function does not compile on 1.39 for some reason. + exchange_rate_fetcher + .spawn_interval(Duration::from_millis(exchange_rate_poll_interval)); } else { - error!(target: "interledger-node", "No prometheus configuration provided"); - Either::B(err(())) - }) - } + debug!(target: "interledger-node", "Not using exchange rate provider. 
Rates must be set via the HTTP API"); + } - /// Run the node on the default Tokio runtime - pub fn run(self) { - tokio_run(self.serve()); + Ok(()) } -} -#[doc(hidden)] -pub fn tokio_run(fut: impl Future + Send + 'static) { - let mut runtime = tokio::runtime::Builder::new() - // Don't swallow panics - .panic_handler(|err| std::panic::resume_unwind(err)) - .name_prefix("interledger-rs-worker-") - .build() - .expect("failed to start new runtime"); - - runtime.spawn(fut); - runtime.shutdown_on_idle().wait().unwrap(); + // /// Starts a Prometheus metrics server that will listen on the configured address. + // /// + // /// # Errors + // /// This will fail if another Prometheus server is already running in this + // /// process or on the configured port. + // #[allow(clippy::cognitive_complexity)] + // async fn serve_prometheus(&self) -> Result<(), ()> { + // if let Some(ref prometheus) = self.prometheus { + // // Set up the metrics collector + // let receiver = metrics_runtime::Builder::default() + // .histogram( + // Duration::from_millis(prometheus.histogram_window), + // Duration::from_millis(prometheus.histogram_granularity), + // ) + // .build() + // .expect("Failed to create metrics Receiver"); + // let controller = receiver.controller(); + // // Try installing the global recorder + // match metrics::set_boxed_recorder(Box::new(receiver)) { + // Ok(_) => { + // let observer = + // Arc::new(metrics_runtime::observers::PrometheusBuilder::default()); + + // let filter = warp::get().and(warp::path::end()).map(move || { + // let mut observer = observer.build(); + // controller.observe(&mut observer); + // let prometheus_response = observer.drain(); + // Response::builder() + // .status(StatusCode::OK) + // .header("Content-Type", "text/plain; version=0.0.4") + // .body(prometheus_response) + // }); + + // info!(target: "interledger-node", + // "Prometheus metrics server listening on: {}", + // prometheus.bind_address + // ); + + // 
Ok(warp::serve(filter).bind(prometheus.bind_address).await) + // } + // Err(e) => { + // error!(target: "interledger-node", "Error installing global metrics recorder (this is likely caused by trying to run two nodes with Prometheus metrics in the same process): {:?}", e); + // Err(()) + // } + // } + // } else { + // error!(target: "interledger-node", "No prometheus configuration provided"); + // Err(()) + // } + // } } diff --git a/crates/ilp-node/src/redis_store.rs b/crates/ilp-node/src/redis_store.rs index ef9a957c0..7377ab884 100644 --- a/crates/ilp-node/src/redis_store.rs +++ b/crates/ilp-node/src/redis_store.rs @@ -1,7 +1,7 @@ #![cfg(feature = "redis")] use crate::node::InterledgerNode; -use futures::{future::result, Future}; +use futures::TryFutureExt; pub use interledger::{ api::{AccountDetails, NodeStore}, packet::Address, @@ -10,8 +10,7 @@ pub use interledger::{ }; pub use redis_crate::{ConnectionInfo, IntoConnectionInfo}; use ring::hmac; -use tracing::{debug, error}; -use uuid::Uuid; +use tracing::error; static REDIS_SECRET_GENERATION_STRING: &str = "ilp_redis_secret"; @@ -22,18 +21,16 @@ pub fn default_redis_url() -> String { // This function could theoretically be defined as an inherent method on InterledgerNode itself. // However, we define it in this module in order to consolidate conditionally-compiled code // into as few discrete units as possible. 
-pub fn serve_redis_node( - node: InterledgerNode, - ilp_address: Address, -) -> impl Future { +pub async fn serve_redis_node(node: InterledgerNode, ilp_address: Address) -> Result<(), ()> { let redis_connection_info = node.database_url.clone().into_connection_info().unwrap(); let redis_addr = redis_connection_info.addr.clone(); let redis_secret = generate_redis_secret(&node.secret_seed); - Box::new(RedisStoreBuilder::new(redis_connection_info, redis_secret) - .node_ilp_address(ilp_address.clone()) - .connect() - .map_err(move |err| error!(target: "interledger-node", "Error connecting to Redis: {:?} {:?}", redis_addr, err)) - .and_then(move |store| node.chain_services(store, ilp_address))) + let store = RedisStoreBuilder::new(redis_connection_info, redis_secret) + .node_ilp_address(ilp_address.clone()) + .connect() + .map_err(move |err| error!(target: "interledger-node", "Error connecting to Redis: {:?} {:?}", redis_addr, err)) + .await?; + node.chain_services(store, ilp_address).await } pub fn generate_redis_secret(secret_seed: &[u8; 32]) -> [u8; 32] { @@ -45,28 +42,3 @@ pub fn generate_redis_secret(secret_seed: &[u8; 32]) -> [u8; 32] { redis_secret.copy_from_slice(sig.as_ref()); redis_secret } - -#[doc(hidden)] -#[allow(dead_code)] -#[deprecated(note = "use HTTP API instead")] -pub fn insert_account_with_redis_store( - node: &InterledgerNode, - account: AccountDetails, -) -> impl Future { - let redis_secret = generate_redis_secret(&node.secret_seed); - result(node.database_url.clone().into_connection_info()) - .map_err( - |err| error!(target: "interledger-node", "Invalid Redis connection details: {:?}", err), - ) - .and_then(move |redis_url| RedisStoreBuilder::new(redis_url, redis_secret).connect()) - .map_err(|err| error!(target: "interledger-node", "Error connecting to Redis: {:?}", err)) - .and_then(move |store| { - store - .insert_account(account) - .map_err(|_| error!(target: "interledger-node", "Unable to create account")) - .and_then(|account| { - 
debug!(target: "interledger-node", "Created account: {}", account.id()); - Ok(account.id()) - }) - }) -} diff --git a/crates/ilp-node/src/trace.rs b/crates/ilp-node/src/trace.rs index 911517926..6ba0e4985 100644 --- a/crates/ilp-node/src/trace.rs +++ b/crates/ilp-node/src/trace.rs @@ -2,7 +2,9 @@ use futures::Future; use interledger::{ ccp::{CcpRoutingAccount, RoutingRelation}, packet::{ErrorCode, Fulfill, Reject}, - service::{Account, IncomingRequest, IncomingService, OutgoingRequest, OutgoingService}, + service::{ + Account, IlpResult, IncomingRequest, IncomingService, OutgoingRequest, OutgoingService, + }, }; use std::str; use tracing::{debug_span, error_span, info, info_span}; @@ -12,10 +14,10 @@ use uuid::Uuid; /// Add tracing context for the incoming request. /// This adds minimal information for the ERROR log /// level and more information for the DEBUG level. -pub fn trace_incoming( +pub async fn trace_incoming( request: IncomingRequest, mut next: impl IncomingService, -) -> impl Future { +) -> IlpResult { let request_span = error_span!(target: "interledger-node", "incoming", request.id = %Uuid::new_v4(), @@ -37,19 +39,17 @@ pub fn trace_incoming( ); let _details_scope = details_span.enter(); - next.handle_request(request) - .then(trace_response) - .in_current_span() + next.handle_request(request).in_current_span().await } /// Add tracing context when the incoming request is /// being forwarded and turned into an outgoing request. /// This adds minimal information for the ERROR log /// level and more information for the DEBUG level. 
-pub fn trace_forwarding( +pub async fn trace_forwarding( request: OutgoingRequest, mut next: impl OutgoingService, -) -> impl Future { +) -> IlpResult { // Here we only include the outgoing details because this will be // inside the "incoming" span that includes the other details let request_span = error_span!(target: "interledger-node", @@ -66,16 +66,16 @@ pub fn trace_forwarding( ); let _details_scope = details_span.enter(); - next.send_request(request).in_current_span() + next.send_request(request).in_current_span().await } /// Add tracing context for the outgoing request (created by this node). /// This adds minimal information for the ERROR log /// level and more information for the DEBUG level. -pub fn trace_outgoing( +pub async fn trace_outgoing( request: OutgoingRequest, mut next: impl OutgoingService, -) -> impl Future { +) -> IlpResult { let request_span = error_span!(target: "interledger-node", "outgoing", request.id = %Uuid::new_v4(), @@ -100,16 +100,14 @@ pub fn trace_outgoing( // because there's a good chance they'll be offline let ignore_rejects = request.prepare.destination().scheme() == "peer" && request.to.routing_relation() == RoutingRelation::Child; - next.send_request(request) - .then(move |result| { - if let Err(ref err) = result { - if err.code() == ErrorCode::F02_UNREACHABLE && ignore_rejects { - return result; - } - } - trace_response(result) - }) - .in_current_span() + + let result = next.send_request(request).in_current_span().await; + if let Err(ref err) = result { + if err.code() == ErrorCode::F02_UNREACHABLE && ignore_rejects { + return result; + } + } + trace_response(result) } /// Log whether the response was a Fulfill or Reject diff --git a/crates/ilp-node/tests/redis/btp.rs b/crates/ilp-node/tests/redis/btp.rs index 2ccbaebe8..2e92ff33a 100644 --- a/crates/ilp-node/tests/redis/btp.rs +++ b/crates/ilp-node/tests/redis/btp.rs @@ -7,10 +7,10 @@ use tokio::runtime::Builder as RuntimeBuilder; use tracing::error_span; use 
tracing_futures::Instrument; -#[test] -fn two_nodes_btp() { +#[tokio::test] +async fn two_nodes_btp() { // Nodes 1 and 2 are peers, Node 2 is the parent of Node 2 - install_tracing_subscriber(); + // install_tracing_subscriber(); let context = TestContext::new(); // Each node will use its own DB within the redis instance @@ -24,11 +24,6 @@ fn two_nodes_btp() { let node_b_http = get_open_port(None); let node_b_settlement = get_open_port(None); - let mut runtime = RuntimeBuilder::new() - .panic_handler(|err| std::panic::resume_unwind(err)) - .build() - .unwrap(); - let alice_on_a = json!({ "username": "alice_on_a", "asset_code": "XYZ", @@ -87,117 +82,82 @@ fn two_nodes_btp() { })) .expect("Error creating node_b."); - // FIXME This should be fixed after SQL store is implemented. - // https://github.com/interledger-rs/interledger-rs/issues/464 - let alice_fut = create_account_on_node(node_a_http, alice_on_a, "admin") - .and_then(move |_| create_account_on_node(node_a_http, b_on_a, "admin")); + node_b.serve().await.unwrap(); // .instrument(error_span!(target: "interledger", "node_b")), + create_account_on_node(node_b_http, a_on_b, "admin") + .await + .unwrap(); + create_account_on_node(node_b_http, bob_on_b, "admin") + .await + .unwrap(); - runtime.spawn( - node_a - .serve() - .instrument(error_span!(target: "interledger", "node_a")), - ); + node_a.serve().await.unwrap(); // .instrument(error_span!(target: "interledger", "node_a")), + create_account_on_node(node_a_http, alice_on_a, "admin") + .await + .unwrap(); + create_account_on_node(node_a_http, b_on_a, "admin") + .await + .unwrap(); - let bob_fut = join_all(vec![ - create_account_on_node(node_b_http, a_on_b, "admin"), - create_account_on_node(node_b_http, bob_on_b, "admin"), - ]); + let get_balances = move || { + futures::future::join_all(vec![ + get_balance("alice_on_a", node_a_http, "admin"), + get_balance("bob_on_b", node_b_http, "admin"), + ]) + }; - runtime.spawn( - node_b - .serve() - 
.instrument(error_span!(target: "interledger", "node_b")), - ); + send_money_to_username( + node_a_http, + node_b_http, + 1000, + "bob_on_b", + "alice_on_a", + "default account holder", + ) + .await + .unwrap(); - runtime - .block_on( - // Wait for the nodes to spin up - delay(500) - .map_err(|_| panic!("Something strange happened when `delay`")) - .and_then(move |_| { - bob_fut - .and_then(|_| alice_fut) - .and_then(|_| delay(500).map_err(|_| panic!("delay error"))) - }) - .and_then(move |_| { - let send_1_to_2 = send_money_to_username( - node_a_http, - node_b_http, - 1000, - "bob_on_b", - "alice_on_a", - "default account holder", - ); - let send_2_to_1 = send_money_to_username( - node_b_http, - node_a_http, - 2000, - "alice_on_a", - "bob_on_b", - "default account holder", - ); + let ret = get_balances().await; + let ret: Vec<_> = ret.into_iter().map(|r| r.unwrap()).collect(); + assert_eq!( + ret[0], + BalanceData { + asset_code: "XYZ".to_owned(), + balance: -1e-6 + } + ); + assert_eq!( + ret[1], + BalanceData { + asset_code: "XYZ".to_owned(), + balance: 1e-6 + } + ); - let get_balances = move || { - futures::future::join_all(vec![ - get_balance("alice_on_a", node_a_http, "admin"), - get_balance("bob_on_b", node_b_http, "admin"), - ]) - }; + send_money_to_username( + node_b_http, + node_a_http, + 2000, + "alice_on_a", + "bob_on_b", + "default account holder", + ) + .await + .unwrap(); - send_1_to_2 - .map_err(|err| { - eprintln!("Error sending from node 1 to node 2: {:?}", err); - err - }) - .and_then(move |_| { - get_balances().and_then(move |ret| { - assert_eq!( - ret[0], - BalanceData { - asset_code: "XYZ".to_owned(), - balance: -1e-6 - } - ); - assert_eq!( - ret[1], - BalanceData { - asset_code: "XYZ".to_owned(), - balance: 1e-6 - } - ); - Ok(()) - }) - }) - .and_then(move |_| { - send_2_to_1.map_err(|err| { - eprintln!("Error sending from node 2 to node 1: {:?}", err); - err - }) - }) - .and_then(move |_| { - get_balances().and_then(move |ret| { - assert_eq!( 
- ret[0], - BalanceData { - asset_code: "XYZ".to_owned(), - balance: 1e-6 - } - ); - assert_eq!( - ret[1], - BalanceData { - asset_code: "XYZ".to_owned(), - balance: -1e-6 - } - ); - Ok(()) - }) - }) - }), - ) - .map_err(|err| { - eprintln!("Error executing tests: {:?}", err); - err - }) - .unwrap(); + let ret = get_balances().await; + let ret: Vec<_> = ret.into_iter().map(|r| r.unwrap()).collect(); + assert_eq!( + ret[0], + BalanceData { + asset_code: "XYZ".to_owned(), + balance: 1e-6 + } + ); + assert_eq!( + ret[1], + BalanceData { + asset_code: "XYZ".to_owned(), + balance: -1e-6 + } + ); } diff --git a/crates/ilp-node/tests/redis/exchange_rates.rs b/crates/ilp-node/tests/redis/exchange_rates.rs index 194ceff9a..744faedbc 100644 --- a/crates/ilp-node/tests/redis/exchange_rates.rs +++ b/crates/ilp-node/tests/redis/exchange_rates.rs @@ -2,25 +2,20 @@ use crate::redis_helpers::*; use crate::test_helpers::*; use futures::Future; use ilp_node::InterledgerNode; -use reqwest::r#async::Client; +use reqwest::Client; use secrecy::SecretString; use serde_json::{self, json, Value}; use std::env; +use std::time::Duration; use tokio::runtime::Builder as RuntimeBuilder; use tokio_retry::{strategy::FibonacciBackoff, Retry}; use tracing::error; use tracing_subscriber; -#[test] -fn coincap() { - install_tracing_subscriber(); +#[tokio::test] +async fn coincap() { let context = TestContext::new(); - let mut runtime = RuntimeBuilder::new() - .panic_handler(|err| std::panic::resume_unwind(err)) - .build() - .unwrap(); - let http_port = get_open_port(None); let node: InterledgerNode = serde_json::from_value(json!({ @@ -33,56 +28,38 @@ fn coincap() { "secret_seed": random_secret(), "route_broadcast_interval": 200, "exchange_rate": { - "poll_interval": 60000, + "poll_interval": 100, "provider": "coincap", }, })) .unwrap(); - runtime.spawn(node.serve()); + node.serve().await.unwrap(); - let get_rates = move || { - Client::new() - .get(&format!("http://localhost:{}/rates", http_port)) - 
.send() - .map_err(|_| panic!("Error getting rates")) - .and_then(|mut res| res.json().map_err(|_| panic!("Error getting body"))) - .and_then(|body: Value| { - if let Value::Object(obj) = body { - if obj.is_empty() { - error!("Rates are empty"); - return Err(()); - } - assert_eq!( - format!("{}", obj.get("USD").expect("Should have USD rate")).as_str(), - "1.0" - ); - assert!(obj.contains_key("EUR")); - assert!(obj.contains_key("JPY")); - assert!(obj.contains_key("BTC")); - assert!(obj.contains_key("ETH")); - assert!(obj.contains_key("XRP")); - } else { - panic!("Not an object"); - } + // Wait a few seconds so our node can poll the API + tokio::time::delay_for(Duration::from_millis(1000)).await; - Ok(()) - }) - }; - - runtime - .block_on( - delay(1000) - .map_err(|_| panic!("Something strange happened")) - .and_then(move |_| { - Retry::spawn(FibonacciBackoff::from_millis(1000).take(5), get_rates) - }), - ) + let ret = Client::new() + .get(&format!("http://localhost:{}/rates", http_port)) + .send() + .await .unwrap(); + let txt = ret.text().await.unwrap(); + let obj: Value = serde_json::from_str(&txt).unwrap(); + + assert_eq!( + format!("{}", obj.get("USD").expect("Should have USD rate")).as_str(), + "1.0" + ); + assert!(obj.get("EUR").is_some()); + assert!(obj.get("JPY").is_some()); + assert!(obj.get("BTC").is_some()); + assert!(obj.get("ETH").is_some()); + assert!(obj.get("XRP").is_some()); } // TODO can we disable this with conditional compilation? 
-#[test] -fn cryptocompare() { +#[tokio::test] +async fn cryptocompare() { tracing_subscriber::fmt::try_init().unwrap_or(()); let context = TestContext::new(); @@ -93,11 +70,6 @@ fn cryptocompare() { } let api_key = SecretString::new(api_key.unwrap()); - let mut runtime = RuntimeBuilder::new() - .panic_handler(|err| std::panic::resume_unwind(err)) - .build() - .unwrap(); - let http_port = get_open_port(Some(3011)); let node: InterledgerNode = serde_json::from_value(json!({ @@ -118,42 +90,21 @@ fn cryptocompare() { }, })) .unwrap(); - runtime.spawn(node.serve()); - - let get_rates = move || { - Client::new() - .get(&format!("http://localhost:{}/rates", http_port)) - .send() - .map_err(|_| panic!("Error getting rates")) - .and_then(|mut res| res.json().map_err(|_| panic!("Error getting body"))) - .and_then(|body: Value| { - if let Value::Object(obj) = body { - if obj.is_empty() { - error!("Rates are empty"); - return Err(()); - } - assert_eq!( - format!("{}", obj.get("USD").expect("Should have USD rate")).as_str(), - "1.0" - ); - assert!(obj.contains_key("BTC")); - assert!(obj.contains_key("ETH")); - assert!(obj.contains_key("XRP")); - } else { - panic!("Not an object"); - } + node.serve().await.unwrap(); - Ok(()) - }) - }; - - runtime - .block_on( - delay(1000) - .map_err(|_| panic!("Something strange happened")) - .and_then(move |_| { - Retry::spawn(FibonacciBackoff::from_millis(1000).take(5), get_rates) - }), - ) + let ret = Client::new() + .get(&format!("http://localhost:{}/rates", http_port)) + .send() + .await .unwrap(); + let txt = ret.text().await.unwrap(); + let obj: Value = serde_json::from_str(&txt).unwrap(); + + assert_eq!( + format!("{}", obj.get("USD").expect("Should have USD rate")).as_str(), + "1.0" + ); + assert!(obj.get("BTC").is_some()); + assert!(obj.get("ETH").is_some()); + assert!(obj.get("XRP").is_some()); } diff --git a/crates/ilp-node/tests/redis/redis_helpers.rs b/crates/ilp-node/tests/redis/redis_helpers.rs index 0c61f4475..b73011d25 100644 
--- a/crates/ilp-node/tests/redis/redis_helpers.rs +++ b/crates/ilp-node/tests/redis/redis_helpers.rs @@ -1,7 +1,7 @@ // Copied from https://github.com/mitsuhiko/redis-rs/blob/9a1777e8a90c82c315a481cdf66beb7d69e681a2/tests/support/mod.rs #![allow(dead_code)] -use futures::Future; +use futures::{Future, TryFutureExt}; use redis_crate::{self as redis, ConnectionAddr, ConnectionInfo, RedisError}; use std::env; use std::fs; @@ -9,7 +9,6 @@ use std::path::PathBuf; use std::process; use std::thread::sleep; use std::time::{Duration, Instant}; -use tokio::timer::Delay; #[allow(unused)] pub fn connection_info_to_string(info: ConnectionInfo) -> String { @@ -40,8 +39,8 @@ pub fn get_open_port(try_port: Option) -> u16 { panic!("Cannot find open port!"); } -pub fn delay(ms: u64) -> impl Future { - Delay::new(Instant::now() + Duration::from_millis(ms)).map_err(|err| panic!(err)) +pub async fn delay(ms: u64) { + tokio::time::delay_for(Duration::from_millis(ms)).await; } #[derive(PartialEq)] @@ -190,20 +189,21 @@ impl TestContext { self.client.get_connection().unwrap() } - pub fn async_connection( - &self, - ) -> impl Future { - self.client.get_async_connection() + pub async fn async_connection(&self) -> Result { + self.client + .get_async_connection() + .map_err(|err| panic!(err)) + .await } pub fn stop_server(&mut self) { self.server.stop(); } - pub fn shared_async_connection( + pub async fn shared_async_connection( &self, - ) -> impl Future { - self.client.get_shared_async_connection() + ) -> Result { + self.client.get_multiplexed_tokio_connection().await } } diff --git a/crates/ilp-node/tests/redis/redis_tests.rs b/crates/ilp-node/tests/redis/redis_tests.rs index 0ec67ff68..e51f52256 100644 --- a/crates/ilp-node/tests/redis/redis_tests.rs +++ b/crates/ilp-node/tests/redis/redis_tests.rs @@ -1,6 +1,7 @@ +#![type_length_limit = "6000000"] mod btp; mod exchange_rates; -mod prometheus; +// mod prometheus; mod three_nodes; mod redis_helpers; diff --git 
a/crates/ilp-node/tests/redis/test_helpers.rs b/crates/ilp-node/tests/redis/test_helpers.rs index 8f2b0e00a..26849a25c 100644 --- a/crates/ilp-node/tests/redis/test_helpers.rs +++ b/crates/ilp-node/tests/redis/test_helpers.rs @@ -1,4 +1,4 @@ -use futures::{stream::Stream, Future}; +use futures::{stream::Stream, Future, TryFutureExt}; use hex; use interledger::stream::StreamDelivery; use interledger::{packet::Address, service::Account as AccountTrait, store::account::Account}; @@ -8,16 +8,16 @@ use serde_json::json; use std::collections::HashMap; use std::fmt::{Debug, Display}; use std::str; -use tracing_subscriber; +// use tracing_subscriber; use uuid::Uuid; -pub fn install_tracing_subscriber() { - tracing_subscriber::fmt::Subscriber::builder() - .with_timer(tracing_subscriber::fmt::time::ChronoUtc::rfc3339()) - .with_env_filter(tracing_subscriber::filter::EnvFilter::from_default_env()) - .try_init() - .unwrap_or(()); -} +// pub fn install_tracing_subscriber() { +// tracing_subscriber::fmt::Subscriber::builder() +// .with_timer(tracing_subscriber::fmt::time::ChronoUtc::rfc3339()) +// .with_env_filter(tracing_subscriber::filter::EnvFilter::from_default_env()) +// .try_init() +// .unwrap_or(()); +// } #[allow(unused)] pub fn random_secret() -> String { @@ -33,56 +33,58 @@ pub struct BalanceData { } #[allow(unused)] -pub fn create_account_on_node( +pub async fn create_account_on_node( api_port: u16, data: T, auth: &str, -) -> impl Future { - let client = reqwest::r#async::Client::new(); - client +) -> Result { + let client = reqwest::Client::new(); + let res = client .post(&format!("http://localhost:{}/accounts", api_port)) .header("Content-Type", "application/json") .header("Authorization", format!("Bearer {}", auth)) .json(&data) .send() - .and_then(move |res| res.error_for_status()) - .and_then(move |res| res.into_body().concat2()) - .map_err(|err| { - eprintln!("Error creating account on node: {:?}", err); - }) - .and_then(move |chunk| 
Ok(str::from_utf8(&chunk).unwrap().to_string())) + .map_err(|_| ()) + .await?; + + let res = res.error_for_status().map_err(|_| ())?; + + Ok(res.json::().map_err(|_| ()).await.unwrap()) } #[allow(unused)] -pub fn create_account_on_engine( +pub async fn create_account_on_engine( engine_port: u16, account_id: T, -) -> impl Future { - let client = reqwest::r#async::Client::new(); - client +) -> Result { + let client = reqwest::Client::new(); + let res = client .post(&format!("http://localhost:{}/accounts", engine_port)) .header("Content-Type", "application/json") .json(&json!({ "id": account_id })) .send() - .and_then(move |res| res.error_for_status()) - .and_then(move |res| res.into_body().concat2()) - .map_err(|err| { - eprintln!("Error creating account: {:?}", err); - }) - .and_then(move |chunk| Ok(str::from_utf8(&chunk).unwrap().to_string())) + .map_err(|_| ()) + .await?; + + let res = res.error_for_status().map_err(|_| ())?; + + let data: bytes05::Bytes = res.bytes().map_err(|_| ()).await?; + + Ok(str::from_utf8(&data).unwrap().to_string()) } #[allow(unused)] -pub fn send_money_to_username( +pub async fn send_money_to_username( from_port: u16, to_port: u16, amount: u64, to_username: T, from_username: &str, from_auth: &str, -) -> impl Future { - let client = reqwest::r#async::Client::new(); - client +) -> Result { + let client = reqwest::Client::new(); + let res = client .post(&format!( "http://localhost:{}/accounts/{}/payments", from_port, from_username @@ -93,36 +95,27 @@ pub fn send_money_to_username( "source_amount": amount, })) .send() - .and_then(|res| res.error_for_status()) - .and_then(|res| res.into_body().concat2()) - .map_err(|err| { - eprintln!("Error sending SPSP payment: {:?}", err); - }) - .and_then(move |body| { - let ret: StreamDelivery = serde_json::from_slice(&body).unwrap(); - Ok(ret) - }) + .map_err(|_| ()) + .await?; + + let res = res.error_for_status().map_err(|_| ())?; + Ok(res.json::().await.unwrap()) } #[allow(unused)] -pub fn 
get_all_accounts( - node_port: u16, - admin_token: &str, -) -> impl Future, Error = ()> { - let client = reqwest::r#async::Client::new(); - client +pub async fn get_all_accounts(node_port: u16, admin_token: &str) -> Result, ()> { + let client = reqwest::Client::new(); + let res = client .get(&format!("http://localhost:{}/accounts", node_port)) .header("Authorization", format!("Bearer {}", admin_token)) .send() - .and_then(|res| res.error_for_status()) - .and_then(|res| res.into_body().concat2()) - .map_err(|err| { - eprintln!("Error getting account data: {:?}", err); - }) - .and_then(move |body| { - let ret: Vec = serde_json::from_slice(&body).unwrap(); - Ok(ret) - }) + .map_err(|_| ()) + .await?; + + let res = res.error_for_status().map_err(|_| ())?; + let body: bytes05::Bytes = res.bytes().map_err(|_| ()).await?; + let ret: Vec = serde_json::from_slice(&body).unwrap(); + Ok(ret) } #[allow(unused)] @@ -135,26 +128,24 @@ pub fn accounts_to_ids(accounts: Vec) -> HashMap { } #[allow(unused)] -pub fn get_balance( +pub async fn get_balance( account_id: T, node_port: u16, admin_token: &str, -) -> impl Future { - let client = reqwest::r#async::Client::new(); - client +) -> Result { + let client = reqwest::Client::new(); + let res = client .get(&format!( "http://localhost:{}/accounts/{}/balance", node_port, account_id )) .header("Authorization", format!("Bearer {}", admin_token)) .send() - .and_then(|res| res.error_for_status()) - .and_then(|res| res.into_body().concat2()) - .map_err(|err| { - eprintln!("Error getting account data: {:?}", err); - }) - .and_then(|body| { - let ret: BalanceData = serde_json::from_slice(&body).unwrap(); - Ok(ret) - }) + .map_err(|_| ()) + .await?; + + let res = res.error_for_status().map_err(|_| ())?; + let body: bytes05::Bytes = res.bytes().map_err(|_| ()).await?; + let ret: BalanceData = serde_json::from_slice(&body).unwrap(); + Ok(ret) } diff --git a/crates/ilp-node/tests/redis/three_nodes.rs b/crates/ilp-node/tests/redis/three_nodes.rs 
index caf90cb8d..d7798e744 100644 --- a/crates/ilp-node/tests/redis/three_nodes.rs +++ b/crates/ilp-node/tests/redis/three_nodes.rs @@ -1,6 +1,6 @@ use crate::redis_helpers::*; use crate::test_helpers::*; -use futures::{future::join_all, stream::*, sync::mpsc, Future}; +use futures::{future::join_all, stream::*, Future}; use ilp_node::InterledgerNode; use interledger::packet::Address; use interledger::stream::StreamDelivery; @@ -8,14 +8,14 @@ use serde_json::json; use std::str::FromStr; use tokio::runtime::Builder as RuntimeBuilder; use tracing::{debug, error_span}; -use tracing_futures::Instrument; +// use tracing_futures::Instrument; -const LOG_TARGET: &str = "interledger-tests-three-nodes"; +// const LOG_TARGET: &str = "interledger-tests-three-nodes"; -#[test] -fn three_nodes() { +#[tokio::test] +async fn three_nodes() { // Nodes 1 and 2 are peers, Node 2 is the parent of Node 3 - install_tracing_subscriber(); + // install_tracing_subscriber(); let context = TestContext::new(); // Each node will use its own DB within the redis instance @@ -32,12 +32,6 @@ fn three_nodes() { let node2_settlement = get_open_port(None); let node3_http = get_open_port(None); let node3_settlement = get_open_port(None); - - let mut runtime = RuntimeBuilder::new() - .panic_handler(|err| std::panic::resume_unwind(err)) - .build() - .unwrap(); - let alice_on_alice = json!({ "ilp_address": "example.alice", "username": "alice_on_a", @@ -138,213 +132,154 @@ fn three_nodes() { })) .expect("Error creating node3."); - let (finish_sender, finish_receiver) = mpsc::channel(0); + node1.serve().await.unwrap(); // .instrument(error_span!(target: "interledger", "node1")).await.unwrap(); + create_account_on_node(node1_http, alice_on_alice, "admin") + .await + .unwrap(); + create_account_on_node(node1_http, bob_on_alice, "admin") + .await + .unwrap(); - let alice_fut = join_all(vec![ - create_account_on_node(node1_http, alice_on_alice, "admin"), - create_account_on_node(node1_http, bob_on_alice, 
"admin"), - ]); + node2.serve().await.unwrap(); // .instrument(error_span!(target: "interledger", "node2")).await.unwrap(); + create_account_on_node(node2_http, alice_on_bob, "admin") + .await + .unwrap(); + create_account_on_node(node2_http, charlie_on_bob, "admin") + .await + .unwrap(); + // Also set exchange rates + let client = reqwest::Client::new(); + client + .put(&format!("http://localhost:{}/rates", node2_http)) + .header("Authorization", "Bearer admin") + .json(&json!({"ABC": 1, "XYZ": 2})) + .send() + .await + .unwrap(); - let mut node1_finish_sender = finish_sender.clone(); - runtime.spawn( - node1 - .serve() - .and_then(move |_| alice_fut) - .and_then(move |_| { - node1_finish_sender - .try_send(1) - .expect("Could not send message from node_1"); - Ok(()) - }) - .instrument(error_span!(target: "interledger", "node1")), - ); + node3.serve().await.unwrap(); // .instrument(error_span!(target: "interledger", "node3")).await.unwrap(); + create_account_on_node(node3_http, charlie_on_charlie, "admin") + .await + .unwrap(); + create_account_on_node(node3_http, bob_on_charlie, "admin") + .await + .unwrap(); - let bob_fut = join_all(vec![ - create_account_on_node(node2_http, alice_on_bob, "admin"), - create_account_on_node(node2_http, charlie_on_bob, "admin"), - ]); + delay(1000).await; - let mut node2_finish_sender = finish_sender; - runtime.spawn( - node2 - .serve() - .and_then(move |_| bob_fut) - .and_then(move |_| { - let client = reqwest::r#async::Client::new(); - client - .put(&format!("http://localhost:{}/rates", node2_http)) - .header("Authorization", "Bearer admin") - .json(&json!({"ABC": 1, "XYZ": 2})) - .send() - .map_err(|err| panic!(err)) - .and_then(move |res| { - res.error_for_status() - .expect("Error setting exchange rates"); - node2_finish_sender - .try_send(2) - .expect("Could not send message from node_2"); - Ok(()) - }) - }) - .instrument(error_span!(target: "interledger", "node2")), - ); + let get_balances = move || { + 
futures::future::join_all(vec![ + get_balance("alice_on_a", node1_http, "admin"), + get_balance("charlie_on_b", node2_http, "admin"), + get_balance("charlie_on_c", node3_http, "admin"), + ]) + }; - // We execute the futures one after the other to avoid race conditions where - // Bob gets added before the node's main account - let charlie_fut = create_account_on_node(node3_http, charlie_on_charlie, "admin") - .and_then(move |_| create_account_on_node(node3_http, bob_on_charlie, "admin")); + // Node 1 sends 1000 to Node 3. However, Node1's scale is 9, + // while Node 3's scale is 6. This means that Node 3 will + // see 1000x less. In addition, the conversion rate is 2:1 + // for 3's asset, so he will receive 2 total. + let receipt = send_money_to_username( + node1_http, + node3_http, + 1000, + "charlie_on_c", + "alice_on_a", + "default account holder", + ) + .await + .unwrap(); - runtime - .block_on( - node3 - .serve() - .and_then(move |_| finish_receiver.collect()) - .and_then(move |messages| { - debug!( - target: LOG_TARGET, - "Received finish messages: {:?}", messages - ); - charlie_fut - }) - .instrument(error_span!(target: "interledger", "node3")) - // we wait some time after the node is up so that we get the - // necessary routes from bob - .and_then(move |_| { - delay(1000).map_err(|_| panic!("Something strange happened when `delay`")) - }) - .and_then(move |_| { - let send_1_to_3 = send_money_to_username( - node1_http, - node3_http, - 1000, - "charlie_on_c", - "alice_on_a", - "default account holder", - ); - let send_3_to_1 = send_money_to_username( - node3_http, - node1_http, - 1000, - "alice_on_a", - "charlie_on_c", - "default account holder", - ); + assert_eq!( + receipt.from, + Address::from_str("example.alice").unwrap(), + "Payment receipt incorrect (1)" + ); + assert!(receipt + .to + .to_string() + .starts_with("example.bob.charlie_on_b.charlie_on_c.")); + assert_eq!(receipt.sent_asset_code, "XYZ"); + assert_eq!(receipt.sent_asset_scale, 9); + 
assert_eq!(receipt.sent_amount, 1000); + assert_eq!(receipt.delivered_asset_code.unwrap(), "ABC"); + assert_eq!(receipt.delivered_amount, 2); + assert_eq!(receipt.delivered_asset_scale.unwrap(), 6); + let ret = get_balances().await; + let ret: Vec<_> = ret.into_iter().map(|r| r.unwrap()).collect(); + // -1000 divided by asset scale 9 + assert_eq!( + ret[0], + BalanceData { + asset_code: "XYZ".to_owned(), + balance: -1e-6 + } + ); + // 2 divided by asset scale 6 + assert_eq!( + ret[1], + BalanceData { + asset_code: "ABC".to_owned(), + balance: 2e-6 + } + ); + // 2 divided by asset scale 6 + assert_eq!( + ret[2], + BalanceData { + asset_code: "ABC".to_owned(), + balance: 2e-6 + } + ); - let get_balances = move || { - futures::future::join_all(vec![ - get_balance("alice_on_a", node1_http, "admin"), - get_balance("charlie_on_b", node2_http, "admin"), - get_balance("charlie_on_c", node3_http, "admin"), - ]) - }; + // Charlie sends to Alice + let receipt = send_money_to_username( + node3_http, + node1_http, + 1000, + "alice_on_a", + "charlie_on_c", + "default account holder", + ) + .await + .unwrap(); - // Node 1 sends 1000 to Node 3. However, Node1's scale is 9, - // while Node 3's scale is 6. This means that Node 3 will - // see 1000x less. In addition, the conversion rate is 2:1 - // for 3's asset, so he will receive 2 total. 
- send_1_to_3 - .map_err(|err| { - eprintln!("Error sending from node 1 to node 3: {:?}", err); - err - }) - .and_then(move |receipt: StreamDelivery| { - debug!(target: LOG_TARGET, "send_1_to_3 receipt: {:?}", receipt); - assert_eq!( - receipt.from, - Address::from_str("example.alice").unwrap(), - "Payment receipt incorrect (1)" - ); - assert!(receipt - .to - .to_string() - .starts_with("example.bob.charlie_on_b.charlie_on_c.")); - assert_eq!(receipt.sent_asset_code, "XYZ"); - assert_eq!(receipt.sent_asset_scale, 9); - assert_eq!(receipt.sent_amount, 1000); - assert_eq!(receipt.delivered_asset_code.unwrap(), "ABC"); - assert_eq!(receipt.delivered_amount, 2); - assert_eq!(receipt.delivered_asset_scale.unwrap(), 6); - get_balances().and_then(move |ret| { - // -1000 divided by asset scale 9 - assert_eq!( - ret[0], - BalanceData { - asset_code: "XYZ".to_owned(), - balance: -1e-6 - } - ); - // 2 divided by asset scale 6 - assert_eq!( - ret[1], - BalanceData { - asset_code: "ABC".to_owned(), - balance: 2e-6 - } - ); - // 2 divided by asset scale 6 - assert_eq!( - ret[2], - BalanceData { - asset_code: "ABC".to_owned(), - balance: 2e-6 - } - ); - Ok(()) - }) - }) - .and_then(move |_| { - send_3_to_1.map_err(|err| { - eprintln!("Error sending from node 3 to node 1: {:?}", err); - err - }) - }) - .and_then(move |receipt| { - debug!(target: LOG_TARGET, "send_3_to_1 receipt: {:?}", receipt); - assert_eq!( - receipt.from, - Address::from_str("example.bob.charlie_on_b.charlie_on_c").unwrap(), - "Payment receipt incorrect (2)" - ); - assert!(receipt.to.to_string().starts_with("example.alice")); - assert_eq!(receipt.sent_asset_code, "ABC"); - assert_eq!(receipt.sent_asset_scale, 6); - assert_eq!(receipt.sent_amount, 1000); - assert_eq!(receipt.delivered_asset_code.unwrap(), "XYZ"); - assert_eq!(receipt.delivered_amount, 500_000); - assert_eq!(receipt.delivered_asset_scale.unwrap(), 9); - get_balances().and_then(move |ret| { - // 499,000 divided by asset scale 9 - assert_eq!( - 
ret[0], - BalanceData { - asset_code: "XYZ".to_owned(), - balance: 499e-6 - } - ); - // -998 divided by asset scale 6 - assert_eq!( - ret[1], - BalanceData { - asset_code: "ABC".to_owned(), - balance: -998e-6 - } - ); - // -998 divided by asset scale 6 - assert_eq!( - ret[2], - BalanceData { - asset_code: "ABC".to_owned(), - balance: -998e-6 - } - ); - Ok(()) - }) - }) - }), - ) - .map_err(|err| { - eprintln!("Error executing tests: {:?}", err); - err - }) - .unwrap(); + assert_eq!( + receipt.from, + Address::from_str("example.bob.charlie_on_b.charlie_on_c").unwrap(), + "Payment receipt incorrect (2)" + ); + assert!(receipt.to.to_string().starts_with("example.alice")); + assert_eq!(receipt.sent_asset_code, "ABC"); + assert_eq!(receipt.sent_asset_scale, 6); + assert_eq!(receipt.sent_amount, 1000); + assert_eq!(receipt.delivered_asset_code.unwrap(), "XYZ"); + assert_eq!(receipt.delivered_amount, 500_000); + assert_eq!(receipt.delivered_asset_scale.unwrap(), 9); + let ret = get_balances().await; + let ret: Vec<_> = ret.into_iter().map(|r| r.unwrap()).collect(); + // 499,000 divided by asset scale 9 + assert_eq!( + ret[0], + BalanceData { + asset_code: "XYZ".to_owned(), + balance: 499e-6 + } + ); + // -998 divided by asset scale 6 + assert_eq!( + ret[1], + BalanceData { + asset_code: "ABC".to_owned(), + balance: -998e-6 + } + ); + // -998 divided by asset scale 6 + assert_eq!( + ret[2], + BalanceData { + asset_code: "ABC".to_owned(), + balance: -998e-6 + } + ); } diff --git a/crates/interledger-api/Cargo.toml b/crates/interledger-api/Cargo.toml index 6d32c8850..bce85afa2 100644 --- a/crates/interledger-api/Cargo.toml +++ b/crates/interledger-api/Cargo.toml @@ -9,9 +9,10 @@ repository = "https://github.com/interledger-rs/interledger-rs" [dependencies] bytes = { version = "0.4.12", default-features = false } -futures = { version = "0.1.29", default-features = false } -futures-retry = { version = "0.3.3", default-features = false } -http = { version = "0.1.18", 
default-features = false } +bytes05 = { package = "bytes", version = "0.5", default-features = false } +futures = { version = "0.3.1", default-features = false } +futures-retry = { version = "0.4", default-features = false } +http = { version = "0.2", default-features = false } interledger-packet = { path = "../interledger-packet", version = "^0.4.0", default-features = false } interledger-http = { path = "../interledger-http", version = "^0.4.0", default-features = false } interledger-ildcp = { path = "../interledger-ildcp", version = "^0.4.0", default-features = false } @@ -26,13 +27,19 @@ interledger-btp = { path = "../interledger-btp", version = "^0.4.0", default-fea log = { version = "0.4.8", default-features = false } serde = { version = "1.0.101", default-features = false, features = ["derive"] } serde_json = { version = "1.0.41", default-features = false } -serde_path_to_error = { version = "0.1", default-features = false } -reqwest = { version = "0.9.22", default-features = false, features = ["default-tls"] } +serde_path_to_error = { version = "0.1.2", default-features = false } +reqwest = { version = "0.10", default-features = false, features = ["default-tls", "json"] } url = { version = "2.1.0", default-features = false, features = ["serde"] } uuid = { version = "0.8.1", default-features = false} -warp = { version = "0.1.20", default-features = false } +# warp = { version = "0.1.20", default-features = false } +warp = { git = "https://github.com/seanmonstar/warp.git" } secrecy = { version = "0.5.2", default-features = false, features = ["serde"] } lazy_static = "1.4.0" +async-trait = "0.1.22" +mime = "0.3.16" + +[dev-dependencies] +tokio = { version = "0.2.9", features = ["rt-core", "macros"] } [badges] circle-ci = { repository = "interledger-rs/interledger-rs" } diff --git a/crates/interledger-api/src/http_retry.rs b/crates/interledger-api/src/http_retry.rs index 8fb35f03f..f74b071d9 100644 --- a/crates/interledger-api/src/http_retry.rs +++ 
b/crates/interledger-api/src/http_retry.rs @@ -1,9 +1,9 @@ // Adapted from the futures-retry example: https://gitlab.com/mexus/futures-retry/blob/master/examples/tcp-client-complex.rs -use futures::future::Future; +use futures::TryFutureExt; use futures_retry::{ErrorHandler, FutureRetry, RetryPolicy}; use http::StatusCode; use log::trace; -use reqwest::r#async::Client as HttpClient; +use reqwest::Client as HttpClient; use serde_json::json; use std::{default::Default, fmt::Display, time::Duration}; use url::Url; @@ -28,11 +28,11 @@ impl Client { } } - pub fn create_engine_account( + pub async fn create_engine_account( &self, engine_url: Url, id: T, - ) -> impl Future { + ) -> Result { let mut se_url = engine_url.clone(); se_url .path_segments_mut() @@ -46,26 +46,27 @@ impl Client { // The actual HTTP request which gets made to the engine let client = self.client.clone(); - let create_settlement_engine_account = move || { - client - .post(se_url.as_ref()) - .json(&json!({"id" : id.to_string()})) - .send() - .and_then(move |response| { - // If the account is not found on the peer's connector, the - // retry logic will not get triggered. When the counterparty - // tries to add the account, they will complete the handshake. - Ok(response.status()) - }) - }; - FutureRetry::new( - create_settlement_engine_account, + // If the account is not found on the peer's connector, the + // retry logic will not get triggered. When the counterparty + // tries to add the account, they will complete the handshake. 
+ + let msg = format!("[Engine: {}, Account: {}]", engine_url, id); + let res = FutureRetry::new( + move || { + client + .post(se_url.as_ref()) + .json(&json!({"id" : id.to_string()})) + .send() + .map_ok(move |response| response.status()) + }, IoHandler::new( self.max_retries, - format!("[Engine: {}, Account: {}]", engine_url, id), + msg, ), ) + .await?; + Ok(res) } } @@ -111,12 +112,22 @@ where self.max_attempts ); - if e.is_client_error() { - // do not retry 4xx - RetryPolicy::ForwardError(e) - } else if e.is_timeout() || e.is_server_error() { - // Retry timeouts and 5xx every 5 seconds + // TODO: Should we make this policy more sophisticated? + + // Retry timeouts every 5s + if e.is_timeout() { RetryPolicy::WaitRetry(Duration::from_secs(5)) + } else if let Some(status) = e.status() { + if status.is_client_error() { + // do not retry 4xx + RetryPolicy::ForwardError(e) + } else if status.is_server_error() { + // Retry 5xx every 5 seconds + RetryPolicy::WaitRetry(Duration::from_secs(5)) + } else { + // Otherwise just retry every second + RetryPolicy::WaitRetry(Duration::from_secs(1)) + } } else { // Retry other errors slightly more frequently since they may be // related to the engine not having started yet diff --git a/crates/interledger-api/src/lib.rs b/crates/interledger-api/src/lib.rs index 7e85a47af..9fa71efef 100644 --- a/crates/interledger-api/src/lib.rs +++ b/crates/interledger-api/src/lib.rs @@ -1,5 +1,7 @@ +use async_trait::async_trait; use bytes::Bytes; -use futures::Future; +use interledger_btp::{BtpAccount, BtpOutgoingService}; +use interledger_ccp::CcpRoutingAccount; use interledger_http::{HttpAccount, HttpStore}; use interledger_packet::Address; use interledger_router::RouterStore; @@ -7,17 +9,15 @@ use interledger_service::{Account, AddressStore, IncomingService, OutgoingServic use interledger_service_util::{BalanceStore, ExchangeRateStore}; use interledger_settlement::core::types::{SettlementAccount, SettlementStore}; use 
interledger_stream::StreamNotificationsStore; +use secrecy::SecretString; use serde::{de, Deserialize, Serialize}; use std::{boxed::*, collections::HashMap, fmt::Display, net::SocketAddr, str::FromStr}; +use url::Url; use uuid::Uuid; use warp::{self, Filter}; -mod routes; -use interledger_btp::{BtpAccount, BtpOutgoingService}; -use interledger_ccp::CcpRoutingAccount; -use secrecy::SecretString; -use url::Url; pub(crate) mod http_retry; +mod routes; // This enum and the following functions are used to allow clients to send either // numbers or strings and have them be properly deserialized into the appropriate @@ -71,52 +71,39 @@ where // One argument against doing that is that the NodeStore allows admin-only // modifications to the values, whereas many of the other traits mostly // read from the configured values. +#[async_trait] pub trait NodeStore: AddressStore + Clone + Send + Sync + 'static { type Account: Account; - fn insert_account( - &self, - account: AccountDetails, - ) -> Box + Send>; + async fn insert_account(&self, account: AccountDetails) -> Result; - fn delete_account(&self, id: Uuid) -> Box + Send>; + async fn delete_account(&self, id: Uuid) -> Result; - fn update_account( - &self, - id: Uuid, - account: AccountDetails, - ) -> Box + Send>; + async fn update_account(&self, id: Uuid, account: AccountDetails) -> Result; - fn modify_account_settings( + async fn modify_account_settings( &self, id: Uuid, settings: AccountSettings, - ) -> Box + Send>; + ) -> Result; // TODO limit the number of results and page through them - fn get_all_accounts(&self) -> Box, Error = ()> + Send>; + async fn get_all_accounts(&self) -> Result, ()>; - fn set_static_routes(&self, routes: R) -> Box + Send> + async fn set_static_routes(&self, routes: R) -> Result<(), ()> where - R: IntoIterator; + R: IntoIterator + Send + 'async_trait; - fn set_static_route( - &self, - prefix: String, - account_id: Uuid, - ) -> Box + Send>; + async fn set_static_route(&self, prefix: String, 
account_id: Uuid) -> Result<(), ()>; - fn set_default_route(&self, account_id: Uuid) -> Box + Send>; + async fn set_default_route(&self, account_id: Uuid) -> Result<(), ()>; - fn set_settlement_engines( + async fn set_settlement_engines( &self, - asset_to_url_map: impl IntoIterator, - ) -> Box + Send>; + asset_to_url_map: impl IntoIterator + Send + 'async_trait, + ) -> Result<(), ()>; - fn get_asset_settlement_engine( - &self, - asset_code: &str, - ) -> Box, Error = ()> + Send>; + async fn get_asset_settlement_engine(&self, asset_code: &str) -> Result, ()>; } #[derive(Debug, Clone, Serialize, Deserialize)] @@ -281,8 +268,8 @@ where .boxed() } - pub fn bind(self, addr: SocketAddr) -> impl Future { - warp::serve(self.into_warp_filter()).bind(addr) + pub async fn bind(self, addr: SocketAddr) { + warp::serve(self.into_warp_filter()).bind(addr).await } } diff --git a/crates/interledger-api/src/routes/accounts.rs b/crates/interledger-api/src/routes/accounts.rs index bca63e768..06575acbc 100644 --- a/crates/interledger-api/src/routes/accounts.rs +++ b/crates/interledger-api/src/routes/accounts.rs @@ -1,12 +1,9 @@ use crate::{http_retry::Client, number_or_string, AccountDetails, AccountSettings, NodeStore}; use bytes::Bytes; -use futures::{ - future::{err, join_all, ok, Either}, - Future, Stream, -}; +use futures::{future::join_all, TryFutureExt}; use interledger_btp::{connect_to_service_account, BtpAccount, BtpOutgoingService}; use interledger_ccp::{CcpRoutingAccount, Mode, RouteControlRequest, RoutingRelation}; -use interledger_http::{deserialize_json, error::*, HttpAccount, HttpStore}; +use interledger_http::{error::*, HttpAccount, HttpStore}; use interledger_ildcp::IldcpRequest; use interledger_ildcp::IldcpResponse; use interledger_router::RouterStore; @@ -16,14 +13,14 @@ use interledger_service::{ use interledger_service_util::{BalanceStore, ExchangeRateStore}; use interledger_settlement::core::types::SettlementAccount; use interledger_spsp::{pay, SpspResponder}; -use 
interledger_stream::{PaymentNotification, StreamNotificationsStore}; +use interledger_stream::StreamNotificationsStore; use log::{debug, error, trace}; use secrecy::{ExposeSecret, SecretString}; use serde::{Deserialize, Serialize}; use serde_json::json; use std::convert::TryFrom; use uuid::Uuid; -use warp::{self, Filter, Rejection}; +use warp::{self, reply::Json, Filter, Rejection}; pub const BEARER_TOKEN_START: usize = 7; @@ -64,402 +61,405 @@ where + 'static, { // TODO can we make any of the Filters const or put them in lazy_static? + let with_store = warp::any().map(move || store.clone()).boxed(); + let with_incoming_handler = warp::any().map(move || incoming_handler.clone()).boxed(); // Helper filters let admin_auth_header = format!("Bearer {}", admin_api_token); + let admin_auth_header_clone = admin_auth_header.clone(); + let with_admin_auth_header = warp::any().map(move || admin_auth_header.clone()).boxed(); let admin_only = warp::header::("authorization") - .and_then( - move |authorization: SecretString| -> Result<(), Rejection> { + .and_then(move |authorization: SecretString| { + let admin_auth_header = admin_auth_header_clone.clone(); + async move { if authorization.expose_secret() == &admin_auth_header { - Ok(()) + Ok::<(), Rejection>(()) } else { - Err(ApiError::unauthorized().into()) + Err(Rejection::from(ApiError::unauthorized())) } - }, - ) + } + }) // This call makes it so we do not pass on a () value on // success to the next filter, it just gets rid of it .untuple_one() .boxed(); - let with_store = warp::any().map(move || store.clone()).boxed(); - let admin_auth_header = format!("Bearer {}", admin_api_token); - let with_admin_auth_header = warp::any().map(move || admin_auth_header.clone()).boxed(); - let with_incoming_handler = warp::any().map(move || incoming_handler.clone()).boxed(); - // Note that the following path filters should be applied before others - // (such as method and authentication) to avoid triggering unexpected errors for requests 
- // that do not match this path. - let accounts = warp::path("accounts"); - let accounts_index = accounts.and(warp::path::end()); - // This is required when using `admin_or_authorized_user_only` or `authorized_user_only` filter. - // Sets Username from path into ext for context. - let account_username = accounts - .and(warp::path::param2::()) - .and_then(|username: Username| -> Result<_, Rejection> { - warp::filters::ext::set(username); - Ok(()) - }) - .untuple_one() - .boxed(); - let account_username_to_id = accounts - .and(warp::path::param2::()) + + // Converts an account username to an account id or errors out + let account_username_to_id = warp::path::param::() .and(with_store.clone()) - .and_then(|username: Username, store: S| { - store - .get_account_id_from_username(&username) - .map_err::<_, Rejection>(move |_| { - // TODO differentiate between server error and not found - error!("Error getting account id from username: {}", username); - ApiError::account_not_found().into() - }) + .and_then(move |username: Username, store: S| { + async move { + store + .get_account_id_from_username(&username) + .map_err(|_| { + // TODO differentiate between server error and not found + error!("Error getting account id from username: {}", username); + Rejection::from(ApiError::account_not_found()) + }) + .await + } }) .boxed(); - // Receives parameters which were prepared by `account_username` and - // considers the request is eligible to be processed or not, checking the auth. - // Why we separate `account_username` and this filter is that - // we want to check whether the sender is eligible to access this path but at the same time, - // we don't want to spawn any `Rejection`s at `account_username`. - // At the point of `account_username`, there might be some other - // remaining path filters. So we have to process those first, not to spawn errors of - // unauthorized that the the request actually should not cause. 
- // This function needs parameters which can be prepared by `account_username`. - let admin_or_authorized_user_only = warp::filters::ext::get::() + let is_authorized_user = move |store: S, path_username: Username, auth_string: SecretString| { + async move { + if auth_string.expose_secret().len() < BEARER_TOKEN_START { + return Err(Rejection::from(ApiError::bad_request())); + } + + // Try getting the account from the store + let authorized_account = store + .get_account_from_http_auth( + &path_username, + &auth_string.expose_secret()[BEARER_TOKEN_START..], + ) + .map_err(|_| Rejection::from(ApiError::unauthorized())) + .await?; + + // Only return the account if the provided username matched the fetched one + // This maybe is redundant? + if &path_username == authorized_account.username() { + Ok(authorized_account) + } else { + Err(ApiError::unauthorized().into()) + } + } + }; + + // Checks if the account is an admin or if they have provided a valid password + let admin_or_authorized_user_only = warp::path::param::() .and(warp::header::("authorization")) .and(with_store.clone()) - .and(with_admin_auth_header.clone()) + .and(with_admin_auth_header) .and_then( - |path_username: Username, - auth_string: SecretString, - store: S, - admin_auth_header: String| { - if auth_string.expose_secret().len() < BEARER_TOKEN_START { - return Either::A(err(ApiError::bad_request().into())); + move |path_username: Username, + auth_string: SecretString, + store: S, + admin_auth_header: String| { + async move { + // If it's an admin, there's no need for more checks + if auth_string.expose_secret() == &admin_auth_header { + let account_id = store + .get_account_id_from_username(&path_username) + .map_err(|_| { + // TODO differentiate between server error and not found + error!("Error getting account id from username: {}", path_username); + Rejection::from(ApiError::account_not_found()) + }) + .await?; + return Ok(account_id); + } + let account = is_authorized_user(store, path_username, 
auth_string).await?; + Ok::(account.id()) } - Either::B(store.get_account_id_from_username(&path_username).then( - move |account_id: Result| { - if account_id.is_err() { - return Either::A(err::( - ApiError::account_not_found().into(), - )); - } - let account_id = account_id.unwrap(); - if auth_string.expose_secret() == &admin_auth_header { - return Either::A(ok(account_id)); - } - Either::B( - store - .get_account_from_http_auth( - &path_username, - &auth_string.expose_secret()[BEARER_TOKEN_START..], - ) - .then(move |authorized_account: Result| { - if authorized_account.is_err() { - return err(ApiError::unauthorized().into()); - } - let authorized_account = authorized_account.unwrap(); - if &path_username == authorized_account.username() { - ok(authorized_account.id()) - } else { - err(ApiError::unauthorized().into()) - } - }), - ) - }, - )) }, ) .boxed(); - // The same structure as `admin_or_authorized_user_only`. - // This function needs parameters which can be prepared by `account_username`. - let authorized_user_only = warp::filters::ext::get::() + // Checks if the account has provided a valid password (same as admin-or-auth call, minus one call, can we refactor them together?) 
+ let authorized_user_only = warp::path::param::() .and(warp::header::("authorization")) .and(with_store.clone()) .and_then( - |path_username: Username, auth_string: SecretString, store: S| { - if auth_string.expose_secret().len() < BEARER_TOKEN_START { - return Either::A(err(ApiError::bad_request().into())); + move |path_username: Username, auth_string: SecretString, store: S| { + async move { + let account = is_authorized_user(store, path_username, auth_string).await?; + Ok::(account) } - Either::B( - store - .get_account_from_http_auth( - &path_username, - &auth_string.expose_secret()[BEARER_TOKEN_START..], - ) - .then(move |authorized_account: Result| { - if authorized_account.is_err() { - return err::(ApiError::unauthorized().into()); - } - let authorized_account = authorized_account.unwrap(); - if &path_username == authorized_account.username() { - ok(authorized_account) - } else { - err(ApiError::unauthorized().into()) - } - }), - ) }, ) .boxed(); - // POST /accounts + // // POST /accounts let btp_clone = btp.clone(); let outgoing_handler_clone = outgoing_handler.clone(); - let post_accounts = warp::post2() - .and(accounts_index) + let post_accounts = warp::post() + .and(warp::path("accounts")) + .and(warp::path::end()) .and(admin_only.clone()) - .and(deserialize_json()) + .and(deserialize_json()) // Why does warp::body::json not work? 
.and(with_store.clone()) .and_then(move |account_details: AccountDetails, store: S| { let store_clone = store.clone(); let handler = outgoing_handler_clone.clone(); let btp = btp_clone.clone(); - store - .insert_account(account_details.clone()) - .map_err(move |_| { - error!("Error inserting account into store: {:?}", account_details); - // TODO need more information - ApiError::internal_server_error().into() - }) - .and_then(move |account| { - connect_to_external_services(handler, account, store_clone, btp) - }) - .and_then(|account: A| Ok(warp::reply::json(&account))) + async move { + let account = store + .insert_account(account_details.clone()) + .map_err(move |_| { + error!("Error inserting account into store: {:?}", account_details); + // TODO need more information + Rejection::from(ApiError::internal_server_error()) + }) + .await?; + + connect_to_external_services(handler, account.clone(), store_clone, btp).await?; + Ok::(warp::reply::json(&account)) + } }) .boxed(); // GET /accounts - let get_accounts = warp::get2() - .and(accounts_index) + let get_accounts = warp::get() + .and(warp::path("accounts")) + .and(warp::path::end()) .and(admin_only.clone()) .and(with_store.clone()) .and_then(|store: S| { - store - .get_all_accounts() - .map_err::<_, Rejection>(|_| ApiError::internal_server_error().into()) - .and_then(|accounts| Ok(warp::reply::json(&accounts))) + async move { + let accounts = store + .get_all_accounts() + .map_err(|_| Rejection::from(ApiError::internal_server_error())) + .await?; + Ok::(warp::reply::json(&accounts)) + } }) .boxed(); // PUT /accounts/:username - let put_account = warp::put2() + let put_account = warp::put() + .and(warp::path("accounts")) .and(account_username_to_id.clone()) .and(warp::path::end()) .and(admin_only.clone()) - .and(deserialize_json()) + .and(deserialize_json()) // warp::body::json() is not able to decode this! 
.and(with_store.clone()) .and_then(move |id: Uuid, account_details: AccountDetails, store: S| { - let store_clone = store.clone(); - let handler = outgoing_handler.clone(); + let outgoing_handler = outgoing_handler.clone(); let btp = btp.clone(); - store - .update_account(id, account_details) - .map_err::<_, Rejection>(move |_| ApiError::internal_server_error().into()) - .and_then(move |account| { - connect_to_external_services(handler, account, store_clone, btp) - }) - .and_then(|account: A| Ok(warp::reply::json(&account))) + async move { + let account = store + .update_account(id, account_details) + .map_err(|_| Rejection::from(ApiError::internal_server_error())) + .await?; + connect_to_external_services(outgoing_handler, account.clone(), store, btp).await?; + + Ok::(warp::reply::json(&account)) + } }) .boxed(); // GET /accounts/:username - let get_account = warp::get2() - .and(account_username.clone()) - .and(warp::path::end()) + let get_account = warp::get() + .and(warp::path("accounts")) + // takes the username and the authorization header and checks if it's authorized, returns the uid .and(admin_or_authorized_user_only.clone()) + .and(warp::path::end()) .and(with_store.clone()) .and_then(|id: Uuid, store: S| { - store - .get_accounts(vec![id]) - .map_err::<_, Rejection>(|_| ApiError::account_not_found().into()) - .and_then(|accounts| Ok(warp::reply::json(&accounts[0]))) + async move { + let accounts = store + .get_accounts(vec![id]) + .map_err(|_| Rejection::from(ApiError::account_not_found())) + .await?; + + Ok::(warp::reply::json(&accounts[0])) + } }) .boxed(); // GET /accounts/:username/balance - let get_account_balance = warp::get2() - .and(account_username.clone()) + let get_account_balance = warp::get() + .and(warp::path("accounts")) + // takes the username and the authorization header and checks if it's authorized, returns the uid + .and(admin_or_authorized_user_only.clone()) .and(warp::path("balance")) .and(warp::path::end()) - 
.and(admin_or_authorized_user_only.clone()) .and(with_store.clone()) .and_then(|id: Uuid, store: S| { - // TODO reduce the number of store calls it takes to get the balance - store - .get_accounts(vec![id]) - .map_err(|_| warp::reject::not_found()) - .and_then(move |mut accounts| { - let account = accounts.pop().unwrap(); - let acc_clone = account.clone(); - let asset_scale = acc_clone.asset_scale(); - let asset_code = acc_clone.asset_code().to_owned(); - store - .get_balance(account) - .map_err(move |_| { - error!("Error getting balance for account: {}", id); - ApiError::internal_server_error().into() - }) - .and_then(move |balance: i64| { - Ok(warp::reply::json(&json!({ - // normalize to the base unit - "balance": balance as f64 / 10_u64.pow(asset_scale.into()) as f64, - "asset_code": asset_code, - }))) - }) - }) + async move { + // TODO reduce the number of store calls it takes to get the balance + let mut accounts = store + .get_accounts(vec![id]) + .map_err(|_| warp::reject::not_found()) + .await?; + let account = accounts.pop().unwrap(); + + let balance = store + .get_balance(account.clone()) + .map_err(move |_| { + error!("Error getting balance for account: {}", id); + Rejection::from(ApiError::internal_server_error()) + }) + .await?; + + let asset_scale = account.asset_scale(); + let asset_code = account.asset_code().to_owned(); + Ok::(warp::reply::json(&json!({ + // normalize to the base unit + "balance": balance as f64 / 10_u64.pow(asset_scale.into()) as f64, + "asset_code": asset_code, + }))) + } }) .boxed(); // DELETE /accounts/:username - let delete_account = warp::delete2() + let delete_account = warp::delete() + .and(warp::path("accounts")) .and(account_username_to_id.clone()) .and(warp::path::end()) - .and(admin_only.clone()) + .and(admin_only) .and(with_store.clone()) .and_then(|id: Uuid, store: S| { - store - .delete_account(id) - .map_err::<_, Rejection>(move |_| { - error!("Error deleting account {}", id); - 
ApiError::internal_server_error().into() - }) - .and_then(|account| Ok(warp::reply::json(&account))) + async move { + let account = store + .delete_account(id) + .map_err(|_| { + error!("Error deleting account {}", id); + Rejection::from(ApiError::internal_server_error()) + }) + .await?; + Ok::(warp::reply::json(&account)) + } }) .boxed(); // PUT /accounts/:username/settings - let put_account_settings = warp::put2() - .and(account_username.clone()) + let put_account_settings = warp::put() + .and(warp::path("accounts")) + .and(admin_or_authorized_user_only) .and(warp::path("settings")) .and(warp::path::end()) - .and(admin_or_authorized_user_only.clone()) .and(deserialize_json()) .and(with_store.clone()) .and_then(|id: Uuid, settings: AccountSettings, store: S| { - store - .modify_account_settings(id, settings) - .map_err::<_, Rejection>(move |_| { - error!("Error updating account settings {}", id); - ApiError::internal_server_error().into() - }) - .and_then(|settings| Ok(warp::reply::json(&settings))) + async move { + let modified_account = store + .modify_account_settings(id, settings) + .map_err(move |_| { + error!("Error updating account settings {}", id); + Rejection::from(ApiError::internal_server_error()) + }) + .await?; + Ok::(warp::reply::json(&modified_account)) + } }) .boxed(); + // TODO: Re-enable. 
// (Websocket) /accounts/:username/payments/incoming - let incoming_payment_notifications = account_username - .clone() - .and(warp::path("payments")) - .and(warp::path("incoming")) - .and(warp::path::end()) - .and(admin_or_authorized_user_only.clone()) - .and(warp::ws2()) - .and(with_store.clone()) - .map(|id: Uuid, ws: warp::ws::Ws2, store: S| { - ws.on_upgrade(move |ws: warp::ws::WebSocket| { - let (tx, rx) = futures::sync::mpsc::unbounded::(); - store.add_payment_notification_subscription(id, tx); - rx.map_err(|_| -> warp::Error { unreachable!("unbounded rx never errors") }) - .map(|notification| { - warp::ws::Message::text(serde_json::to_string(¬ification).unwrap()) - }) - .forward(ws) - .map(|_| ()) - .map_err(|err| error!("Error forwarding notifications to websocket: {:?}", err)) - }) - }) - .boxed(); + // let incoming_payment_notifications = account_username + // .clone() + // .and(warp::path("payments")) + // .and(warp::path("incoming")) + // .and(warp::path::end()) + // .and(admin_or_authorized_user_only.clone()) + // .and(warp::ws2()) + // .and(with_store.clone()) + // .map(|id: Uuid, ws: warp::ws::Ws2, store: S| { + // ws.on_upgrade(move |ws: warp::ws::WebSocket| { + // let (tx, rx) = futures::sync::mpsc::unbounded::(); + // store.add_payment_notification_subscription(id, tx); + // rx.map_err(|_| -> warp::Error { unreachable!("unbounded rx never errors") }) + // .map(|notification| { + // warp::ws::Message::text(serde_json::to_string(¬ification).unwrap()) + // }) + // .forward(ws) + // .map(|_| ()) + // .map_err(|err| error!("Error forwarding notifications to websocket: {:?}", err)) + // }) + // }) + // .boxed(); // POST /accounts/:username/payments - let post_payments = warp::post2() - .and(account_username.clone()) + let post_payments = warp::post() + .and(warp::path("accounts")) + .and(authorized_user_only) .and(warp::path("payments")) .and(warp::path::end()) - .and(authorized_user_only.clone()) .and(deserialize_json()) - 
.and(with_incoming_handler.clone()) + .and(with_incoming_handler) .and_then( move |account: A, pay_request: SpspPayRequest, incoming_handler: I| { - pay( - incoming_handler, - account.clone(), - &pay_request.receiver, - pay_request.source_amount, - ) - .and_then(move |receipt| { + async move { + let receipt = pay( + incoming_handler, + account.clone(), + &pay_request.receiver, + pay_request.source_amount, + ) + .map_err(|err| { + error!("Error sending SPSP payment: {:?}", err); + // TODO give a different error message depending on what type of error it is + Rejection::from(ApiError::internal_server_error()) + }) + .await?; + debug!("Sent SPSP payment, receipt: {:?}", receipt); - Ok(warp::reply::json(&json!(receipt))) - }) - .map_err::<_, Rejection>(|err| { - error!("Error sending SPSP payment: {:?}", err); - // TODO give a different error message depending on what type of error it is - ApiError::internal_server_error().into() - }) + Ok::(warp::reply::json(&json!(receipt))) + } }, ) .boxed(); // GET /accounts/:username/spsp let server_secret_clone = server_secret.clone(); - let get_spsp = warp::get2() - .and(account_username_to_id.clone()) + let get_spsp = warp::get() + .and(warp::path("accounts")) + .and(account_username_to_id) .and(warp::path("spsp")) .and(warp::path::end()) .and(with_store.clone()) .and_then(move |id: Uuid, store: S| { let server_secret_clone = server_secret_clone.clone(); - store - .get_accounts(vec![id]) - .map_err::<_, Rejection>(|_| ApiError::internal_server_error().into()) - .and_then(move |accounts| { - // TODO return the response without instantiating an SpspResponder (use a simple fn) - Ok(SpspResponder::new( + async move { + let accounts = store + .get_accounts(vec![id]) + .map_err(|_| Rejection::from(ApiError::internal_server_error())) + .await?; + // TODO return the response without instantiating an SpspResponder (use a simple fn) + Ok::<_, Rejection>( + SpspResponder::new( accounts[0].ilp_address().clone(), 
server_secret_clone.clone(), ) - .generate_http_response()) - }) + .generate_http_response(), + ) + } }) .boxed(); // GET /.well-known/pay // This is the endpoint a [Payment Pointer](https://github.com/interledger/rfcs/blob/master/0026-payment-pointers/0026-payment-pointers.md) // with no path resolves to - let server_secret_clone = server_secret.clone(); - let get_spsp_well_known = warp::get2() + let get_spsp_well_known = warp::get() .and(warp::path(".well-known")) .and(warp::path("pay")) .and(warp::path::end()) - .and(with_store.clone()) + .and(with_store) .and_then(move |store: S| { - // TODO don't clone this - if let Some(username) = default_spsp_account.clone() { - let server_secret_clone = server_secret_clone.clone(); - Either::A( - store + let default_spsp_account = default_spsp_account.clone(); + let server_secret_clone = server_secret.clone(); + async move { + // TODO don't clone this + if let Some(username) = default_spsp_account.clone() { + let id = store .get_account_id_from_username(&username) - .map_err(move |_| { + .map_err(|_| { error!("Account not found: {}", username); warp::reject::not_found() }) - .and_then(move |id| { - // TODO this shouldn't take multiple store calls - store - .get_accounts(vec![id]) - .map_err(|_| ApiError::internal_server_error().into()) - .map(|mut accounts| accounts.pop().unwrap()) - }) - .and_then(move |account| { - // TODO return the response without instantiating an SpspResponder (use a simple fn) - Ok(SpspResponder::new( - account.ilp_address().clone(), - server_secret_clone.clone(), - ) - .generate_http_response()) - }), - ) - } else { - Either::B(err(ApiError::not_found().into())) + .await?; + + // TODO this shouldn't take multiple store calls + let mut accounts = store + .get_accounts(vec![id]) + .map_err(|_| Rejection::from(ApiError::internal_server_error())) + .await?; + + let account = accounts.pop().unwrap(); + // TODO return the response without instantiating an SpspResponder (use a simple fn) + Ok::<_, 
Rejection>( + SpspResponder::new( + account.ilp_address().clone(), + server_secret_clone.clone(), + ) + .generate_http_response(), + ) + } else { + Err(Rejection::from(ApiError::not_found())) + } } }) .boxed(); @@ -469,20 +469,20 @@ where .or(post_accounts) .or(get_accounts) .or(put_account) + .or(delete_account) .or(get_account) .or(get_account_balance) - .or(delete_account) .or(put_account_settings) - .or(incoming_payment_notifications) + // .or(incoming_payment_notifications) // Commented out until tungenstite ws support is added .or(post_payments) .boxed() } -fn get_address_from_parent_and_update_routes( +async fn get_address_from_parent_and_update_routes( mut service: O, parent: A, store: S, -) -> impl Future +) -> Result<(), ()> where O: OutgoingService + Clone + Send + Sync + 'static, A: CcpRoutingAccount + Clone + Send + Sync + 'static, @@ -494,7 +494,7 @@ where parent.id() ); let prepare = IldcpRequest {}.to_prepare(); - service + let fulfill = service .send_request(OutgoingRequest { from: parent.clone(), // Does not matter what we put here, they will get the account from the HTTP/BTP credentials to: parent.clone(), @@ -502,54 +502,53 @@ where original_amount: 0, }) .map_err(|err| error!("Error getting ILDCP info: {:?}", err)) - .and_then(|fulfill| { - let response = IldcpResponse::try_from(fulfill.into_data().freeze()).map_err(|err| { - error!( - "Unable to parse ILDCP response from fulfill packet: {:?}", - err - ); - }); - debug!("Got ILDCP response from parent: {:?}", response); - let ilp_address = match response { - Ok(info) => info.ilp_address(), - Err(_) => return err(()), - }; - ok(ilp_address) - }) - .and_then(move |ilp_address| { - debug!("ILP address is now: {}", ilp_address); - // TODO we may want to make this trigger the CcpRouteManager to request - let prepare = RouteControlRequest { - mode: Mode::Sync, - last_known_epoch: 0, - last_known_routing_table_id: [0; 16], - features: Vec::new(), - } - .to_prepare(); - debug!("Asking for routes from 
{:?}", parent.clone()); - join_all(vec![ - // Set the parent to be the default route for everything - // that starts with their global prefix - store.set_default_route(parent.id()), - // Update our store's address - store.set_ilp_address(ilp_address), - // Get the parent's routes for us - Box::new( - service - .send_request(OutgoingRequest { - from: parent.clone(), - to: parent.clone(), - original_amount: prepare.amount(), - prepare: prepare.clone(), - }) - .and_then(move |_| Ok(())) - .map_err(move |err| { - error!("Got error when trying to update routes {:?}", err) - }), - ), - ]) - }) - .and_then(move |_| Ok(())) + .await?; + + let info = IldcpResponse::try_from(fulfill.into_data().freeze()).map_err(|err| { + error!( + "Unable to parse ILDCP response from fulfill packet: {:?}", + err + ); + })?; + debug!("Got ILDCP response from parent: {:?}", info); + let ilp_address = info.ilp_address(); + + debug!("ILP address is now: {}", ilp_address); + // TODO we may want to make this trigger the CcpRouteManager to request + let prepare = RouteControlRequest { + mode: Mode::Sync, + last_known_epoch: 0, + last_known_routing_table_id: [0; 16], + features: Vec::new(), + } + .to_prepare(); + + debug!("Asking for routes from {:?}", parent.clone()); + let ret = join_all(vec![ + // Set the parent to be the default route for everything + // that starts with their global prefix + store.set_default_route(parent.id()), + // Update our store's address + store.set_ilp_address(ilp_address), + // Get the parent's routes for us + Box::pin( + service + .send_request(OutgoingRequest { + from: parent.clone(), + to: parent.clone(), + original_amount: prepare.amount(), + prepare: prepare.clone(), + }) + .map_err(|_| ()) + .map_ok(|_| ()), + ), + ]) + .await; + // If any of the 3 futures errored, propagate the error outside + if ret.into_iter().any(|r| r.is_err()) { + return Err(()); + } + Ok(()) } // Helper function which gets called whenever a new account is added or @@ -562,12 +561,12 @@ 
where // 2b. Perform a RouteControl Request to make them send us any new routes // 3. If they have a settlement engine endpoitn configured: Make a POST to the // engine's account creation endpoint with the account's id -fn connect_to_external_services( +async fn connect_to_external_services( service: O, account: A, store: S, btp: BtpOutgoingService, -) -> impl Future +) -> Result where O: OutgoingService + Clone + Send + Sync + 'static, A: CcpRoutingAccount + BtpAccount + SettlementAccount + Clone + Send + Sync + 'static, @@ -576,158 +575,178 @@ where { // Try to connect to the account's BTP socket if they have // one configured - let btp_connect_fut = if account.get_ilp_over_btp_url().is_some() { + if account.get_ilp_over_btp_url().is_some() { trace!("Newly inserted account has a BTP URL configured, will try to connect"); - Either::A( - connect_to_service_account(account.clone(), true, btp) - .map_err(|_| ApiError::internal_server_error().into()), - ) - } else { - Either::B(ok(())) - }; + connect_to_service_account(account.clone(), true, btp) + .map_err(|_| Rejection::from(ApiError::internal_server_error())) + .await? 
+ } + + // If we added a parent, get the address assigned to us by + // them and update all of our routes + if account.routing_relation() == RoutingRelation::Parent { + get_address_from_parent_and_update_routes(service, account.clone(), store.clone()) + .map_err(|_| Rejection::from(ApiError::internal_server_error())) + .await?; + } + + // Register the account with the settlement engine + // if a settlement_engine_url was configured on the account + // or if there is a settlement engine configured for that + // account's asset_code + let default_settlement_engine = store + .get_asset_settlement_engine(account.asset_code()) + .map_err(|_| Rejection::from(ApiError::internal_server_error())) + .await?; + + let settlement_engine_url = account + .settlement_engine_details() + .map(|details| details.url) + .or(default_settlement_engine); + if let Some(se_url) = settlement_engine_url { + let id = account.id(); + let http_client = Client::default(); + trace!( + "Sending account {} creation request to settlement engine: {:?}", + id, + se_url.clone() + ); + + let status_code = http_client + .create_engine_account(se_url, id) + .map_err(|_| Rejection::from(ApiError::internal_server_error())) + .await?; - btp_connect_fut.and_then(move |_| { - // If we added a parent, get the address assigned to us by - // them and update all of our routes - let get_ilp_address_fut = if account.routing_relation() == RoutingRelation::Parent { - Either::A( - get_address_from_parent_and_update_routes(service, account.clone(), store.clone()) - .map_err(|_| ApiError::internal_server_error().into()) - ) + if status_code.is_success() { + trace!("Account {} created on the SE", id); } else { - Either::B(ok(())) - }; - - let default_settlement_engine_fut = store.get_asset_settlement_engine(account.asset_code()) - .map_err(|_| ApiError::internal_server_error().into()); - - // Register the account with the settlement engine - // if a settlement_engine_url was configured on the account - // or if there is a 
settlement engine configured for that - // account's asset_code - default_settlement_engine_fut.join(get_ilp_address_fut).and_then(move |(default_settlement_engine, _)| { - let settlement_engine_url = account.settlement_engine_details().map(|details| details.url).or(default_settlement_engine); - if let Some(se_url) = settlement_engine_url { - let id = account.id(); - let http_client = Client::default(); - trace!( - "Sending account {} creation request to settlement engine: {:?}", - id, - se_url.clone() - ); - Either::A( - http_client.create_engine_account(se_url, id) - .map_err(|_| ApiError::internal_server_error().into()) - .and_then(move |status_code| { - if status_code.is_success() { - trace!("Account {} created on the SE", id); - } else { - error!("Error creating account. Settlement engine responded with HTTP code: {}", status_code); - } - Ok(()) + error!( + "Error creating account. Settlement engine responded with HTTP code: {}", + status_code + ); + } + } + + Ok(account) +} + +// TODO: Do we really need this custom deserialization function? +// You'd expect that Serde would be able to handle this. 
+use interledger_http::error; +use mime::Mime; +use serde::de::DeserializeOwned; +pub fn deserialize_json( +) -> impl Filter + Copy { + warp::header::("content-type") + .and(warp::body::bytes()) + .and_then(|content_type: String, buf: bytes05::Bytes| { + async move { + let mime_type: Mime = content_type.parse().map_err(|_| { + Rejection::from( + error::ApiError::bad_request().detail("Invalid content-type header."), + ) + })?; + if mime_type.type_() != mime::APPLICATION_JSON.type_() { + return Err(Rejection::from( + error::ApiError::bad_request().detail("Invalid content-type."), + )); + } else if let Some(charset) = mime_type.get_param("charset") { + // Charset should be UTF-8 + // https://tools.ietf.org/html/rfc8259#section-8.1 + if charset != mime::UTF_8 { + return Err(Rejection::from( + error::ApiError::bad_request().detail("Charset should be UTF-8."), + )); + } + } + + let deserializer = &mut serde_json::Deserializer::from_slice(&buf); + serde_path_to_error::deserialize(deserializer).map_err(|err| { + warp::reject::custom(JsonDeserializeError { + category: err.inner().classify(), + detail: err.inner().to_string(), + path: err.path().clone(), }) - .and_then(move |_| { - Ok(account) - })) - } else { - Either::B(ok(account)) + }) } }) - }) } #[cfg(test)] mod tests { use crate::routes::test_helpers::*; + // TODO: Add test for GET /accounts/:username/spsp and /.well_known - #[test] - fn only_admin_can_create_account() { + #[tokio::test] + async fn only_admin_can_create_account() { let api = test_accounts_api(); - let resp = api_call(&api, "POST", "/accounts", "admin", DETAILS.clone()); + let resp = api_call(&api, "POST", "/accounts", "admin", DETAILS.clone()).await; assert_eq!(resp.status().as_u16(), 200); - let resp = api_call(&api, "POST", "/accounts", "wrong", DETAILS.clone()); + let resp = api_call(&api, "POST", "/accounts", "wrong", DETAILS.clone()).await; assert_eq!(resp.status().as_u16(), 401); } - #[test] - fn only_admin_can_delete_account() { + 
#[tokio::test] + async fn only_admin_can_delete_account() { let api = test_accounts_api(); - let resp = api_call(&api, "DELETE", "/accounts/alice", "admin", DETAILS.clone()); + let resp = api_call(&api, "DELETE", "/accounts/alice", "admin", DETAILS.clone()).await; assert_eq!(resp.status().as_u16(), 200); - let resp = api_call(&api, "DELETE", "/accounts/alice", "wrong", DETAILS.clone()); + let resp = api_call(&api, "DELETE", "/accounts/alice", "wrong", DETAILS.clone()).await; assert_eq!(resp.status().as_u16(), 401); } - #[test] - fn only_admin_can_modify_whole_account() { + #[tokio::test] + async fn only_admin_can_modify_whole_account() { let api = test_accounts_api(); - let resp = api_call(&api, "PUT", "/accounts/alice", "admin", DETAILS.clone()); + let resp = api_call(&api, "PUT", "/accounts/alice", "admin", DETAILS.clone()).await; assert_eq!(resp.status().as_u16(), 200); - let resp = api_call(&api, "PUT", "/accounts/alice", "wrong", DETAILS.clone()); + let resp = api_call(&api, "PUT", "/accounts/alice", "wrong", DETAILS.clone()).await; assert_eq!(resp.status().as_u16(), 401); } - #[test] - fn only_admin_can_get_all_accounts() { + #[tokio::test] + async fn only_admin_can_get_all_accounts() { let api = test_accounts_api(); - let resp = api_call(&api, "GET", "/accounts", "admin", DETAILS.clone()); + let resp = api_call(&api, "GET", "/accounts", "admin", None).await; assert_eq!(resp.status().as_u16(), 200); - let resp = api_call(&api, "GET", "/accounts", "wrong", DETAILS.clone()); + let resp = api_call(&api, "GET", "/accounts", "wrong", None).await; assert_eq!(resp.status().as_u16(), 401); } - #[test] - fn only_admin_or_user_can_get_account() { + #[tokio::test] + async fn only_admin_or_user_can_get_account() { let api = test_accounts_api(); - let resp = api_call(&api, "GET", "/accounts/alice", "admin", DETAILS.clone()); + let resp = api_call(&api, "GET", "/accounts/alice", "admin", DETAILS.clone()).await; assert_eq!(resp.status().as_u16(), 200); // TODO: Make this 
not require the username in the token - let resp = api_call(&api, "GET", "/accounts/alice", "password", DETAILS.clone()); + let resp = api_call(&api, "GET", "/accounts/alice", "password", DETAILS.clone()).await; assert_eq!(resp.status().as_u16(), 200); - let resp = api_call(&api, "GET", "/accounts/alice", "wrong", DETAILS.clone()); + let resp = api_call(&api, "GET", "/accounts/alice", "wrong", DETAILS.clone()).await; assert_eq!(resp.status().as_u16(), 401); } - #[test] - fn only_admin_or_user_can_get_accounts_balance() { + #[tokio::test] + async fn only_admin_or_user_can_get_accounts_balance() { let api = test_accounts_api(); - let resp = api_call( - &api, - "GET", - "/accounts/alice/balance", - "admin", - DETAILS.clone(), - ); + let resp = api_call(&api, "GET", "/accounts/alice/balance", "admin", None).await; assert_eq!(resp.status().as_u16(), 200); // TODO: Make this not require the username in the token - let resp = api_call( - &api, - "GET", - "/accounts/alice/balance", - "password", - DETAILS.clone(), - ); + let resp = api_call(&api, "GET", "/accounts/alice/balance", "password", None).await; assert_eq!(resp.status().as_u16(), 200); - let resp = api_call( - &api, - "GET", - "/accounts/alice/balance", - "wrong", - DETAILS.clone(), - ); + let resp = api_call(&api, "GET", "/accounts/alice/balance", "wrong", None).await; assert_eq!(resp.status().as_u16(), 401); } - #[test] - fn only_admin_or_user_can_modify_accounts_settings() { + #[tokio::test] + async fn only_admin_or_user_can_modify_accounts_settings() { let api = test_accounts_api(); let resp = api_call( &api, @@ -735,7 +754,8 @@ mod tests { "/accounts/alice/settings", "admin", DETAILS.clone(), - ); + ) + .await; assert_eq!(resp.status().as_u16(), 200); // TODO: Make this not require the username in the token @@ -745,7 +765,8 @@ mod tests { "/accounts/alice/settings", "password", DETAILS.clone(), - ); + ) + .await; assert_eq!(resp.status().as_u16(), 200); let resp = api_call( @@ -754,7 +775,50 @@ mod tests { 
"/accounts/alice/settings", "wrong", DETAILS.clone(), - ); + ) + .await; + assert_eq!(resp.status().as_u16(), 401); + } + + #[tokio::test] + async fn only_admin_or_user_can_send_payment() { + let payment: Option = Some(serde_json::json!({ + "receiver": "some_receiver", + "source_amount" : 10, + })); + let api = test_accounts_api(); + let resp = api_call( + &api, + "POST", + "/accounts/alice/payments", + "password", + payment.clone(), + ) + .await; + // This should return an internal server error since we're making an invalid payment request + // We could have set up a mockito mock to set that pay is called correctly but we merely want + // to check that authorization and paths work as expected + assert_eq!(resp.status().as_u16(), 500); + + // Note that the operator has indirect access to the user's token since they control the store + let resp = api_call( + &api, + "POST", + "/accounts/alice/payments", + "admin", + payment.clone(), + ) + .await; + assert_eq!(resp.status().as_u16(), 401); + + let resp = api_call( + &api, + "POST", + "/accounts/alice/payments", + "wrong", + payment.clone(), + ) + .await; assert_eq!(resp.status().as_u16(), 401); } } diff --git a/crates/interledger-api/src/routes/node_settings.rs b/crates/interledger-api/src/routes/node_settings.rs index 40f12819b..5c6ab6159 100644 --- a/crates/interledger-api/src/routes/node_settings.rs +++ b/crates/interledger-api/src/routes/node_settings.rs @@ -1,10 +1,6 @@ use crate::{http_retry::Client, ExchangeRates, NodeStore}; -use bytes::Buf; -use futures::{ - future::{err, join_all, Either}, - Future, -}; -use interledger_http::{deserialize_json, error::*, HttpAccount, HttpStore}; +use futures::TryFutureExt; +use interledger_http::{error::*, HttpAccount, HttpStore}; use interledger_packet::Address; use interledger_router::RouterStore; use interledger_service::{Account, Username}; @@ -19,7 +15,8 @@ use std::{ str::{self, FromStr}, }; use url::Url; -use warp::{self, Filter, Rejection}; +use uuid::Uuid; +use 
warp::{self, reply::Json, Filter, Rejection}; // TODO add more to this response #[derive(Clone, Serialize)] @@ -41,20 +38,21 @@ where + BalanceStore + ExchangeRateStore + RouterStore, - A: Account + HttpAccount + SettlementAccount + Serialize + 'static, + A: Account + HttpAccount + Send + Sync + SettlementAccount + Serialize + 'static, { // Helper filters let admin_auth_header = format!("Bearer {}", admin_api_token); let admin_only = warp::header::("authorization") - .and_then( - move |authorization: SecretString| -> Result<(), Rejection> { + .and_then(move |authorization: SecretString| { + let admin_auth_header = admin_auth_header.clone(); + async move { if authorization.expose_secret() == &admin_auth_header { - Ok(()) + Ok::<(), Rejection>(()) } else { - Err(ApiError::unauthorized().into()) + Err(Rejection::from(ApiError::unauthorized())) } - }, - ) + } + }) // This call makes it so we do not pass on a () value on // success to the next filter, it just gets rid of it .untuple_one() @@ -62,7 +60,7 @@ where let with_store = warp::any().map(move || store.clone()).boxed(); // GET / - let get_root = warp::get2() + let get_root = warp::get() .and(warp::path::end()) .and(with_store.clone()) .map(move |store: S| { @@ -75,197 +73,200 @@ where .boxed(); // PUT /rates - let put_rates = warp::put2() + let put_rates = warp::put() .and(warp::path("rates")) .and(warp::path::end()) .and(admin_only.clone()) - .and(deserialize_json()) + .and(warp::body::json()) .and(with_store.clone()) - .and_then(|rates: ExchangeRates, store: S| -> Result<_, Rejection> { - if store.set_exchange_rates(rates.0.clone()).is_ok() { - Ok(warp::reply::json(&rates)) - } else { - error!("Error setting exchange rates"); - Err(ApiError::internal_server_error().into()) + .and_then(|rates: ExchangeRates, store: S| { + async move { + if store.set_exchange_rates(rates.0.clone()).is_ok() { + Ok(warp::reply::json(&rates)) + } else { + error!("Error setting exchange rates"); + 
Err(Rejection::from(ApiError::internal_server_error())) + } } }) .boxed(); // GET /rates - let get_rates = warp::get2() + let get_rates = warp::get() .and(warp::path("rates")) .and(warp::path::end()) .and(with_store.clone()) - .and_then(|store: S| -> Result<_, Rejection> { - if let Ok(rates) = store.get_all_exchange_rates() { - Ok(warp::reply::json(&rates)) - } else { - error!("Error getting exchange rates"); - Err(ApiError::internal_server_error().into()) + .and_then(|store: S| { + async move { + if let Ok(rates) = store.get_all_exchange_rates() { + Ok::(warp::reply::json(&rates)) + } else { + error!("Error getting exchange rates"); + Err(Rejection::from(ApiError::internal_server_error())) + } } }) .boxed(); - // GET /routes + // // GET /routes // Response: Map of ILP Address prefix -> Username - let get_routes = warp::get2() + let get_routes = warp::get() .and(warp::path("routes")) .and(warp::path::end()) .and(with_store.clone()) .and_then(|store: S| { - // Convert the account IDs listed in the routing table - // to the usernames for the API response - let routes = store.routing_table().clone(); - store - .get_accounts(routes.values().cloned().collect()) - .map_err::<_, Rejection>(|_| { - error!("Error getting accounts from store"); - ApiError::internal_server_error().into() - }) - .and_then(move |accounts| { - let routes: HashMap = HashMap::from_iter( - routes - .iter() - .map(|(prefix, _)| prefix.to_string()) - .zip(accounts.into_iter().map(|a| a.username().to_string())), - ); + async move { + // Convert the account IDs listed in the routing table + // to the usernames for the API response + let routes = store.routing_table().clone(); + let accounts = store + .get_accounts(routes.values().cloned().collect()) + .map_err(|_| { + error!("Error getting accounts from store"); + Rejection::from(ApiError::internal_server_error()) + }) + .await?; + let routes: HashMap = HashMap::from_iter( + routes + .iter() + .map(|(prefix, _)| prefix.to_string()) + 
.zip(accounts.into_iter().map(|a| a.username().to_string())), + ); - Ok(warp::reply::json(&routes)) - }) + Ok::(warp::reply::json(&routes)) + } }) .boxed(); // PUT /routes/static // Body: Map of ILP Address prefix -> Username - let put_static_routes = warp::put2() + let put_static_routes = warp::put() .and(warp::path("routes")) .and(warp::path("static")) .and(warp::path::end()) .and(admin_only.clone()) - .and(deserialize_json()) + .and(warp::body::json()) .and(with_store.clone()) - .and_then(|routes: HashMap, store: S| { - // Convert the usernames to account IDs to set the routes in the store - let store_clone = store.clone(); - let usernames: Vec = routes.values().cloned().collect(); - // TODO use one store call to look up all of the usernames - join_all(usernames.into_iter().map(move |username| { - store_clone - .get_account_id_from_username(&username) - .map_err(move |_| { - error!("No account exists with username: {}", username); - ApiError::account_not_found().into() - }) - })) - .and_then(move |account_ids| { + .and_then(move |routes: HashMap, store: S| { + async move { + // Convert the usernames to account IDs to set the routes in the store + let mut usernames: Vec = Vec::new(); + for username in routes.values() { + let user = match Username::from_str(&username) { + Ok(u) => u, + Err(_) => return Err(Rejection::from(ApiError::bad_request())), + }; + usernames.push(user); + } + + let mut account_ids: Vec = Vec::new(); + for username in usernames { + account_ids.push( + store + .get_account_id_from_username(&username) + .map_err(|_| { + error!("Error setting static routes"); + Rejection::from(ApiError::internal_server_error()) + }) + .await?, + ); + } + let prefixes = routes.keys().map(|s| s.to_string()); store .set_static_routes(prefixes.zip(account_ids.into_iter())) - .map_err::<_, Rejection>(|_| { + .map_err(|_| { error!("Error setting static routes"); - ApiError::internal_server_error().into() + Rejection::from(ApiError::internal_server_error()) }) - 
.map(move |_| warp::reply::json(&routes)) - }) + .await?; + Ok::(warp::reply::json(&routes)) + } }) .boxed(); // PUT /routes/static/:prefix // Body: Username - let put_static_route = warp::put2() + let put_static_route = warp::put() .and(warp::path("routes")) .and(warp::path("static")) - .and(warp::path::param2::()) + .and(warp::path::param::()) .and(warp::path::end()) .and(admin_only.clone()) - .and(warp::body::concat()) + .and(warp::body::bytes()) .and(with_store.clone()) - .and_then(|prefix: String, body: warp::body::FullBody, store: S| { - if let Ok(username) = str::from_utf8(body.bytes()) - .map_err(|_| ()) - .and_then(|string| Username::from_str(string).map_err(|_| ())) - { + .and_then(|prefix: String, body: bytes05::Bytes, store: S| { + async move { + let username_str = + str::from_utf8(&body).map_err(|_| Rejection::from(ApiError::bad_request()))?; + let username = Username::from_str(username_str) + .map_err(|_| Rejection::from(ApiError::bad_request()))?; // Convert the username to an account ID to set it in the store - let username_clone = username.clone(); - Either::A( - store - .clone() - .get_account_id_from_username(&username) - .map_err(move |_| { - error!("No account exists with username: {}", username_clone); - ApiError::account_not_found().into() - }) - .and_then(move |account_id| { - store - .set_static_route(prefix, account_id) - .map_err::<_, Rejection>(|_| { - error!("Error setting static route"); - ApiError::internal_server_error().into() - }) - }) - .map(move |_| username.to_string()), - ) - } else { - Either::B(err(ApiError::bad_request().into())) + let account_id = store + .get_account_id_from_username(&username) + .map_err(|_| { + error!("No account exists with username: {}", username); + Rejection::from(ApiError::account_not_found()) + }) + .await?; + store + .set_static_route(prefix, account_id) + .map_err(|_| { + error!("Error setting static route"); + Rejection::from(ApiError::internal_server_error()) + }) + .await?; + 
Ok::(username.to_string()) } }) .boxed(); // PUT /settlement/engines - let put_settlement_engines = warp::put2() + let put_settlement_engines = warp::put() .and(warp::path("settlement")) .and(warp::path("engines")) .and(warp::path::end()) - .and(admin_only.clone()) - .and(deserialize_json()) - .and(with_store.clone()) - .and_then(|asset_to_url_map: HashMap, store: S| { + .and(admin_only) + .and(warp::body::json()) + .and(with_store) + .and_then(move |asset_to_url_map: HashMap, store: S| async move { let asset_to_url_map_clone = asset_to_url_map.clone(); store .set_settlement_engines(asset_to_url_map.clone()) - .map_err::<_, Rejection>(|_| { + .map_err(|_| { error!("Error setting settlement engines"); - ApiError::internal_server_error().into() - }) - .and_then(move |_| { - // Create the accounts on the settlement engines for any - // accounts that are using the default settlement engine URLs - // (This is done in case we modify the globally configured settlement - // engine URLs after accounts have already been added) + Rejection::from(ApiError::internal_server_error()) + }).await?; + // Create the accounts on the settlement engines for any + // accounts that are using the default settlement engine URLs + // (This is done in case we modify the globally configured settlement + // engine URLs after accounts have already been added) - // TODO we should come up with a better way of ensuring - // the accounts are created that doesn't involve loading - // all of the accounts from the database into memory - // (even if this isn't called often, it could crash the node at some point) - store.get_all_accounts() - .map_err(|_| ApiError::internal_server_error().into()) - .and_then(move |accounts| { - let client = Client::default(); - let create_settlement_accounts = - accounts.into_iter().filter_map(move |account| { - let id = account.id(); - // Try creating the account on the settlement engine if the settlement_engine_url of the - // account is the one we just configured as 
the default for the account's asset code - if let Some(details) = account.settlement_engine_details() { - if Some(&details.url) == asset_to_url_map.get(account.asset_code()) { - return Some(client.create_engine_account(details.url, account.id()) - .map_err(|_| ApiError::internal_server_error().into()) - .and_then(move |status_code| { - if status_code.is_success() { - trace!("Account {} created on the SE", id); - } else { - error!("Error creating account. Settlement engine responded with HTTP code: {}", status_code); - } - Ok(()) - })); - } - } - None - }); - join_all(create_settlement_accounts) - }) - }) - .and_then(move |_| Ok(warp::reply::json(&asset_to_url_map_clone))) + // TODO we should come up with a better way of ensuring + // the accounts are created that doesn't involve loading + // all of the accounts from the database into memory + // (even if this isn't called often, it could crash the node at some point) + let accounts = store.get_all_accounts() + .map_err(|_| Rejection::from(ApiError::internal_server_error())).await?; + + let client = Client::default(); + // Try creating the account on the settlement engine if the settlement_engine_url of the + // account is the one we just configured as the default for the account's asset code + for account in accounts { + if let Some(details) = account.settlement_engine_details() { + if Some(&details.url) == asset_to_url_map.get(account.asset_code()) { + let status_code = client.create_engine_account(details.url, account.id()) + .map_err(|_| Rejection::from(ApiError::internal_server_error())) + .await?; + if status_code.is_success() { + trace!("Account {} created on the SE", account.id()); + } else { + error!("Error creating account. 
Settlement engine responded with HTTP code: {}", status_code); + } + } + } + } + Ok::(warp::reply::json(&asset_to_url_map_clone)) }) .boxed(); @@ -284,10 +285,10 @@ mod tests { use crate::routes::test_helpers::{api_call, test_node_settings_api}; use serde_json::{json, Value}; - #[test] - fn gets_status() { + #[tokio::test] + async fn gets_status() { let api = test_node_settings_api(); - let resp = api_call(&api, "GET", "/", "", None); + let resp = api_call(&api, "GET", "/", "", None).await; assert_eq!(resp.status().as_u16(), 200); assert_eq!( resp.body(), @@ -295,10 +296,10 @@ mod tests { ); } - #[test] - fn gets_rates() { + #[tokio::test] + async fn gets_rates() { let api = test_node_settings_api(); - let resp = api_call(&api, "GET", "/rates", "", None); + let resp = api_call(&api, "GET", "/rates", "", None).await; assert_eq!(resp.status().as_u16(), 200); assert_eq!( serde_json::from_slice::(resp.body()).unwrap(), @@ -306,56 +307,60 @@ mod tests { ); } - #[test] - fn gets_routes() { + #[tokio::test] + async fn gets_routes() { let api = test_node_settings_api(); - let resp = api_call(&api, "GET", "/routes", "", None); + let resp = api_call(&api, "GET", "/routes", "", None).await; assert_eq!(resp.status().as_u16(), 200); } - #[test] - fn only_admin_can_put_rates() { + #[tokio::test] + async fn only_admin_can_put_rates() { let api = test_node_settings_api(); let rates = json!({"ABC": 1.0}); - let resp = api_call(&api, "PUT", "/rates", "admin", Some(rates.clone())); + let resp = api_call(&api, "PUT", "/rates", "admin", Some(rates.clone())).await; assert_eq!(resp.status().as_u16(), 200); - let resp = api_call(&api, "PUT", "/rates", "wrong", Some(rates)); + let resp = api_call(&api, "PUT", "/rates", "wrong", Some(rates)).await; assert_eq!(resp.status().as_u16(), 401); } - #[test] - fn only_admin_can_put_static_routes() { + #[tokio::test] + async fn only_admin_can_put_static_routes() { let api = test_node_settings_api(); let routes = json!({"g.node1": "alice", 
"example.eu": "bob"}); - let resp = api_call(&api, "PUT", "/routes/static", "admin", Some(routes.clone())); + let resp = api_call(&api, "PUT", "/routes/static", "admin", Some(routes.clone())).await; assert_eq!(resp.status().as_u16(), 200); - let resp = api_call(&api, "PUT", "/routes/static", "wrong", Some(routes)); + let resp = api_call(&api, "PUT", "/routes/static", "wrong", Some(routes)).await; assert_eq!(resp.status().as_u16(), 401); } - #[test] - fn only_admin_can_put_single_static_route() { + #[tokio::test] + async fn only_admin_can_put_single_static_route() { let api = test_node_settings_api(); - let api_put = move |auth: &str| { - warp::test::request() - .method("PUT") - .path("/routes/static/g.node1") - .body("alice") - .header("Authorization", format!("Bearer {}", auth.to_string())) - .reply(&api) + let api_put = |auth: String| { + let auth = format!("Bearer {}", auth); + async { + warp::test::request() + .method("PUT") + .path("/routes/static/g.node1") + .body("alice") + .header("Authorization", auth) + .reply(&api) + .await + } }; - let resp = api_put("admin"); + let resp = api_put("admin".to_owned()).await; assert_eq!(resp.status().as_u16(), 200); - let resp = api_put("wrong"); + let resp = api_put("wrong".to_owned()).await; assert_eq!(resp.status().as_u16(), 401); } - #[test] - fn only_admin_can_put_engines() { + #[tokio::test] + async fn only_admin_can_put_engines() { let api = test_node_settings_api(); let engines = json!({"ABC": "http://localhost:3000", "XYZ": "http://localhost:3001"}); let resp = api_call( @@ -364,10 +369,11 @@ mod tests { "/settlement/engines", "admin", Some(engines.clone()), - ); + ) + .await; assert_eq!(resp.status().as_u16(), 200); - let resp = api_call(&api, "PUT", "/settlement/engines", "wrong", Some(engines)); + let resp = api_call(&api, "PUT", "/settlement/engines", "wrong", Some(engines)).await; assert_eq!(resp.status().as_u16(), 401); } } diff --git a/crates/interledger-api/src/routes/test_helpers.rs 
b/crates/interledger-api/src/routes/test_helpers.rs index 9e5023569..ecd6355fe 100644 --- a/crates/interledger-api/src/routes/test_helpers.rs +++ b/crates/interledger-api/src/routes/test_helpers.rs @@ -2,12 +2,9 @@ use crate::{ routes::{accounts_api, node_settings_api}, AccountDetails, AccountSettings, NodeStore, }; +use async_trait::async_trait; use bytes::Bytes; -use futures::sync::mpsc::UnboundedSender; -use futures::{ - future::{err, ok}, - Future, -}; +use futures::channel::mpsc::UnboundedSender; use http::Response; use interledger_btp::{BtpAccount, BtpOutgoingService}; use interledger_ccp::{CcpRoutingAccount, RoutingRelation}; @@ -32,13 +29,13 @@ use url::Url; use uuid::Uuid; use warp::{self, Filter}; -pub fn api_call( +pub async fn api_call( api: &F, method: &str, endpoint: &str, // /ilp or /accounts/:username/ilp auth: T, // simple bearer or overloaded username+password data: Option, -) -> Response +) -> Response where F: warp::Filter + 'static, F::Extract: warp::Reply, @@ -52,7 +49,7 @@ where ret = ret.header("Content-type", "application/json").json(&d); } - ret.reply(api) + ret.reply(api).await } pub fn test_node_settings_api( @@ -63,20 +60,20 @@ pub fn test_node_settings_api( pub fn test_accounts_api( ) -> impl warp::Filter + Clone { let incoming = incoming_service_fn(|_request| { - Box::new(err(RejectBuilder { + Err(RejectBuilder { code: ErrorCode::F02_UNREACHABLE, message: b"No other incoming handler!", data: &[], triggered_by: None, } - .build())) + .build()) }); let outgoing = outgoing_service_fn(move |_request| { - Box::new(ok(FulfillBuilder { + Ok(FulfillBuilder { fulfillment: &[0; 32], data: b"hello!", } - .build())) + .build()) }); let btp = BtpOutgoingService::new( Address::from_str("example.alice").unwrap(), @@ -174,22 +171,17 @@ impl CcpRoutingAccount for TestAccount { } } +#[async_trait] impl AccountStore for TestStore { type Account = TestAccount; - fn get_accounts( - &self, - _account_ids: Vec, - ) -> Box, Error = ()> + Send> { - 
Box::new(ok(vec![TestAccount])) + async fn get_accounts(&self, _account_ids: Vec) -> Result, ()> { + Ok(vec![TestAccount]) } // stub implementation (not used in these tests) - fn get_account_id_from_username( - &self, - _username: &Username, - ) -> Box + Send> { - Box::new(ok(Uuid::new_v4())) + async fn get_account_id_from_username(&self, _username: &Username) -> Result { + Ok(Uuid::new_v4()) } } @@ -216,91 +208,74 @@ impl RouterStore for TestStore { } } +#[async_trait] impl NodeStore for TestStore { type Account = TestAccount; - fn insert_account( - &self, - _account: AccountDetails, - ) -> Box + Send> { - Box::new(ok(TestAccount)) + async fn insert_account(&self, _account: AccountDetails) -> Result { + Ok(TestAccount) } - fn delete_account( - &self, - _id: Uuid, - ) -> Box + Send> { - Box::new(ok(TestAccount)) + async fn delete_account(&self, _id: Uuid) -> Result { + Ok(TestAccount) } - fn update_account( + async fn update_account( &self, _id: Uuid, _account: AccountDetails, - ) -> Box + Send> { - Box::new(ok(TestAccount)) + ) -> Result { + Ok(TestAccount) } - fn modify_account_settings( + async fn modify_account_settings( &self, _id: Uuid, _settings: AccountSettings, - ) -> Box + Send> { - Box::new(ok(TestAccount)) + ) -> Result { + Ok(TestAccount) } - fn get_all_accounts(&self) -> Box, Error = ()> + Send> { - Box::new(ok(vec![TestAccount, TestAccount])) + async fn get_all_accounts(&self) -> Result, ()> { + Ok(vec![TestAccount, TestAccount]) } - fn set_static_routes(&self, _routes: R) -> Box + Send> + async fn set_static_routes(&self, _routes: R) -> Result<(), ()> where - R: IntoIterator, + R: IntoIterator + Send + 'async_trait, { - Box::new(ok(())) + Ok(()) } - fn set_static_route( - &self, - _prefix: String, - _account_id: Uuid, - ) -> Box + Send> { - Box::new(ok(())) + async fn set_static_route(&self, _prefix: String, _account_id: Uuid) -> Result<(), ()> { + Ok(()) } - fn set_default_route( - &self, - _account_id: Uuid, - ) -> Box + Send> { + async fn 
set_default_route(&self, _account_id: Uuid) -> Result<(), ()> { unimplemented!() } - fn set_settlement_engines( + async fn set_settlement_engines( &self, - _asset_to_url_map: impl IntoIterator, - ) -> Box + Send> { - Box::new(ok(())) + _asset_to_url_map: impl IntoIterator + Send + 'async_trait, + ) -> Result<(), ()> { + Ok(()) } - fn get_asset_settlement_engine( - &self, - _asset_code: &str, - ) -> Box, Error = ()> + Send> { - Box::new(ok(None)) + async fn get_asset_settlement_engine(&self, _asset_code: &str) -> Result, ()> { + Ok(None) } } +#[async_trait] impl AddressStore for TestStore { /// Saves the ILP Address in the store's memory and database - fn set_ilp_address( - &self, - _ilp_address: Address, - ) -> Box + Send> { - unimplemented!() + async fn set_ilp_address(&self, _ilp_address: Address) -> Result<(), ()> { + Ok(()) } - fn clear_ilp_address(&self) -> Box + Send> { - unimplemented!() + async fn clear_ilp_address(&self) -> Result<(), ()> { + Ok(()) } /// Get's the store's ilp address from memory @@ -325,47 +300,49 @@ impl StreamNotificationsStore for TestStore { } } +#[async_trait] impl BalanceStore for TestStore { - fn get_balance(&self, _account: TestAccount) -> Box + Send> { - Box::new(ok(1)) + async fn get_balance(&self, _account: TestAccount) -> Result { + Ok(1) } - fn update_balances_for_prepare( + async fn update_balances_for_prepare( &self, _from_account: TestAccount, _incoming_amount: u64, - ) -> Box + Send> { + ) -> Result<(), ()> { unimplemented!() } - fn update_balances_for_fulfill( + async fn update_balances_for_fulfill( &self, _to_account: TestAccount, _outgoing_amount: u64, - ) -> Box + Send> { + ) -> Result<(i64, u64), ()> { unimplemented!() } - fn update_balances_for_reject( + async fn update_balances_for_reject( &self, _from_account: TestAccount, _incoming_amount: u64, - ) -> Box + Send> { + ) -> Result<(), ()> { unimplemented!() } } +#[async_trait] impl HttpStore for TestStore { type Account = TestAccount; - fn 
get_account_from_http_auth( + async fn get_account_from_http_auth( &self, username: &Username, token: &str, - ) -> Box + Send> { + ) -> Result { if username == &*USERNAME && token == AUTH_PASSWORD { - Box::new(ok(TestAccount)) + Ok(TestAccount) } else { - Box::new(err(())) + Err(()) } } } diff --git a/crates/interledger-btp/Cargo.toml b/crates/interledger-btp/Cargo.toml index 446750055..330367f0f 100644 --- a/crates/interledger-btp/Cargo.toml +++ b/crates/interledger-btp/Cargo.toml @@ -11,7 +11,7 @@ repository = "https://github.com/interledger-rs/interledger-rs" bytes = { version = "0.4.12", default-features = false } byteorder = { version = "1.3.2", default-features = false } chrono = { version = "0.4.9", default-features = false } -futures = { version = "0.1.29", default-features = false } +futures = { version = "0.3.1", default-features = false } interledger-packet = { path = "../interledger-packet", version = "^0.4.0", default-features = false } interledger-service = { path = "../interledger-service", version = "^0.4.0", default-features = false } log = { version = "0.4.8", default-features = false } @@ -19,18 +19,20 @@ num-bigint = { version = "0.2.3", default-features = false, features = ["std"] } parking_lot = { version = "0.9.0", default-features = false } quick-error = { version = "1.2.2", default-features = false } rand = { version = "0.7.2", default-features = false, features = ["std"] } -stream-cancel = { version = "0.4.4", default-features = false } -tokio-executor = { version = "0.1.8", default-features = false } -tokio-timer = { version = "0.2.11", default-features = false } -tokio-tungstenite = { version = "0.9.0", default-features = false, features = ["tls", "connect"] } +stream-cancel = { version = "0.5", default-features = false } +tokio-tungstenite = { version = "0.10.0", package = "tokio-tungstenite", git = "https://github.com/snapview/tokio-tungstenite", default-features = false, features = ["tls", "connect"] } + tungstenite = 
{ version = "0.9.1", default-features = false } url = { version = "2.1.0", default-features = false } uuid = { version = "0.8.1", default-features = false, features = ["v4"]} -warp = { version = "0.1.20", default-features = false, features = ["websocket"] } +# warp = { version = "0.1.20", default-features = false, features = ["websocket"] } +warp = { git = "https://github.com/seanmonstar/warp.git" } secrecy = "0.5.1" +async-trait = "0.1.22" +tokio = { version = "0.2.8", features = ["rt-core", "time", "stream"] } +lazy_static = { version = "1.4.0", default-features = false } +pin-project = "0.4.6" [dev-dependencies] hex = { version = "0.4.0", default-features = false } -lazy_static = { version = "1.4.0", default-features = false } -net2 = { version = "0.2.33", default-features = false } -tokio = { version = "0.1.22", default-features = false } +net2 = { version = "0.2.33", default-features = false } \ No newline at end of file diff --git a/crates/interledger-btp/src/client.rs b/crates/interledger-btp/src/client.rs index 7a0041e6e..4a2ca31d6 100644 --- a/crates/interledger-btp/src/client.rs +++ b/crates/interledger-btp/src/client.rs @@ -1,7 +1,7 @@ use super::packet::*; use super::service::BtpOutgoingService; use super::BtpAccount; -use futures::{future::join_all, Future, Sink, Stream}; +use futures::{future::join_all, SinkExt, StreamExt, TryFutureExt}; use interledger_packet::Address; use interledger_service::*; use log::{debug, error, trace}; @@ -22,15 +22,15 @@ pub fn parse_btp_url(uri: &str) -> Result { /// Create a BtpOutgoingService wrapping BTP connections to the accounts specified. /// Calling `handle_incoming` with an `IncomingService` will turn the returned /// BtpOutgoingService into a bidirectional handler. 
-pub fn connect_client( +pub async fn connect_client( ilp_address: Address, accounts: Vec, error_on_unavailable: bool, next_outgoing: S, -) -> impl Future, Error = ()> +) -> Result, ()> where S: OutgoingService + Clone + 'static, - A: BtpAccount + 'static, + A: BtpAccount + Send + Sync + 'static, { let service = BtpOutgoingService::new(ilp_address, next_outgoing); let mut connect_btp = Vec::new(); @@ -42,17 +42,18 @@ where service.clone(), )); } - join_all(connect_btp).and_then(move |_| Ok(service)) + join_all(connect_btp).await; + Ok(service) } -pub fn connect_to_service_account( +pub async fn connect_to_service_account( account: A, error_on_unavailable: bool, service: BtpOutgoingService, -) -> impl Future +) -> Result<(), ()> where O: OutgoingService + Clone + 'static, - A: BtpAccount + 'static, + A: BtpAccount + Send + Sync + 'static, { let account_id = account.id(); let mut url = account @@ -67,58 +68,63 @@ where .map(|s| s.to_vec()) .unwrap_or_default(); debug!("Connecting to {}", url); - connect_async(url.clone()) + + let (mut connection, _) = connect_async(url.clone()) .map_err(move |err| { error!( "Error connecting to WebSocket server for account: {} {:?}", account_id, err ) }) - .and_then(move |(connection, _)| { - trace!( - "Connected to account {} (URI: {}), sending auth packet", - account_id, - url - ); - // Send BTP authentication - let auth_packet = Message::Binary( - BtpPacket::Message(BtpMessage { - request_id: random(), - protocol_data: vec![ - ProtocolData { - protocol_name: String::from("auth"), - content_type: ContentType::ApplicationOctetStream, - data: vec![], - }, - ProtocolData { - protocol_name: String::from("auth_token"), - content_type: ContentType::TextPlainUtf8, - data: token, - }, - ], - }) - .to_bytes(), - ); + .await?; + + trace!( + "Connected to account {} (UID: {}) (URI: {}), sending auth packet", + account.username(), + account_id, + url + ); - // TODO check that the response is a success before proceeding - // (right now we just 
assume they'll close the connection if the auth didn't work) - connection - .send(auth_packet) - .map_err(move |_| error!("Error sending auth packet on connection: {}", url)) - .then(move |result| match result { - Ok(connection) => { - debug!("Connected to account {}'s server", account.id()); - let connection = connection.from_err().sink_from_err(); - service.add_connection(account, connection); - Ok(()) - } - Err(_) => { - if error_on_unavailable { - Err(()) - } else { - Ok(()) - } - } - }) + // Send BTP authentication + let auth_packet = Message::binary( + BtpPacket::Message(BtpMessage { + request_id: random(), + protocol_data: vec![ + ProtocolData { + protocol_name: String::from("auth"), + content_type: ContentType::ApplicationOctetStream, + data: vec![], + }, + ProtocolData { + protocol_name: String::from("auth_token"), + content_type: ContentType::TextPlainUtf8, + data: token, + }, + ], }) + .to_bytes(), + ); + + // TODO check that the response is a success before proceeding + // (right now we just assume they'll close the connection if the auth didn't work) + let result = connection // this just a stream + .send(auth_packet) + .map_err(move |_| error!("Error sending auth packet on connection: {}", url)) + .await; + + match result { + Ok(_) => { + debug!("Connected to account {}'s server", account.id()); + let connection = connection.filter_map(|v| async move { v.ok() }); + service.add_connection(account, connection); + Ok(()) + } + Err(_) => { + if error_on_unavailable { + Err(()) + } else { + Ok(()) + } + } + } } diff --git a/crates/interledger-btp/src/lib.rs b/crates/interledger-btp/src/lib.rs index fbc6981e4..03f1554b8 100644 --- a/crates/interledger-btp/src/lib.rs +++ b/crates/interledger-btp/src/lib.rs @@ -6,7 +6,7 @@ //! Because this protocol uses WebSockets, only one party needs to have a publicly-accessible HTTPS //! endpoint but both sides can send and receive ILP packets. 
-use futures::Future; +use async_trait::async_trait; use interledger_service::{Account, Username}; use url::Url; @@ -16,9 +16,10 @@ mod oer; mod packet; mod server; mod service; +mod wrapped_ws; pub use self::client::{connect_client, connect_to_service_account, parse_btp_url}; -pub use self::server::btp_service_as_filter; +pub use self::server::btp_service_as_filter; // This is consumed only by the node. pub use self::service::{BtpOutgoingService, BtpService}; pub trait BtpAccount: Account { @@ -27,26 +28,24 @@ pub trait BtpAccount: Account { } /// The interface for Store implementations that can be used with the BTP Server. +#[async_trait] pub trait BtpStore { type Account: BtpAccount; /// Load Account details based on the auth token received via BTP. - fn get_account_from_btp_auth( + async fn get_account_from_btp_auth( &self, username: &Username, token: &str, - ) -> Box + Send>; + ) -> Result; /// Load accounts that have a ilp_over_btp_url configured - fn get_btp_outgoing_accounts( - &self, - ) -> Box, Error = ()> + Send>; + async fn get_btp_outgoing_accounts(&self) -> Result, ()>; } #[cfg(test)] mod client_server { use super::*; - use futures::future::{err, lazy, ok, result}; use interledger_packet::{Address, ErrorCode, FulfillBuilder, PrepareBuilder, RejectBuilder}; use interledger_service::*; use net2::TcpBuilder; @@ -56,7 +55,6 @@ mod client_server { sync::Arc, time::{Duration, SystemTime}, }; - use tokio::runtime::Runtime; use uuid::Uuid; use lazy_static::lazy_static; @@ -126,13 +124,11 @@ mod client_server { accounts: Arc>, } + #[async_trait] impl AccountStore for TestStore { type Account = TestAccount; - fn get_accounts( - &self, - account_ids: Vec, - ) -> Box, Error = ()> + Send> { + async fn get_accounts(&self, account_ids: Vec) -> Result, ()> { let accounts: Vec = self .accounts .iter() @@ -145,156 +141,150 @@ mod client_server { }) .collect(); if accounts.len() == account_ids.len() { - Box::new(ok(accounts)) + Ok(accounts) } else { - Box::new(err(())) 
+ Err(()) } } // stub implementation (not used in these tests) - fn get_account_id_from_username( - &self, - _username: &Username, - ) -> Box + Send> { - Box::new(ok(Uuid::new_v4())) + async fn get_account_id_from_username(&self, _username: &Username) -> Result { + Ok(Uuid::new_v4()) } } + #[async_trait] impl BtpStore for TestStore { type Account = TestAccount; - fn get_account_from_btp_auth( + async fn get_account_from_btp_auth( &self, username: &Username, token: &str, - ) -> Box + Send> { - Box::new(result( - self.accounts - .iter() - .find(|account| { - if let Some(account_token) = &account.ilp_over_btp_incoming_token { - account_token == token && account.username() == username - } else { - false - } - }) - .cloned() - .ok_or(()), - )) + ) -> Result { + self.accounts + .iter() + .find(|account| { + if let Some(account_token) = &account.ilp_over_btp_incoming_token { + account_token == token && account.username() == username + } else { + false + } + }) + .cloned() + .ok_or(()) } - fn get_btp_outgoing_accounts( - &self, - ) -> Box, Error = ()> + Send> { - Box::new(ok(self + async fn get_btp_outgoing_accounts(&self) -> Result, ()> { + Ok(self .accounts .iter() .filter(|account| account.ilp_over_btp_url.is_some()) .cloned() - .collect())) + .collect()) } } // TODO should this be an integration test, since it binds to a port? 
- #[test] - fn client_server_test() { - let mut runtime = Runtime::new().unwrap(); - runtime - .block_on(lazy(|| { - let bind_addr = get_open_port(); - - let server_store = TestStore { - accounts: Arc::new(vec![TestAccount { - id: Uuid::new_v4(), - ilp_over_btp_incoming_token: Some("test_auth_token".to_string()), - ilp_over_btp_outgoing_token: None, - ilp_over_btp_url: None, - }]), - }; - let server_address = Address::from_str("example.server").unwrap(); - let btp_service = BtpOutgoingService::new( - server_address.clone(), - outgoing_service_fn(move |_| { - Err(RejectBuilder { - code: ErrorCode::F02_UNREACHABLE, - message: b"No other outgoing handler", - triggered_by: Some(&server_address), - data: &[], - } - .build()) - }), - ); - let filter = btp_service_as_filter(btp_service.clone(), server_store); - btp_service.handle_incoming(incoming_service_fn(|_| { - Ok(FulfillBuilder { - fulfillment: &[0; 32], - data: b"test data", - } - .build()) - })); - - let account = TestAccount { - id: Uuid::new_v4(), - ilp_over_btp_url: Some( - Url::parse(&format!("btp+ws://{}/accounts/alice/ilp/btp", bind_addr)) - .unwrap(), - ), - ilp_over_btp_outgoing_token: Some("test_auth_token".to_string()), - ilp_over_btp_incoming_token: None, - }; - let accounts = vec![account.clone()]; - let addr = Address::from_str("example.address").unwrap(); - let addr_clone = addr.clone(); - let client = connect_client( - addr.clone(), - accounts, - true, - outgoing_service_fn(move |_| { - Err(RejectBuilder { - code: ErrorCode::F02_UNREACHABLE, - message: &[], - data: &[], - triggered_by: Some(&addr_clone), - } - .build()) - }), - ) - .and_then(move |btp_service| { - let mut btp_service = - btp_service.handle_incoming(incoming_service_fn(move |_| { - Err(RejectBuilder { - code: ErrorCode::F02_UNREACHABLE, - message: &[], - data: &[], - triggered_by: Some(&addr), - } - .build()) - })); - let btp_service_clone = btp_service.clone(); - btp_service - .send_request(OutgoingRequest { - from: account.clone(), 
- to: account.clone(), - original_amount: 100, - prepare: PrepareBuilder { - destination: Address::from_str("example.destination").unwrap(), - amount: 100, - execution_condition: &[0; 32], - expires_at: SystemTime::now() + Duration::from_secs(30), - data: b"test data", - } - .build(), - }) - .map_err(|reject| println!("Packet was rejected: {:?}", reject)) - .and_then(move |_| { - btp_service_clone.close(); - Ok(()) - }) - }); - let server = warp::serve(filter); - tokio::spawn(server.bind(bind_addr)); - client + #[tokio::test] + async fn client_server_test() { + let bind_addr = get_open_port(); + + let server_store = TestStore { + accounts: Arc::new(vec![TestAccount { + id: Uuid::new_v4(), + ilp_over_btp_incoming_token: Some("test_auth_token".to_string()), + ilp_over_btp_outgoing_token: None, + ilp_over_btp_url: None, + }]), + }; + let server_address = Address::from_str("example.server").unwrap(); + let btp_service = BtpOutgoingService::new( + server_address.clone(), + outgoing_service_fn(move |_| { + Err(RejectBuilder { + code: ErrorCode::F02_UNREACHABLE, + message: b"No other outgoing handler", + triggered_by: Some(&server_address), + data: &[], + } + .build()) + }), + ); + btp_service + .clone() + .handle_incoming(incoming_service_fn(|_| { + Ok(FulfillBuilder { + fulfillment: &[0; 32], + data: b"test data", + } + .build()) + })) + .await; + let filter = btp_service_as_filter(btp_service.clone(), server_store); + let server = warp::serve(filter); + // Spawn the server and listen for incoming connections + tokio::spawn(server.bind(bind_addr)); + + // Try to connect + let account = TestAccount { + id: Uuid::new_v4(), + ilp_over_btp_url: Some( + Url::parse(&format!("btp+ws://{}/accounts/alice/ilp/btp", bind_addr)).unwrap(), + ), + ilp_over_btp_outgoing_token: Some("test_auth_token".to_string()), + ilp_over_btp_incoming_token: None, + }; + let accounts = vec![account.clone()]; + let addr = Address::from_str("example.address").unwrap(); + let addr_clone = addr.clone(); 
+ + let btp_client = connect_client( + addr.clone(), + accounts, + true, + outgoing_service_fn(move |_| { + Err(RejectBuilder { + code: ErrorCode::F02_UNREACHABLE, + message: &[], + data: &[], + triggered_by: Some(&addr_clone), + } + .build()) + }), + ) + .await + .unwrap(); + + let mut btp_client = btp_client + .handle_incoming(incoming_service_fn(move |_| { + Err(RejectBuilder { + code: ErrorCode::F02_UNREACHABLE, + message: &[], + data: &[], + triggered_by: Some(&addr), + } + .build()) })) - .unwrap(); + .await; + + let res = btp_client + .send_request(OutgoingRequest { + from: account.clone(), + to: account.clone(), + original_amount: 100, + prepare: PrepareBuilder { + destination: Address::from_str("example.destination").unwrap(), + amount: 100, + execution_condition: &[0; 32], + expires_at: SystemTime::now() + Duration::from_secs(30), + data: b"test data", + } + .build(), + }) + .await; + dbg!(&res); + assert!(res.is_ok()); + btp_service.close(); } } diff --git a/crates/interledger-btp/src/server.rs b/crates/interledger-btp/src/server.rs index 2933316d4..e9e6deae9 100644 --- a/crates/interledger-btp/src/server.rs +++ b/crates/interledger-btp/src/server.rs @@ -1,23 +1,21 @@ -use super::service::{BtpOutgoingService, WsError}; use super::{packet::*, BtpAccount, BtpStore}; -use futures::{future::result, Async, AsyncSink, Future, Poll, Sink, Stream}; +use super::{service::BtpOutgoingService, wrapped_ws::WsWrap}; +use futures::{FutureExt, Sink, Stream}; +use futures::{SinkExt, StreamExt, TryFutureExt}; use interledger_service::*; -use log::{debug, error, warn}; +use log::{debug, warn}; use secrecy::{ExposeSecret, SecretString}; -use std::time::Duration; -use tokio_timer::Timeout; -use tungstenite; +// use std::time::Duration; use warp::{ self, - ws::{Message, WebSocket, Ws2}, + ws::{Message, WebSocket, Ws}, Filter, }; // Close the incoming websocket connection if the auth details // have not been received within this timeout -const WEBSOCKET_TIMEOUT: Duration = 
Duration::from_secs(10); - -// const MAX_MESSAGE_SIZE: usize = 40000; +// const WEBSOCKET_TIMEOUT: Duration = Duration::from_secs(10); +const MAX_MESSAGE_SIZE: usize = 40000; /// Returns a BtpOutgoingService and a warp Filter. /// @@ -37,35 +35,24 @@ pub fn btp_service_as_filter( where O: OutgoingService + Clone + Send + Sync + 'static, S: BtpStore + Clone + Send + Sync + 'static, - A: BtpAccount + 'static, + A: BtpAccount + Send + Sync + 'static, { warp::path("accounts") - .and(warp::path::param2::()) + .and(warp::path::param::()) .and(warp::path("ilp")) .and(warp::path("btp")) .and(warp::path::end()) - .and(warp::ws2()) - .map(move |username: Username, ws: Ws2| { - let store = store.clone(); + .and(warp::ws()) + .map(move |username: Username, ws: Ws| { + // warp Websocket let service_clone = service.clone(); - ws.on_upgrade(move |ws: WebSocket| { - // TODO set max_message_size once https://github.com/seanmonstar/warp/pull/272 is merged - let service_clone = service_clone.clone(); - Timeout::new(validate_auth(store, username, ws), WEBSOCKET_TIMEOUT) - .and_then(move |(account, connection)| { - debug!( - "Added connection for account {}: (id: {})", - account.username(), - account.id() - ); - service_clone.add_connection(account, WsWrap { connection }); - Ok(()) - }) - .or_else(|_| { - warn!("Closing Websocket connection because of an error"); - Ok(()) - }) - }) + let store_clone = store.clone(); + ws.max_message_size(MAX_MESSAGE_SIZE) + .on_upgrade(|socket: WebSocket| { + // wrapper over tungstenite Websocket + add_connections(socket, username, service_clone, store_clone) + .map(|result| result.unwrap()) + }) }) .boxed() } @@ -74,93 +61,40 @@ where /// tungstenite Websocket connection. 
It is needed for /// compatibility with the BTP service that interacts with the /// websocket implementation from warp and tokio-tungstenite -struct WsWrap { - connection: W, -} - -impl Stream for WsWrap +async fn add_connections( + socket: WebSocket, + username: Username, + service: BtpOutgoingService, + store: S, +) -> Result<(), ()> where - W: Stream - + Sink, + O: OutgoingService + Clone + Send + Sync + 'static, + S: BtpStore + Clone + Send + Sync + 'static, + A: BtpAccount + Send + Sync + 'static, { - type Item = tungstenite::Message; - type Error = WsError; - - fn poll(&mut self) -> Poll, Self::Error> { - match self.connection.poll() { - Ok(Async::NotReady) => Ok(Async::NotReady), - Ok(Async::Ready(None)) => Ok(Async::Ready(None)), - Ok(Async::Ready(Some(message))) => { - let message = if message.is_ping() { - tungstenite::Message::Ping(message.into_bytes()) - } else if message.is_binary() { - tungstenite::Message::Binary(message.into_bytes()) - } else if message.is_text() { - tungstenite::Message::Text(message.to_str().unwrap_or_default().to_string()) - } else if message.is_close() { - tungstenite::Message::Close(None) - } else { - warn!( - "Got unexpected websocket message, closing connection: {:?}", - message - ); - tungstenite::Message::Close(None) - }; - Ok(Async::Ready(Some(message))) - } - Err(err) => Err(WsError::from(err)), + // We ignore all the errors + let socket = socket.filter_map(|v| async move { v.ok() }); + match validate_auth(store, username, socket) + // .timeout(WEBSOCKET_TIMEOUT) // No method found? + .await + { + Ok((account, connection)) => { + // We need to wrap our Warp connection in order to cast the Sink type + // to tungstenite::Message. This probably can be implemented with SinkExt::with + // but couldn't figure out how. 
+ service.add_connection(account.clone(), WsWrap { connection }); + debug!( + "Added connection for account {}: (id: {})", + account.username(), + account.id() + ); } - } -} - -impl Sink for WsWrap -where - W: Stream - + Sink, -{ - type SinkItem = tungstenite::Message; - type SinkError = WsError; - - fn start_send( - &mut self, - item: Self::SinkItem, - ) -> Result, Self::SinkError> { - match item { - tungstenite::Message::Binary(data) => self - .connection - .start_send(Message::binary(data)) - .map(|result| { - if let AsyncSink::NotReady(message) = result { - AsyncSink::NotReady(tungstenite::Message::Binary(message.into_bytes())) - } else { - AsyncSink::Ready - } - }) - .map_err(WsError::from), - tungstenite::Message::Text(data) => { - match self.connection.start_send(Message::text(data)) { - Ok(AsyncSink::NotReady(message)) => { - if let Ok(string) = String::from_utf8(message.into_bytes()) { - Ok(AsyncSink::NotReady(tungstenite::Message::text(string))) - } else { - Err(WsError::Tungstenite(tungstenite::Error::Utf8)) - } - } - Ok(AsyncSink::Ready) => Ok(AsyncSink::Ready), - Err(err) => Err(WsError::from(err)), - } - } - // Ignore other message types because warp's WebSocket type doesn't - // allow us to send any other types of messages - // TODO make sure warp's websocket responds to pings and/or sends them to keep the - // connection alive - _ => Ok(AsyncSink::Ready), + Err(_) => { + warn!("Closing Websocket connection because of an error"); } - } + }; - fn poll_complete(&mut self) -> Poll<(), Self::SinkError> { - self.connection.poll_complete().map_err(WsError::from) - } + Ok(()) } struct Auth { @@ -168,74 +102,58 @@ struct Auth { token: SecretString, } -fn validate_auth( +async fn validate_auth( store: S, username: Username, - connection: impl Stream - + Sink, -) -> impl Future< - Item = ( - A, - impl Stream - + Sink, - ), - Error = (), -> + connection: impl Stream + Sink, +) -> Result<(A, impl Stream + Sink), ()> where S: BtpStore + 'static, A: BtpAccount + 
'static, { - get_auth(connection).and_then(move |(auth, connection)| { - debug!("Got BTP connection for username: {}", username); - store - .get_account_from_btp_auth(&username, &auth.token.expose_secret()) - .map_err(move |_| warn!("BTP connection does not correspond to an account")) - .and_then(move |account| { - let auth_response = Message::binary( - BtpResponse { - request_id: auth.request_id, - protocol_data: Vec::new(), - } - .to_bytes(), - ); - connection - .send(auth_response) - .map_err(|_err| error!("warp::Error sending auth response")) - .and_then(|connection| Ok((account, connection))) - }) - }) + let (auth, mut connection) = get_auth(Box::pin(connection)).await?; + debug!("Got BTP connection for username: {}", username); + let account = store + .get_account_from_btp_auth(&username, &auth.token.expose_secret()) + .map_err(move |_| warn!("BTP connection does not correspond to an account")) + .await?; + + let auth_response = Message::binary( + BtpResponse { + request_id: auth.request_id, + protocol_data: Vec::new(), + } + .to_bytes(), + ); + + let _ = connection.send(auth_response).await; + + Ok((account, connection)) } -fn get_auth( - connection: impl Stream - + Sink, -) -> impl Future< - Item = ( - Auth, - impl Stream - + Sink, - ), - Error = (), -> { - connection - .skip_while(|message| { - // Skip non-binary messages like Pings and Pongs - // Note that the BTP protocol spec technically specifies that - // the auth message MUST be the first packet sent over the - // WebSocket connection. However, the JavaScript implementation - // of BTP sends a Ping packet first, so we should ignore it. - // (Be liberal in what you accept but strict in what you send) - Ok(!message.is_binary()) - // TODO: should we error if the client sends something other than a binary or ping packet first? 
- }) - .into_future() - .map_err(|_err| ()) - .and_then(move |(message, connection)| { - // The first packet sent on the connection MUST be the auth packet - result(parse_auth(message).map(|auth| (auth, connection)).ok_or_else(|| { - warn!("Got a BTP connection where the first packet sent was not a valid BTP Auth message. Closing the connection") - })) - }) +/// Reads the first non-empty non-error binary message from the WebSocket and attempts to parse it as an AuthToken +async fn get_auth( + connection: impl Stream + Sink + Unpin, +) -> Result<(Auth, impl Stream + Sink), ()> { + // Skip non-binary messages like Pings and Pongs + // Note that the BTP protocol spec technically specifies that + // the auth message MUST be the first packet sent over the + // WebSocket connection. However, the JavaScript implementation + // of BTP sends a Ping packet first, so we should ignore it. + // (Be liberal in what you accept but strict in what you send) + // TODO: should we error if the client sends something other than a binary or ping packet first? + let mut connection = + connection.skip_while(move |message| futures::future::ready(!message.is_binary())); + + // The first packet sent on the connection MUST be the auth packet + let message = connection.next().await; + match parse_auth(message) { + Some(auth) => Ok((auth, Box::pin(connection))), + None => { + warn!("Got a BTP connection where the first packet sent was not a valid BTP Auth message. 
Closing the connection"); + Err(()) + } + } } fn parse_auth(ws_packet: Option) -> Option { @@ -245,6 +163,9 @@ fn parse_auth(ws_packet: Option) -> Option { Ok(message) => { let request_id = message.request_id; let mut token: Option = None; + // The primary data should be the "auth" with empty data + // The secondary data MUST have the "auth_token" with the authorization + // token set as the data field for protocol_data in message.protocol_data.iter() { let protocol_name: &str = protocol_data.protocol_name.as_ref(); if protocol_name == "auth_token" { @@ -255,7 +176,7 @@ fn parse_auth(ws_packet: Option) -> Option { if let Some(token) = token { return Some(Auth { request_id, - token: SecretString::new(token.to_string()), + token: SecretString::new(token), }); } else { warn!("BTP packet is missing auth token"); diff --git a/crates/interledger-btp/src/service.rs b/crates/interledger-btp/src/service.rs index 88f0395ad..3669e957b 100644 --- a/crates/interledger-btp/src/service.rs +++ b/crates/interledger-btp/src/service.rs @@ -1,69 +1,44 @@ use super::{packet::*, BtpAccount}; +use async_trait::async_trait; use bytes::BytesMut; use futures::{ - future::err, - sync::mpsc::{unbounded, UnboundedReceiver, UnboundedSender}, - sync::oneshot, - Future, Sink, Stream, + channel::{ + mpsc::{unbounded, UnboundedReceiver, UnboundedSender}, + oneshot, + }, + future, FutureExt, Sink, Stream, StreamExt, }; use interledger_packet::{Address, ErrorCode, Fulfill, Packet, Prepare, Reject, RejectBuilder}; use interledger_service::*; +use lazy_static::lazy_static; use log::{debug, error, trace, warn}; use parking_lot::{Mutex, RwLock}; use rand::random; use std::collections::HashMap; -use std::{ - convert::TryFrom, error::Error, fmt, io, iter::IntoIterator, marker::PhantomData, sync::Arc, - time::Duration, -}; +use std::{convert::TryFrom, iter::IntoIterator, marker::PhantomData, sync::Arc, time::Duration}; use stream_cancel::{Trigger, Valve}; -use tokio_executor::spawn; -use 
tokio_timer::Interval; +use tokio::time; use tungstenite::Message; use uuid::Uuid; -use warp; const PING_INTERVAL: u64 = 30; // seconds -type IlpResultChannel = oneshot::Sender>; -type IncomingRequestBuffer = UnboundedReceiver<(A, u32, Prepare)>; - -#[derive(Debug)] -pub enum WsError { - Tungstenite(tungstenite::Error), - Warp(warp::Error), -} - -impl fmt::Display for WsError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - WsError::Tungstenite(err) => err.fmt(f), - WsError::Warp(err) => err.fmt(f), - } - } -} - -impl Error for WsError {} - -impl From for WsError { - fn from(err: tungstenite::Error) -> Self { - WsError::Tungstenite(err) - } +lazy_static! { + static ref PING: Message = Message::Ping(Vec::with_capacity(0)); + static ref PONG: Message = Message::Pong(Vec::with_capacity(0)); } -impl From for WsError { - fn from(err: warp::Error) -> Self { - WsError::Warp(err) - } -} +type IlpResultChannel = oneshot::Sender>; +type IncomingRequestBuffer = UnboundedReceiver<(A, u32, Prepare)>; /// A container for BTP/WebSocket connections that implements OutgoingService /// for sending outgoing ILP Prepare packets over one of the connected BTP connections. #[derive(Clone)] pub struct BtpOutgoingService { - // TODO support multiple connections per account ilp_address: Address, + /// Outgoing messages for the receiver of the websocket indexed by account uid connections: Arc>>>, + // Pending results which are pending_outgoing: Arc>>, pending_incoming: Arc>>>, incoming_sender: UnboundedSender<(A, u32, Prepare)>, @@ -72,10 +47,75 @@ pub struct BtpOutgoingService { stream_valve: Arc, } +// Handle the packets based on whether they are an incoming request or a response to something we sent. +// a. If it's a prepare packet, it gets buffered in the incoming_sender channel which will get consumed +// once an incoming handler is added +// b. 
If it's a Fulfill/Reject packet, it gets added to the pending_outgoing hashmap which gets consumed +// by the outgoing service implementation immediately +// incoming_sender.unbounded_send basically sends data to the self.incoming_receiver +// to be consumed when we setup the incoming handler +// Set up a listener to handle incoming packets from the WebSocket connection +async fn handle_message( + message: Message, + tx_clone: UnboundedSender, + account: A, + pending_requests: Arc>>, + incoming_sender: UnboundedSender<(A, u32, Prepare)>, +) { + if message.is_binary() { + match parse_ilp_packet(message) { + // Queues up the prepare packet + Ok((request_id, Packet::Prepare(prepare))) => { + trace!( + "Got incoming Prepare packet on request ID: {} {:?}", + request_id, + prepare + ); + let _ = incoming_sender + .unbounded_send((account, request_id, prepare)) + .map_err(|err| error!("Unable to buffer incoming request: {:?}", err)); + } + // Sends the fulfill/reject to the outgoing service + Ok((request_id, Packet::Fulfill(fulfill))) => { + trace!("Got fulfill response to request id {}", request_id); + if let Some(channel) = (*pending_requests.lock()).remove(&request_id) { + let _ = channel.send(Ok(fulfill)).map_err(|fulfill| error!("Error forwarding Fulfill packet back to the Future that sent the Prepare: {:?}", fulfill)); + } else { + warn!( + "Got Fulfill packet that does not match an outgoing Prepare we sent: {:?}", + fulfill + ); + } + } + Ok((request_id, Packet::Reject(reject))) => { + trace!("Got reject response to request id {}", request_id); + if let Some(channel) = (*pending_requests.lock()).remove(&request_id) { + let _ = channel.send(Err(reject)).map_err(|reject| error!("Error forwarding Reject packet back to the Future that sent the Prepare: {:?}", reject)); + } else { + warn!( + "Got Reject packet that does not match an outgoing Prepare we sent: {:?}", + reject + ); + } + } + Err(_) => { + debug!("Unable to parse ILP packet from BTP packet (if this is the 
first time this appears, the packet was probably the auth response)"); + // TODO Send error back + } + } + } else if message.is_ping() { + trace!("Responding to Ping message from account {}", account.id()); + // Writes back the PONG to the websocket + let _ = tx_clone + .unbounded_send(PONG.clone()) + .map_err(|err| error!("Error sending Pong message back: {:?}", err)); + } +} + impl BtpOutgoingService where O: OutgoingService + Clone, - A: BtpAccount + 'static, + A: BtpAccount + Send + Sync + 'static, { pub fn new(ilp_address: Address, next: O) -> Self { let (incoming_sender, incoming_receiver) = unbounded(); @@ -100,133 +140,104 @@ where self.close_all_connections.lock().take(); } - /// Set up a WebSocket connection so that outgoing Prepare packets can be sent to it, - /// incoming Prepare packets are buffered in a channel (until an IncomingService is added - /// via the handle_incoming method), and ILP Fulfill and Reject packets will be - /// sent back to the Future that sent the outgoing request originally. + // Set up a WebSocket connection so that outgoing Prepare packets can be sent to it, + // incoming Prepare packets are buffered in a channel (until an IncomingService is added + // via the handle_incoming method), and ILP Fulfill and Reject packets will be + // sent back to the Future that sent the outgoing request originally. 
pub(crate) fn add_connection( &self, account: A, - connection: impl Stream - + Sink - + Send - + 'static, + ws_stream: impl Stream + Sink + Send + 'static, ) { let account_id = account.id(); - // Set up a channel to forward outgoing packets to the WebSocket connection - let (tx, rx) = unbounded(); - let (sink, stream) = connection.split(); + let (client_tx, client_rx) = unbounded(); + let (write, read) = ws_stream.split(); let (close_connection, valve) = Valve::new(); - let stream = valve.wrap(stream); - let stream = self.stream_valve.wrap(stream); - let forward_to_connection = sink - .send_all(rx.map_err(|_err| { - WsError::Tungstenite(io::Error::from(io::ErrorKind::ConnectionAborted).into()) - })) - .then(move |_| { + + // tx -> rx -> write -> our peer + // Responsible mainly for responding to Pings + // TODO: We must somehow figure out how to merge this stream with the incoming one + let write_to_ws = client_rx.map(Ok).forward(write).then(move |_| { + async move { debug!( "Finished forwarding to WebSocket stream for account: {}", account_id ); drop(close_connection); - Ok(()) - }); + Ok::<(), ()>(()) + } + }); + tokio::spawn(write_to_ws); - // Send pings every PING_INTERVAL until the connection closes or the Service is dropped - let tx_clone = tx.clone(); - let send_pings = valve - .wrap( - self.stream_valve - .wrap(Interval::new_interval(Duration::from_secs(PING_INTERVAL))), + // Process incoming messages depending on their type + let pending_outgoing = self.pending_outgoing.clone(); + let incoming_sender = self.incoming_sender.clone(); + let client_tx_clone = client_tx.clone(); + let handle_message_fn = move |msg: Message| { + handle_message( + msg, + client_tx_clone.clone(), + account.clone(), + pending_outgoing.clone(), + incoming_sender.clone(), ) - .map_err(|err| { - warn!("Timer error on Ping interval: {:?}", err); - }) - .for_each(move |_| { - if let Err(err) = tx_clone.unbounded_send(Message::Ping(Vec::with_capacity(0))) { - warn!( - "Error sending Ping 
on connection to account {}: {:?}", - account_id, err - ); - } - Ok(()) - }); - spawn(send_pings); + }; - // Set up a listener to handle incoming packets from the WebSocket connection - // TODO do we need all this cloning? - let pending_requests = self.pending_outgoing.clone(); - let incoming_sender = self.incoming_sender.clone(); - let tx_clone = tx.clone(); - let handle_incoming = stream.map_err(move |err| error!("Error reading from WebSocket stream for account {}: {:?}", account_id, err)).for_each(move |message| { - // Handle the packets based on whether they are an incoming request or a response to something we sent - if message.is_binary() { - match parse_ilp_packet(message) { - Ok((request_id, Packet::Prepare(prepare))) => { - trace!("Got incoming Prepare packet on request ID: {} {:?}", request_id, prepare); - incoming_sender.clone().unbounded_send((account.clone(), request_id, prepare)) - .map_err(|err| error!("Unable to buffer incoming request: {:?}", err)) - }, - Ok((request_id, Packet::Fulfill(fulfill))) => { - trace!("Got fulfill response to request id {}", request_id); - if let Some(channel) = (*pending_requests.lock()).remove(&request_id) { - channel.send(Ok(fulfill)).map_err(|fulfill| error!("Error forwarding Fulfill packet back to the Future that sent the Prepare: {:?}", fulfill)) - } else { - warn!("Got Fulfill packet that does not match an outgoing Prepare we sent: {:?}", fulfill); - Ok(()) - } - } - Ok((request_id, Packet::Reject(reject))) => { - trace!("Got reject response to request id {}", request_id); - if let Some(channel) = (*pending_requests.lock()).remove(&request_id) { - channel.send(Err(reject)).map_err(|reject| error!("Error forwarding Reject packet back to the Future that sent the Prepare: {:?}", reject)) - } else { - warn!("Got Reject packet that does not match an outgoing Prepare we sent: {:?}", reject); - Ok(()) - } - }, - Err(_) => { - debug!("Unable to parse ILP packet from BTP packet (if this is the first time this appears, the 
packet was probably the auth response)"); - // TODO Send error back - Ok(()) - } - } - } else if message.is_ping() { - trace!("Responding to Ping message from account {}", account.id()); - tx_clone.unbounded_send(Message::Pong(Vec::new())).map_err(|err| error!("Error sending Pong message back: {:?}", err)) - } else { - Ok(()) - } - }).then(move |result| { - debug!("Finished reading from WebSocket stream for account: {}", account_id); - result + // Close connections triggers + let read = valve.wrap(read); + let read = self.stream_valve.wrap(read); + let read_from_ws = read.for_each(handle_message_fn).then(move |_| { + async move { + debug!( + "Finished reading from WebSocket stream for account: {}", + account_id + ); + Ok::<(), ()>(()) + } }); - let connections = self.connections.clone(); - let keep_connections_open = self.close_all_connections.clone(); - let handle_connection = handle_incoming - .select(forward_to_connection) - .then(move |_| { - let _ = keep_connections_open; - let mut connections = connections.write(); - connections.remove(&account_id); - debug!( - "WebSocket connection closed for account {} ({} connections still open)", - account_id, - connections.len() + // TODO: How can we drop the trigger when both the read and write spawn'ed futures + // have completed? 
+ // let connections = self.connections.clone(); + // let keep_connections_open = self.close_all_connections.clone(); + // .then(move |_| { + // let _ = keep_connections_open; + // let mut connections = connections.write(); + // connections.remove(&account_id); + // debug!( + // "WebSocket connection closed for account {} ({} connections still open)", + // account_id, + // connections.len() + // ); + // future::ready(()) + // }); + tokio::spawn(read_from_ws); + + // Send pings every PING_INTERVAL until the connection closes (when `drop(close_connection)` is called) + // or the Service is dropped (which will implicitly drop `close_all_connections`, closing the stream_valve) + let tx_clone = client_tx.clone(); + let ping_interval = time::interval(Duration::from_secs(PING_INTERVAL)); + let repeat_until_service_drops = self.stream_valve.wrap(ping_interval); + let send_pings = valve.wrap(repeat_until_service_drops).for_each(move |_| { + // For each tick send a ping + if let Err(err) = tx_clone.unbounded_send(PING.clone()) { + warn!( + "Error sending Ping on connection to account {}: {:?}", + account_id, err ); - Ok(()) - }); - spawn(handle_connection); + } + future::ready(()) + }); + tokio::spawn(send_pings); // Save the sender side of the channel so we have a way to forward outgoing requests to the WebSocket - self.connections.write().insert(account_id, tx); + self.connections.write().insert(account_id, client_tx); } /// Convert this BtpOutgoingService into a bidirectional BtpService by adding a handler for incoming requests. /// This will automatically pull all incoming Prepare packets from the channel buffer and call the IncomingService with them. 
- pub fn handle_incoming(self, incoming_handler: I) -> BtpService + pub async fn handle_incoming(self, incoming_handler: I) -> BtpService where I: IncomingService + Clone + Send + 'static, { @@ -234,14 +245,14 @@ where // the incoming Prepare packets they get in self.pending_incoming // Now that we're adding an incoming handler, this will spawn a task to read // all Prepare packets from the buffer, handle them, and send the responses back - let mut incoming_handler_clone = incoming_handler.clone(); let connections_clone = self.connections.clone(); - let handle_pending_incoming = self + let mut handle_pending_incoming = self .pending_incoming .lock() .take() - .expect("handle_incoming can only be called once") - .for_each(move |(account, request_id, prepare)| { + .expect("handle_incoming can only be called once"); + let handle_pending_incoming_fut = async move { + while let Some((account, request_id, prepare)) = handle_pending_incoming.next().await { let account_id = account.id(); let connections_clone = connections_clone.clone(); let request = IncomingRequest { @@ -254,37 +265,34 @@ where request.from.username(), request.from.id() ); - incoming_handler_clone - .handle_request(request) - .then(move |result| { - let packet = match result { - Ok(fulfill) => Packet::Fulfill(fulfill), - Err(reject) => Packet::Reject(reject), - }; - if let Some(connection) = connections_clone - .read() - .get(&account_id) { - let message = ilp_packet_to_ws_message(request_id, packet); - connection - .clone() - .unbounded_send(message) - .map_err(move |err| { - error!( - "Error sending response to account: {} {:?}", - account_id, err - ) - }) - } else { - error!("Error sending response to account: {}, connection was closed. 
{:?}", account_id, packet); - Err(()) - } - }) - }) - .then(move |_| { - trace!("Finished reading from pending_incoming buffer"); - Ok(()) - }); - spawn(handle_pending_incoming); + let mut handler = incoming_handler.clone(); + let packet = match handler.handle_request(request).await { + Ok(fulfill) => Packet::Fulfill(fulfill), + Err(reject) => Packet::Reject(reject), + }; + + // TODO: Is it OK to remove the results from here? + if let Some(connection) = connections_clone.clone().read().get(&account_id) { + let message = ilp_packet_to_ws_message(request_id, packet); + let _ = connection.unbounded_send(message).map_err(move |err| { + error!( + "Error sending response to account: {} {:?}", + account_id, err + ) + }); + } else { + error!( + "Error sending response to account: {}, connection was closed. {:?}", + account_id, packet + ); + } + } + + trace!("Finished reading from pending_incoming buffer"); + Ok::<(), ()>(()) + }; + + tokio::spawn(handle_pending_incoming_fut); BtpService { outgoing: self, @@ -293,20 +301,20 @@ where } } +#[async_trait] impl OutgoingService for BtpOutgoingService where - O: OutgoingService + Clone, - A: BtpAccount + 'static, + O: OutgoingService + Send + Sync + Clone + 'static, + A: BtpAccount + Send + Sync + Clone + 'static, { - type Future = BoxedIlpFuture; - /// Send an outgoing request to one of the open connections. /// /// If there is no open connection for the Account specified in `request.to`, the /// request will be passed through to the `next` handler. 
- fn send_request(&mut self, request: OutgoingRequest) -> Self::Future { + async fn send_request(&mut self, request: OutgoingRequest) -> IlpResult { let account_id = request.to.id(); - if let Some(connection) = (*self.connections.read()).get(&account_id) { + let connections = self.connections.read().clone(); // have to clone here to avoid await errors + if let Some(connection) = connections.get(&account_id) { let request_id = random::(); let ilp_address = self.ilp_address.clone(); @@ -315,11 +323,14 @@ where let keep_connections_open = self.close_all_connections.clone(); trace!( - "Sending outgoing request {} to account {}", + "Sending outgoing request {} to {} ({})", request_id, + request.to.username(), account_id ); + // Connection is an unbounded sender which sends to the rx that + // forwards to the sink which sends the data over match connection.unbounded_send(ilp_packet_to_ws_message( request_id, Packet::Prepare(request.prepare), @@ -327,59 +338,53 @@ where Ok(_) => { let (sender, receiver) = oneshot::channel(); (*self.pending_outgoing.lock()).insert(request_id, sender); - Box::new( - receiver - .then(move |result| { - // Drop the trigger here since we've gotten the response - // and don't need to keep the connections open if this was the - // last thing we were waiting for - let _ = keep_connections_open; - result - }) - .map_err(move |err| { - error!( - "Sending request {} to account {} failed: {:?}", - request_id, account_id, err - ); - RejectBuilder { - code: ErrorCode::T00_INTERNAL_ERROR, - message: &[], - triggered_by: Some(&ilp_address), - data: &[], - } - .build() - }) - .and_then(|result| match result { - Ok(fulfill) => Ok(fulfill), - Err(reject) => Err(reject), - }), - ) + let result = receiver.await; + // Drop the trigger here since we've gotten the response + // and don't need to keep the connections open if this was the + // last thing we were waiting for + let _ = keep_connections_open; + match result { + // This can be either a reject or a 
fulfill packet + Ok(packet) => packet, + Err(err) => { + error!( + "Sending request {} to account {} failed: {:?}", + request_id, account_id, err + ); + Err(RejectBuilder { + code: ErrorCode::T00_INTERNAL_ERROR, + message: &[], + triggered_by: Some(&ilp_address), + data: &[], + } + .build()) + } + } } Err(send_error) => { error!( "Error sending websocket message for request {} to account {}: {:?}", request_id, account_id, send_error ); - let reject = RejectBuilder { + Err(RejectBuilder { code: ErrorCode::T00_INTERNAL_ERROR, message: &[], triggered_by: Some(&ilp_address), data: &[], } - .build(); - Box::new(err(reject)) + .build()) } } - } else if request.to.get_ilp_over_btp_url().is_some() - || request.to.get_ilp_over_btp_outgoing_token().is_some() - { - trace!( - "No open connection for account: {}, forwarding request to the next service", - request.to.id() - ); - Box::new(self.next.send_request(request)) } else { - Box::new(self.next.send_request(request)) + if request.to.get_ilp_over_btp_url().is_some() + || request.to.get_ilp_over_btp_outgoing_token().is_some() + { + trace!( + "No open connection for account: {}, forwarding request to the next service", + request.to.username() + ); + } + self.next.send_request(request).await } } } @@ -394,7 +399,7 @@ impl BtpService where I: IncomingService + Clone + Send + 'static, O: OutgoingService + Clone, - A: BtpAccount + 'static, + A: BtpAccount + Send + Sync + 'static, { /// Close all of the open WebSocket connections pub fn close(&self) { @@ -402,19 +407,19 @@ where } } +#[async_trait] impl OutgoingService for BtpService where - O: OutgoingService + Clone + Send + 'static, - A: BtpAccount + 'static, + I: Send, // This is a async/await requirement + O: OutgoingService + Send + Sync + Clone + 'static, + A: BtpAccount + Send + Sync + Clone + 'static, { - type Future = BoxedIlpFuture; - /// Send an outgoing request to one of the open connections. 
/// /// If there is no open connection for the Account specified in `request.to`, the /// request will be passed through to the `next` handler. - fn send_request(&mut self, request: OutgoingRequest) -> Self::Future { - self.outgoing.send_request(request) + async fn send_request(&mut self, request: OutgoingRequest) -> IlpResult { + self.outgoing.send_request(request).await } } @@ -460,42 +465,31 @@ fn parse_ilp_packet(message: Message) -> Result<(u32, Packet), ()> { } fn ilp_packet_to_ws_message(request_id: u32, packet: Packet) -> Message { - match packet { - Packet::Prepare(prepare) => { - let data = BytesMut::from(prepare).to_vec(); - let btp_packet = BtpMessage { - request_id, - protocol_data: vec![ProtocolData { - protocol_name: "ilp".to_string(), - content_type: ContentType::ApplicationOctetStream, - data, - }], - }; - Message::binary(btp_packet.to_bytes()) + let (data, is_response) = match packet { + Packet::Prepare(prepare) => (BytesMut::from(prepare).to_vec(), false), + Packet::Fulfill(fulfill) => (BytesMut::from(fulfill).to_vec(), true), + Packet::Reject(reject) => (BytesMut::from(reject).to_vec(), true), + }; + let btp_packet = if is_response { + BtpMessage { + request_id, + protocol_data: vec![ProtocolData { + protocol_name: "ilp".to_string(), + content_type: ContentType::ApplicationOctetStream, + data, + }], } - Packet::Fulfill(fulfill) => { - let data = BytesMut::from(fulfill).to_vec(); - let btp_packet = BtpResponse { - request_id, - protocol_data: vec![ProtocolData { - protocol_name: "ilp".to_string(), - content_type: ContentType::ApplicationOctetStream, - data, - }], - }; - Message::binary(btp_packet.to_bytes()) - } - Packet::Reject(reject) => { - let data = BytesMut::from(reject).to_vec(); - let btp_packet = BtpResponse { - request_id, - protocol_data: vec![ProtocolData { - protocol_name: "ilp".to_string(), - content_type: ContentType::ApplicationOctetStream, - data, - }], - }; - Message::binary(btp_packet.to_bytes()) + .to_bytes() + } else { + 
BtpResponse { + request_id, + protocol_data: vec![ProtocolData { + protocol_name: "ilp".to_string(), + content_type: ContentType::ApplicationOctetStream, + data, + }], } - } + .to_bytes() + }; + Message::binary(btp_packet) } diff --git a/crates/interledger-btp/src/wrapped_ws.rs b/crates/interledger-btp/src/wrapped_ws.rs new file mode 100644 index 000000000..230bc7417 --- /dev/null +++ b/crates/interledger-btp/src/wrapped_ws.rs @@ -0,0 +1,92 @@ +use futures::stream::Stream; +use futures::Sink; +use log::warn; +use pin_project::pin_project; +use std::pin::Pin; +use std::task::{Context, Poll}; +use warp::ws::Message; + +/// Wrapper struct to unify the Tungstenite WebSocket connection from connect_async +/// with the Warp websocket connection from ws.upgrade. Stream and Sink are re-implemented +/// for this struct, normalizing it to use Tungstenite's messages and a wrapped error type +#[pin_project] +#[derive(Clone)] +pub(crate) struct WsWrap { + #[pin] + pub(crate) connection: W, +} + +impl Stream for WsWrap +where + W: Stream, +{ + type Item = tungstenite::Message; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let this = self.project(); + match this.connection.poll_next(cx) { + Poll::Pending => Poll::Pending, + Poll::Ready(val) => match val { + Some(v) => { + let v = convert_msg(v); + Poll::Ready(Some(v)) + } + None => Poll::Ready(None), + }, + } + } +} + +impl Sink for WsWrap +where + W: Sink, +{ + type Error = W::Error; + + fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.project(); + this.connection.poll_ready(cx) + } + + fn start_send(self: Pin<&mut Self>, item: tungstenite::Message) -> Result<(), Self::Error> { + let this = self.project(); + let item = match item { + tungstenite::Message::Binary(data) => Message::binary(data), + tungstenite::Message::Text(data) => Message::text(data), + // Ignore other message types because warp's WebSocket type doesn't + // allow us to send any other types of 
messages + // TODO make sure warp's websocket responds to pings and/or sends them to keep the + // connection alive + _ => return Ok(()), + }; + this.connection.start_send(item) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.project(); + this.connection.poll_flush(cx) + } + + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.project(); + this.connection.poll_close(cx) + } +} + +fn convert_msg(message: Message) -> tungstenite::Message { + if message.is_ping() { + tungstenite::Message::Ping(message.into_bytes()) + } else if message.is_binary() { + tungstenite::Message::Binary(message.into_bytes()) + } else if message.is_text() { + tungstenite::Message::Text(message.to_str().unwrap_or_default().to_string()) + } else if message.is_close() { + tungstenite::Message::Close(None) + } else { + warn!( + "Got unexpected websocket message, closing connection: {:?}", + message + ); + tungstenite::Message::Close(None) + } +} diff --git a/crates/interledger-ccp/Cargo.toml b/crates/interledger-ccp/Cargo.toml index 203030e2c..afec1a382 100644 --- a/crates/interledger-ccp/Cargo.toml +++ b/crates/interledger-ccp/Cargo.toml @@ -10,7 +10,7 @@ repository = "https://github.com/interledger-rs/interledger-rs" [dependencies] bytes = { version = "0.4.12", default-features = false } byteorder = { version = "1.3.2", default-features = false } -futures = { version = "0.1.29", default-features = false } +futures = { version = "0.3", default-features = false } hex = { version = "0.4.0", default-features = false } interledger-packet = { path = "../interledger-packet", version = "^0.4.0", default-features = false } interledger-service = { path = "../interledger-service", version = "^0.4.0", default-features = false } @@ -19,6 +19,7 @@ log = { version = "0.4.8", default-features = false } parking_lot = { version = "0.9.0", default-features = false } ring = { version = "0.16.9", default-features = false } 
tokio-executor = { version = "0.1.8", default-features = false } -tokio-timer = { version = "0.2.11", default-features = false } uuid = { version = "0.8.1", default-features = false, features = ["v4"]} serde = { version = "1.0.101", default-features = false, features = ["derive"] } +async-trait = "0.1.22" +tokio = { version = "0.2.6", features = ["time", "rt-core", "macros"] } \ No newline at end of file diff --git a/crates/interledger-ccp/src/lib.rs b/crates/interledger-ccp/src/lib.rs index 0eece31c6..ef677f906 100644 --- a/crates/interledger-ccp/src/lib.rs +++ b/crates/interledger-ccp/src/lib.rs @@ -9,7 +9,7 @@ //! updates are used by the `Router` to forward incoming packets to the best next hop //! we know about. -use futures::Future; +use async_trait::async_trait; use interledger_service::Account; use std::collections::HashMap; use std::{fmt, str::FromStr}; @@ -30,7 +30,7 @@ use serde::{Deserialize, Serialize}; /// Data structure used to describe the routing relation of an account with its peers. #[repr(u8)] -#[derive(Clone, Copy, Debug, PartialEq, PartialOrd, Serialize, Deserialize)] +#[derive(Clone, Copy, Debug, PartialEq, PartialOrd, Serialize, Deserialize, Ord, Eq)] pub enum RoutingRelation { /// An account from which we do not receive routes from, neither broadcast /// routes to @@ -98,25 +98,24 @@ pub trait CcpRoutingAccount: Account { type Routes = HashMap; type LocalAndConfiguredRoutes = (Routes, Routes); +#[async_trait] pub trait RouteManagerStore: Clone { type Account: CcpRoutingAccount; // TODO should we have a way to only get the details for specific routes? 
- fn get_local_and_configured_routes( + async fn get_local_and_configured_routes( &self, - ) -> Box, Error = ()> + Send>; + ) -> Result, ()>; - fn get_accounts_to_send_routes_to( + async fn get_accounts_to_send_routes_to( &self, ignore_accounts: Vec, - ) -> Box, Error = ()> + Send>; + ) -> Result, ()>; - fn get_accounts_to_receive_routes_from( - &self, - ) -> Box, Error = ()> + Send>; + async fn get_accounts_to_receive_routes_from(&self) -> Result, ()>; - fn set_routes( + async fn set_routes( &mut self, - routes: impl IntoIterator, - ) -> Box + Send>; + routes: impl IntoIterator + Send + 'async_trait, + ) -> Result<(), ()>; } diff --git a/crates/interledger-ccp/src/routing_table.rs b/crates/interledger-ccp/src/routing_table.rs index 5274db747..52d0306e4 100644 --- a/crates/interledger-ccp/src/routing_table.rs +++ b/crates/interledger-ccp/src/routing_table.rs @@ -10,7 +10,7 @@ lazy_static! { static ref RANDOM: SystemRandom = SystemRandom::new(); } -#[derive(Debug)] +#[derive(Debug, Clone)] struct PrefixMap { map: HashMap, } @@ -44,7 +44,7 @@ impl PrefixMap { /// When an Interledger node reloads, it will generate a new UUID for its routing table. /// Each update applied increments the epoch number, so it acts as a version tracker. /// This helps peers make sure they are in sync with one another and request updates if not. 
-#[derive(Debug)] +#[derive(Debug, Clone)] pub struct RoutingTable { id: [u8; 16], epoch: u32, @@ -110,12 +110,12 @@ where self.prefix_map.resolve(prefix) } - pub fn get_simplified_table(&self) -> HashMap<&str, A> { + pub fn get_simplified_table(&self) -> HashMap { HashMap::from_iter( self.prefix_map .map .iter() - .map(|(address, (account, _route))| (address.as_str(), account.clone())), + .map(|(address, (account, _route))| (address.clone(), account.clone())), ) } diff --git a/crates/interledger-ccp/src/server.rs b/crates/interledger-ccp/src/server.rs index 718e557ce..f6401d6b5 100644 --- a/crates/interledger-ccp/src/server.rs +++ b/crates/interledger-ccp/src/server.rs @@ -1,5 +1,3 @@ -#[cfg(test)] -use crate::packet::PEER_PROTOCOL_CONDITION; use crate::{ packet::{ Mode, Route, RouteControlRequest, RouteUpdateRequest, CCP_CONTROL_DESTINATION, @@ -8,22 +6,17 @@ use crate::{ routing_table::RoutingTable, CcpRoutingAccount, RouteManagerStore, RoutingRelation, }; -use futures::{ - future::{err, join_all, ok, Either}, - Future, Stream, -}; -#[cfg(test)] -use interledger_packet::PrepareBuilder; -use interledger_packet::{Address, ErrorCode, Fulfill, Reject, RejectBuilder}; +use async_trait::async_trait; +use futures::future::join_all; +use interledger_packet::{Address, ErrorCode, RejectBuilder}; use interledger_service::{ - Account, AddressStore, BoxedIlpFuture, IncomingRequest, IncomingService, OutgoingRequest, + Account, AddressStore, IlpResult, IncomingRequest, IncomingService, OutgoingRequest, OutgoingService, }; -#[cfg(test)] -use lazy_static::lazy_static; use log::{debug, error, trace, warn}; use parking_lot::{Mutex, RwLock}; use ring::digest::{digest, SHA256}; +use std::cmp::Ordering as StdOrdering; use std::collections::HashMap; use std::{ cmp::min, @@ -33,13 +26,16 @@ use std::{ atomic::{AtomicU32, Ordering}, Arc, }, - time::{Duration, Instant}, + time::Duration, }; -use tokio_timer::Interval; use uuid::Uuid; -#[cfg(not(test))] -use tokio_executor::spawn; 
+#[cfg(test)] +use crate::packet::PEER_PROTOCOL_CONDITION; +#[cfg(test)] +use futures::TryFutureExt; +#[cfg(test)] +use lazy_static::lazy_static; // TODO should the route expiry be longer? we use 30 seconds now // because the expiry shortener will lower the expiry to 30 seconds @@ -116,7 +112,13 @@ where #[cfg(not(test))] { - spawn(service.start_broadcast_interval(self.broadcast_interval)); + let broadcast_interval = self.broadcast_interval; + let service_clone = service.clone(); + tokio::spawn(async move { + service_clone + .start_broadcast_interval(broadcast_interval) + .await + }); } service @@ -180,20 +182,17 @@ where A: CcpRoutingAccount + Send + Sync + 'static, { /// Returns a future that will trigger this service to update its routes and broadcast - /// updates to peers on the given interval. - pub fn start_broadcast_interval(&self, interval: u64) -> impl Future { - let clone = self.clone(); - self.request_all_routes().and_then(move |_| { - Interval::new(Instant::now(), Duration::from_millis(interval)) - .map_err(|err| error!("Interval error, no longer sending route updates: {:?}", err)) - .for_each(move |_| { - // ensure we have the latest ILP Address from the store - clone.update_ilp_address(); - // Returning an error would end the broadcast loop - // so we want to return Ok even if there was an error - clone.broadcast_routes().then(|_| Ok(())) - }) - }) + /// updates to peers on the given interval. 
`interval` is in milliseconds + pub async fn start_broadcast_interval(&self, interval: u64) -> Result<(), ()> { + self.request_all_routes().await?; + let mut interval = tokio::time::interval(Duration::from_millis(interval)); + loop { + interval.tick().await; + // ensure we have the latest ILP Address from the store + self.update_ilp_address(); + // Do not consume the result if an error since we want to keep the loop going + let _ = self.broadcast_routes().await; + } } fn update_ilp_address(&self) { @@ -210,52 +209,47 @@ where } } - pub fn broadcast_routes(&self) -> impl Future { - let clone = self.clone(); - self.update_best_routes(None) - .and_then(move |_| clone.send_route_updates()) + pub async fn broadcast_routes(&self) -> Result<(), ()> { + self.update_best_routes(None).await?; + self.send_route_updates().await } /// Request routes from all the peers we are willing to receive routes from. /// This is mostly intended for when the CCP server starts up and doesn't have any routes from peers. - fn request_all_routes(&self) -> impl Future { - let clone = self.clone(); - self.store - .get_accounts_to_receive_routes_from() - .then(|result| { - let accounts = result.unwrap_or_else(|_| Vec::new()); - join_all(accounts.into_iter().map(move |account| { - clone.send_route_control_request(account, DUMMY_ROUTING_TABLE_ID, 0) - })) - }) - .then(|_| Ok(())) + async fn request_all_routes(&self) -> Result<(), ()> { + let result = self.store.get_accounts_to_receive_routes_from().await; + let accounts = result.unwrap_or_else(|_| Vec::new()); + join_all( + accounts + .into_iter() + .map(|account| self.send_route_control_request(account, DUMMY_ROUTING_TABLE_ID, 0)), + ) + .await; + Ok(()) } /// Handle a CCP Route Control Request. If this is from an account that we broadcast routes to, /// we'll send an outgoing Route Update Request to them. 
- fn handle_route_control_request( - &self, - request: IncomingRequest, - ) -> impl Future { + async fn handle_route_control_request(&self, request: IncomingRequest) -> IlpResult { if !request.from.should_send_routes() { - return Either::A(err(RejectBuilder { + return Err(RejectBuilder { code: ErrorCode::F00_BAD_REQUEST, message: b"We are not configured to send routes to you, sorry", triggered_by: Some(&self.ilp_address.read()), data: &[], } - .build())); + .build()); } let control = RouteControlRequest::try_from(&request.prepare); if control.is_err() { - return Either::A(err(RejectBuilder { + return Err(RejectBuilder { code: ErrorCode::F00_BAD_REQUEST, message: b"Invalid route control request", triggered_by: Some(&self.ilp_address.read()), data: &[], } - .build())); + .build()); } let control = control.unwrap(); debug!( @@ -295,40 +289,38 @@ where #[cfg(test)] { let ilp_address = self.ilp_address.read().clone(); - return Either::B(Either::A( - self.send_route_update(request.from.clone(), from_epoch_index, to_epoch_index) - .map_err(move |_| { - RejectBuilder { - code: ErrorCode::T01_PEER_UNREACHABLE, - message: b"Error sending route update request", - data: &[], - triggered_by: Some(&ilp_address), - } - .build() - }) - .and_then(|_| Ok(CCP_RESPONSE.clone())), - )); + return self + .send_route_update(request.from.clone(), from_epoch_index, to_epoch_index) + .map_err(move |_| { + RejectBuilder { + code: ErrorCode::T01_PEER_UNREACHABLE, + message: b"Error sending route update request", + data: &[], + triggered_by: Some(&ilp_address), + } + .build() + }) + .map_ok(|_| Ok(CCP_RESPONSE.clone())) + .await?; } #[cfg(not(test))] { - spawn(self.send_route_update( - request.from.clone(), - from_epoch_index, - to_epoch_index, - )); + tokio::spawn({ + let self_clone = self.clone(); + async move { + self_clone + .send_route_update( + request.from.clone(), + from_epoch_index, + to_epoch_index, + ) + .await + } + }); } } - - #[cfg(not(test))] - { - 
Either::B(ok(CCP_RESPONSE.clone())) - } - - #[cfg(test)] - { - Either::B(Either::B(ok(CCP_RESPONSE.clone()))) - } + Ok(CCP_RESPONSE.clone()) } /// Remove invalid routes before processing the Route Update Request @@ -367,27 +359,27 @@ where /// If updates are applied to the Incoming Routing Table for this peer, we will /// then check whether those routes are better than the current best ones we have in the /// Local Routing Table. - fn handle_route_update_request(&self, request: IncomingRequest) -> BoxedIlpFuture { + async fn handle_route_update_request(&self, request: IncomingRequest) -> IlpResult { // Ignore the request if we don't accept routes from them if !request.from.should_receive_routes() { - return Box::new(err(RejectBuilder { + return Err(RejectBuilder { code: ErrorCode::F00_BAD_REQUEST, message: b"Your route broadcasts are not accepted here", triggered_by: Some(&self.ilp_address.read()), data: &[], } - .build())); + .build()); } let update = RouteUpdateRequest::try_from(&request.prepare); if update.is_err() { - return Box::new(err(RejectBuilder { + return Err(RejectBuilder { code: ErrorCode::F00_BAD_REQUEST, message: b"Invalid route update request", triggered_by: Some(&self.ilp_address.read()), data: &[], } - .build())); + .build()); } let update = update.unwrap(); debug!( @@ -399,62 +391,60 @@ where // Filter out routes that don't make sense or that we won't accept let update = self.filter_routes(update); - let mut incoming_tables = self.incoming_tables.write(); - if !&incoming_tables.contains_key(&request.from.id()) { - incoming_tables.insert( - request.from.id(), - RoutingTable::new(update.routing_table_id), - ); - } + // Ensure the mutex gets dropped before teh async block + let result = { + let mut incoming_tables = self.incoming_tables.write(); + if !&incoming_tables.contains_key(&request.from.id()) { + incoming_tables.insert( + request.from.id(), + RoutingTable::new(update.routing_table_id), + ); + } + incoming_tables + 
.get_mut(&request.from.id()) + .expect("Should have inserted a routing table for this account") + .handle_update_request(request.from.clone(), update) + }; // Update the routing table we maintain for the account we got this from. // Figure out whether we need to update our routes for any of the prefixes // that were included in this route update. - match (*incoming_tables) - .get_mut(&request.from.id()) - .expect("Should have inserted a routing table for this account") - .handle_update_request(request.from.clone(), update) - { + match result { Ok(prefixes_updated) => { if prefixes_updated.is_empty() { trace!("Route update request did not contain any prefixes we need to update our routes for"); - return Box::new(ok(CCP_RESPONSE.clone())); + return Ok(CCP_RESPONSE.clone()); } debug!( "Recalculating best routes for prefixes: {}", prefixes_updated.join(", ") ); - let future = self.update_best_routes(Some( - prefixes_updated - .into_iter() - .map(|s| s.to_string()) - .collect(), - )); #[cfg(not(test))] { - spawn(future); - Box::new(ok(CCP_RESPONSE.clone())) + tokio::spawn({ + let self_clone = self.clone(); + async move { self_clone.update_best_routes(Some(prefixes_updated)).await } + }); } #[cfg(test)] { let ilp_address = self.ilp_address.clone(); - Box::new( - future - .map_err(move |_| { - RejectBuilder { - code: ErrorCode::T00_INTERNAL_ERROR, - message: b"Error processing route update", - data: &[], - triggered_by: Some(&ilp_address.read()), - } - .build() - }) - .and_then(|_| Ok(CCP_RESPONSE.clone())), - ) + self.update_best_routes(Some(prefixes_updated)) + .map_err(move |_| { + RejectBuilder { + code: ErrorCode::T00_INTERNAL_ERROR, + message: b"Error processing route update", + data: &[], + triggered_by: Some(&ilp_address.read()), + } + .build() + }) + .await?; } + Ok(CCP_RESPONSE.clone()) } Err(message) => { warn!("Error handling incoming Route Update request, sending a Route Control request to get updated routing table info from peer. 
Error was: {}", &message); @@ -465,31 +455,41 @@ where triggered_by: Some(&self.ilp_address.read()), } .build(); - let table = &incoming_tables[&request.from.id()]; - let future = self.send_route_control_request( - request.from.clone(), - table.id(), - table.epoch(), - ); + + let table = &self.incoming_tables.read().clone()[&request.from.id()]; + #[cfg(not(test))] - { - spawn(future); - Box::new(err(reject)) - } + tokio::spawn({ + let table = table.clone(); + let self_clone = self.clone(); + async move { + let _ = self_clone + .send_route_control_request( + request.from.clone(), + table.id(), + table.epoch(), + ) + .await; + } + }); + #[cfg(test)] - Box::new(future.then(move |_| Err(reject))) + let _ = self + .send_route_control_request(request.from.clone(), table.id(), table.epoch()) + .await; + Err(reject) } } } /// Request a Route Update from the specified peer. This is sent when we get /// a Route Update Request from them with a gap in the epochs since the last one we saw. - fn send_route_control_request( + async fn send_route_control_request( &self, account: A, last_known_routing_table_id: [u8; 16], last_known_epoch: u32, - ) -> impl Future { + ) -> Result<(), ()> { let account_id = account.id(); let control = RouteControlRequest { mode: Mode::Sync, @@ -503,7 +503,8 @@ where hex::encode(&last_known_routing_table_id[..]), last_known_epoch); let prepare = control.to_prepare(); - self.clone() + let result = self + .clone() .outgoing .send_request(OutgoingRequest { // TODO If we start charging or paying for CCP broadcasts we'll need to @@ -514,15 +515,15 @@ where original_amount: prepare.amount(), prepare, }) - .then(move |result| { - if let Err(err) = result { - warn!( - "Error sending Route Control Request to account {}: {:?}", - account_id, err - ) - } - Ok(()) - }) + .await; + + if let Err(err) = result { + warn!( + "Error sending Route Control Request to account {}: {:?}", + account_id, err + ) + } + Ok(()) } /// Check whether the Local Routing Table 
currently has the best routes for the @@ -530,10 +531,7 @@ where /// with some new or modified routes that might be better than our existing ones. /// /// If prefixes is None, this will check the best routes for all local and configured prefixes. - fn update_best_routes( - &self, - prefixes: Option>, - ) -> impl Future + 'static { + async fn update_best_routes(&self, prefixes: Option>) -> Result<(), ()> { let local_table = self.local_table.clone(); let forwarding_table = self.forwarding_table.clone(); let forwarding_table_updates = self.forwarding_table_updates.clone(); @@ -541,140 +539,142 @@ where let ilp_address = self.ilp_address.read().clone(); let mut store = self.store.clone(); - self.store.get_local_and_configured_routes().and_then( - move |(ref local_routes, ref configured_routes)| { - let (better_routes, withdrawn_routes) = { - // Note we only use a read lock here and later get a write lock if we need to update the table - let local_table = local_table.read(); - let incoming_tables = incoming_tables.read(); - - // Either check the given prefixes or check all of our local and configured routes - let prefixes_to_check: Box> = - if let Some(ref prefixes) = prefixes { - Box::new(prefixes.iter().map(|prefix| prefix.as_str())) - } else { - let routes = configured_routes.iter().chain(local_routes.iter()); - Box::new(routes.map(|(prefix, _account)| prefix.as_str())) - }; - - // Check all the prefixes to see which ones we have different routes for - // and which ones we don't have routes for anymore - let mut better_routes: Vec<(&str, A, Route)> = - Vec::with_capacity(prefixes_to_check.size_hint().0); - let mut withdrawn_routes: Vec<&str> = Vec::new(); - for prefix in prefixes_to_check { - // See which prefixes there is now a better route for - if let Some((best_next_account, best_route)) = get_best_route_for_prefix( - local_routes, - configured_routes, - &incoming_tables, - prefix, - ) { - if let Some((ref next_account, ref _route)) = - 
local_table.get_route(prefix) - { - if next_account.id() == best_next_account.id() { - continue; - } else { - better_routes.push(( - prefix, - best_next_account.clone(), - best_route.clone(), - )); - } - } else { - better_routes.push((prefix, best_next_account, best_route)); - } - } else { - // No longer have a route to this prefix - withdrawn_routes.push(prefix); - } - } - (better_routes, withdrawn_routes) - }; + let (local_routes, configured_routes) = + self.store.get_local_and_configured_routes().await?; - // Update the local and forwarding tables - if !better_routes.is_empty() || !withdrawn_routes.is_empty() { - let mut local_table = local_table.write(); - let mut forwarding_table = forwarding_table.write(); - let mut forwarding_table_updates = forwarding_table_updates.write(); + // TODO: Should we extract this to a function and #[inline] it? + let (better_routes, withdrawn_routes) = { + // Note we only use a read lock here and later get a write lock if we need to update the table + let local_table = local_table.read(); + let incoming_tables = incoming_tables.read(); - let mut new_routes: Vec = Vec::with_capacity(better_routes.len()); + // Either check the given prefixes or check all of our local and configured routes + let prefixes_to_check: Box> = + if let Some(ref prefixes) = prefixes { + Box::new(prefixes.iter().map(|prefix| prefix.as_str())) + } else { + let routes = configured_routes.iter().chain(local_routes.iter()); + Box::new(routes.map(|(prefix, _account)| prefix.as_str())) + }; - for (prefix, account, mut route) in better_routes { - debug!( - "Setting new route for prefix: {} -> Account: {} (id: {})", - prefix, - account.username(), - account.id(), - ); - local_table.set_route(prefix.to_string(), account.clone(), route.clone()); - - // Update the forwarding table - - // Don't advertise routes that don't start with the global prefix - // or that advertise the whole global prefix - let address_scheme = ilp_address.scheme(); - let correct_address_scheme 
= route.prefix.starts_with(address_scheme) - && route.prefix != address_scheme; - // We do want to advertise our address - let is_our_address = route.prefix == &ilp_address as &str; - // Don't advertise local routes because advertising only our address - // will be enough to ensure the packet gets to us and we can route it - // to the correct account on our node - let is_local_route = - route.prefix.starts_with(&ilp_address as &str) && route.path.is_empty(); - let not_local_route = is_our_address || !is_local_route; - // Don't include routes we're also withdrawing - let not_withdrawn_route = !withdrawn_routes.contains(&prefix); - - if correct_address_scheme && not_local_route && not_withdrawn_route { - let old_route = forwarding_table.get_route(prefix); - if old_route.is_none() || old_route.unwrap().0.id() != account.id() { - route.path.insert(0, ilp_address.to_string()); - // Each hop hashes the auth before forwarding - route.auth = hash(&route.auth); - forwarding_table.set_route( - prefix.to_string(), - account.clone(), - route.clone(), - ); - new_routes.push(route); - } + // Check all the prefixes to see which ones we have different routes for + // and which ones we don't have routes for anymore + let mut better_routes: Vec<(&str, A, Route)> = + Vec::with_capacity(prefixes_to_check.size_hint().0); + let mut withdrawn_routes: Vec<&str> = Vec::new(); + for prefix in prefixes_to_check { + // See which prefixes there is now a better route for + if let Some((best_next_account, best_route)) = get_best_route_for_prefix( + &local_routes, + &configured_routes, + &incoming_tables, + prefix, + ) { + if let Some((ref next_account, ref _route)) = local_table.get_route(prefix) { + if next_account.id() == best_next_account.id() { + continue; + } else { + better_routes.push(( + prefix, + best_next_account.clone(), + best_route.clone(), + )); } + } else { + better_routes.push((prefix, best_next_account, best_route)); } + } else { + // No longer have a route to this prefix + 
withdrawn_routes.push(prefix); + } + } + (better_routes, withdrawn_routes) + }; - for prefix in withdrawn_routes.iter() { - debug!("Removed route for prefix: {}", prefix); - local_table.delete_route(prefix); - forwarding_table.delete_route(prefix); + // Update the local and forwarding tables + if !better_routes.is_empty() || !withdrawn_routes.is_empty() { + let update_routes = { + // These 3 make the future not `Send`. How can we fix this? Error says that + // local_table (and the other variables) are dropped while the await is still on. + // We could clone, but then we won't overwrite the object's values. + // Can this be fixed? + let mut local_table = local_table.write(); + let mut forwarding_table = forwarding_table.write(); + let mut forwarding_table_updates = forwarding_table_updates.write(); + + let mut new_routes: Vec = Vec::with_capacity(better_routes.len()); + + for (prefix, account, mut route) in better_routes { + debug!( + "Setting new route for prefix: {} -> Account: {} (id: {})", + prefix, + account.username(), + account.id(), + ); + local_table.set_route(prefix.to_string(), account.clone(), route.clone()); + + // Update the forwarding table + + // Don't advertise routes that don't start with the global prefix + // or that advertise the whole global prefix + let address_scheme = ilp_address.scheme(); + let correct_address_scheme = + route.prefix.starts_with(address_scheme) && route.prefix != address_scheme; + // We do want to advertise our address + let is_our_address = route.prefix == &ilp_address as &str; + // Don't advertise local routes because advertising only our address + // will be enough to ensure the packet gets to us and we can route it + // to the correct account on our node + let is_local_route = + route.prefix.starts_with(&ilp_address as &str) && route.path.is_empty(); + let not_local_route = is_our_address || !is_local_route; + // Don't include routes we're also withdrawing + let not_withdrawn_route = !withdrawn_routes.contains(&prefix); 
+ + if correct_address_scheme && not_local_route && not_withdrawn_route { + let old_route = forwarding_table.get_route(prefix); + if old_route.is_none() || old_route.unwrap().0.id() != account.id() { + route.path.insert(0, ilp_address.to_string()); + // Each hop hashes the auth before forwarding + route.auth = hash(&route.auth); + forwarding_table.set_route( + prefix.to_string(), + account.clone(), + route.clone(), + ); + new_routes.push(route); + } } + } - let epoch = forwarding_table.increment_epoch(); - forwarding_table_updates.push(( - new_routes, - withdrawn_routes.iter().map(|s| s.to_string()).collect(), - )); - debug_assert_eq!(epoch as usize + 1, forwarding_table_updates.len()); - - Either::A( - store.set_routes( - local_table - .get_simplified_table() - .into_iter() - .map(|(prefix, account)| (prefix.to_string(), account)), - ), - ) - } else { - // The routing table hasn't changed - Either::B(ok(())) + for prefix in withdrawn_routes.iter() { + debug!("Removed route for prefix: {}", prefix); + local_table.delete_route(prefix); + forwarding_table.delete_route(prefix); } - }, - ) + + let epoch = forwarding_table.increment_epoch(); + forwarding_table_updates.push(( + new_routes, + withdrawn_routes + .into_iter() + .map(|s| s.to_string()) + .collect(), + )); + debug_assert_eq!(epoch as usize + 1, forwarding_table_updates.len()); + + store.set_routes(local_table.get_simplified_table()) + }; + + update_routes.await + } else { + // The routing table hasn't changed + Ok(()) + } } /// Send RouteUpdateRequests to all peers that we send routing messages to - fn send_route_updates(&self) -> impl Future { + async fn send_route_updates(&self) -> Result<(), ()> { let self_clone = self.clone(); let unavailable_accounts = self.unavailable_accounts.clone(); // Check which accounts we should skip this iteration @@ -690,95 +690,120 @@ where } skip }; + trace!("Skipping accounts: {:?}", accounts_to_skip); - self.store + let mut accounts = self + .store 
.get_accounts_to_send_routes_to(accounts_to_skip) - .and_then(move |mut accounts| { - let mut outgoing = self_clone.outgoing.clone(); - let to_epoch_index = self_clone.forwarding_table.read().epoch(); - let from_epoch_index = self_clone.last_epoch_updates_sent_for.swap(to_epoch_index, Ordering::SeqCst); - - let route_update_request = - self_clone.create_route_update(from_epoch_index, to_epoch_index); + .await?; + + let to_epoch_index = self_clone.forwarding_table.read().epoch(); + let from_epoch_index = self_clone + .last_epoch_updates_sent_for + .swap(to_epoch_index, Ordering::SeqCst); + + let route_update_request = self_clone.create_route_update(from_epoch_index, to_epoch_index); + + let prepare = route_update_request.to_prepare(); + accounts.sort_unstable_by_key(|a| a.id().to_string()); + accounts.dedup_by_key(|a| a.id()); + + let broadcasting = !accounts.is_empty(); + if broadcasting { + trace!( + "Sending route update for epochs {} - {} to accounts: {:?} {}", + from_epoch_index, + to_epoch_index, + route_update_request, + { + let account_list: Vec = accounts + .iter() + .map(|a| { + format!( + "{} (id: {}, ilp_address: {})", + a.username(), + a.id(), + a.ilp_address() + ) + }) + .collect(); + account_list.join(", ") + } + ); - let prepare = route_update_request.to_prepare(); - accounts.sort_unstable_by_key(|a| a.id().to_string()); - accounts.dedup_by_key(|a| a.id()); + // let results: Vec<(A, Result)> = + // let results = + // join_all(accounts.into_iter().map(|account| { + // outgoing + // .send_request(OutgoingRequest { + // from: account.clone(), + // to: account.clone(), + // original_amount: prepare.amount(), + // prepare: prepare.clone(), + // }) + // .map(move |res| (account, res)) + // // Vec> + // })) + // .await; + let mut outgoing = self_clone.outgoing.clone(); + let mut results = Vec::new(); + for account in accounts.into_iter() { + let res = outgoing + .send_request(OutgoingRequest { + from: account.clone(), + to: account.clone(), + 
original_amount: prepare.amount(), + prepare: prepare.clone(), + }) + .await; + results.push((account, res)); + } - let broadcasting = !accounts.is_empty(); - if broadcasting { - trace!( - "Sending route update for epochs {} - {} to accounts: {:?} {}", - from_epoch_index, - to_epoch_index, - route_update_request, - { - let account_list: Vec = accounts - .iter() - .map(|a| { - format!( - "{} (id: {}, ilp_address: {})", - a.username(), - a.id(), - a.ilp_address() - ) - }) - .collect(); - account_list.join(", ") + // Handle the results of the route broadcast attempts + trace!("Updating unavailable accounts"); + let mut unavailable_accounts = unavailable_accounts.lock(); + for (account, result) in results.into_iter() { + match (account.routing_relation(), result) { + (RoutingRelation::Child, Err(err)) => { + if let Some(backoff) = unavailable_accounts.get_mut(&account.id()) { + // Increase the number of intervals we'll skip + // (but don't overflow the value it's stored in) + backoff.max = backoff.max.saturating_add(1); + backoff.skip_intervals = backoff.max; + } else { + // Skip sending to this account next time + unavailable_accounts.insert( + account.id(), + BackoffParams { + max: 1, + skip_intervals: 1, + }, + ); } - ); - Either::A( - join_all(accounts.into_iter().map(move |account| { - outgoing - .send_request(OutgoingRequest { - from: account.clone(), - to: account.clone(), - original_amount: prepare.amount(), - prepare: prepare.clone(), - }) - .then(move |res| Ok((account, res))) - })) - .and_then(move |results: Vec<(A, Result)>| { - // Handle the results of the route broadcast attempts - trace!("Updating unavailable accounts"); - let mut unavailable_accounts = unavailable_accounts.lock(); - for (account, result) in results.into_iter() { - match (account.routing_relation(), result) { - (RoutingRelation::Child, Err(err)) => { - if let Some(backoff) = unavailable_accounts.get_mut(&account.id()) { - // Increase the number of intervals we'll skip - // (but don't 
overflow the value it's stored in) - backoff.max = backoff.max.saturating_add(1); - backoff.skip_intervals = backoff.max; - } else { - // Skip sending to this account next time - unavailable_accounts.insert(account.id(), BackoffParams { - max: 1, - skip_intervals: 1, - }); - } - trace!("Error sending route update to {:?} account {} (id: {}), increased backoff to {}: {:?}", - account.routing_relation(), account.username(), account.id(), unavailable_accounts[&account.id()].max, err); - }, - (_, Err(err)) => { - warn!("Error sending route update to {:?} account {} (id: {}): {:?}", - account.routing_relation(), account.username(), account.id(), err); - }, - (_, Ok(_)) => { - if unavailable_accounts.remove(&account.id()).is_some() { - debug!("Account {} (id: {}) is no longer unavailable, resuming route broadcasts", account.username(), account.id()); - } - } - } - } - Ok(()) - }), - ) - } else { - trace!("No accounts to broadcast routes to"); - Either::B(ok(())) + trace!("Error sending route update to {:?} account {} (id: {}), increased backoff to {}: {:?}", + account.routing_relation(), account.username(), account.id(), unavailable_accounts[&account.id()].max, err); + } + (_, Err(err)) => { + warn!( + "Error sending route update to {:?} account {} (id: {}): {:?}", + account.routing_relation(), + account.username(), + account.id(), + err + ); + } + (_, Ok(_)) => { + if unavailable_accounts.remove(&account.id()).is_some() { + debug!("Account {} (id: {}) is no longer unavailable, resuming route broadcasts", account.username(), account.id()); + } + } } - }) + } + Ok(()) + } else { + trace!("No accounts to broadcast routes to"); + Ok(()) + } } /// Create a RouteUpdateRequest representing the given range of Forwarding Routing Table epochs. 
@@ -852,8 +877,8 @@ where from_epoch_index, to_epoch_index, current_epoch_index, - new_routes: new_routes.clone(), - withdrawn_routes: withdrawn_routes.clone(), + new_routes, + withdrawn_routes, speaker: self.ilp_address.read().clone(), hold_down_time: DEFAULT_ROUTE_EXPIRY_TIME, } @@ -861,12 +886,12 @@ where /// Send a Route Update Request to a specific account for the given epoch range. /// This is used when the peer has fallen behind and has requested a specific range of updates. - fn send_route_update( + async fn send_route_update( &self, account: A, from_epoch_index: u32, to_epoch_index: u32, - ) -> impl Future { + ) -> Result<(), ()> { let prepare = self .create_route_update(from_epoch_index, to_epoch_index) .to_prepare(); @@ -875,7 +900,8 @@ where "Sending individual route update to account: {} for epochs from: {} to: {}", account_id, from_epoch_index, to_epoch_index ); - self.outgoing + let result = self + .outgoing .clone() .send_request(OutgoingRequest { from: account.clone(), @@ -883,16 +909,15 @@ where original_amount: prepare.amount(), prepare, }) - .and_then(|_| Ok(())) - .then(move |result| { - if let Err(err) = result { - error!( - "Error sending route update to account {}: {:?}", - account_id, err - ) - } - Ok(()) - }) + .await; + + if let Err(err) = result { + error!( + "Error sending route update to account {}: {:?}", + account_id, err + ) + } + Ok(()) } } @@ -943,24 +968,27 @@ fn get_best_route_for_prefix( (account, route), |(best_account, best_route), (account, route)| { // Prioritize child > peer > parent - if best_account.routing_relation() > account.routing_relation() { - return (best_account, best_route); - } else if best_account.routing_relation() < account.routing_relation() { - return (account, route); - } - - // Prioritize shortest path - if best_route.path.len() < route.path.len() { - return (best_account, best_route); - } else if best_route.path.len() > route.path.len() { - return (account, route); - } - - // Finally base it on account 
ID - if best_account.id().to_string() < account.id().to_string() { - (best_account, best_route) - } else { - (account, route) + match best_account + .routing_relation() + .cmp(&account.routing_relation()) + { + StdOrdering::Greater => (best_account, best_route), + StdOrdering::Less => (account, route), + _ => { + // Prioritize shortest path + match best_route.path.len().cmp(&route.path.len()) { + StdOrdering::Less => (best_account, best_route), + StdOrdering::Greater => (account, route), + _ => { + // Finally base it on account ID + if best_account.id().to_string() < account.id().to_string() { + (best_account, best_route) + } else { + (account, route) + } + } + } + } } }, ); @@ -970,6 +998,7 @@ fn get_best_route_for_prefix( } } +#[async_trait] impl IncomingService for CcpRouteManager where I: IncomingService + Clone + Send + Sync + 'static, @@ -977,18 +1006,16 @@ where S: AddressStore + RouteManagerStore + Clone + Send + Sync + 'static, A: CcpRoutingAccount + Send + Sync + 'static, { - type Future = BoxedIlpFuture; - /// Handle the IncomingRequest if it is a CCP protocol message or /// pass it on to the next handler if not - fn handle_request(&mut self, request: IncomingRequest) -> Self::Future { + async fn handle_request(&mut self, request: IncomingRequest) -> IlpResult { let destination = request.prepare.destination(); if destination == *CCP_CONTROL_DESTINATION { - Box::new(self.handle_route_control_request(request)) + self.handle_route_control_request(request).await } else if destination == *CCP_UPDATE_DESTINATION { - Box::new(self.handle_route_update_request(request)) + self.handle_route_update_request(request).await } else { - Box::new(self.next_incoming.handle_request(request)) + self.next_incoming.handle_request(request).await } } } @@ -1030,7 +1057,7 @@ mod ranking_routes { let mut child = TestAccount::new(Uuid::from_slice(&[6; 16]).unwrap(), "example.child"); child.relation = RoutingRelation::Child; child_table.add_route( - child.clone(), + child, Route { 
prefix: "example.d".to_string(), path: vec!["example.one".to_string()], @@ -1059,7 +1086,7 @@ mod ranking_routes { }, ); peer_table_1.add_route( - peer_1.clone(), + peer_1, Route { // This route should be overridden by the configured "example.a" route prefix: "example.a.sub-prefix".to_string(), @@ -1071,7 +1098,7 @@ mod ranking_routes { let mut peer_table_2 = RoutingTable::default(); let peer_2 = TestAccount::new(Uuid::from_slice(&[8; 16]).unwrap(), "example.peer2"); peer_table_2.add_route( - peer_2.clone(), + peer_2, Route { prefix: "example.e".to_string(), path: vec!["example.one".to_string(), "example.two".to_string()], @@ -1141,28 +1168,29 @@ mod handle_route_control_request { use super::*; use crate::fixtures::*; use crate::test_helpers::*; + use interledger_packet::PrepareBuilder; use std::time::{Duration, SystemTime}; - #[test] - fn handles_valid_request() { + #[tokio::test] + async fn handles_valid_request() { test_service_with_routes() .0 .handle_request(IncomingRequest { prepare: CONTROL_REQUEST.to_prepare(), from: ROUTING_ACCOUNT.clone(), }) - .wait() + .await .unwrap(); } - #[test] - fn rejects_from_non_sending_account() { + #[tokio::test] + async fn rejects_from_non_sending_account() { let result = test_service() .handle_request(IncomingRequest { prepare: CONTROL_REQUEST.to_prepare(), from: NON_ROUTING_ACCOUNT.clone(), }) - .wait(); + .await; assert!(result.is_err()); assert_eq!( str::from_utf8(result.unwrap_err().message()).unwrap(), @@ -1170,8 +1198,8 @@ mod handle_route_control_request { ); } - #[test] - fn rejects_invalid_packet() { + #[tokio::test] + async fn rejects_invalid_packet() { let result = test_service() .handle_request(IncomingRequest { prepare: PrepareBuilder { @@ -1184,7 +1212,7 @@ mod handle_route_control_request { .build(), from: ROUTING_ACCOUNT.clone(), }) - .wait(); + .await; assert!(result.is_err()); assert_eq!( str::from_utf8(result.unwrap_err().message()).unwrap(), @@ -1192,11 +1220,11 @@ mod handle_route_control_request { ); } 
- #[test] - fn sends_update_in_response() { + #[tokio::test] + async fn sends_update_in_response() { let (mut service, outgoing_requests) = test_service_with_routes(); (*service.forwarding_table.write()).set_id([0; 16]); - service.update_best_routes(None).wait().unwrap(); + service.update_best_routes(None).await.unwrap(); service .handle_request(IncomingRequest { from: ROUTING_ACCOUNT.clone(), @@ -1208,7 +1236,7 @@ mod handle_route_control_request { } .to_prepare(), }) - .wait() + .await .unwrap(); let request: &OutgoingRequest = &outgoing_requests.lock()[0]; assert_eq!(request.to.id(), ROUTING_ACCOUNT.id()); @@ -1220,10 +1248,10 @@ mod handle_route_control_request { assert_eq!(update.new_routes.len(), 3); } - #[test] - fn sends_whole_table_if_id_is_different() { + #[tokio::test] + async fn sends_whole_table_if_id_is_different() { let (mut service, outgoing_requests) = test_service_with_routes(); - service.update_best_routes(None).wait().unwrap(); + service.update_best_routes(None).await.unwrap(); service .handle_request(IncomingRequest { from: ROUTING_ACCOUNT.clone(), @@ -1235,7 +1263,7 @@ mod handle_route_control_request { } .to_prepare(), }) - .wait() + .await .unwrap(); let routing_table_id = service.forwarding_table.read().id(); let request: &OutgoingRequest = &outgoing_requests.lock()[0]; @@ -1254,13 +1282,14 @@ mod handle_route_update_request { use super::*; use crate::fixtures::*; use crate::test_helpers::*; + use interledger_packet::PrepareBuilder; use std::{ iter::FromIterator, time::{Duration, SystemTime}, }; - #[test] - fn handles_valid_request() { + #[tokio::test] + async fn handles_valid_request() { let mut service = test_service(); let mut update = UPDATE_REQUEST_SIMPLE.clone(); update.to_epoch_index = 1; @@ -1271,18 +1300,18 @@ mod handle_route_update_request { prepare: update.to_prepare(), from: ROUTING_ACCOUNT.clone(), }) - .wait() + .await .unwrap(); } - #[test] - fn rejects_from_child_account() { + #[tokio::test] + async fn 
rejects_from_child_account() { let result = test_service() .handle_request(IncomingRequest { prepare: UPDATE_REQUEST_SIMPLE.to_prepare(), from: CHILD_ACCOUNT.clone(), }) - .wait(); + .await; assert!(result.is_err()); assert_eq!( str::from_utf8(result.unwrap_err().message()).unwrap(), @@ -1290,14 +1319,14 @@ mod handle_route_update_request { ); } - #[test] - fn rejects_from_non_routing_account() { + #[tokio::test] + async fn rejects_from_non_routing_account() { let result = test_service() .handle_request(IncomingRequest { prepare: UPDATE_REQUEST_SIMPLE.to_prepare(), from: NON_ROUTING_ACCOUNT.clone(), }) - .wait(); + .await; assert!(result.is_err()); assert_eq!( str::from_utf8(result.unwrap_err().message()).unwrap(), @@ -1305,8 +1334,8 @@ mod handle_route_update_request { ); } - #[test] - fn rejects_invalid_packet() { + #[tokio::test] + async fn rejects_invalid_packet() { let result = test_service() .handle_request(IncomingRequest { prepare: PrepareBuilder { @@ -1319,7 +1348,7 @@ mod handle_route_update_request { .build(), from: ROUTING_ACCOUNT.clone(), }) - .wait(); + .await; assert!(result.is_err()); assert_eq!( str::from_utf8(result.unwrap_err().message()).unwrap(), @@ -1327,8 +1356,8 @@ mod handle_route_update_request { ); } - #[test] - fn adds_table_on_first_request() { + #[tokio::test] + async fn adds_table_on_first_request() { let mut service = test_service(); let mut update = UPDATE_REQUEST_SIMPLE.clone(); update.to_epoch_index = 1; @@ -1339,13 +1368,13 @@ mod handle_route_update_request { prepare: update.to_prepare(), from: ROUTING_ACCOUNT.clone(), }) - .wait() + .await .unwrap(); assert_eq!(service.incoming_tables.read().len(), 1); } - #[test] - fn filters_routes_with_other_address_scheme() { + #[tokio::test] + async fn filters_routes_with_other_address_scheme() { let service = test_service(); let mut request = UPDATE_REQUEST_SIMPLE.clone(); request.new_routes.push(Route { @@ -1365,8 +1394,8 @@ mod handle_route_update_request { 
assert_eq!(request.new_routes[0].prefix, "example.valid".to_string()); } - #[test] - fn filters_routes_for_address_scheme() { + #[tokio::test] + async fn filters_routes_for_address_scheme() { let service = test_service(); let mut request = UPDATE_REQUEST_SIMPLE.clone(); request.new_routes.push(Route { @@ -1386,8 +1415,8 @@ mod handle_route_update_request { assert_eq!(request.new_routes[0].prefix, "example.valid".to_string()); } - #[test] - fn filters_routing_loops() { + #[tokio::test] + async fn filters_routing_loops() { let service = test_service(); let mut request = UPDATE_REQUEST_SIMPLE.clone(); request.new_routes.push(Route { @@ -1411,8 +1440,8 @@ mod handle_route_update_request { assert_eq!(request.new_routes[0].prefix, "example.valid".to_string()); } - #[test] - fn filters_own_prefix_routes() { + #[tokio::test] + async fn filters_own_prefix_routes() { let service = test_service(); let mut request = UPDATE_REQUEST_SIMPLE.clone(); request.new_routes.push(Route { @@ -1432,8 +1461,8 @@ mod handle_route_update_request { assert_eq!(request.new_routes[0].prefix, "example.valid".to_string()); } - #[test] - fn updates_local_routing_table() { + #[tokio::test] + async fn updates_local_routing_table() { let mut service = test_service(); let mut request = UPDATE_REQUEST_COMPLEX.clone(); request.to_epoch_index = 1; @@ -1443,7 +1472,7 @@ mod handle_route_update_request { from: ROUTING_ACCOUNT.clone(), prepare: request.to_prepare(), }) - .wait() + .await .unwrap(); assert_eq!( (*service.local_table.read()) @@ -1463,8 +1492,8 @@ mod handle_route_update_request { ); } - #[test] - fn writes_local_routing_table_to_store() { + #[tokio::test] + async fn writes_local_routing_table_to_store() { let mut service = test_service(); let mut request = UPDATE_REQUEST_COMPLEX.clone(); request.to_epoch_index = 1; @@ -1474,7 +1503,7 @@ mod handle_route_update_request { from: ROUTING_ACCOUNT.clone(), prepare: request.to_prepare(), }) - .wait() + .await .unwrap(); assert_eq!( service @@ -1498,8 
+1527,8 @@ mod handle_route_update_request { ); } - #[test] - fn doesnt_overwrite_configured_or_local_routes() { + #[tokio::test] + async fn doesnt_overwrite_configured_or_local_routes() { let mut service = test_service(); let id1 = Uuid::from_slice(&[1; 16]).unwrap(); let id2 = Uuid::from_slice(&[2; 16]).unwrap(); @@ -1523,7 +1552,7 @@ mod handle_route_update_request { from: ROUTING_ACCOUNT.clone(), prepare: request.to_prepare(), }) - .wait() + .await .unwrap(); assert_eq!( (*service.local_table.read()) @@ -1543,8 +1572,8 @@ mod handle_route_update_request { ); } - #[test] - fn removes_withdrawn_routes() { + #[tokio::test] + async fn removes_withdrawn_routes() { let mut service = test_service(); let mut request = UPDATE_REQUEST_COMPLEX.clone(); request.to_epoch_index = 1; @@ -1554,7 +1583,7 @@ mod handle_route_update_request { from: ROUTING_ACCOUNT.clone(), prepare: request.to_prepare(), }) - .wait() + .await .unwrap(); service .handle_request(IncomingRequest { @@ -1571,7 +1600,7 @@ mod handle_route_update_request { } .to_prepare(), }) - .wait() + .await .unwrap(); assert_eq!( @@ -1587,8 +1616,8 @@ mod handle_route_update_request { .is_none()); } - #[test] - fn sends_control_request_if_routing_table_id_changed() { + #[tokio::test] + async fn sends_control_request_if_routing_table_id_changed() { let (mut service, outgoing_requests) = test_service_with_routes(); // First request is valid let mut request1 = UPDATE_REQUEST_COMPLEX.clone(); @@ -1599,7 +1628,7 @@ mod handle_route_update_request { from: ROUTING_ACCOUNT.clone(), prepare: request1.to_prepare(), }) - .wait() + .await .unwrap(); // Second has a gap in epochs @@ -1612,7 +1641,7 @@ mod handle_route_update_request { from: ROUTING_ACCOUNT.clone(), prepare: request2.to_prepare(), }) - .wait() + .await .unwrap_err(); assert_eq!(err.code(), ErrorCode::F00_BAD_REQUEST); @@ -1625,8 +1654,8 @@ mod handle_route_update_request { ); } - #[test] - fn sends_control_request_if_missing_epochs() { + #[tokio::test] + async fn 
sends_control_request_if_missing_epochs() { let (mut service, outgoing_requests) = test_service_with_routes(); // First request is valid @@ -1638,7 +1667,7 @@ mod handle_route_update_request { from: ROUTING_ACCOUNT.clone(), prepare: request.to_prepare(), }) - .wait() + .await .unwrap(); // Second has a gap in epochs @@ -1650,7 +1679,7 @@ mod handle_route_update_request { from: ROUTING_ACCOUNT.clone(), prepare: request.to_prepare(), }) - .wait() + .await .unwrap_err(); assert_eq!(err.code(), ErrorCode::F00_BAD_REQUEST); @@ -1665,8 +1694,8 @@ mod create_route_update { use super::*; use crate::test_helpers::*; - #[test] - fn heartbeat_message_for_empty_table() { + #[tokio::test] + async fn heartbeat_message_for_empty_table() { let service = test_service(); let update = service.create_route_update(0, 0); assert_eq!(update.from_epoch_index, 0); @@ -1678,8 +1707,8 @@ mod create_route_update { assert!(update.withdrawn_routes.is_empty()); } - #[test] - fn includes_the_given_range_of_epochs() { + #[tokio::test] + async fn includes_the_given_range_of_epochs() { let service = test_service(); (*service.forwarding_table.write()).set_epoch(4); *service.forwarding_table_updates.write() = vec![ @@ -1746,10 +1775,10 @@ mod send_route_updates { use interledger_service::*; use std::{collections::HashSet, iter::FromIterator, str::FromStr}; - #[test] - fn broadcasts_to_all_accounts_we_send_updates_to() { + #[tokio::test] + async fn broadcasts_to_all_accounts_we_send_updates_to() { let (service, outgoing_requests) = test_service_with_routes(); - service.send_route_updates().wait().unwrap(); + service.send_route_updates().await.unwrap(); let accounts: HashSet = outgoing_requests .lock() .iter() @@ -1765,14 +1794,14 @@ mod send_route_updates { assert_eq!(accounts, expected); } - #[test] - fn broadcasts_configured_and_local_routes() { + #[tokio::test] + async fn broadcasts_configured_and_local_routes() { let (service, outgoing_requests) = test_service_with_routes(); // This is normally 
spawned as a task when the service is created - service.update_best_routes(None).wait().unwrap(); + service.update_best_routes(None).await.unwrap(); - service.send_route_updates().wait().unwrap(); + service.send_route_updates().await.unwrap(); let update = RouteUpdateRequest::try_from(&outgoing_requests.lock()[0].prepare).unwrap(); assert_eq!(update.new_routes.len(), 3); let prefixes: Vec<&str> = update @@ -1784,12 +1813,12 @@ mod send_route_updates { assert!(prefixes.contains(&"example.configured.1")); } - #[test] - fn broadcasts_received_routes() { + #[tokio::test] + async fn broadcasts_received_routes() { let (service, outgoing_requests) = test_service_with_routes(); // This is normally spawned as a task when the service is created - service.update_best_routes(None).wait().unwrap(); + service.update_best_routes(None).await.unwrap(); service .handle_route_update_request(IncomingRequest { @@ -1811,10 +1840,10 @@ mod send_route_updates { } .to_prepare(), }) - .wait() + .await .unwrap(); - service.send_route_updates().wait().unwrap(); + service.send_route_updates().await.unwrap(); let update = RouteUpdateRequest::try_from(&outgoing_requests.lock()[0].prepare).unwrap(); assert_eq!(update.new_routes.len(), 4); let prefixes: Vec<&str> = update @@ -1827,13 +1856,13 @@ mod send_route_updates { assert!(prefixes.contains(&"example.remote")); } - #[test] - fn broadcasts_withdrawn_routes() { + #[tokio::test] + async fn broadcasts_withdrawn_routes() { let id10 = Uuid::from_slice(&[10; 16]).unwrap(); let (service, outgoing_requests) = test_service_with_routes(); // This is normally spawned as a task when the service is created - service.update_best_routes(None).wait().unwrap(); + service.update_best_routes(None).await.unwrap(); service .handle_route_update_request(IncomingRequest { @@ -1855,7 +1884,7 @@ mod send_route_updates { } .to_prepare(), }) - .wait() + .await .unwrap(); service .handle_route_update_request(IncomingRequest { @@ -1872,10 +1901,10 @@ mod send_route_updates 
{ } .to_prepare(), }) - .wait() + .await .unwrap(); - service.send_route_updates().wait().unwrap(); + service.send_route_updates().await.unwrap(); let update = RouteUpdateRequest::try_from(&outgoing_requests.lock()[0].prepare).unwrap(); assert_eq!(update.new_routes.len(), 3); let prefixes: Vec<&str> = update @@ -1890,8 +1919,8 @@ mod send_route_updates { assert_eq!(update.withdrawn_routes[0], "example.remote"); } - #[test] - fn backs_off_sending_to_unavailable_child_accounts() { + #[tokio::test] + async fn backs_off_sending_to_unavailable_child_accounts() { let id1 = Uuid::from_slice(&[1; 16]).unwrap(); let id2 = Uuid::from_slice(&[2; 16]).unwrap(); let local_routes = HashMap::from_iter(vec![ @@ -1932,18 +1961,18 @@ mod send_route_updates { store, outgoing, incoming_service_fn(|_request| { - Box::new(err(RejectBuilder { + Err(RejectBuilder { code: ErrorCode::F02_UNREACHABLE, message: b"No other incoming handler!", data: &[], triggered_by: Some(&EXAMPLE_CONNECTOR), } - .build())) + .build()) }), ) .ilp_address(Address::from_str("example.connector").unwrap()) .to_service(); - service.send_route_updates().wait().unwrap(); + service.send_route_updates().await.unwrap(); // The first time, the child request is rejected assert_eq!(outgoing_requests.lock().len(), 2); @@ -1957,7 +1986,7 @@ mod send_route_updates { } *outgoing_requests.lock() = Vec::new(); - service.send_route_updates().wait().unwrap(); + service.send_route_updates().await.unwrap(); // When we send again, we skip the child assert_eq!(outgoing_requests.lock().len(), 1); @@ -1971,7 +2000,7 @@ mod send_route_updates { } *outgoing_requests.lock() = Vec::new(); - service.send_route_updates().wait().unwrap(); + service.send_route_updates().await.unwrap(); // When we send again, we try the child but it still won't work assert_eq!(outgoing_requests.lock().len(), 2); @@ -1985,8 +2014,8 @@ mod send_route_updates { } } - #[test] - fn resets_backoff_on_route_control_request() { + #[tokio::test] + async fn 
resets_backoff_on_route_control_request() { let id1 = Uuid::from_slice(&[1; 16]).unwrap(); let id2 = Uuid::from_slice(&[2; 16]).unwrap(); let child_account = TestAccount { @@ -2028,18 +2057,18 @@ mod send_route_updates { store, outgoing, incoming_service_fn(|_request| { - Box::new(err(RejectBuilder { + Err(RejectBuilder { code: ErrorCode::F02_UNREACHABLE, message: b"No other incoming handler!", data: &[], triggered_by: Some(&EXAMPLE_CONNECTOR), } - .build())) + .build()) }), ) .ilp_address(Address::from_str("example.connector").unwrap()) .to_service(); - service.send_route_updates().wait().unwrap(); + service.send_route_updates().await.unwrap(); // The first time, the child request is rejected assert_eq!(outgoing_requests.lock().len(), 2); @@ -2057,7 +2086,7 @@ mod send_route_updates { prepare: CONTROL_REQUEST.to_prepare(), from: child_account, }) - .wait() + .await .unwrap(); { let lock = service.unavailable_accounts.lock(); @@ -2065,7 +2094,7 @@ mod send_route_updates { } *outgoing_requests.lock() = Vec::new(); - service.send_route_updates().wait().unwrap(); + service.send_route_updates().await.unwrap(); // When we send again, we don't skip the child because we got a request from them assert_eq!(outgoing_requests.lock().len(), 2); diff --git a/crates/interledger-ccp/src/test_helpers.rs b/crates/interledger-ccp/src/test_helpers.rs index a201c84a4..33a3d5f83 100644 --- a/crates/interledger-ccp/src/test_helpers.rs +++ b/crates/interledger-ccp/src/test_helpers.rs @@ -1,16 +1,12 @@ /* kcov-ignore-start */ use super::*; use crate::{packet::CCP_RESPONSE, server::CcpRouteManager}; -use futures::{ - future::{err, ok}, - Future, -}; +use async_trait::async_trait; use interledger_packet::{Address, ErrorCode, RejectBuilder}; use interledger_service::{ - incoming_service_fn, outgoing_service_fn, AddressStore, BoxedIlpFuture, IncomingService, - OutgoingRequest, OutgoingService, Username, + incoming_service_fn, outgoing_service_fn, AddressStore, IncomingService, 
OutgoingRequest, + OutgoingService, Username, }; -#[cfg(test)] use lazy_static::lazy_static; use parking_lot::Mutex; use std::collections::HashMap; @@ -112,16 +108,14 @@ impl TestStore { type RoutingTable = HashMap; +#[async_trait] impl AddressStore for TestStore { /// Saves the ILP Address in the store's memory and database - fn set_ilp_address( - &self, - _ilp_address: Address, - ) -> Box + Send> { + async fn set_ilp_address(&self, _ilp_address: Address) -> Result<(), ()> { unimplemented!() } - fn clear_ilp_address(&self) -> Box + Send> { + async fn clear_ilp_address(&self) -> Result<(), ()> { unimplemented!() } @@ -131,22 +125,20 @@ impl AddressStore for TestStore { } } +#[async_trait] impl RouteManagerStore for TestStore { type Account = TestAccount; - fn get_local_and_configured_routes( + async fn get_local_and_configured_routes( &self, - ) -> Box< - dyn Future, RoutingTable), Error = ()> - + Send, - > { - Box::new(ok((self.local.clone(), self.configured.clone()))) + ) -> Result<(RoutingTable, RoutingTable), ()> { + Ok((self.local.clone(), self.configured.clone())) } - fn get_accounts_to_send_routes_to( + async fn get_accounts_to_send_routes_to( &self, ignore_accounts: Vec, - ) -> Box, Error = ()> + Send> { + ) -> Result, ()> { let mut accounts: Vec = self .local .values() @@ -158,12 +150,10 @@ impl RouteManagerStore for TestStore { .cloned() .collect(); accounts.dedup_by_key(|a| a.id()); - Box::new(ok(accounts)) + Ok(accounts) } - fn get_accounts_to_receive_routes_from( - &self, - ) -> Box, Error = ()> + Send> { + async fn get_accounts_to_receive_routes_from(&self) -> Result, ()> { let mut accounts: Vec = self .local .values() @@ -173,21 +163,21 @@ impl RouteManagerStore for TestStore { .cloned() .collect(); accounts.dedup_by_key(|a| a.id()); - Box::new(ok(accounts)) + Ok(accounts) } - fn set_routes( + async fn set_routes( &mut self, - routes: impl IntoIterator, - ) -> Box + Send> { + routes: impl IntoIterator + Send + 'async_trait, + ) -> Result<(), ()> { 
*self.routes.lock() = HashMap::from_iter(routes.into_iter()); - Box::new(ok(())) + Ok(()) } } pub fn test_service() -> CcpRouteManager< - impl IncomingService + Clone, - impl OutgoingService + Clone, + impl IncomingService + Clone, + impl OutgoingService + Clone, TestStore, TestAccount, > { @@ -196,22 +186,22 @@ pub fn test_service() -> CcpRouteManager< addr.clone(), TestStore::new(), outgoing_service_fn(|_request| { - Box::new(err(RejectBuilder { + Err(RejectBuilder { code: ErrorCode::F02_UNREACHABLE, message: b"No other outgoing handler!", data: &[], triggered_by: Some(&EXAMPLE_CONNECTOR), } - .build())) + .build()) }), incoming_service_fn(|_request| { - Box::new(err(RejectBuilder { + Err(RejectBuilder { code: ErrorCode::F02_UNREACHABLE, message: b"No other incoming handler!", data: &[], triggered_by: Some(&EXAMPLE_CONNECTOR), } - .build())) + .build()) }), ) .ilp_address(addr) @@ -222,8 +212,8 @@ type OutgoingRequests = Arc>>>; pub fn test_service_with_routes() -> ( CcpRouteManager< - impl IncomingService + Clone, - impl OutgoingService + Clone, + impl IncomingService + Clone, + impl OutgoingService + Clone, TestStore, TestAccount, >, @@ -261,13 +251,13 @@ pub fn test_service_with_routes() -> ( store, outgoing, incoming_service_fn(|_request| { - Box::new(err(RejectBuilder { + Err(RejectBuilder { code: ErrorCode::F02_UNREACHABLE, message: b"No other incoming handler!", data: &[], triggered_by: Some(&EXAMPLE_CONNECTOR), } - .build())) + .build()) }), ) .ilp_address(addr) diff --git a/crates/interledger-http/Cargo.toml b/crates/interledger-http/Cargo.toml index 80e660536..ff84945f3 100644 --- a/crates/interledger-http/Cargo.toml +++ b/crates/interledger-http/Cargo.toml @@ -9,22 +9,26 @@ repository = "https://github.com/interledger-rs/interledger-rs" [dependencies] bytes = { version = "0.4.12", default-features = false } -futures = { version = "0.1.29", default-features = false } +bytes05 = { package = "bytes", version = "0.5", default-features = false } 
+futures = { version = "0.3", default-features = false } interledger-packet = { path = "../interledger-packet", version = "^0.4.0", default-features = false } interledger-service = { path = "../interledger-service", version = "^0.4.0", default-features = false } log = { version = "0.4.8", default-features = false } -reqwest = { version = "0.9.22", default-features = false, features = ["default-tls"] } +reqwest = { version = "0.10.0", default-features = false, features = ["default-tls"] } url = { version = "2.1.0", default-features = false } -warp = { version = "0.1.20", default-features = false } +# warp = { version = "0.1.20", default-features = false } +warp = { git = "https://github.com/seanmonstar/warp.git" } serde = { version = "1.0.101", default-features = false, features = ["derive"] } serde_json = { version = "1.0.41", default-features = false } serde_path_to_error = { version = "0.1", default-features = false } -http = { version = "0.1.18", default-features = false } +http = { version = "0.2.0", default-features = false } chrono = { version = "0.4.9", features = ["clock"], default-features = false } regex = { version ="1.3.1", default-features = false, features = ["std"] } lazy_static = { version ="1.4.0", default-features = false } mime = { version ="0.3.14", default-features = false } secrecy = "0.5.2" +async-trait = "0.1.22" [dev-dependencies] uuid = { version = "0.8.1", features=["v4"]} +tokio = { version = "0.2.6", features = ["rt-core", "macros"]} diff --git a/crates/interledger-http/src/client.rs b/crates/interledger-http/src/client.rs index bb55a11e9..81c7be91a 100644 --- a/crates/interledger-http/src/client.rs +++ b/crates/interledger-http/src/client.rs @@ -1,12 +1,13 @@ use super::{HttpAccount, HttpStore}; +use async_trait::async_trait; use bytes::BytesMut; -use futures::{future::result, Future, Stream}; -use interledger_packet::{Address, ErrorCode, Fulfill, Packet, Reject, RejectBuilder}; +use futures::future::TryFutureExt; +use 
interledger_packet::{Address, ErrorCode, Packet, RejectBuilder}; use interledger_service::*; use log::{error, trace}; use reqwest::{ header::{HeaderMap, HeaderName, HeaderValue}, - r#async::{Chunk, Client, ClientBuilder, Response as HttpResponse}, + Client, ClientBuilder, Response as HttpResponse, }; use secrecy::{ExposeSecret, SecretString}; use std::{convert::TryFrom, marker::PhantomData, sync::Arc, time::Duration}; @@ -46,18 +47,18 @@ where } } +#[async_trait] impl OutgoingService for HttpClientService where - S: AddressStore + HttpStore, - O: OutgoingService, - A: HttpAccount, + S: AddressStore + HttpStore + Clone, + O: OutgoingService + Clone + Sync + Send, + A: HttpAccount + Clone + Sync + Send, { - type Future = BoxedIlpFuture; - /// Send an OutgoingRequest to a peer that implements the ILP-Over-HTTP. - fn send_request(&mut self, request: OutgoingRequest) -> Self::Future { + async fn send_request(&mut self, request: OutgoingRequest) -> IlpResult { let ilp_address = self.store.get_ilp_address(); let ilp_address_clone = ilp_address.clone(); + let self_clone = self.clone(); if let Some(url) = request.to.get_http_url() { trace!( "Sending outgoing ILP over HTTP packet to account: {} (URL: {})", @@ -68,51 +69,42 @@ where .to .get_http_auth_token() .unwrap_or_else(|| SecretString::new("".to_owned())); - Box::new( - self.client - .post(url.as_ref()) - .header( - "authorization", - &format!("Bearer {}", token.expose_secret()), - ) - .body(BytesMut::from(request.prepare).freeze()) - .send() - .map_err(move |err| { - error!("Error sending HTTP request: {:?}", err); - let code = if err.is_client_error() { - ErrorCode::F00_BAD_REQUEST - } else { - ErrorCode::T01_PEER_UNREACHABLE - }; - let message = if let Some(status) = err.status() { - format!("Error sending ILP over HTTP request: {}", status) - } else if let Some(err) = err.get_ref() { - format!("Error sending ILP over HTTP request: {:?}", err) - } else { - "Error sending ILP over HTTP request".to_string() - }; - 
RejectBuilder { - code, - message: message.as_str().as_bytes(), - triggered_by: Some(&ilp_address), - data: &[], + let header = format!("Bearer {}", token.expose_secret()); + let body = request.prepare.as_ref().to_owned(); + let resp = self_clone + .client + .post(url.as_ref()) + .header("authorization", &header) + .body(body) + .send() + .map_err(move |err| { + error!("Error sending HTTP request: {:?}", err); + let mut code = ErrorCode::T01_PEER_UNREACHABLE; + if let Some(status) = err.status() { + if status.is_client_error() { + code = ErrorCode::F00_BAD_REQUEST } - .build() - }) - .and_then(move |resp| parse_packet_from_response(resp, ilp_address_clone)), - ) + }; + + let message = format!("Error sending ILP over HTTP request: {}", err); + RejectBuilder { + code, + message: message.as_bytes(), + triggered_by: Some(&ilp_address), + data: &[], + } + .build() + }) + .await?; + parse_packet_from_response(resp, ilp_address_clone).await } else { - Box::new(self.next.send_request(request)) + self.next.send_request(request).await } } } -fn parse_packet_from_response( - response: HttpResponse, - ilp_address: Address, -) -> impl Future { - let ilp_address_clone = ilp_address.clone(); - result(response.error_for_status().map_err(|err| { +async fn parse_packet_from_response(response: HttpResponse, ilp_address: Address) -> IlpResult { + let response = response.error_for_status().map_err(|err| { error!("HTTP error sending ILP over HTTP packet: {:?}", err); let code = if let Some(status) = err.status() { if status.is_client_error() { @@ -131,34 +123,33 @@ fn parse_packet_from_response( data: &[], } .build() - })) - .and_then(move |response: HttpResponse| { - let ilp_address_clone = ilp_address.clone(); - let decoder = response.into_body(); - decoder.concat2().map_err(move |err| { + })?; + + let ilp_address_clone = ilp_address.clone(); + let body = response + .bytes() + .map_err(|err| { error!("Error getting HTTP response body: {:?}", err); RejectBuilder { code: 
ErrorCode::T01_PEER_UNREACHABLE, message: &[], - triggered_by: Some(&ilp_address_clone.clone()), + triggered_by: Some(&ilp_address_clone), data: &[], } .build() }) - }) - .and_then(move |body: Chunk| { - // TODO can we get the body as a BytesMut so we don't need to copy? - let body = BytesMut::from(body.to_vec()); - match Packet::try_from(body) { - Ok(Packet::Fulfill(fulfill)) => Ok(fulfill), - Ok(Packet::Reject(reject)) => Err(reject), - _ => Err(RejectBuilder { - code: ErrorCode::T01_PEER_UNREACHABLE, - message: &[], - triggered_by: Some(&ilp_address_clone.clone()), - data: &[], - } - .build()), + .await?; + // TODO can we get the body as a BytesMut so we don't need to copy? + let body = BytesMut::from(body.to_vec()); + match Packet::try_from(body) { + Ok(Packet::Fulfill(fulfill)) => Ok(fulfill), + Ok(Packet::Reject(reject)) => Err(reject), + _ => Err(RejectBuilder { + code: ErrorCode::T01_PEER_UNREACHABLE, + message: &[], + triggered_by: Some(&ilp_address_clone), + data: &[], } - }) + .build()), + } } diff --git a/crates/interledger-http/src/error/mod.rs b/crates/interledger-http/src/error/mod.rs index dcdd70581..febacbe80 100644 --- a/crates/interledger-http/src/error/mod.rs +++ b/crates/interledger-http/src/error/mod.rs @@ -12,7 +12,12 @@ use std::{ error::Error as StdError, fmt::{self, Display}, }; -use warp::{reject::custom, reply::json, reply::Response, Rejection, Reply}; +use warp::{ + reject::{custom, Reject}, + reply::json, + reply::Response, + Rejection, Reply, +}; /// API error type prefix of problems. /// This URL prefix is currently not published but we assume that in the future. @@ -236,6 +241,8 @@ impl From for Rejection { } } +impl Reject for ApiError {} + lazy_static! 
{ static ref MISSING_FIELD_REGEX: Regex = Regex::new("missing field `(.*)`").unwrap(); } @@ -248,6 +255,7 @@ pub struct JsonDeserializeError { } impl StdError for JsonDeserializeError {} +impl Reject for JsonDeserializeError {} impl Display for JsonDeserializeError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { @@ -309,12 +317,12 @@ impl From for Rejection { } // Receives `ApiError`s and `JsonDeserializeError` and return it in the RFC7807 format. -pub fn default_rejection_handler(err: warp::Rejection) -> Result { - if let Some(api_error) = err.find_cause::() { +pub async fn default_rejection_handler(err: warp::Rejection) -> Result { + if let Some(api_error) = err.find::() { Ok(api_error.clone().into_response()) - } else if let Some(json_error) = err.find_cause::() { + } else if let Some(json_error) = err.find::() { Ok(json_error.clone().into_response()) - } else if err.status() == http::status::StatusCode::METHOD_NOT_ALLOWED { + } else if err.find::().is_some() { Ok(ApiError::from_api_error_type(&DEFAULT_METHOD_NOT_ALLOWED_TYPE).into_response()) } else { Err(err) diff --git a/crates/interledger-http/src/lib.rs b/crates/interledger-http/src/lib.rs index 29eec8b03..211504dbd 100644 --- a/crates/interledger-http/src/lib.rs +++ b/crates/interledger-http/src/lib.rs @@ -2,15 +2,10 @@ //! //! Client and server implementations of the [ILP-Over-HTTP](https://github.com/interledger/rfcs/blob/master/0035-ilp-over-http/0035-ilp-over-http.md) bilateral communication protocol. //! This protocol is intended primarily for server-to-server communication between peers on the Interledger network. 
-use bytes::Buf; -use error::*; -use futures::Future; +use async_trait::async_trait; use interledger_service::{Account, Username}; -use mime::Mime; use secrecy::SecretString; -use serde::de::DeserializeOwned; use url::Url; -use warp::{self, filters::body::FullBody, Filter, Rejection}; mod client; mod server; @@ -28,124 +23,15 @@ pub trait HttpAccount: Account { /// The interface for Stores that can be used with the HttpServerService. // TODO do we need all of these constraints? +#[async_trait] pub trait HttpStore: Clone + Send + Sync + 'static { type Account: HttpAccount; /// Load account details based on the full HTTP Authorization header /// received on the incoming HTTP request. - fn get_account_from_http_auth( + async fn get_account_from_http_auth( &self, username: &Username, token: &str, - ) -> Box + Send>; -} - -pub fn deserialize_json( -) -> impl Filter + Copy { - warp::header::("content-type") - .and(warp::body::concat()) - .and_then(|content_type: String, buf: FullBody| { - let mime_type: Mime = content_type.parse().map_err::(|_| { - error::ApiError::bad_request() - .detail("Invalid content-type header.") - .into() - })?; - if mime_type.type_() != mime::APPLICATION_JSON.type_() { - return Err(error::ApiError::bad_request() - .detail("Invalid content-type.") - .into()); - } else if let Some(charset) = mime_type.get_param("charset") { - // Charset should be UTF-8 - // https://tools.ietf.org/html/rfc8259#section-8.1 - if charset != mime::UTF_8 { - return Err(error::ApiError::bad_request() - .detail("Charset should be UTF-8.") - .into()); - } - } - - let deserializer = &mut serde_json::Deserializer::from_slice(&buf.bytes()); - serde_path_to_error::deserialize(deserializer).map_err(|err| { - warp::reject::custom(JsonDeserializeError { - category: err.inner().classify(), - detail: err.inner().to_string(), - path: err.path().clone(), - }) - }) - }) -} - -#[cfg(test)] -mod tests { - use super::deserialize_json; - use serde::Deserialize; - use warp::test::request; 
- - #[derive(Deserialize, Clone)] - struct TestJsonStruct { - string_value: String, - } - - #[test] - fn deserialize_json_header() { - let json_filter = deserialize_json::(); - let body_correct = r#"{"string_value": "some string value"}"#; - let body_incorrect = r#"{"other_key": 0}"#; - - // `content-type` should be provided. - assert_eq!(request().body(body_correct).matches(&json_filter), false); - - // Should accept only "application/json" or "application/json; charset=utf-8" - assert_eq!( - request() - .body(body_correct) - .header("content-type", "text/plain") - .matches(&json_filter), - false - ); - assert_eq!( - request() - .body(body_correct) - .header("content-type", "application/json") - .matches(&json_filter), - true - ); - assert_eq!( - request() - .body(body_correct) - .header("content-type", "application/json; charset=ascii") - .matches(&json_filter), - false - ); - assert_eq!( - request() - .body(body_correct) - .header("content-type", "application/json; charset=utf-8") - .matches(&json_filter), - true - ); - assert_eq!( - request() - .body(body_correct) - .header("content-type", "application/json; charset=UTF-8") - .matches(&json_filter), - true - ); - - // Should accept only bodies that can be deserialized - assert_eq!( - request() - .body(body_incorrect) - .header("content-type", "application/json") - .matches(&json_filter), - false - ); - assert_eq!( - request() - .body(body_incorrect) - .header("content-type", "application/json; charset=utf-8") - .matches(&json_filter), - false - ); - } + ) -> Result; } diff --git a/crates/interledger-http/src/server.rs b/crates/interledger-http/src/server.rs index 1700a1f3e..6ad74c6eb 100644 --- a/crates/interledger-http/src/server.rs +++ b/crates/interledger-http/src/server.rs @@ -1,16 +1,13 @@ use super::{error::*, HttpStore}; -use bytes::{buf::Buf, Bytes, BytesMut}; -use futures::{ - future::{err, Either, FutureResult}, - Future, -}; +use futures::TryFutureExt; use interledger_packet::Prepare; use 
interledger_service::Username; use interledger_service::{IncomingRequest, IncomingService}; use log::error; use secrecy::{ExposeSecret, SecretString}; -use std::{convert::TryFrom, net::SocketAddr}; -use warp::{self, Filter, Rejection}; +use std::convert::TryFrom; +use std::net::SocketAddr; +use warp::{Filter, Rejection}; /// Max message size that is allowed to transfer from a request or a message. pub const MAX_PACKET_SIZE: u64 = 40000; @@ -24,10 +21,75 @@ pub struct HttpServer { store: S, } -impl HttpServer +#[inline] +async fn get_account( + store: S, + path_username: &Username, + password: &SecretString, +) -> Result where - I: IncomingService + Clone + Send + Sync + 'static, S: HttpStore, +{ + if password.expose_secret().len() < BEARER_TOKEN_START { + return Err(()); + } + store + .get_account_from_http_auth( + &path_username, + &password.expose_secret()[BEARER_TOKEN_START..], + ) + .await +} + +#[inline] +async fn ilp_over_http( + path_username: Username, + password: SecretString, + body: bytes05::Bytes, + store: S, + incoming: I, +) -> Result +where + S: HttpStore, + I: IncomingService + Clone, +{ + let mut incoming = incoming.clone(); + let account = get_account(store, &path_username, &password) + .map_err(|_| -> Rejection { + error!("Invalid authorization provided for user: {}", path_username); + ApiError::unauthorized().into() + }) + .await?; + + let buffer = bytes::BytesMut::from(body.as_ref()); + if let Ok(prepare) = Prepare::try_from(buffer) { + let result = incoming + .handle_request(IncomingRequest { + from: account, + prepare, + }) + .await; + + let bytes: bytes05::BytesMut = match result { + Ok(fulfill) => fulfill.into(), + Err(reject) => reject.into(), + }; + + Ok(warp::http::Response::builder() + .header("Content-Type", "application/octet-stream") + .status(200) + .body(bytes.freeze()) // TODO: bring this back + .unwrap()) + } else { + error!("Body was not a valid Prepare packet"); + Err(Rejection::from(ApiError::invalid_ilp_packet())) + } +} + 
+impl HttpServer +where + I: IncomingService + Clone + Send + Sync, + S: HttpStore + Clone, { pub fn new(incoming: I, store: S) -> Self { HttpServer { incoming, store } @@ -35,71 +97,28 @@ where pub fn as_filter( &self, - ) -> impl warp::Filter,), Error = warp::Rejection> + Clone - { - let incoming = self.incoming.clone(); + ) -> impl warp::Filter + Clone { let store = self.store.clone(); - - warp::post2() + let incoming = self.incoming.clone(); + let with_store = warp::any().map(move || store.clone()).boxed(); + let with_incoming = warp::any().map(move || incoming.clone()); + warp::post() .and(warp::path("accounts")) - .and(warp::path::param2::()) + .and(warp::path::param::()) .and(warp::path("ilp")) .and(warp::path::end()) .and(warp::header::("authorization")) - .and_then(move |path_username: Username, password: SecretString| { - if password.expose_secret().len() < BEARER_TOKEN_START { - return Either::A(err(ApiError::bad_request().into())); - } - Either::B( - store - .get_account_from_http_auth( - &path_username, - &password.expose_secret()[BEARER_TOKEN_START..], - ) - .map_err(move |_| -> Rejection { - error!("Invalid authorization provided for user: {}", path_username); - ApiError::unauthorized().into() - }), - ) - }) .and(warp::body::content_length_limit(MAX_PACKET_SIZE)) - .and(warp::body::concat()) - .and_then( - move |account: S::Account, - body: warp::body::FullBody| - -> Either<_, FutureResult<_, Rejection>> { - // TODO don't copy ILP packet - let buffer = BytesMut::from(body.bytes()); - if let Ok(prepare) = Prepare::try_from(buffer) { - Either::A( - incoming - .clone() - .handle_request(IncomingRequest { - from: account, - prepare, - }) - .then(|result| { - let bytes: BytesMut = match result { - Ok(fulfill) => fulfill.into(), - Err(reject) => reject.into(), - }; - Ok(warp::http::Response::builder() - .header("Content-Type", "application/octet-stream") - .status(200) - .body(bytes.freeze()) - .unwrap()) - }), - ) - } else { - error!("Body was not a valid 
Prepare packet"); - Either::B(err(ApiError::invalid_ilp_packet().into())) - } - }, - ) + .and(warp::body::bytes()) + .and(with_store) + .and(with_incoming) + .and_then(ilp_over_http) } - pub fn bind(&self, addr: SocketAddr) -> impl Future + Send { - warp::serve(self.as_filter()).bind(addr) + // Do we really need to bind self to static? + pub async fn bind(&'static self, addr: SocketAddr) { + let filter = self.as_filter(); + warp::serve(filter).run(addr).await } } @@ -107,8 +126,8 @@ where mod tests { use super::*; use crate::HttpAccount; - use bytes::{Bytes, BytesMut}; - use futures::future::ok; + use async_trait::async_trait; + use bytes::BytesMut; use http::Response; use interledger_packet::{Address, ErrorCode, PrepareBuilder, RejectBuilder}; use interledger_service::{incoming_service_fn, Account}; @@ -136,11 +155,11 @@ mod tests { } const AUTH_PASSWORD: &str = "password"; - fn api_call( + async fn api_call( api: &F, endpoint: &str, // /ilp or /accounts/:username/ilp auth: &str, // simple bearer or overloaded username+password - ) -> Response + ) -> Response where F: warp::Filter + 'static, F::Extract: warp::Reply, @@ -152,19 +171,20 @@ mod tests { .header("Content-length", 1000) .body(PREPARE_BYTES.clone()) .reply(api) + .await } - #[test] - fn new_api_test() { + #[tokio::test] + async fn new_api_test() { let store = TestStore; let incoming = incoming_service_fn(|_request| { - Box::new(err(RejectBuilder { + Err(RejectBuilder { code: ErrorCode::F02_UNREACHABLE, message: b"No other incoming handler!", data: &[], triggered_by: None, } - .build())) + .build()) }); let api = HttpServer::new(incoming, store) .as_filter() @@ -175,11 +195,12 @@ mod tests { &api, "/accounts/alice/ilp", &format!("{}:{}", USERNAME.to_string(), AUTH_PASSWORD), - ); + ) + .await; assert_eq!(resp.status().as_u16(), 401); // Works with just the password - let resp = api_call(&api, "/accounts/alice/ilp", AUTH_PASSWORD); + let resp = api_call(&api, "/accounts/alice/ilp", AUTH_PASSWORD).await; 
assert_eq!(resp.status().as_u16(), 200); } @@ -218,17 +239,18 @@ mod tests { #[derive(Debug, Clone)] struct TestStore; + #[async_trait] impl HttpStore for TestStore { type Account = TestAccount; - fn get_account_from_http_auth( + async fn get_account_from_http_auth( &self, username: &Username, token: &str, - ) -> Box + Send> { + ) -> Result { if username == &*USERNAME && token == AUTH_PASSWORD { - Box::new(ok(TestAccount)) + Ok(TestAccount) } else { - Box::new(err(())) + Err(()) } } } diff --git a/crates/interledger-ildcp/Cargo.toml b/crates/interledger-ildcp/Cargo.toml index 78e3409a2..bc2755674 100644 --- a/crates/interledger-ildcp/Cargo.toml +++ b/crates/interledger-ildcp/Cargo.toml @@ -10,8 +10,13 @@ repository = "https://github.com/interledger-rs/interledger-rs" [dependencies] bytes = { version = "0.4.12", default-features = false } byteorder = { version = "1.3.2", default-features = false } -futures = { version = "0.1.29", default-features = false } +futures = { version = "0.3", default-features = false } interledger-packet = { path = "../interledger-packet", version = "^0.4.0", default-features = false } interledger-service = { path = "../interledger-service", version = "^0.4.0", default-features = false } lazy_static = { version = "1.4.0", default-features = false } log = { version = "0.4.8", default-features = false } +async-trait = "0.1.22" + +[dev-dependencies] +tokio = { version = "0.2.6", features = ["macros","rt-core"]} +uuid = { version = "0.8.1", features = ["v4"] } diff --git a/crates/interledger-ildcp/src/client.rs b/crates/interledger-ildcp/src/client.rs index 06d1ef4c4..4c82c71e4 100644 --- a/crates/interledger-ildcp/src/client.rs +++ b/crates/interledger-ildcp/src/client.rs @@ -1,34 +1,30 @@ use super::packet::*; -use futures::Future; +use futures::future::TryFutureExt; use interledger_service::*; use log::{debug, error}; use std::convert::TryFrom; -/// Get the ILP address and asset details for a given account. 
-pub fn get_ildcp_info( - service: &mut S, - account: A, -) -> impl Future +/// Sends an ILDCP Request and receives the ILP address and asset details for a given account. +pub async fn get_ildcp_info(service: &mut S, account: A) -> Result where S: IncomingService, A: Account, { let prepare = IldcpRequest {}.to_prepare(); - service + let fulfill = service .handle_request(IncomingRequest { from: account, prepare, }) .map_err(|err| error!("Error getting ILDCP info: {:?}", err)) - .and_then(|fulfill| { - let response = - IldcpResponse::try_from(fulfill.into_data().freeze()).map_err(|err| { - error!( - "Unable to parse ILDCP response from fulfill packet: {:?}", - err - ); - })?; - debug!("Got ILDCP response: {:?}", response); - Ok(response) - }) + .await?; + + let response = IldcpResponse::try_from(fulfill.into_data().freeze()).map_err(|err| { + error!( + "Unable to parse ILDCP response from fulfill packet: {:?}", + err + ); + })?; + debug!("Got ILDCP response: {:?}", response); + Ok(response) } diff --git a/crates/interledger-ildcp/src/server.rs b/crates/interledger-ildcp/src/server.rs index f08ba5dbd..da2684eba 100644 --- a/crates/interledger-ildcp/src/server.rs +++ b/crates/interledger-ildcp/src/server.rs @@ -1,6 +1,6 @@ use super::packet::*; use super::Account; -use futures::future::ok; +use async_trait::async_trait; use interledger_packet::*; use interledger_service::*; use log::debug; @@ -27,14 +27,13 @@ where } } +#[async_trait] impl IncomingService for IldcpService where - I: IncomingService, + I: IncomingService + Send, A: Account, { - type Future = BoxedIlpFuture; - - fn handle_request(&mut self, request: IncomingRequest) -> Self::Future { + async fn handle_request(&mut self, request: IncomingRequest) -> IlpResult { if is_ildcp_request(&request.prepare) { let from = request.from.ilp_address(); let builder = IldcpResponseBuilder { @@ -44,10 +43,72 @@ where }; debug!("Responding to query for ildcp info by account: {:?}", from); let response = builder.build(); - 
let fulfill = Fulfill::from(response); - Box::new(ok(fulfill)) + Ok(Fulfill::from(response)) } else { - Box::new(self.next.handle_request(request)) + self.next.handle_request(request).await + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::get_ildcp_info; + use lazy_static::lazy_static; + use std::str::FromStr; + use uuid::Uuid; + + lazy_static! { + pub static ref ALICE: Username = Username::from_str("alice").unwrap(); + pub static ref EXAMPLE_ADDRESS: Address = Address::from_str("example.alice").unwrap(); + } + + #[derive(Clone, Debug, Copy)] + struct TestAccount; + + impl Account for TestAccount { + fn id(&self) -> Uuid { + Uuid::new_v4() + } + + fn username(&self) -> &Username { + &ALICE + } + + fn asset_scale(&self) -> u8 { + 9 } + + fn asset_code(&self) -> &str { + "XYZ" + } + + fn ilp_address(&self) -> &Address { + &EXAMPLE_ADDRESS + } + } + + #[tokio::test] + async fn handles_request() { + let from = TestAccount; + let prepare = IldcpRequest {}.to_prepare(); + let req = IncomingRequest { from, prepare }; + let mut service = IldcpService::new(incoming_service_fn(|_| { + Err(RejectBuilder { + code: ErrorCode::F02_UNREACHABLE, + message: b"No other incoming handler!", + data: &[], + triggered_by: None, + } + .build()) + })); + + let result = service.handle_request(req).await.unwrap(); + assert_eq!(result.data().len(), 19); + + let ildpc_info = get_ildcp_info(&mut service, from).await.unwrap(); + assert_eq!(ildpc_info.ilp_address(), EXAMPLE_ADDRESS.clone()); + assert_eq!(ildpc_info.asset_code(), b"XYZ"); + assert_eq!(ildpc_info.asset_scale(), 9); } } diff --git a/crates/interledger-packet/Cargo.toml b/crates/interledger-packet/Cargo.toml index 4ec721e16..d3d926ed4 100644 --- a/crates/interledger-packet/Cargo.toml +++ b/crates/interledger-packet/Cargo.toml @@ -9,6 +9,7 @@ repository = "https://github.com/interledger-rs/interledger-rs" [dependencies] byteorder = { version = "1.3.2", default-features = false } +bytes05 = { package = 
"bytes", version = "0.5", default-features = false, features = ["serde"] } bytes = { version = "0.4.12", default-features = false, features = ["serde"] } chrono = { version = "0.4.9", default-features = false } hex = { version = "0.4.0", default-features = false } diff --git a/crates/interledger-packet/src/errors.rs b/crates/interledger-packet/src/errors.rs index 25d8ef6ae..33beeccea 100644 --- a/crates/interledger-packet/src/errors.rs +++ b/crates/interledger-packet/src/errors.rs @@ -41,7 +41,7 @@ quick_error! { description(descr) display("Invalid Packet {}", descr) } - Other(err: Box) { + Other(err: Box) { cause(&**err) description(err.description()) display("Error {}", err.description()) diff --git a/crates/interledger-packet/src/packet.rs b/crates/interledger-packet/src/packet.rs index 9c527a746..9d3d1b1c6 100644 --- a/crates/interledger-packet/src/packet.rs +++ b/crates/interledger-packet/src/packet.rs @@ -359,6 +359,15 @@ impl From for BytesMut { } } +impl From for bytes05::BytesMut { + fn from(fulfill: Fulfill) -> Self { + // bytes 0.4 + let b = fulfill.buffer.as_ref(); + // convert to Bytes05 + bytes05::BytesMut::from(b) + } +} + impl fmt::Debug for Fulfill { fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter @@ -481,6 +490,15 @@ impl From for BytesMut { } } +impl From for bytes05::BytesMut { + fn from(reject: Reject) -> Self { + // bytes 0.4 + let b = reject.buffer.as_ref(); + // convert to Bytes05 + bytes05::BytesMut::from(b) + } +} + impl fmt::Debug for Reject { fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter diff --git a/crates/interledger-router/Cargo.toml b/crates/interledger-router/Cargo.toml index e3219e16f..aedd5e7aa 100644 --- a/crates/interledger-router/Cargo.toml +++ b/crates/interledger-router/Cargo.toml @@ -14,6 +14,8 @@ interledger-service = { path = "../interledger-service", version = "^0.4.0", def log = { version = "0.4.8", default-features = false } parking_lot = { version = "0.9.0", 
default-features = false } uuid = { version = "0.8.1", default-features = false, features = ["v4"]} +async-trait = "0.1.22" [dev-dependencies] lazy_static = { version = "1.4.0", default-features = false } +tokio = { version = "0.2.6", features = ["rt-core", "macros"]} diff --git a/crates/interledger-router/src/router.rs b/crates/interledger-router/src/router.rs index c03b42309..0866b5c70 100644 --- a/crates/interledger-router/src/router.rs +++ b/crates/interledger-router/src/router.rs @@ -1,5 +1,5 @@ use super::RouterStore; -use futures::{future::err, Future}; +use async_trait::async_trait; use interledger_packet::{ErrorCode, RejectBuilder}; use interledger_service::*; use log::{error, trace}; @@ -38,19 +38,18 @@ where } } +#[async_trait] impl IncomingService for Router where S: AddressStore + RouterStore, O: OutgoingService + Clone + Send + 'static, { - type Future = BoxedIlpFuture; - /// Figures out the next node to pass the received Prepare packet to. /// /// Firstly, it checks if there is a direct path for that account and uses that. /// If not it scans through the routing table and checks if the route prefix matches /// the prepare packet's destination or if it's a catch-all address (i.e. 
empty prefix) - fn handle_request(&mut self, request: IncomingRequest) -> Self::Future { + async fn handle_request(&mut self, request: IncomingRequest) -> IlpResult { let destination = request.prepare.destination(); let mut next_hop = None; let routing_table = self.store.routing_table(); @@ -92,24 +91,22 @@ where if let Some(account_id) = next_hop { let mut next = self.next.clone(); - Box::new( - self.store - .get_accounts(vec![account_id]) - .map_err(move |_| { - error!("No record found for account: {}", account_id); - RejectBuilder { - code: ErrorCode::F02_UNREACHABLE, - message: &[], - triggered_by: Some(&ilp_address), - data: &[], - } - .build() - }) - .and_then(move |mut accounts| { - let request = request.into_outgoing(accounts.remove(0)); - next.send_request(request) - }), - ) + match self.store.get_accounts(vec![account_id]).await { + Ok(mut accounts) => { + let request = request.into_outgoing(accounts.remove(0)); + next.send_request(request).await + } + Err(_) => { + error!("No record found for account: {}", account_id); + Err(RejectBuilder { + code: ErrorCode::F02_UNREACHABLE, + message: &[], + triggered_by: Some(&ilp_address), + data: &[], + } + .build()) + } + } } else { error!( "No route found for request {}: {:?}", @@ -129,13 +126,13 @@ where }, request ); - Box::new(err(RejectBuilder { + Err(RejectBuilder { code: ErrorCode::F02_UNREACHABLE, message: &[], triggered_by: Some(&ilp_address), data: &[], } - .build())) + .build()) } } } @@ -190,36 +187,29 @@ mod tests { routes: HashMap, } + #[async_trait] impl AccountStore for TestStore { type Account = TestAccount; - fn get_accounts( - &self, - account_ids: Vec, - ) -> Box, Error = ()> + Send> { - Box::new(ok(account_ids.into_iter().map(TestAccount).collect())) + async fn get_accounts(&self, account_ids: Vec) -> Result, ()> { + Ok(account_ids.into_iter().map(TestAccount).collect()) } // stub implementation (not used in these tests) - fn get_account_id_from_username( - &self, - _username: &Username, - ) -> 
Box + Send> { - Box::new(ok(Uuid::new_v4())) + async fn get_account_id_from_username(&self, _username: &Username) -> Result { + Ok(Uuid::new_v4()) } } + #[async_trait] impl AddressStore for TestStore { /// Saves the ILP Address in the store's memory and database - fn set_ilp_address( - &self, - _ilp_address: Address, - ) -> Box + Send> { - unimplemented!() + async fn set_ilp_address(&self, _ilp_address: Address) -> Result<(), ()> { + Ok(()) } - fn clear_ilp_address(&self) -> Box + Send> { - unimplemented!() + async fn clear_ilp_address(&self) -> Result<(), ()> { + Ok(()) } /// Get's the store's ilp address from memory @@ -234,8 +224,8 @@ mod tests { } } - #[test] - fn empty_routing_table() { + #[tokio::test] + async fn empty_routing_table() { let mut router = Router::new( TestStore { routes: HashMap::new(), @@ -261,12 +251,12 @@ mod tests { } .build(), }) - .wait(); + .await; assert!(result.is_err()); } - #[test] - fn no_route() { + #[tokio::test] + async fn no_route() { let mut router = Router::new( TestStore { routes: HashMap::from_iter( @@ -294,12 +284,12 @@ mod tests { } .build(), }) - .wait(); + .await; assert!(result.is_err()); } - #[test] - fn finds_exact_route() { + #[tokio::test] + async fn finds_exact_route() { let mut router = Router::new( TestStore { routes: HashMap::from_iter( @@ -327,12 +317,12 @@ mod tests { } .build(), }) - .wait(); + .await; assert!(result.is_ok()); } - #[test] - fn catch_all_route() { + #[tokio::test] + async fn catch_all_route() { let mut router = Router::new( TestStore { routes: HashMap::from_iter(vec![(String::new(), Uuid::new_v4())].into_iter()), @@ -358,12 +348,12 @@ mod tests { } .build(), }) - .wait(); + .await; assert!(result.is_ok()); } - #[test] - fn finds_matching_prefix() { + #[tokio::test] + async fn finds_matching_prefix() { let mut router = Router::new( TestStore { routes: HashMap::from_iter( @@ -391,12 +381,12 @@ mod tests { } .build(), }) - .wait(); + .await; assert!(result.is_ok()); } - #[test] - fn 
finds_longest_matching_prefix() { + #[tokio::test] + async fn finds_longest_matching_prefix() { let id0 = Uuid::from_slice(&[0; 16]).unwrap(); let id1 = Uuid::from_slice(&[1; 16]).unwrap(); let id2 = Uuid::from_slice(&[2; 16]).unwrap(); @@ -436,7 +426,7 @@ mod tests { } .build(), }) - .wait(); + .await; assert!(result.is_ok()); assert_eq!(to.lock().take().unwrap().0, id2); } diff --git a/crates/interledger-service-util/Cargo.toml b/crates/interledger-service-util/Cargo.toml index 8af5d60fe..97cfdc53e 100644 --- a/crates/interledger-service-util/Cargo.toml +++ b/crates/interledger-service-util/Cargo.toml @@ -11,19 +11,19 @@ repository = "https://github.com/interledger-rs/interledger-rs" bytes = { version = "0.4.12", default-features = false } byteorder = { version = "1.3.2", default-features = false } chrono = { version = "0.4.9", default-features = false, features = ["clock"] } -futures = { version = "0.1.29", default-features = false } +futures = { version = "0.3.1", default-features = false } hex = { version = "0.4.0", default-features = false } interledger-packet = { path = "../interledger-packet", version = "^0.4.0", default-features = false } interledger-service = { path = "../interledger-service", version = "^0.4.0", default-features = false } interledger-settlement = { path = "../interledger-settlement", version = "^0.3.0", default-features = false, features = ["settlement_api"] } lazy_static = { version = "1.4.0", default-features = false } log = { version = "0.4.8", default-features = false } -reqwest = { version = "0.9.22", default-features = false, features = ["default-tls"] } +reqwest = { version = "0.10.0", default-features = false, features = ["default-tls"] } ring = { version = "0.16.9", default-features = false } secrecy = { version = "0.5.1", default-features = false, features = ["alloc", "serde"] } serde = { version = "1.0.101", default-features = false, features = ["derive"]} -tokio = { version = "0.1.22", default-features = false } 
-tokio-executor = { version = "0.1.8", default-features = false } +tokio = { version = "0.2.6", default-features = false, features = ["macros", "time"] } +async-trait = "0.1.22" [dev-dependencies] -uuid = { version = "0.8.1", default-features = false} \ No newline at end of file +uuid = { version = "0.8.1", default-features = false} diff --git a/crates/interledger-service-util/src/balance_service.rs b/crates/interledger-service-util/src/balance_service.rs index 93103a2c0..399776f9d 100644 --- a/crates/interledger-service-util/src/balance_service.rs +++ b/crates/interledger-service-util/src/balance_service.rs @@ -1,5 +1,6 @@ -use futures::Future; -use interledger_packet::{ErrorCode, Fulfill, Reject, RejectBuilder}; +use async_trait::async_trait; +use futures::TryFutureExt; +use interledger_packet::{ErrorCode, RejectBuilder}; use interledger_service::*; use interledger_settlement::{ api::SettlementClient, @@ -7,32 +8,31 @@ use interledger_settlement::{ }; use log::{debug, error}; use std::marker::PhantomData; -use tokio_executor::spawn; +#[async_trait] pub trait BalanceStore: AccountStore { /// Fetch the current balance for the given account. 
- fn get_balance(&self, account: Self::Account) - -> Box + Send>; + async fn get_balance(&self, account: Self::Account) -> Result; - fn update_balances_for_prepare( + async fn update_balances_for_prepare( &self, from_account: Self::Account, incoming_amount: u64, - ) -> Box + Send>; + ) -> Result<(), ()>; /// Increases the account's balance, and returns the updated balance /// along with the amount which should be settled - fn update_balances_for_fulfill( + async fn update_balances_for_fulfill( &self, to_account: Self::Account, outgoing_amount: u64, - ) -> Box + Send>; + ) -> Result<(i64, u64), ()>; - fn update_balances_for_reject( + async fn update_balances_for_reject( &self, from_account: Self::Account, incoming_amount: u64, - ) -> Box + Send>; + ) -> Result<(), ()>; } /// # Balance Service @@ -64,6 +64,7 @@ where } } +#[async_trait] impl OutgoingService for BalanceService where S: AddressStore @@ -74,10 +75,8 @@ where + Sync + 'static, O: OutgoingService + Send + Clone + 'static, - A: SettlementAccount + 'static, + A: SettlementAccount + Send + Sync + 'static, { - type Future = BoxedIlpFuture; - /// On send message: /// 1. Calls `store.update_balances_for_prepare` with the prepare. /// If it fails, it replies with a reject @@ -86,21 +85,17 @@ where /// INDEPENDENTLY of if the call suceeds or fails. This makes a `sendMoney` call if the fulfill puts the account's balance over the `settle_threshold` /// - if it returns an reject calls `store.update_balances_for_reject` and replies with the fulfill /// INDEPENDENTLY of if the call suceeds or fails - fn send_request( - &mut self, - request: OutgoingRequest, - ) -> Box + Send> { + async fn send_request(&mut self, request: OutgoingRequest) -> IlpResult { // Don't bother touching the store for zero-amount packets. // Note that it is possible for the original_amount to be >0 while the // prepare.amount is 0, because the original amount could be rounded down // to 0 when exchange rate and scale change are applied. 
if request.prepare.amount() == 0 && request.original_amount == 0 { - return Box::new(self.next.send_request(request)); + return self.next.send_request(request).await; } let mut next = self.next.clone(); let store = self.store.clone(); - let store_clone = store.clone(); let from = request.from.clone(); let from_clone = from.clone(); let from_id = from.id(); @@ -123,79 +118,92 @@ where // _eventually_ be completed. Because of this settlement_engine guarantee, the Connector can // operate as-if the settlement engine has completed. Finally, if the request to the settlement-engine // fails, this amount will be re-added back to balance. - Box::new( - self.store - .update_balances_for_prepare( - from.clone(), - incoming_amount, - ) - .map_err(move |_| { - debug!("Rejecting packet because it would exceed a balance limit"); - RejectBuilder { - code: ErrorCode::T04_INSUFFICIENT_LIQUIDITY, - message: &[], - triggered_by: Some(&ilp_address), - data: &[], - } - .build() - }) - .and_then(move |_| { - next.send_request(request) - .and_then(move |fulfill| { - // We will spawn a task to update the balances in the database - // so that we DO NOT wait for the database before sending the - // Fulfill packet back to our peer. Due to how the flow of ILP - // packets work, once we get the Fulfill back from the next node - // we need to propagate it backwards ASAP. If we do not give the - // previous node the fulfillment in time, they won't pay us back - // for the packet we forwarded. Note this means that we will - // relay the fulfillment _even if saving to the DB fails._ - let fulfill_balance_update = store.update_balances_for_fulfill( - to.clone(), - outgoing_amount, - ) - .map_err(move |_| error!("Error applying balance changes for fulfill from account: {} to account: {}. Incoming amount was: {}, outgoing amount was: {}", from_id, to_id, incoming_amount, outgoing_amount)) - .and_then(move |(balance, amount_to_settle)| { - debug!("Account balance after fulfill: {}. 
Amount that needs to be settled: {}", balance, amount_to_settle); - if amount_to_settle > 0 && to_has_engine { - // Note that if this program crashes after changing the balance (in the PROCESS_FULFILL script) - // and the send_settlement fails but the program isn't alive to hear that, the balance will be incorrect. - // No other instance will know that it was trying to send an outgoing settlement. We could - // make this more robust by saving something to the DB about the outgoing settlement when we change the balance - // but then we would also need to prevent a situation where every connector instance is polling the - // settlement engine for the status of each - // outgoing settlement and putting unnecessary - // load on the settlement engine. - spawn(settlement_client - .send_settlement(to, amount_to_settle) - .or_else(move |_| store.refund_settlement(to_id, amount_to_settle))); - } - Ok(()) - }); + self.store + .update_balances_for_prepare(from.clone(), incoming_amount) + .map_err(move |_| { + debug!("Rejecting packet because it would exceed a balance limit"); + RejectBuilder { + code: ErrorCode::T04_INSUFFICIENT_LIQUIDITY, + message: &[], + triggered_by: Some(&ilp_address), + data: &[], + } + .build() + }) + .await?; - spawn(fulfill_balance_update); + match next.send_request(request).await { + Ok(fulfill) => { + // We will spawn a task to update the balances in the database + // so that we DO NOT wait for the database before sending the + // Fulfill packet back to our peer. Due to how the flow of ILP + // packets work, once we get the Fulfill back from the next node + // we need to propagate it backwards ASAP. If we do not give the + // previous node the fulfillment in time, they won't pay us back + // for the packet we forwarded. 
Note this means that we will + // relay the fulfillment _even if saving to the DB fails._ + tokio::spawn(async move { + let (balance, amount_to_settle) = match store + .update_balances_for_fulfill(to.clone(), outgoing_amount) + .await + { + Ok(r) => r, + Err(_) => { + error!("Error applying balance changes for fulfill from account: {} to account: {}. Incoming amount was: {}, outgoing amount was: {}", from_id, to_id, incoming_amount, outgoing_amount); + return Err(()); + } + }; + debug!( + "Account balance after fulfill: {}. Amount that needs to be settled: {}", + balance, amount_to_settle + ); + if amount_to_settle > 0 && to_has_engine { + // Note that if this program crashes after changing the balance (in the PROCESS_FULFILL script) + // and the send_settlement fails but the program isn't alive to hear that, the balance will be incorrect. + // No other instance will know that it was trying to send an outgoing settlement. We could + // make this more robust by saving something to the DB about the outgoing settlement when we change the balance + // but then we would also need to prevent a situation where every connector instance is polling the + // settlement engine for the status of each + // outgoing settlement and putting unnecessary + // load on the settlement engine. + tokio::spawn(async move { + if settlement_client + .send_settlement(to, amount_to_settle) + .await + .is_err() + { + store.refund_settlement(to_id, amount_to_settle).await?; + } + Ok::<(), ()>(()) + }); + } + Ok(()) + }); - Ok(fulfill) - }) - .or_else(move |reject| { - // Similar to the logic for handling the Fulfill packet above, we - // spawn a task to update the balance for the Reject in parallel - // rather than waiting for the database to update before relaying - // the packet back. In this case, the only substantive difference - // would come from if the DB operation fails or takes too long. 
- // The packet is already rejected so it's more useful for the sender - // to get the error message from the original Reject packet rather - // than a less specific one saying that this node had an "internal - // error" caused by a database issue. - let reject_balance_update = store_clone.update_balances_for_reject( - from_clone.clone(), - incoming_amount, - ).map_err(move |_| error!("Error rolling back balance change for accounts: {} and {}. Incoming amount was: {}, outgoing amount was: {}", from_clone.id(), to_clone.id(), incoming_amount, outgoing_amount)); - spawn(reject_balance_update); + Ok(fulfill) + } + Err(reject) => { + // Similar to the logic for handling the Fulfill packet above, we + // spawn a task to update the balance for the Reject in parallel + // rather than waiting for the database to update before relaying + // the packet back. In this case, the only substantive difference + // would come from if the DB operation fails or takes too long. + // The packet is already rejected so it's more useful for the sender + // to get the error message from the original Reject packet rather + // than a less specific one saying that this node had an "internal + // error" caused by a database issue. + tokio::spawn({ + let store_clone = self.store.clone(); + async move { + store_clone.update_balances_for_reject( + from_clone.clone(), + incoming_amount, + ).map_err(move |_| error!("Error rolling back balance change for accounts: {} and {}. 
Incoming amount was: {}, outgoing amount was: {}", from_clone.id(), to_clone.id(), incoming_amount, outgoing_amount)).await + } + }); - Err(reject) - }) - }), - ) + Err(reject) + } + } } } diff --git a/crates/interledger-service-util/src/echo_service.rs b/crates/interledger-service-util/src/echo_service.rs index 1cd227f17..9fc69ae6e 100644 --- a/crates/interledger-service-util/src/echo_service.rs +++ b/crates/interledger-service-util/src/echo_service.rs @@ -1,7 +1,7 @@ +use async_trait::async_trait; use byteorder::ReadBytesExt; use bytes::{BufMut, BytesMut}; use core::borrow::Borrow; -use futures::future::err; use interledger_packet::{ oer::BufOerExt, Address, ErrorCode, Prepare, PrepareBuilder, RejectBuilder, }; @@ -49,20 +49,19 @@ where } } +#[async_trait] impl IncomingService for EchoService where - I: IncomingService, - S: AddressStore, - A: Account, + I: IncomingService + Send, + S: AddressStore + Send, + A: Account + Send, { - type Future = BoxedIlpFuture; - - fn handle_request(&mut self, mut request: IncomingRequest) -> Self::Future { + async fn handle_request(&mut self, mut request: IncomingRequest) -> IlpResult { let ilp_address = self.store.get_ilp_address(); let should_echo = request.prepare.destination() == ilp_address && request.prepare.data().starts_with(ECHO_PREFIX.as_bytes()); if !should_echo { - return Box::new(self.next.handle_request(request)); + return self.next.handle_request(request).await; } debug!("Responding to Echo protocol request: {:?}", request); @@ -78,23 +77,23 @@ where Ok(value) => value, Err(error) => { eprintln!("Could not read packet type: {:?}", error); - return Box::new(err(RejectBuilder { + return Err(RejectBuilder { code: ErrorCode::F01_INVALID_PACKET, message: b"Could not read echo packet type.", triggered_by: Some(&ilp_address), data: &[], } - .build())); + .build()); } }; if echo_packet_type == EchoPacketType::Response as u8 { // if the echo packet type is Response, just pass it to the next service // so that the initiator 
could handle this packet - return Box::new(self.next.handle_request(request)); + return self.next.handle_request(request).await; } if echo_packet_type != EchoPacketType::Request as u8 { eprintln!("The packet type is not acceptable: {}", echo_packet_type); - return Box::new(err(RejectBuilder { + return Err(RejectBuilder { code: ErrorCode::F01_INVALID_PACKET, message: format!( "The echo packet type: {} is not acceptable.", @@ -104,7 +103,7 @@ where triggered_by: Some(&ilp_address), data: &[], } - .build())); + .build()); } // check source address @@ -116,24 +115,24 @@ where "Could not parse source address from echo packet: {:?}", error ); - return Box::new(err(RejectBuilder { + return Err(RejectBuilder { code: ErrorCode::F01_INVALID_PACKET, message: b"Could not parse source address from Echo packet", triggered_by: Some(&ilp_address), data: &[], } - .build())); + .build()); } }, Err(error) => { eprintln!("Could not read source address: {:?}", error); - return Box::new(err(RejectBuilder { + return Err(RejectBuilder { code: ErrorCode::F01_INVALID_PACKET, message: b"Could not read source address.", triggered_by: Some(&ilp_address), data: &[], } - .build())); + .build()); } }; @@ -150,7 +149,7 @@ where } .build(); - Box::new(self.next.handle_request(request)) + self.next.handle_request(request).await } } @@ -214,7 +213,6 @@ impl<'a> EchoResponseBuilder<'a> { #[cfg(test)] mod echo_tests { use super::*; - use futures::future::Future; use interledger_packet::{FulfillBuilder, PrepareBuilder}; use interledger_service::incoming_service_fn; use lazy_static::lazy_static; @@ -232,16 +230,14 @@ mod echo_tests { #[derive(Clone)] struct TestStore(Address); + #[async_trait] impl AddressStore for TestStore { /// Saves the ILP Address in the store's memory and database - fn set_ilp_address( - &self, - _ilp_address: Address, - ) -> Box + Send> { + async fn set_ilp_address(&self, _ilp_address: Address) -> Result<(), ()> { unimplemented!() } - fn clear_ilp_address(&self) -> Box + Send> { + 
async fn clear_ilp_address(&self) -> Result<(), ()> { unimplemented!() } @@ -279,8 +275,8 @@ mod echo_tests { /// If the destination of the packet is not destined to the node's address, /// the node should not echo the packet. - #[test] - fn test_echo_packet_not_destined() { + #[tokio::test] + async fn test_echo_packet_not_destined() { let amount = 1; let expires_at = SystemTime::now() + Duration::from_secs(30); let fulfillment = &get_random_fulfillment(); @@ -319,14 +315,14 @@ mod echo_tests { // test let result = echo_service .handle_request(IncomingRequest { prepare, from }) - .wait(); + .await; assert!(result.is_ok()); } /// Even if the destination of the packet is the node's address, /// packets that don't have a correct echo prefix will not be handled as echo packets. - #[test] - fn test_echo_packet_without_echo_prefix() { + #[tokio::test] + async fn test_echo_packet_without_echo_prefix() { let amount = 1; let expires_at = SystemTime::now() + Duration::from_secs(30); let fulfillment = &get_random_fulfillment(); @@ -365,14 +361,14 @@ mod echo_tests { // test let result = echo_service .handle_request(IncomingRequest { prepare, from }) - .wait(); + .await; assert!(result.is_ok()); } /// If the destination of the packet is the node's address and the echo packet type is /// request, the service will echo the packet modifying destination to the `source_address`. - #[test] - fn test_echo_packet() { + #[tokio::test] + async fn test_echo_packet() { let amount = 1; let expires_at = SystemTime::now() + Duration::from_secs(30); let fulfillment = &get_random_fulfillment(); @@ -411,13 +407,13 @@ mod echo_tests { // test let result = echo_service .handle_request(IncomingRequest { prepare, from }) - .wait(); + .await; assert!(result.is_ok()); } /// If echo packet type is neither `1` nor `2`, the packet is considered to be malformed. 
- #[test] - fn test_invalid_echo_packet_type() { + #[tokio::test] + async fn test_invalid_echo_packet_type() { let amount = 1; let expires_at = SystemTime::now() + Duration::from_secs(30); let fulfillment = &get_random_fulfillment(); @@ -452,14 +448,14 @@ mod echo_tests { // test let result = echo_service .handle_request(IncomingRequest { prepare, from }) - .wait(); + .await; assert!(result.is_err()); } /// Even if the destination of the packet is the node's address and the data starts with /// echo prefix correctly, `source_address` may be broken. This is the case. - #[test] - fn test_invalid_source_address() { + #[tokio::test] + async fn test_invalid_source_address() { let amount = 1; let expires_at = SystemTime::now() + Duration::from_secs(30); let fulfillment = &get_random_fulfillment(); @@ -494,7 +490,7 @@ mod echo_tests { // test let result = echo_service .handle_request(IncomingRequest { prepare, from }) - .wait(); + .await; assert!(result.is_err()); } diff --git a/crates/interledger-service-util/src/exchange_rate_providers/coincap.rs b/crates/interledger-service-util/src/exchange_rate_providers/coincap.rs index 1dd0bf3dd..26f8e7fbe 100644 --- a/crates/interledger-service-util/src/exchange_rate_providers/coincap.rs +++ b/crates/interledger-service-util/src/exchange_rate_providers/coincap.rs @@ -1,7 +1,7 @@ -use futures::Future; +use futures::TryFutureExt; use lazy_static::lazy_static; use log::{error, warn}; -use reqwest::{r#async::Client, Url}; +use reqwest::{Client, Url}; use serde::Deserialize; use std::{collections::HashMap, str::FromStr}; @@ -25,50 +25,52 @@ struct RateResponse { data: Vec, } -pub fn query_coincap(client: &Client) -> impl Future, Error = ()> { - query_coincap_endpoint(client, COINCAP_ASSETS_URL.clone()) - .join(query_coincap_endpoint(client, COINCAP_RATES_URL.clone())) - .and_then(|(assets, rates)| { - let all_rates: HashMap = assets - .data - .into_iter() - .chain(rates.data.into_iter()) - .filter_map(|record| match 
f64::from_str(record.rate_usd.as_str()) { - Ok(rate) => Some((record.symbol.to_uppercase(), rate)), - Err(err) => { - warn!( - "Unable to parse {} rate as an f64: {} {:?}", - record.symbol, record.rate_usd, err - ); - None - } - }) - .collect(); - Ok(all_rates) +pub async fn query_coincap(client: &Client) -> Result, ()> { + println!("querying coincap"); + let (assets, rates) = futures::future::join( + query_coincap_endpoint(client, COINCAP_ASSETS_URL.clone()), + query_coincap_endpoint(client, COINCAP_RATES_URL.clone()), + ) + .await; + println!("queryied coincap {:?} {:?}", assets, rates); + + let all_rates: HashMap = assets? + .data + .into_iter() + .chain(rates?.data.into_iter()) + .filter_map(|record| match f64::from_str(record.rate_usd.as_str()) { + Ok(rate) => Some((record.symbol.to_uppercase(), rate)), + Err(err) => { + warn!( + "Unable to parse {} rate as an f64: {} {:?}", + record.symbol, record.rate_usd, err + ); + None + } }) + .collect(); + Ok(all_rates) } -fn query_coincap_endpoint( - client: &Client, - url: Url, -) -> impl Future { - client +async fn query_coincap_endpoint(client: &Client, url: Url) -> Result { + let res = client .get(url) .send() .map_err(|err| { error!("Error fetching exchange rates from CoinCap: {:?}", err); }) - .and_then(|res| { - res.error_for_status().map_err(|err| { - error!("HTTP error getting exchange rates from CoinCap: {:?}", err); - }) - }) - .and_then(|mut res| { - res.json().map_err(|err| { - error!( - "Error getting exchange rate response body from CoinCap, incorrect type: {:?}", - err - ); - }) + .await?; + + let res = res.error_for_status().map_err(|err| { + error!("HTTP error getting exchange rates from CoinCap: {:?}", err); + })?; + + res.json() + .map_err(|err| { + error!( + "Error getting exchange rate response body from CoinCap, incorrect type: {:?}", + err + ); }) + .await } diff --git a/crates/interledger-service-util/src/exchange_rate_providers/cryptocompare.rs 
b/crates/interledger-service-util/src/exchange_rate_providers/cryptocompare.rs index 2a418b3af..90f78f939 100644 --- a/crates/interledger-service-util/src/exchange_rate_providers/cryptocompare.rs +++ b/crates/interledger-service-util/src/exchange_rate_providers/cryptocompare.rs @@ -1,7 +1,7 @@ -use futures::Future; +use futures::TryFutureExt; use lazy_static::lazy_static; use log::error; -use reqwest::{r#async::Client, Url}; +use reqwest::{Client, Url}; use secrecy::{ExposeSecret, SecretString}; use serde::Deserialize; use std::{ @@ -47,44 +47,52 @@ struct Response { data: Vec, } -pub fn query_cryptocompare( +pub async fn query_cryptocompare( client: &Client, api_key: &SecretString, -) -> impl Future, Error = ()> { - client +) -> Result, ()> { + // ref: https://github.com/rust-lang/rust/pull/64856 + let header = format!("Apikey {}", api_key.expose_secret()).to_string(); + let res = client .get(CRYPTOCOMPARE_URL.clone()) // TODO don't copy the api key on every request - .header( - "Authorization", - format!("Apikey {}", api_key.expose_secret()).as_str(), - ) + .header("Authorization", header) .send() .map_err(|err| { - error!("Error fetching exchange rates from CryptoCompare: {:?}", err); + error!( + "Error fetching exchange rates from CryptoCompare: {:?}", + err + ); }) - .and_then(|res| { - res.error_for_status().map_err(|err| { - error!("HTTP error getting exchange rates from CryptoCompare: {:?}", err); - }) - }) - .and_then(|mut res| { - res.json().map_err(|err| { - error!( - "Error getting exchange rate response body from CryptoCompare, incorrect type: {:?}", - err - ); - }) + .await?; + + let res = res.error_for_status().map_err(|err| { + error!( + "HTTP error getting exchange rates from CryptoCompare: {:?}", + err + ); + })?; + + let res: Response = res + .json() + .map_err(|err| { + error!( + "Error getting exchange rate response body from CryptoCompare, incorrect type: {:?}", + err + ); }) - .and_then(|res: Response| { - let rates = res - .data - 
.into_iter() - .filter_map(|asset| if let Some(raw) = asset.raw { - Some((asset.coin_info.name.to_uppercase(), raw.usd.price)) - } else { - None - }) - .chain(once(("USD".to_string(), 1.0))); - Ok(HashMap::from_iter(rates)) + .await?; + + let rates = res + .data + .into_iter() + .filter_map(|asset| { + if let Some(raw) = asset.raw { + Some((asset.coin_info.name.to_uppercase(), raw.usd.price)) + } else { + None + } }) + .chain(once(("USD".to_string(), 1.0))); + Ok(HashMap::from_iter(rates)) } diff --git a/crates/interledger-service-util/src/exchange_rates_service.rs b/crates/interledger-service-util/src/exchange_rates_service.rs index 4e5bb83a3..73cec88ae 100644 --- a/crates/interledger-service-util/src/exchange_rates_service.rs +++ b/crates/interledger-service-util/src/exchange_rates_service.rs @@ -1,13 +1,11 @@ use super::exchange_rate_providers::*; -use futures::{ - future::{err, Either}, - Future, Stream, -}; -use interledger_packet::{ErrorCode, Fulfill, Reject, RejectBuilder}; +use async_trait::async_trait; +use futures::TryFutureExt; +use interledger_packet::{ErrorCode, RejectBuilder}; use interledger_service::*; use interledger_settlement::core::types::{Convert, ConvertDetails}; use log::{debug, error, trace, warn}; -use reqwest::r#async::Client; +use reqwest::Client; use secrecy::SecretString; use serde::Deserialize; use std::{ @@ -17,9 +15,8 @@ use std::{ atomic::{AtomicU32, Ordering}, Arc, }, - time::{Duration, Instant}, + time::Duration, }; -use tokio::{executor::spawn, timer::Interval}; // TODO should this whole file be moved to its own crate? @@ -67,25 +64,21 @@ where } } +#[async_trait] impl OutgoingService for ExchangeRateService where // TODO can we make these non-'static? 
S: AddressStore + ExchangeRateStore + Clone + Send + Sync + 'static, - O: OutgoingService + Send + Clone + 'static, - A: Account + Sync + 'static, + O: OutgoingService + Send + Sync + Clone + 'static, + A: Account + Send + Sync + 'static, { - type Future = BoxedIlpFuture; - /// On send request: /// 1. If the prepare packet's amount is 0, it just forwards /// 1. Retrieves the exchange rate from the store (the store independently is responsible for polling the rates) /// - return reject if the call to the store fails /// 1. Calculates the exchange rate AND scales it up/down depending on how many decimals each asset requires /// 1. Updates the amount in the prepare packet and forwards it - fn send_request( - &mut self, - mut request: OutgoingRequest, - ) -> Box + Send> { + async fn send_request(&mut self, mut request: OutgoingRequest) -> IlpResult { let ilp_address = self.store.get_ilp_address(); if request.prepare.amount() > 0 { let rate: f64 = if request.from.asset_code() == request.to.asset_code() { @@ -105,7 +98,7 @@ where request.from.asset_code(), request.to.asset_code() ); - return Box::new(err(RejectBuilder { + return Err(RejectBuilder { code: ErrorCode::T00_INTERNAL_ERROR, message: format!( "No exchange rate available from asset: {} to: {}", @@ -116,7 +109,7 @@ where triggered_by: Some(&ilp_address), data: &[], } - .build())); + .build()); }; // Apply spread @@ -165,13 +158,13 @@ where ) }; - return Box::new(err(RejectBuilder { + return Err(RejectBuilder { code, message: message.as_bytes(), triggered_by: Some(&ilp_address), data: &[], } - .build())); + .build()); } request.prepare.set_amount(outgoing_amount as u64); trace!("Converted incoming amount of: {} {} (scale {}) from account {} to outgoing amount of: {} {} (scale {}) for account {}", @@ -183,7 +176,7 @@ where // returns an error. Happens due to float // multiplication overflow . 
// (float overflow in Rust produces +inf) - return Box::new(err(RejectBuilder { + return Err(RejectBuilder { code: ErrorCode::F08_AMOUNT_TOO_LARGE, message: format!( "Could not convert exchange rate from {}:{} to: {}:{}. Got incoming amount: {}", @@ -197,12 +190,12 @@ where triggered_by: Some(&ilp_address), data: &[], } - .build())); + .build()); } } } - Box::new(self.next.send_request(request)) + self.next.send_request(request).await } } @@ -230,6 +223,7 @@ pub enum ExchangeRateProvider { } /// Poll exchange rate providers for the current exchange rates +#[derive(Clone)] pub struct ExchangeRateFetcher { provider: ExchangeRateProvider, consecutive_failed_polls: Arc, @@ -256,47 +250,40 @@ where } } - pub fn fetch_on_interval(self, interval: Duration) -> impl Future { + // TODO: This compiles on nightly but fails on 1.39? + pub fn spawn_interval(self, interval: Duration) { debug!( "Starting interval to poll exchange rate provider: {:?} for rates", self.provider ); - Interval::new(Instant::now(), interval) - .map_err(|err| { - error!( - "Interval error, no longer fetching exchange rates: {:?}", - err - ); - }) - .for_each(move |_| { - self.update_rates().then(|_| { - // Ignore errors so that they don't cause the Interval to stop - Ok(()) - }) - }) - } - - pub fn spawn_interval(self, interval: Duration) { - spawn(self.fetch_on_interval(interval)); + let interval = async move { + let mut interval = tokio::time::interval(interval); + loop { + interval.tick().await; + // Ignore errors so that they don't cause the Interval to stop + let _ = self.update_rates().await; + } + }; + tokio::spawn(interval); } - fn fetch_rates(&self) -> impl Future, Error = ()> { + async fn fetch_rates(&self) -> Result, ()> { match self.provider { ExchangeRateProvider::CryptoCompare(ref api_key) => { - Either::A(query_cryptocompare(&self.client, api_key)) + query_cryptocompare(&self.client, api_key).await } - ExchangeRateProvider::CoinCap => Either::B(query_coincap(&self.client)), + 
ExchangeRateProvider::CoinCap => query_coincap(&self.client).await, } } - fn update_rates(&self) -> impl Future { + async fn update_rates(&self) -> Result<(), ()> { let consecutive_failed_polls = self.consecutive_failed_polls.clone(); let consecutive_failed_polls_zeroer = consecutive_failed_polls.clone(); let failed_polls_before_invalidation = self.failed_polls_before_invalidation; let store = self.store.clone(); let store_clone = self.store.clone(); let provider = self.provider.clone(); - self.fetch_rates() + let mut rates = self.fetch_rates() .map_err(move |_| { // Note that a race between the read on this line and the check on the line after // is quite unlikely as long as the interval between polls is reasonable. @@ -311,29 +298,27 @@ where panic!("Failed to clear exchange rates cache after exchange rates server became unresponsive"); } } - }) - .and_then(move |mut rates| { - trace!("Fetched exchange rates: {:?}", rates); - let num_rates = rates.len(); - rates.insert("USD".to_string(), 1.0); - if store_clone.set_exchange_rates(rates).is_ok() { - // Reset our invalidation counter - consecutive_failed_polls_zeroer.store(0, Ordering::Relaxed); - debug!("Updated {} exchange rates from {:?}", num_rates, provider); - Ok(()) - } else { - error!("Error setting exchange rates in store"); - Err(()) - } - }) + }).await?; + + trace!("Fetched exchange rates: {:?}", rates); + let num_rates = rates.len(); + rates.insert("USD".to_string(), 1.0); + if store_clone.set_exchange_rates(rates).is_ok() { + // Reset our invalidation counter + consecutive_failed_polls_zeroer.store(0, Ordering::Relaxed); + debug!("Updated {} exchange rates from {:?}", num_rates, provider); + Ok(()) + } else { + error!("Error setting exchange rates in store"); + Err(()) + } } } #[cfg(test)] mod tests { use super::*; - use futures::{future::ok, Future}; - use interledger_packet::{Address, FulfillBuilder, PrepareBuilder}; + use interledger_packet::{Address, Fulfill, FulfillBuilder, PrepareBuilder, Reject}; 
use interledger_service::{outgoing_service_fn, Account}; use lazy_static::lazy_static; use std::collections::HashMap; @@ -348,21 +333,21 @@ mod tests { pub static ref ALICE: Username = Username::from_str("alice").unwrap(); } - #[test] - fn exchange_rate_ok() { + #[tokio::test] + async fn exchange_rate_ok() { // if `to` is worth $2, and `from` is worth 1, then they receive half // the amount of units - let ret = exchange_rate(200, 1, 1.0, 1, 2.0, 0.0); + let ret = exchange_rate(200, 1, 1.0, 1, 2.0, 0.0).await; assert_eq!(ret.1[0].prepare.amount(), 100); - let ret = exchange_rate(1_000_000, 1, 3.0, 1, 2.0, 0.0); + let ret = exchange_rate(1_000_000, 1, 3.0, 1, 2.0, 0.0).await; assert_eq!(ret.1[0].prepare.amount(), 1_500_000); } - #[test] - fn exchange_conversion_error() { + #[tokio::test] + async fn exchange_conversion_error() { // rejects f64 that does not fit in u64 - let ret = exchange_rate(std::u64::MAX, 1, 2.0, 1, 1.0, 0.0); + let ret = exchange_rate(std::u64::MAX, 1, 2.0, 1, 1.0, 0.0).await; let reject = ret.0.unwrap_err(); assert_eq!(reject.code(), ErrorCode::F08_AMOUNT_TOO_LARGE); assert!(reject @@ -370,7 +355,7 @@ mod tests { .starts_with(b"Could not cast to f64, amount too large")); // rejects f64 which gets rounded down to 0 - let ret = exchange_rate(1, 2, 1.0, 1, 1.0, 0.0); + let ret = exchange_rate(1, 2, 1.0, 1, 1.0, 0.0).await; let reject = ret.0.unwrap_err(); assert_eq!(reject.code(), ErrorCode::R01_INSUFFICIENT_SOURCE_AMOUNT); assert!(reject @@ -378,38 +363,38 @@ mod tests { .starts_with(b"Could not cast to f64, amount too small")); // `Convert` errored - let ret = exchange_rate(std::u64::MAX, 1, std::f64::MAX, 255, 1.0, 0.0); + let ret = exchange_rate(std::u64::MAX, 1, std::f64::MAX, 255, 1.0, 0.0).await; let reject = ret.0.unwrap_err(); assert_eq!(reject.code(), ErrorCode::F08_AMOUNT_TOO_LARGE); assert!(reject.message().starts_with(b"Could not convert")); } - #[test] - fn applies_spread() { - let ret = exchange_rate(100, 1, 1.0, 1, 2.0, 0.01); + 
#[tokio::test] + async fn applies_spread() { + let ret = exchange_rate(100, 1, 1.0, 1, 2.0, 0.01).await; assert_eq!(ret.1[0].prepare.amount(), 49); // Negative spread is unusual but possible - let ret = exchange_rate(200, 1, 1.0, 1, 2.0, -0.01); + let ret = exchange_rate(200, 1, 1.0, 1, 2.0, -0.01).await; assert_eq!(ret.1[0].prepare.amount(), 101); // Rounds down - let ret = exchange_rate(4, 1, 1.0, 1, 2.0, 0.01); + let ret = exchange_rate(4, 1, 1.0, 1, 2.0, 0.01).await; // this would've been 2, but it becomes 1.99 and gets rounded down to 1 assert_eq!(ret.1[0].prepare.amount(), 1); // Spread >= 1 means the node takes everything - let ret = exchange_rate(10_000_000_000, 1, 1.0, 1, 2.0, 1.0); + let ret = exchange_rate(10_000_000_000, 1, 1.0, 1, 2.0, 1.0).await; assert_eq!(ret.1[0].prepare.amount(), 0); // Need to catch when spread > 1 - let ret = exchange_rate(10_000_000_000, 1, 1.0, 1, 2.0, 2.0); + let ret = exchange_rate(10_000_000_000, 1, 1.0, 1, 2.0, 2.0).await; assert_eq!(ret.1[0].prepare.amount(), 0); } // Instantiates an exchange rate service and returns the fulfill/reject // packet and the outgoing request after performing an asset conversion - fn exchange_rate( + async fn exchange_rate( amount: u64, scale1: u8, rate1: f64, @@ -421,11 +406,11 @@ mod tests { let requests_clone = requests.clone(); let outgoing = outgoing_service_fn(move |request| { requests_clone.lock().unwrap().push(request); - Box::new(ok(FulfillBuilder { + Ok(FulfillBuilder { fulfillment: &[0; 32], data: b"hello!", } - .build())) + .build()) }); let mut service = test_service(rate1, rate2, spread, outgoing); let result = service @@ -442,7 +427,7 @@ mod tests { } .build(), }) - .wait(); + .await; let reqs = requests.lock().unwrap(); (result, reqs.clone()) @@ -464,16 +449,14 @@ mod tests { } } + #[async_trait] impl AddressStore for TestStore { /// Saves the ILP Address in the store's memory and database - fn set_ilp_address( - &self, - _ilp_address: Address, - ) -> Box + Send> { + async fn 
set_ilp_address(&self, _ilp_address: Address) -> Result<(), ()> { unimplemented!() } - fn clear_ilp_address(&self) -> Box + Send> { + async fn clear_ilp_address(&self) -> Result<(), ()> { unimplemented!() } diff --git a/crates/interledger-service-util/src/expiry_shortener_service.rs b/crates/interledger-service-util/src/expiry_shortener_service.rs index db5658447..d46d98572 100644 --- a/crates/interledger-service-util/src/expiry_shortener_service.rs +++ b/crates/interledger-service-util/src/expiry_shortener_service.rs @@ -1,5 +1,6 @@ +use async_trait::async_trait; use chrono::{DateTime, Duration, Utc}; -use interledger_service::{Account, OutgoingRequest, OutgoingService}; +use interledger_service::{Account, IlpResult, OutgoingRequest, OutgoingService}; use log::trace; pub const DEFAULT_ROUND_TRIP_TIME: u32 = 500; @@ -39,19 +40,18 @@ impl ExpiryShortenerService { } } +#[async_trait] impl OutgoingService for ExpiryShortenerService where - O: OutgoingService, - A: RoundTripTimeAccount, + O: OutgoingService + Send + Sync + 'static, + A: RoundTripTimeAccount + Send + Sync + 'static, { - type Future = O::Future; - /// On send request: /// 1. Get the sender and receiver's roundtrip time (default 1000ms) /// 2. Reduce the packet's expiry by that amount /// 3. Ensure that the packet expiry does not exceed the maximum expiry duration /// 4. 
Forward the request - fn send_request(&mut self, mut request: OutgoingRequest) -> Self::Future { + async fn send_request(&mut self, mut request: OutgoingRequest) -> IlpResult { let time_to_subtract = i64::from(request.from.round_trip_time() + request.to.round_trip_time()); let new_expiry = DateTime::::from(request.prepare.expires_at()) @@ -70,14 +70,13 @@ where }; request.prepare.set_expires_at(new_expiry.into()); - self.next.send_request(request) + self.next.send_request(request).await } } #[cfg(test)] mod tests { use super::*; - use futures::Future; use interledger_packet::{Address, ErrorCode, FulfillBuilder, PrepareBuilder, RejectBuilder}; use interledger_service::{outgoing_service_fn, Username}; use std::str::FromStr; @@ -121,8 +120,8 @@ mod tests { } } - #[test] - fn shortens_expiry_by_round_trip_time() { + #[tokio::test] + async fn shortens_expiry_by_round_trip_time() { let original_expiry = Utc::now() + Duration::milliseconds(30000); let mut service = ExpiryShortenerService::new(outgoing_service_fn(move |request| { if DateTime::::from(request.prepare.expires_at()) @@ -157,12 +156,12 @@ mod tests { .build(), original_amount: 10, }) - .wait() + .await .expect("Should have shortened expiry"); } - #[test] - fn reduces_expiry_to_max_duration() { + #[tokio::test] + async fn reduces_expiry_to_max_duration() { let mut service = ExpiryShortenerService::new(outgoing_service_fn(move |request| { if DateTime::::from(request.prepare.expires_at()) - Utc::now() <= Duration::milliseconds(30000) @@ -196,7 +195,7 @@ mod tests { .build(), original_amount: 10, }) - .wait() + .await .expect("Should have shortened expiry"); } } diff --git a/crates/interledger-service-util/src/max_packet_amount_service.rs b/crates/interledger-service-util/src/max_packet_amount_service.rs index 3dc0aeac0..e51054fc5 100644 --- a/crates/interledger-service-util/src/max_packet_amount_service.rs +++ b/crates/interledger-service-util/src/max_packet_amount_service.rs @@ -1,4 +1,4 @@ -use 
futures::future::err; +use async_trait::async_trait; use interledger_packet::{ErrorCode, MaxPacketAmountDetails, RejectBuilder}; use interledger_service::*; use log::debug; @@ -27,21 +27,20 @@ impl MaxPacketAmountService { } } +#[async_trait] impl IncomingService for MaxPacketAmountService where - I: IncomingService, - S: AddressStore, - A: MaxPacketAmountAccount, + I: IncomingService + Send + Sync + 'static, + S: AddressStore + Send + Sync + 'static, + A: MaxPacketAmountAccount + Send + Sync + 'static, { - type Future = BoxedIlpFuture; - /// On receive request: /// 1. if request.prepare.amount <= request.from.max_packet_amount forward the request, else error - fn handle_request(&mut self, request: IncomingRequest) -> Self::Future { + async fn handle_request(&mut self, request: IncomingRequest) -> IlpResult { let ilp_address = self.store.get_ilp_address(); let max_packet_amount = request.from.max_packet_amount(); if request.prepare.amount() <= max_packet_amount { - Box::new(self.next.handle_request(request)) + self.next.handle_request(request).await } else { debug!( "Prepare amount:{} exceeds max_packet_amount: {}", @@ -50,13 +49,13 @@ where ); let details = MaxPacketAmountDetails::new(request.prepare.amount(), max_packet_amount).to_bytes(); - Box::new(err(RejectBuilder { + Err(RejectBuilder { code: ErrorCode::F08_AMOUNT_TOO_LARGE, message: &[], triggered_by: Some(&ilp_address), data: &details[..], } - .build())) + .build()) } } } diff --git a/crates/interledger-service-util/src/rate_limit_service.rs b/crates/interledger-service-util/src/rate_limit_service.rs index 28b5627f1..d18ed5a12 100644 --- a/crates/interledger-service-util/src/rate_limit_service.rs +++ b/crates/interledger-service-util/src/rate_limit_service.rs @@ -1,11 +1,6 @@ -use futures::{ - future::{err, Either}, - Future, -}; +use async_trait::async_trait; use interledger_packet::{ErrorCode, RejectBuilder}; -use interledger_service::{ - Account, AddressStore, BoxedIlpFuture, IncomingRequest, 
IncomingService, -}; +use interledger_service::{Account, AddressStore, IlpResult, IncomingRequest, IncomingService}; use log::{error, warn}; use std::marker::PhantomData; @@ -26,19 +21,21 @@ pub enum RateLimitError { StoreError, } +#[async_trait] pub trait RateLimitStore { type Account: RateLimitAccount; - fn apply_rate_limits( + async fn apply_rate_limits( &self, account: Self::Account, prepare_amount: u64, - ) -> Box + Send>; - fn refund_throughput_limit( + ) -> Result<(), RateLimitError>; + + async fn refund_throughput_limit( &self, account: Self::Account, prepare_amount: u64, - ) -> Box + Send>; + ) -> Result<(), ()>; } /// # Rate Limit Service @@ -74,21 +71,20 @@ where } } +#[async_trait] impl IncomingService for RateLimitService where S: AddressStore + RateLimitStore + Clone + Send + Sync + 'static, I: IncomingService + Clone + Send + Sync + 'static, A: RateLimitAccount + Sync + 'static, { - type Future = BoxedIlpFuture; - /// On receiving a request: /// 1. Apply rate limit based on the sender of the request and the amount in the prepare packet in the request /// 1. If no limits were hit forward the request /// - If it succeeds, OK /// - If the request forwarding failed, the client should not be charged towards their throughput limit, so they are refunded, and return a reject /// 1. If the limit was hit, return a reject with the appropriate ErrorCode. - fn handle_request(&mut self, request: IncomingRequest) -> Self::Future { + async fn handle_request(&mut self, request: IncomingRequest) -> IlpResult { let ilp_address = self.store.get_ilp_address(); let mut next = self.next.clone(); let store = self.store.clone(); @@ -98,43 +94,47 @@ where let has_throughput_limit = account.amount_per_minute_limit().is_some(); // request.from and request.amount are used for apply_rate_limits, can't the previous service // always set the account to have None for both? 
- Box::new(self.store.apply_rate_limits(request.from.clone(), request.prepare.amount()) - .map_err(move |err| { + match self + .store + .apply_rate_limits(request.from.clone(), request.prepare.amount()) + .await + { + Ok(_) => next.handle_request(request).await, + Err(err) => { let code = match err { RateLimitError::PacketLimitExceeded => { if let Some(limit) = account.packets_per_minute_limit() { warn!("Account {} was rate limited for sending too many packets. Limit is: {} per minute", account.id(), limit); } ErrorCode::T05_RATE_LIMITED - }, + } RateLimitError::ThroughputLimitExceeded => { if let Some(limit) = account.amount_per_minute_limit() { warn!("Account {} was throughput limited for trying to send too much money. Limit is: {} per minute", account.id(), limit); } ErrorCode::T04_INSUFFICIENT_LIQUIDITY - }, + } RateLimitError::StoreError => ErrorCode::T00_INTERNAL_ERROR, }; - RejectBuilder { + + let reject = RejectBuilder { code, triggered_by: Some(&ilp_address), message: &[], data: &[], - }.build() - }) - .and_then(move |_| next.handle_request(request)) - .or_else(move |reject| { + } + .build(); + if has_throughput_limit { - Either::A(store.refund_throughput_limit(account_clone, prepare_amount) - .then(|result| { - if let Err(err) = result { - error!("Error refunding throughput limit: {:?}", err); - } - Err(reject) - })) - } else { - Either::B(err(reject)) + if let Err(err) = store + .refund_throughput_limit(account_clone, prepare_amount) + .await + { + error!("Error refunding throughput limit: {:?}", err); + } } - })) + Err(reject) + } + } } } diff --git a/crates/interledger-service-util/src/validator_service.rs b/crates/interledger-service-util/src/validator_service.rs index d8a04267a..95fa0fced 100644 --- a/crates/interledger-service-util/src/validator_service.rs +++ b/crates/interledger-service-util/src/validator_service.rs @@ -1,12 +1,11 @@ +use async_trait::async_trait; use chrono::{DateTime, Duration, Utc}; -use futures::{future::err, Future}; use hex; 
use interledger_packet::{ErrorCode, RejectBuilder}; use interledger_service::*; use log::error; use ring::digest::{digest, SHA256}; use std::marker::PhantomData; -use tokio::prelude::FutureExt; /// # Validator Service /// @@ -51,21 +50,20 @@ where } } +#[async_trait] impl IncomingService for ValidatorService where - I: IncomingService, - S: AddressStore, - A: Account, + I: IncomingService + Send + Sync, + S: AddressStore + Send + Sync, + A: Account + Send + Sync, { - type Future = BoxedIlpFuture; - /// On receiving a request: /// 1. If the prepare packet in the request is not expired, forward it, otherwise return a reject - fn handle_request(&mut self, request: IncomingRequest) -> Self::Future { + async fn handle_request(&mut self, request: IncomingRequest) -> IlpResult { let expires_at = DateTime::::from(request.prepare.expires_at()); let now = Utc::now(); if expires_at >= now { - Box::new(self.next.handle_request(request)) + self.next.handle_request(request).await } else { error!( "Incoming packet expired {}ms ago at {:?} (time now: {:?})", @@ -73,26 +71,24 @@ where expires_at.to_rfc3339(), expires_at.to_rfc3339(), ); - let result = Box::new(err(RejectBuilder { + Err(RejectBuilder { code: ErrorCode::R00_TRANSFER_TIMED_OUT, message: &[], triggered_by: Some(&self.store.get_ilp_address()), data: &[], } - .build())); - Box::new(result) + .build()) } } } +#[async_trait] impl OutgoingService for ValidatorService where - O: OutgoingService, - S: AddressStore, - A: Account, + O: OutgoingService + Send + Sync, + S: AddressStore + Send + Sync, + A: Account + Send + Sync, { - type Future = BoxedIlpFuture; - /// On sending a request: /// 1. If the outgoing packet has expired, return a reject with the appropriate ErrorCode /// 1. Tries to forward the request @@ -101,7 +97,7 @@ where /// - If the forwarding is successful, it should receive a fulfill packet. 
Depending on if the hash of the fulfillment condition inside the fulfill is a preimage of the condition of the prepare: /// - return the fulfill if it matches /// - otherwise reject - fn send_request(&mut self, request: OutgoingRequest) -> Self::Future { + async fn send_request(&mut self, request: OutgoingRequest) -> IlpResult { let mut condition: [u8; 32] = [0; 32]; condition[..].copy_from_slice(request.prepare.execution_condition()); // why? @@ -109,60 +105,58 @@ where let now = Utc::now(); let time_left = expires_at - now; let ilp_address = self.store.get_ilp_address(); - let ilp_address_clone = ilp_address.clone(); if time_left > Duration::zero() { - Box::new( - self.next - .send_request(request) - .timeout(time_left.to_std().expect("Time left must be positive")) - .map_err(move |err| { - // If the error was caused by the timer, into_inner will return None - if let Some(reject) = err.into_inner() { - reject - } else { - error!( - "Outgoing request timed out after {}ms (expiry was: {})", - time_left.num_milliseconds(), - expires_at, - ); - RejectBuilder { - code: ErrorCode::R00_TRANSFER_TIMED_OUT, - message: &[], - triggered_by: Some(&ilp_address_clone), - data: &[], - } - .build() - } - }) - .and_then(move |fulfill| { - let generated_condition = digest(&SHA256, fulfill.fulfillment()); - if generated_condition.as_ref() == condition { - Ok(fulfill) - } else { - error!("Fulfillment did not match condition. Fulfillment: {}, hash: {}, actual condition: {}", hex::encode(fulfill.fulfillment()), hex::encode(generated_condition), hex::encode(condition)); - Err(RejectBuilder { - code: ErrorCode::F09_INVALID_PEER_RESPONSE, - message: b"Fulfillment did not match condition", - triggered_by: Some(&ilp_address), - data: &[], - } - .build()) - } - }), - ) + let fulfill = self + .next + .send_request(request) + // TODO: Re-enable timeout! 
+ // .timeout(time_left.to_std().expect("Time left must be positive")) + // .map_err(move |err| { + // // If the error was caused by the timer, into_inner will return None + // if let Some(reject) = err.into_inner() { + // reject + // } else { + // error!( + // "Outgoing request timed out after {}ms (expiry was: {})", + // time_left.num_milliseconds(), + // expires_at, + // ); + // RejectBuilder { + // code: ErrorCode::R00_TRANSFER_TIMED_OUT, + // message: &[], + // triggered_by: Some(&ilp_address_clone), + // data: &[], + // } + // .build() + // } + // }) + .await?; + let generated_condition = digest(&SHA256, fulfill.fulfillment()); + if generated_condition.as_ref() == condition { + Ok(fulfill) + } else { + error!("Fulfillment did not match condition. Fulfillment: {}, hash: {}, actual condition: {}", hex::encode(fulfill.fulfillment()), hex::encode(generated_condition), hex::encode(condition)); + Err(RejectBuilder { + code: ErrorCode::F09_INVALID_PEER_RESPONSE, + message: b"Fulfillment did not match condition", + triggered_by: Some(&ilp_address), + data: &[], + } + .build()) + } } else { error!( "Outgoing packet expired {}ms ago", (Duration::zero() - time_left).num_milliseconds(), ); // Already expired - Box::new(err(RejectBuilder { + Err(RejectBuilder { code: ErrorCode::R00_TRANSFER_TIMED_OUT, message: &[], triggered_by: Some(&ilp_address), data: &[], } - .build())) + .build()) } } } @@ -212,16 +206,14 @@ impl Account for TestAccount { struct TestStore; #[cfg(test)] +#[async_trait] impl AddressStore for TestStore { /// Saves the ILP Address in the store's memory and database - fn set_ilp_address( - &self, - _ilp_address: Address, - ) -> Box + Send> { + async fn set_ilp_address(&self, _ilp_address: Address) -> Result<(), ()> { unimplemented!() } - fn clear_ilp_address(&self) -> Box + Send> { + async fn clear_ilp_address(&self) -> Result<(), ()> { unimplemented!() } @@ -241,8 +233,8 @@ mod incoming { time::{Duration, SystemTime}, }; - #[test] - fn 
lets_through_valid_incoming_packet() { + #[tokio::test] + async fn lets_through_valid_incoming_packet() { let requests = Arc::new(Mutex::new(Vec::new())); let requests_clone = requests.clone(); let mut validator = ValidatorService::incoming( @@ -271,14 +263,14 @@ mod incoming { } .build(), }) - .wait(); + .await; assert_eq!(requests.lock().unwrap().len(), 1); assert!(result.is_ok()); } - #[test] - fn rejects_expired_incoming_packet() { + #[tokio::test] + async fn rejects_expired_incoming_packet() { let requests = Arc::new(Mutex::new(Vec::new())); let requests_clone = requests.clone(); let mut validator = ValidatorService::incoming( @@ -307,7 +299,7 @@ mod incoming { } .build(), }) - .wait(); + .await; assert!(requests.lock().unwrap().is_empty()); assert!(result.is_err()); @@ -328,30 +320,8 @@ mod outgoing { time::{Duration, SystemTime}, }; - #[derive(Clone)] - struct TestStore; - - impl AddressStore for TestStore { - /// Saves the ILP Address in the store's memory and database - fn set_ilp_address( - &self, - _ilp_address: Address, - ) -> Box + Send> { - unimplemented!() - } - - fn clear_ilp_address(&self) -> Box + Send> { - unimplemented!() - } - - /// Get's the store's ilp address from memory - fn get_ilp_address(&self) -> Address { - Address::from_str("example.connector").unwrap() - } - } - - #[test] - fn lets_through_valid_outgoing_response() { + #[tokio::test] + async fn lets_through_valid_outgoing_response() { let requests = Arc::new(Mutex::new(Vec::new())); let requests_clone = requests.clone(); let mut validator = ValidatorService::outgoing( @@ -382,14 +352,14 @@ mod outgoing { } .build(), }) - .wait(); + .await; assert_eq!(requests.lock().unwrap().len(), 1); assert!(result.is_ok()); } - #[test] - fn returns_reject_instead_of_invalid_fulfillment() { + #[tokio::test] + async fn returns_reject_instead_of_invalid_fulfillment() { let requests = Arc::new(Mutex::new(Vec::new())); let requests_clone = requests.clone(); let mut validator = 
ValidatorService::outgoing( @@ -420,7 +390,7 @@ mod outgoing { } .build(), }) - .wait(); + .await; assert_eq!(requests.lock().unwrap().len(), 1); assert!(result.is_err()); diff --git a/crates/interledger-service/Cargo.toml b/crates/interledger-service/Cargo.toml index 77328b94c..e8ecb4ee7 100644 --- a/crates/interledger-service/Cargo.toml +++ b/crates/interledger-service/Cargo.toml @@ -12,16 +12,17 @@ default = [] trace = ["tracing-futures"] [dependencies] -futures = { version = "0.1.29", default-features = true } +futures = { version = "0.3", default-features = true } interledger-packet = { path = "../interledger-packet", version = "^0.4.0", default-features = false } serde = { version = "1.0.101", default-features = false, features = ["derive"] } base64 = { version = "0.10.1", default-features = false } regex = { version = "1.3.1", default-features = false, features = ["std", "unicode-perl"] } lazy_static = { version = "1.4.0", default-features = false } -tracing-futures = { version = "0.1.1", default-features = true, features = ["tokio", "futures-01"], optional = true } +tracing-futures = { version = "0.2", default-features = true, features = ["tokio", "futures-03"], optional = true } unicase = { version = "2.5.1", default-features = false } unicode-normalization = { version = "0.1.8", default-features = false } uuid = { version = "0.8.1", default-features = false} +async-trait = "0.1.22" [dev-dependencies] serde_json = { version = "1.0.41", default-features = false } diff --git a/crates/interledger-service/src/lib.rs b/crates/interledger-service/src/lib.rs index abc6055c8..a3a108eba 100644 --- a/crates/interledger-service/src/lib.rs +++ b/crates/interledger-service/src/lib.rs @@ -26,10 +26,11 @@ //! //! 
HttpServerService --> ValidatorService --> StreamReceiverService -use futures::{Future, IntoFuture}; +use async_trait::async_trait; use interledger_packet::{Address, Fulfill, Prepare, Reject}; use std::{ fmt::{self, Debug}, + future::Future, marker::PhantomData, sync::Arc, }; @@ -37,10 +38,11 @@ use uuid::Uuid; mod username; pub use username::Username; +// TODO: Temporarily disable until we figure out what's going on with tracing and async_trait #[cfg(feature = "trace")] mod trace; -#[cfg(feature = "trace")] -pub use trace::*; + +pub type IlpResult = Result; /// The base trait that Account types from other Services extend. /// This trait only assumes that the account has an ID that can be compared with others. @@ -59,7 +61,9 @@ pub trait Account: Clone + Send + Sized + Debug { /// A struct representing an incoming ILP Prepare packet or an outgoing one before the next hop is set. #[derive(Clone)] pub struct IncomingRequest { + /// The account which the request originates from pub from: A, + /// The prepare packet attached to the request pub prepare: Prepare, } @@ -80,9 +84,13 @@ where /// A struct representing an ILP Prepare packet with the incoming and outgoing accounts set. #[derive(Clone)] pub struct OutgoingRequest { + /// The account which the request originates from pub from: A, + /// The account which the packet is being sent to pub to: A, + /// The amount attached to the packet by its original sender pub original_amount: u64, + /// The prepare packet attached to the request pub prepare: Prepare, } @@ -118,10 +126,14 @@ where } /// Core service trait for handling IncomingRequests that asynchronously returns an ILP Fulfill or Reject packet. +#[async_trait] pub trait IncomingService { - type Future: Future + Send + 'static; + // type Future: Unpin + Future + Send + 'static; - fn handle_request(&mut self, request: IncomingRequest) -> Self::Future; + /// Receives an Incoming request, and modifies it in place and passes it + /// to the next service. 
Alternatively, if the packet was intended for the service, + /// it returns an ILP Fulfill or Reject packet. + async fn handle_request(&mut self, request: IncomingRequest) -> IlpResult; /// Wrap the given service such that the provided function will /// be called to handle each request. That function can @@ -130,7 +142,7 @@ pub trait IncomingService { fn wrap(self, f: F) -> WrappedService where F: Fn(IncomingRequest, Self) -> R, - R: Future + Send + 'static, + R: Future + Send + 'static, Self: Clone + Sized, { WrappedService::wrap_incoming(self, f) @@ -138,10 +150,12 @@ pub trait IncomingService { } /// Core service trait for sending OutgoingRequests that asynchronously returns an ILP Fulfill or Reject packet. +#[async_trait] pub trait OutgoingService { - type Future: Future + Send + 'static; - - fn send_request(&mut self, request: OutgoingRequest) -> Self::Future; + /// Receives an Outgoing request, and modifies it in place and passes it + /// to the next service. Alternatively, if the packet was intended for the service, + /// it returns an ILP Fulfill or Reject packet. + async fn send_request(&mut self, request: OutgoingRequest) -> IlpResult; /// Wrap the given service such that the provided function will /// be called to handle each request. That function can @@ -150,37 +164,39 @@ pub trait OutgoingService { fn wrap(self, f: F) -> WrappedService where F: Fn(OutgoingRequest, Self) -> R, - R: Future + Send + 'static, + R: Future + Send + 'static, Self: Clone + Sized, { WrappedService::wrap_outgoing(self, f) } } -/// A future that returns an ILP Fulfill or Reject packet. -pub type BoxedIlpFuture = Box + Send + 'static>; - /// The base Store trait that can load a given account based on the ID. +#[async_trait] pub trait AccountStore { + /// The provided account type. Must implement the `Account` trait. 
type Account: Account; - fn get_accounts( + /// Loads the accounts which correspond to the provided account ids + async fn get_accounts( &self, + // The account ids (UUID format) of the accounts you are fetching account_ids: Vec, - ) -> Box, Error = ()> + Send>; + ) -> Result, ()>; - fn get_account_id_from_username( + /// Loads the account id which corresponds to the provided username + async fn get_account_id_from_username( &self, + // The username of the account you are fetching username: &Username, - ) -> Box + Send>; + ) -> Result; } /// Create an IncomingService that calls the given handler for each request. -pub fn incoming_service_fn(handler: F) -> ServiceFn +pub fn incoming_service_fn(handler: F) -> ServiceFn where A: Account, - B: IntoFuture, - F: FnMut(IncomingRequest) -> B, + F: FnMut(IncomingRequest) -> IlpResult, { ServiceFn { handler, @@ -189,11 +205,10 @@ where } /// Create an OutgoingService that calls the given handler for each request. -pub fn outgoing_service_fn(handler: F) -> ServiceFn +pub fn outgoing_service_fn(handler: F) -> ServiceFn where A: Account, - B: IntoFuture, - F: FnMut(OutgoingRequest) -> B, + F: FnMut(OutgoingRequest) -> IlpResult, { ServiceFn { handler, @@ -208,31 +223,25 @@ pub struct ServiceFn { account_type: PhantomData, } -impl IncomingService for ServiceFn +#[async_trait] +impl IncomingService for ServiceFn where A: Account, - B: IntoFuture, - ::Future: std::marker::Send + 'static, - F: FnMut(IncomingRequest) -> B, + F: FnMut(IncomingRequest) -> IlpResult + Send, { - type Future = BoxedIlpFuture; - - fn handle_request(&mut self, request: IncomingRequest) -> Self::Future { - Box::new((self.handler)(request).into_future()) + async fn handle_request(&mut self, request: IncomingRequest) -> IlpResult { + (self.handler)(request) } } -impl OutgoingService for ServiceFn +#[async_trait] +impl OutgoingService for ServiceFn where A: Account, - B: IntoFuture, - ::Future: std::marker::Send + 'static, - F: FnMut(OutgoingRequest) -> B, + 
F: FnMut(OutgoingRequest) -> IlpResult + Send, { - type Future = BoxedIlpFuture; - - fn send_request(&mut self, request: OutgoingRequest) -> Self::Future { - Box::new((self.handler)(request).into_future()) + async fn send_request(&mut self, request: OutgoingRequest) -> IlpResult { + (self.handler)(request) } } @@ -254,7 +263,7 @@ where F: Fn(IncomingRequest, IO) -> R, IO: IncomingService + Clone, A: Account, - R: Future + Send + 'static, + R: Future + Send + 'static, { /// Wrap the given service such that the provided function will /// be called to handle each request. That function can @@ -269,17 +278,16 @@ where } } +#[async_trait] impl IncomingService for WrappedService where - F: Fn(IncomingRequest, IO) -> R, - IO: IncomingService + Clone, + F: Fn(IncomingRequest, IO) -> R + Send + Sync, + IO: IncomingService + Send + Sync + Clone, A: Account, - R: Future + Send + 'static, + R: Future + Send + 'static, { - type Future = R; - - fn handle_request(&mut self, request: IncomingRequest) -> R { - (self.f)(request, (*self.inner).clone()) + async fn handle_request(&mut self, request: IncomingRequest) -> IlpResult { + (self.f)(request, (*self.inner).clone()).await } } @@ -288,7 +296,7 @@ where F: Fn(OutgoingRequest, IO) -> R, IO: OutgoingService + Clone, A: Account, - R: Future + Send + 'static, + R: Future + Send + 'static, { /// Wrap the given service such that the provided function will /// be called to handle each request. 
That function can @@ -303,29 +311,38 @@ where } } +#[async_trait] impl OutgoingService for WrappedService where - F: Fn(OutgoingRequest, IO) -> R, - IO: OutgoingService + Clone, + F: Fn(OutgoingRequest, IO) -> R + Send + Sync, + IO: OutgoingService + Clone + Send + Sync, A: Account, - R: Future + Send + 'static, + R: Future + Send + 'static, { - type Future = R; - - fn send_request(&mut self, request: OutgoingRequest) -> R { - (self.f)(request, (*self.inner).clone()) + async fn send_request(&mut self, request: OutgoingRequest) -> IlpResult { + (self.f)(request, (*self.inner).clone()).await } } +/// A store responsible for managing the node's ILP Address. When +/// an account is added as a parent via the REST API, the node will +/// perform an ILDCP request to it. The parent will then return the ILP Address +/// which has been assigned to the node. The node will then proceed to set its +/// ILP Address to that value. +#[async_trait] pub trait AddressStore: Clone { - /// Saves the ILP Address in the store's memory and database - fn set_ilp_address( + /// Saves the ILP Address in the database AND in the store's memory so that it can + /// be read without read overhead + async fn set_ilp_address( &self, + // The new ILP Address of the node ilp_address: Address, - ) -> Box + Send>; + ) -> Result<(), ()>; - fn clear_ilp_address(&self) -> Box + Send>; + /// Resets the node's ILP Address to local.host + async fn clear_ilp_address(&self) -> Result<(), ()>; - /// Get's the store's ilp address from memory + /// Gets the node's ILP Address *synchronously* + /// (the value is stored in memory because it is read often by all services) fn get_ilp_address(&self) -> Address; } diff --git a/crates/interledger-service/src/trace.rs b/crates/interledger-service/src/trace.rs index 0e37539bf..8722f0bb2 100644 --- a/crates/interledger-service/src/trace.rs +++ b/crates/interledger-service/src/trace.rs @@ -1,31 +1,36 @@ use crate::*; +use async_trait::async_trait; use 
tracing_futures::{Instrument, Instrumented}; // TODO see if we can replace this with the tower tracing later +#[async_trait] impl IncomingService for Instrumented where - IO: IncomingService + Clone, - A: Account, + IO: IncomingService + Clone + Send, + A: Account + 'static, { - type Future = Instrumented; - - fn handle_request(&mut self, request: IncomingRequest) -> Self::Future { + async fn handle_request(&mut self, request: IncomingRequest) -> IlpResult { let span = self.span().clone(); let _enter = span.enter(); - self.inner_mut().handle_request(request).in_current_span() + self.inner_mut() + .handle_request(request) + .in_current_span() + .await } } +#[async_trait] impl OutgoingService for Instrumented where - IO: OutgoingService + Clone, - A: Account, + IO: OutgoingService + Clone + Send, + A: Account + 'static, { - type Future = Instrumented; - - fn send_request(&mut self, request: OutgoingRequest) -> Self::Future { + async fn send_request(&mut self, request: OutgoingRequest) -> IlpResult { let span = self.span().clone(); let _enter = span.enter(); - self.inner_mut().send_request(request).in_current_span() + self.inner_mut() + .send_request(request) + .in_current_span() + .await } } diff --git a/crates/interledger-settlement/Cargo.toml b/crates/interledger-settlement/Cargo.toml index 0060f6f57..31eff415d 100644 --- a/crates/interledger-settlement/Cargo.toml +++ b/crates/interledger-settlement/Cargo.toml @@ -9,13 +9,14 @@ repository = "https://github.com/interledger-rs/interledger-rs" [dependencies] bytes = { version = "0.4.12", default-features = false } -futures = { version = "0.1.29", default-features = false } -hyper = { version = "0.12.35", default-features = false } +bytes05 = { package = "bytes", version = "0.5", default-features = false } +futures = { version = "0.3.1", default-features = false, features = ["compat"] } +hyper = { version = "0.13.1", default-features = false } interledger-http = { path = "../interledger-http", version = 
"^0.4.0", default-features = false } interledger-packet = { path = "../interledger-packet", version = "^0.4.0", default-features = false } interledger-service = { path = "../interledger-service", version = "^0.4.0", default-features = false } log = { version = "0.4.8", default-features = false } -reqwest = { version = "0.9.22", default-features = false, features = ["default-tls"] } +reqwest = { version = "0.10", default-features = false, features = ["default-tls", "json"] } serde = { version = "1.0.101", default-features = false } serde_json = { version = "1.0.41", default-features = false } url = { version = "2.1.0", default-features = false } @@ -23,12 +24,15 @@ lazy_static = { version = "1.4.0", default-features = false } uuid = { version = "0.8.1", default-features = false, features = ["v4"] } ring = { version = "0.16.9", default-features = false } tokio-retry = { version = "0.2.0", default-features = false } -tokio = { version = "0.1.22", default-features = false } +tokio = { version = "0.2.6", default-features = false, features = ["macros", "rt-core"] } num-bigint = { version = "0.2.3", default-features = false, features = ["std"] } num-traits = { version = "0.2.8", default-features = false } -warp = { version = "0.1.20", default-features = false } -http = "0.1.19" -redis_crate = { package = "redis", version = "0.13.0", optional = true } +# warp = { version = "0.1.20", default-features = false } +warp = { git = "https://github.com/seanmonstar/warp", default-features = false } +http = "0.2.0" +# redis_crate = { package = "redis", version = "0.13.0", optional = true } +redis_crate = { package = "redis", git = "https://github.com/mitsuhiko/redis-rs", optional = true, features = ["tokio-rt-core"] } +async-trait = "0.1.22" [dev-dependencies] parking_lot = { version = "0.9.0", default-features = false } diff --git a/crates/interledger-settlement/src/api/client.rs b/crates/interledger-settlement/src/api/client.rs index 9b8e4cb43..519202f81 100644 --- 
a/crates/interledger-settlement/src/api/client.rs +++ b/crates/interledger-settlement/src/api/client.rs @@ -1,11 +1,8 @@ use crate::core::types::{Quantity, SettlementAccount}; -use futures::{ - future::{err, Either}, - Future, -}; +use futures::TryFutureExt; use interledger_service::Account; use log::{debug, error, trace}; -use reqwest::r#async::Client; +use reqwest::Client; use serde_json::json; use uuid::Uuid; @@ -21,11 +18,11 @@ impl SettlementClient { } } - pub fn send_settlement( + pub async fn send_settlement( &self, account: A, amount: u64, - ) -> impl Future { + ) -> Result<(), ()> { if let Some(settlement_engine) = account.settlement_engine_details() { let mut settlement_engine_url = settlement_engine.url; settlement_engine_url @@ -40,23 +37,37 @@ impl SettlementClient { ); let settlement_engine_url_clone = settlement_engine_url.clone(); let idempotency_uuid = Uuid::new_v4().to_hyphenated().to_string(); - return Either::A(self.http_client.post(settlement_engine_url.as_ref()) + let response = self + .http_client + .post(settlement_engine_url.as_ref()) .header("Idempotency-Key", idempotency_uuid) .json(&json!(Quantity::new(amount, account.asset_scale()))) .send() - .map_err(move |err| error!("Error sending settlement command to settlement engine {}: {:?}", settlement_engine_url, err)) - .and_then(move |response| { - if response.status().is_success() { - trace!("Sent settlement of {} to settlement engine: {}", amount, settlement_engine_url_clone); - Ok(()) - } else { - error!("Error sending settlement. Settlement engine responded with HTTP code: {}", response.status()); - Err(()) - } - })); + .map_err(move |err| { + error!( + "Error sending settlement command to settlement engine {}: {:?}", + settlement_engine_url, err + ) + }) + .await?; + + if response.status().is_success() { + trace!( + "Sent settlement of {} to settlement engine: {}", + amount, + settlement_engine_url_clone + ); + return Ok(()); + } else { + error!( + "Error sending settlement. 
Settlement engine responded with HTTP code: {}", + response.status() + ); + return Err(()); + } } error!("Cannot send settlement for account {} because it does not have the settlement_engine_url and scale configured", account.id()); - Either::B(err(())) + Err(()) } } @@ -70,37 +81,37 @@ impl Default for SettlementClient { mod tests { use super::*; use crate::api::fixtures::TEST_ACCOUNT_0; - use crate::api::test_helpers::{block_on, mock_settlement}; + use crate::api::test_helpers::mock_settlement; use mockito::Matcher; - #[test] - fn settlement_ok() { + #[tokio::test] + async fn settlement_ok() { let m = mock_settlement(200) .match_header("Idempotency-Key", Matcher::Any) .create(); let client = SettlementClient::new(); - let ret = block_on(client.send_settlement(TEST_ACCOUNT_0.clone(), 100)); + let ret = client.send_settlement(TEST_ACCOUNT_0.clone(), 100).await; m.assert(); assert!(ret.is_ok()); } - #[test] - fn engine_rejects() { + #[tokio::test] + async fn engine_rejects() { let m = mock_settlement(500) .match_header("Idempotency-Key", Matcher::Any) .create(); let client = SettlementClient::new(); - let ret = block_on(client.send_settlement(TEST_ACCOUNT_0.clone(), 100)); + let ret = client.send_settlement(TEST_ACCOUNT_0.clone(), 100).await; m.assert(); assert!(ret.is_err()); } - #[test] - fn account_does_not_have_settlement_engine() { + #[tokio::test] + async fn account_does_not_have_settlement_engine() { let m = mock_settlement(200) .expect(0) .match_header("Idempotency-Key", Matcher::Any) @@ -109,7 +120,7 @@ mod tests { let mut acc = TEST_ACCOUNT_0.clone(); acc.no_details = true; // Hide the settlement engine data from the account - let ret = block_on(client.send_settlement(acc, 100)); + let ret = client.send_settlement(acc, 100).await; m.assert(); assert!(ret.is_err()); diff --git a/crates/interledger-settlement/src/api/message_service.rs b/crates/interledger-settlement/src/api/message_service.rs index f91adcc4c..f2689f0a9 100644 --- 
a/crates/interledger-settlement/src/api/message_service.rs +++ b/crates/interledger-settlement/src/api/message_service.rs @@ -1,12 +1,10 @@ use crate::core::types::{SettlementAccount, SE_ILP_ADDRESS}; -use futures::{ - future::{err, Either}, - Future, Stream, -}; +use async_trait::async_trait; +use futures::{compat::Future01CompatExt, TryFutureExt}; use interledger_packet::{ErrorCode, FulfillBuilder, RejectBuilder}; -use interledger_service::{Account, BoxedIlpFuture, IncomingRequest, IncomingService}; +use interledger_service::{Account, IlpResult, IncomingRequest, IncomingService}; use log::error; -use reqwest::r#async::Client; +use reqwest::Client; use std::marker::PhantomData; use tokio_retry::{strategy::ExponentialBackoff, Retry}; @@ -33,14 +31,13 @@ where } } +#[async_trait] impl IncomingService for SettlementMessageService where I: IncomingService + Send, - A: SettlementAccount + Account, + A: SettlementAccount + Account + Send + Sync, { - type Future = BoxedIlpFuture; - - fn handle_request(&mut self, request: IncomingRequest) -> Self::Future { + async fn handle_request(&mut self, request: IncomingRequest) -> IlpResult { // Only handle the request if the destination address matches the ILP address // of the settlement engine being used for this account if let Some(settlement_engine_details) = request.from.settlement_engine_details() { @@ -67,54 +64,75 @@ where .header("Idempotency-Key", idempotency_uuid.clone()) .body(message.clone()) .send() + .compat() // Wrap to a 0.1 future }; + // TODO: futures-retry is still not on futures 0.3. As a result, we wrap our action in a + // 0.1 future, and then wrap the Retry future in a 0.3 future to use async/await. 
- return Box::new(Retry::spawn(ExponentialBackoff::from_millis(10).take(10), action) - .map_err(move |error| { - error!("Error sending message to settlement engine: {:?}", error); - RejectBuilder { - code: ErrorCode::T00_INTERNAL_ERROR, - message: b"Error sending message to settlement engine", - data: &[], - triggered_by: Some(&SE_ILP_ADDRESS), - }.build() - }) - .and_then(move |response| { - let status = response.status(); - if status.is_success() { - Either::A(response.into_body().concat2().map_err(move |err| { - error!("Error concatenating settlement engine response body: {:?}", err); + let response = Retry::spawn(ExponentialBackoff::from_millis(10).take(10), action) + .compat() + .map_err(move |error| { + error!("Error sending message to settlement engine: {:?}", error); + RejectBuilder { + code: ErrorCode::T00_INTERNAL_ERROR, + message: b"Error sending message to settlement engine", + data: &[], + triggered_by: Some(&SE_ILP_ADDRESS), + } + .build() + }) + .await?; + let status = response.status(); + if status.is_success() { + let body = response + .bytes() + .map_err(|err| { + error!( + "Error concatenating settlement engine response body: {:?}", + err + ); RejectBuilder { code: ErrorCode::T00_INTERNAL_ERROR, message: b"Error getting settlement engine response", data: &[], triggered_by: Some(&SE_ILP_ADDRESS), - }.build() + } + .build() }) - .and_then(|body| { - Ok(FulfillBuilder { - fulfillment: &PEER_FULFILLMENT, - data: body.as_ref(), - }.build()) - })) + .await?; + + return Ok(FulfillBuilder { + fulfillment: &PEER_FULFILLMENT, + data: body.as_ref(), + } + .build()); + } else { + error!( + "Settlement engine rejected message with HTTP error code: {}", + response.status() + ); + let code = if status.is_client_error() { + ErrorCode::F00_BAD_REQUEST } else { - error!("Settlement engine rejected message with HTTP error code: {}", response.status()); - let code = if status.is_client_error() { - ErrorCode::F00_BAD_REQUEST - } else { - ErrorCode::T00_INTERNAL_ERROR 
- }; - Either::B(err(RejectBuilder { - code, - message: format!("Settlement engine rejected request with error code: {}", response.status()).as_str().as_ref(), - data: &[], - triggered_by: Some(&SE_ILP_ADDRESS), - }.build())) + ErrorCode::T00_INTERNAL_ERROR + }; + + return Err(RejectBuilder { + code, + message: format!( + "Settlement engine rejected request with error code: {}", + response.status() + ) + .as_str() + .as_ref(), + data: &[], + triggered_by: Some(&SE_ILP_ADDRESS), } - })); + .build()); + } } } - Box::new(self.next.handle_request(request)) + self.next.handle_request(request).await } } @@ -122,18 +140,18 @@ where mod tests { use super::*; use crate::api::fixtures::{BODY, DATA, SERVICE_ADDRESS, TEST_ACCOUNT_0}; - use crate::api::test_helpers::{block_on, mock_message, test_service}; + use crate::api::test_helpers::{mock_message, test_service}; use interledger_packet::{Address, Fulfill, PrepareBuilder, Reject}; use std::str::FromStr; use std::time::SystemTime; - #[test] - fn settlement_ok() { + #[tokio::test] + async fn settlement_ok() { // happy case let m = mock_message(200).create(); let mut settlement = test_service(); - let fulfill: Fulfill = block_on( - settlement.handle_request(IncomingRequest { + let fulfill: Fulfill = settlement + .handle_request(IncomingRequest { from: TEST_ACCOUNT_0.clone(), prepare: PrepareBuilder { amount: 0, @@ -143,22 +161,22 @@ mod tests { execution_condition: &[0; 32], } .build(), - }), - ) - .unwrap(); + }) + .await + .unwrap(); m.assert(); assert_eq!(fulfill.data(), BODY.as_bytes()); assert_eq!(fulfill.fulfillment(), &[0; 32]); } - #[test] - fn gets_forwarded_if_destination_not_engine_() { + #[tokio::test] + async fn gets_forwarded_if_destination_not_engine_() { let m = mock_message(200).create().expect(0); let mut settlement = test_service(); let destination = Address::from_str("example.some.address").unwrap(); - let reject: Reject = block_on( - settlement.handle_request(IncomingRequest { + let reject: Reject = 
settlement + .handle_request(IncomingRequest { from: TEST_ACCOUNT_0.clone(), prepare: PrepareBuilder { amount: 0, @@ -168,9 +186,9 @@ mod tests { execution_condition: &[0; 32], } .build(), - }), - ) - .unwrap_err(); + }) + .await + .unwrap_err(); m.assert(); assert_eq!(reject.code(), ErrorCode::F02_UNREACHABLE); @@ -178,14 +196,14 @@ mod tests { assert_eq!(reject.message(), b"No other incoming handler!" as &[u8],); } - #[test] - fn account_does_not_have_settlement_engine() { + #[tokio::test] + async fn account_does_not_have_settlement_engine() { let m = mock_message(200).create().expect(0); let mut settlement = test_service(); let mut acc = TEST_ACCOUNT_0.clone(); acc.no_details = true; // Hide the settlement engine data from the account - let reject: Reject = block_on( - settlement.handle_request(IncomingRequest { + let reject: Reject = settlement + .handle_request(IncomingRequest { from: acc.clone(), prepare: PrepareBuilder { amount: 0, @@ -195,9 +213,9 @@ mod tests { execution_condition: &[0; 32], } .build(), - }), - ) - .unwrap_err(); + }) + .await + .unwrap_err(); m.assert(); assert_eq!(reject.code(), ErrorCode::F02_UNREACHABLE); @@ -205,15 +223,15 @@ mod tests { assert_eq!(reject.message(), b"No other incoming handler!"); } - #[test] - fn settlement_engine_rejects() { + #[tokio::test] + async fn settlement_engine_rejects() { // for whatever reason the engine rejects our request with a 500 code let error_code = 500; let error_str = "Internal Server Error"; let m = mock_message(error_code).create(); let mut settlement = test_service(); - let reject: Reject = block_on( - settlement.handle_request(IncomingRequest { + let reject: Reject = settlement + .handle_request(IncomingRequest { from: TEST_ACCOUNT_0.clone(), prepare: PrepareBuilder { amount: 0, @@ -223,9 +241,9 @@ mod tests { execution_condition: &[0; 32], } .build(), - }), - ) - .unwrap_err(); + }) + .await + .unwrap_err(); m.assert(); assert_eq!(reject.code(), ErrorCode::T00_INTERNAL_ERROR); diff --git 
a/crates/interledger-settlement/src/api/node_api.rs b/crates/interledger-settlement/src/api/node_api.rs index d095eed11..a83530f03 100644 --- a/crates/interledger-settlement/src/api/node_api.rs +++ b/crates/interledger-settlement/src/api/node_api.rs @@ -3,16 +3,12 @@ use crate::core::{ idempotency::*, scale_with_precision_loss, types::{ - ApiResponse, LeftoversStore, Quantity, SettlementAccount, SettlementStore, - CONVERSION_ERROR_TYPE, NO_ENGINE_CONFIGURED_ERROR_TYPE, SE_ILP_ADDRESS, + ApiResponse, ApiResult, LeftoversStore, Quantity, SettlementAccount, SettlementStore, + CONVERSION_ERROR_TYPE, SE_ILP_ADDRESS, }, }; -use bytes::buf::FromBuf; use bytes::Bytes; -use futures::{ - future::{err, result}, - Future, -}; +use futures::TryFutureExt; use hyper::{Response, StatusCode}; use interledger_http::error::*; use interledger_packet::PrepareBuilder; @@ -32,6 +28,83 @@ static PEER_PROTOCOL_CONDITION: [u8; 32] = [ 110, 226, 51, 179, 144, 42, 89, 29, 13, 95, 41, 37, ]; +async fn receive_settlement( + account_id: String, + idempotency_key: Option, + quantity: Quantity, + store: S, +) -> Result +where + S: LeftoversStore + + SettlementStore + + IdempotentStore + + AccountStore + + Clone + + Send + + Sync + + 'static, + A: SettlementAccount + Account + Send + Sync + 'static, +{ + let input = format!("{}{:?}", account_id, quantity); + let input_hash = get_hash_of(input.as_ref()); + + let idempotency_key_clone = idempotency_key.clone(); + let store_clone = store.clone(); + let (status_code, message) = make_idempotent_call( + store, + || do_receive_settlement(store_clone, account_id, quantity, idempotency_key_clone), + input_hash, + idempotency_key, + StatusCode::CREATED, + "RECEIVED".into(), + ) + .await?; + Ok(Response::builder() + .status(status_code) + // bytes 04 to 05 compatibility + .body(bytes05::Bytes::from(message.to_vec())) + .unwrap()) +} + +async fn send_message( + account_id: String, + idempotency_key: Option, + message: bytes05::Bytes, + store: S, + 
outgoing_handler: O, +) -> Result +where + S: LeftoversStore + + SettlementStore + + IdempotentStore + + AccountStore + + Clone + + Send + + Sync + + 'static, + O: OutgoingService + Clone + Send + Sync + 'static, + A: SettlementAccount + Account + Send + Sync + 'static, +{ + let input = format!("{}{:?}", account_id, message); + let input_hash = get_hash_of(input.as_ref()); + + let store_clone = store.clone(); + let (status_code, message) = make_idempotent_call( + store, + || do_send_outgoing_message(store_clone, outgoing_handler, account_id, message.to_vec()), + input_hash, + idempotency_key, + StatusCode::CREATED, + "SENT".into(), + ) + .await?; + Ok(Response::builder() + .status(status_code) + // bytes 04 to 05 compatibility + .body(bytes05::Bytes::from(message.to_vec())) + .unwrap()) +} + pub fn create_settlements_filter( store: S, outgoing_handler: O, @@ -50,94 +123,31 @@ where { let with_store = warp::any().map(move || store.clone()).boxed(); let idempotency = warp::header::optional::("idempotency-key"); - let account_id_filter = warp::path("accounts").and(warp::path::param2::()); // account_id + let account_id_filter = warp::path("accounts").and(warp::path::param::()); // account_id // POST /accounts/:account_id/settlements (optional idempotency-key header) // Body is a Quantity object let settlement_endpoint = account_id_filter.and(warp::path("settlements")); - let settlements = warp::post2() + let settlements = warp::post() .and(settlement_endpoint) .and(warp::path::end()) .and(idempotency) .and(warp::body::json()) .and(with_store.clone()) - .and_then( - move |account_id: String, - idempotency_key: Option, - quantity: Quantity, - store: S| { - let input = format!("{}{:?}", account_id, quantity); - let input_hash = get_hash_of(input.as_ref()); - - let idempotency_key_clone = idempotency_key.clone(); - let store_clone = store.clone(); - let receive_settlement_fn = move || { - do_receive_settlement(store_clone, account_id, quantity, idempotency_key_clone) - }; 
- make_idempotent_call( - store, - receive_settlement_fn, - input_hash, - idempotency_key, - StatusCode::CREATED, - "RECEIVED".into(), - ) - .map_err::<_, Rejection>(move |err| err.into()) - .and_then(move |(status_code, message)| { - Ok(Response::builder() - .status(status_code) - .body(message) - .unwrap()) - }) - }, - ); + .and_then(receive_settlement); // POST /accounts/:account_id/messages (optional idempotency-key header) // Body is a Vec object let with_outgoing_handler = warp::any().map(move || outgoing_handler.clone()).boxed(); let messages_endpoint = account_id_filter.and(warp::path("messages")); - let messages = warp::post2() + let messages = warp::post() .and(messages_endpoint) .and(warp::path::end()) .and(idempotency) - .and(warp::body::concat()) - .and(with_store.clone()) - .and(with_outgoing_handler.clone()) - .and_then( - move |account_id: String, - idempotency_key: Option, - body: warp::body::FullBody, - store: S, - outgoing_handler: O| { - // Gets called by our settlement engine, forwards the request outwards - // until it reaches the peer's settlement engine. 
- let message = Vec::from_buf(body); - let input = format!("{}{:?}", account_id, message); - let input_hash = get_hash_of(input.as_ref()); - - let store_clone = store.clone(); - // Wrap do_send_outgoing_message in a closure to be invoked by - // the idempotency wrapper - let send_outgoing_message_fn = move || { - do_send_outgoing_message(store_clone, outgoing_handler, account_id, message) - }; - make_idempotent_call( - store, - send_outgoing_message_fn, - input_hash, - idempotency_key, - StatusCode::CREATED, - "SENT".into(), - ) - .map_err::<_, Rejection>(move |err| err.into()) - .and_then(move |(status_code, message)| { - Ok(Response::builder() - .status(status_code) - .body(message) - .unwrap()) - }) - }, - ); + .and(warp::body::bytes()) + .and(with_store) + .and(with_outgoing_handler) + .and_then(send_message); settlements .or(messages) @@ -145,12 +155,12 @@ where .boxed() } -fn do_receive_settlement( +async fn do_receive_settlement( store: S, account_id: String, body: Quantity, idempotency_key: Option, -) -> Box + Send> +) -> ApiResult where S: LeftoversStore + SettlementStore @@ -167,103 +177,107 @@ where let engine_scale = body.scale; // Convert to the desired data types - let account_id = match Uuid::from_str(&account_id) { - Ok(a) => a, - Err(_) => { - let error_msg = format!("Unable to parse account id: {}", account_id); - error!("{}", error_msg); - return Box::new(err(ApiError::invalid_account_id(Some(&account_id)))); - } - }; + let account_id = Uuid::from_str(&account_id).map_err(move |_| { + let err = ApiError::invalid_account_id(Some(&account_id)); + error!("{}", err); + err + })?; + + let engine_amount = BigUint::from_str(&engine_amount).map_err(|_| { + let error_msg = format!("Could not convert amount: {:?}", engine_amount); + error!("{}", error_msg); + ApiError::from_api_error_type(&CONVERSION_ERROR_TYPE).detail(error_msg) + })?; + + let accounts = store + .get_accounts(vec![account_id]) + .map_err(move |_| { + let err = 
ApiError::account_not_found() + .detail(format!("Account {} was not found", account_id)); + error!("{}", err); + err + }) + .await?; + + let account = &accounts[0]; + if account.settlement_engine_details().is_none() { + let err = ApiError::account_not_found().detail(format!("Account {} has no settlement engine details configured, cannot send a settlement engine message to that account", accounts[0].id())); + error!("{}", err); + return Err(err); + } - let engine_amount = match BigUint::from_str(&engine_amount) { - Ok(a) => a, - Err(_) => { - let error_msg = format!("Could not convert amount: {:?}", engine_amount); + let account_id = account.id(); + let asset_scale = account.asset_scale(); + // Scale to account's scale from the engine's scale + // If we're downscaling we might have some precision error which + // we must save as leftovers. Upscaling is OK since we're using + // biguint's. + let (scaled_engine_amount, precision_loss) = + scale_with_precision_loss(engine_amount, asset_scale, engine_scale); + + // This will load any leftovers (which are saved in the highest + // so far received scale by the engine), will scale them to + // the account's asset scale and return them. 
If there was any + // precision loss due to downscaling, it will also update the + // leftovers to the new leftovers value + let scaled_leftover_amount = store_clone + .load_uncredited_settlement_amount(account_id, asset_scale) + .map_err(move |_err| { + let error_msg = format!( + "Error getting uncredited settlement amount for: {}", + account.id() + ); error!("{}", error_msg); - return Box::new(err( - ApiError::from_api_error_type(&CONVERSION_ERROR_TYPE).detail(error_msg) - )); - } - }; - - Box::new( - store.get_accounts(vec![account_id]) - .map_err(move |_err| { - let err = ApiError::account_not_found().detail(format!("Account {} was not found", account_id)); - error!("{}", err); - err - }) - .and_then(move |accounts| { - let account = &accounts[0]; - if account.settlement_engine_details().is_some() { - Ok(account.clone()) - } else { - let error_msg = format!("Account {} does not have settlement engine details configured. Cannot handle incoming settlement", account.id()); - error!("{}", error_msg); - Err(ApiError::from_api_error_type(&NO_ENGINE_CONFIGURED_ERROR_TYPE).detail(error_msg)) - } - }) - .and_then(move |account| { - let account_id = account.id(); - let asset_scale = account.asset_scale(); - // Scale to account's scale from the engine's scale - // If we're downscaling we might have some precision error which - // we must save as leftovers. Upscaling is OK since we're using - // biguint's. - let (scaled_engine_amount, precision_loss) = scale_with_precision_loss(engine_amount, asset_scale, engine_scale); - - // This will load any leftovers (which are saved in the highest - // so far received scale by the engine), will scale them to - // the account's asset scale and return them. 
If there was any - // precision loss due to downscaling, it will also update the - // leftovers to the new leftovers value - store_clone.load_uncredited_settlement_amount(account_id, asset_scale) - .map_err(move |_err| { - let error_msg = format!("Error getting uncredited settlement amount for: {}", account.id()); - error!("{}", error_msg); - let error_type = ApiErrorType { - r#type: &ProblemType::Default, - status: StatusCode::INTERNAL_SERVER_ERROR, - title: "Load uncredited settlement amount error", - }; - ApiError::from_api_error_type(&error_type).detail(error_msg) - }) - .and_then(move |scaled_leftover_amount| { - // add the leftovers to the scaled engine amount - let total_amount = scaled_engine_amount.clone() + scaled_leftover_amount; - let engine_amount_u64 = total_amount.to_u64().unwrap_or(std::u64::MAX); - - futures::future::join_all(vec![ - // update the account's balance in the store - store.update_balance_for_incoming_settlement(account_id, engine_amount_u64, idempotency_key), - // save any precision loss that occurred during the - // scaling of the engine's amount to the account's scale - store.save_uncredited_settlement_amount(account_id, (precision_loss, engine_scale)) - ]) - .map_err(move |_| { - let error_msg = format!("Error updating the balance and leftovers of account: {}", account_id); - error!("{}", error_msg); - let error_type = ApiErrorType { - r#type: &ProblemType::Default, - status: StatusCode::INTERNAL_SERVER_ERROR, - title: "Balance update error" - }; - ApiError::from_api_error_type(&error_type).detail(error_msg) - }) - .and_then(move |_| { - Ok(ApiResponse::Default) - }) - }) - })) + let error_type = ApiErrorType { + r#type: &ProblemType::Default, + status: StatusCode::INTERNAL_SERVER_ERROR, + title: "Load uncredited settlement amount error", + }; + ApiError::from_api_error_type(&error_type).detail(error_msg) + }) + .await?; + + // add the leftovers to the scaled engine amount + let total_amount = scaled_engine_amount.clone() + 
scaled_leftover_amount; + let engine_amount_u64 = total_amount.to_u64().unwrap_or(std::u64::MAX); + + let ret = futures::future::join_all(vec![ + // update the account's balance in the store + store.update_balance_for_incoming_settlement( + account_id, + engine_amount_u64, + idempotency_key, + ), + // save any precision loss that occurred during the + // scaling of the engine's amount to the account's scale + store.save_uncredited_settlement_amount(account_id, (precision_loss, engine_scale)), + ]) + .await; + + // if any of the futures errored, then we should propagate that + if ret.iter().any(|r| r.is_err()) { + let error_msg = format!( + "Error updating the balance and leftovers of account: {}", + account_id + ); + error!("{}", error_msg); + let error_type = ApiErrorType { + r#type: &ProblemType::Default, + status: StatusCode::INTERNAL_SERVER_ERROR, + title: "Balance update error", + }; + return Err(ApiError::from_api_error_type(&error_type).detail(error_msg)); + } + + Ok(ApiResponse::Default) } -fn do_send_outgoing_message( +async fn do_send_outgoing_message( store: S, mut outgoing_handler: O, account_id: String, body: Vec, -) -> Box + Send> +) -> ApiResult where S: LeftoversStore + SettlementStore @@ -276,64 +290,65 @@ where O: OutgoingService + Clone + Send + Sync + 'static, A: SettlementAccount + Account + Send + Sync + 'static, { - Box::new(result(Uuid::from_str(&account_id) - .map_err(move |_| { - let err = ApiError::invalid_account_id(Some(&account_id)); - error!("{}", err); - err - })) - .and_then(move |account_id| { - store.get_accounts(vec![account_id]) - .map_err(move |_| { - let err = ApiError::account_not_found().detail(format!("Account {} was not found", account_id)); - error!("{}", err); - err - }) - }) - .and_then(|accounts| { - let account = &accounts[0]; - if account.settlement_engine_details().is_some() { - Ok(account.clone()) - } else { - let err = ApiError::account_not_found().detail(format!("Account {} has no settlement engine details 
configured, cannot send a settlement engine message to that account", accounts[0].id())); - error!("{}", err); - Err(err) - } - }) - .and_then(move |account| { - // Send the message to the peer's settlement engine. - // Note that we use dummy values for the `from` and `original_amount` - // because this `OutgoingRequest` will bypass the router and thus will not - // use either of these values. Including dummy values in the rare case where - // we do not need them seems easier than using - // `Option`s all over the place. - outgoing_handler.send_request(OutgoingRequest { - from: account.clone(), - to: account.clone(), - original_amount: 0, - prepare: PrepareBuilder { - destination: SE_ILP_ADDRESS.clone(), - amount: 0, - expires_at: SystemTime::now() + Duration::from_secs(30), - data: &body, - execution_condition: &PEER_PROTOCOL_CONDITION, - }.build() - }) - .map_err(move |reject| { - let error_msg = format!("Error sending message to peer settlement engine. Packet rejected with code: {}, message: {}", reject.code(), str::from_utf8(reject.message()).unwrap_or_default()); - let error_type = ApiErrorType { - r#type: &ProblemType::Default, - status: StatusCode::BAD_GATEWAY, - title: "Error sending message to peer engine", - }; - error!("{}", error_msg); - ApiError::from_api_error_type(&error_type).detail(error_msg) - }) - }) - .and_then(move |fulfill| { - let data = Bytes::from(fulfill.data()); - Ok(ApiResponse::Data(data)) - })) + let account_id = Uuid::from_str(&account_id).map_err(move |_| { + let err = ApiError::invalid_account_id(Some(&account_id)); + error!("{}", err); + err + })?; + let accounts = store + .get_accounts(vec![account_id]) + .map_err(move |_| { + let err = ApiError::account_not_found() + .detail(format!("Account {} was not found", account_id)); + error!("{}", err); + err + }) + .await?; + + let account = &accounts[0]; + if account.settlement_engine_details().is_none() { + let err = ApiError::account_not_found().detail(format!("Account {} has no 
settlement engine details configured, cannot send a settlement engine message to that account", accounts[0].id())); + error!("{}", err); + return Err(err); + } + + // Send the message to the peer's settlement engine. + // Note that we use dummy values for the `from` and `original_amount` + // because this `OutgoingRequest` will bypass the router and thus will not + // use either of these values. Including dummy values in the rare case where + // we do not need them seems easier than using + // `Option`s all over the place. + match outgoing_handler + .send_request(OutgoingRequest { + from: account.clone(), + to: account.clone(), + original_amount: 0, + prepare: PrepareBuilder { + destination: SE_ILP_ADDRESS.clone(), + amount: 0, + expires_at: SystemTime::now() + Duration::from_secs(30), + data: &body, + execution_condition: &PEER_PROTOCOL_CONDITION, + } + .build(), + }) + .await + { + Ok(fulfill) => { + let data = Bytes::from(fulfill.data()); + Ok(ApiResponse::Data(data)) + } + Err(reject) => { + let error_msg = format!("Error sending message to peer settlement engine. 
Packet rejected with code: {}, message: {}", reject.code(), str::from_utf8(reject.message()).unwrap_or_default()); + let error_type = ApiErrorType { + r#type: &ProblemType::Default, + status: StatusCode::BAD_GATEWAY, + title: "Error sending message to peer engine", + }; + error!("{}", error_msg); + Err(ApiError::from_api_error_type(&error_type).detail(error_msg)) + } + } } #[cfg(test)] @@ -343,7 +358,11 @@ mod tests { use crate::api::test_helpers::*; use serde_json::Value; - fn check_error_status_and_message(response: Response, status_code: u16, message: &str) { + fn check_error_status_and_message( + response: Response, + status_code: u16, + message: &str, + ) { let err: Value = serde_json::from_slice(response.body()).unwrap(); assert_eq!(response.status().as_u16(), status_code); assert_eq!(err.get("status").unwrap(), status_code); @@ -371,17 +390,18 @@ mod tests { // Settlement Tests mod settlement_tests { use super::*; + use bytes05::Bytes; use serde_json::json; const OUR_SCALE: u8 = 11; - fn settlement_call( + async fn settlement_call( api: &F, id: &str, amount: u64, scale: u8, idempotency_key: Option<&str>, - ) -> Response + ) -> Response where F: warp::Filter + 'static, F::Extract: warp::Reply, @@ -394,11 +414,11 @@ mod tests { if let Some(idempotency_key) = idempotency_key { response = response.header("Idempotency-Key", idempotency_key); } - response.reply(api) + response.reply(api).await } - #[test] - fn settlement_ok() { + #[tokio::test] + async fn settlement_ok() { let id = TEST_ACCOUNT_0.clone().id.to_string(); let store = test_store(false, true); let api = test_api(store.clone(), false); @@ -407,17 +427,17 @@ mod tests { // = 9. When // we send a settlement with scale OUR_SCALE, the connector should respond // with 2 less 0's. 
- let response = settlement_call(&api, &id, 200, OUR_SCALE, Some(IDEMPOTENCY)); + let response = settlement_call(&api, &id, 200, OUR_SCALE, Some(IDEMPOTENCY)).await; assert_eq!(response.body(), &Bytes::from("RECEIVED")); assert_eq!(response.status(), StatusCode::CREATED); // check that it's idempotent - let response = settlement_call(&api, &id, 200, OUR_SCALE, Some(IDEMPOTENCY)); + let response = settlement_call(&api, &id, 200, OUR_SCALE, Some(IDEMPOTENCY)).await; assert_eq!(response.body(), &Bytes::from("RECEIVED")); assert_eq!(response.status(), StatusCode::CREATED); // fails with different account id - let response = settlement_call(&api, "2", 200, OUR_SCALE, Some(IDEMPOTENCY)); + let response = settlement_call(&api, "2", 200, OUR_SCALE, Some(IDEMPOTENCY)).await; check_error_status_and_message( response, 409, @@ -425,7 +445,7 @@ mod tests { ); // fails with different settlement data and account id - let response = settlement_call(&api, "2", 42, OUR_SCALE, Some(IDEMPOTENCY)); + let response = settlement_call(&api, "2", 42, OUR_SCALE, Some(IDEMPOTENCY)).await; check_error_status_and_message( response, 409, @@ -433,7 +453,7 @@ mod tests { ); // fails with different settlement data and same account id - let response = settlement_call(&api, &id, 42, OUR_SCALE, Some(IDEMPOTENCY)); + let response = settlement_call(&api, &id, 42, OUR_SCALE, Some(IDEMPOTENCY)).await; check_error_status_and_message( response, 409, @@ -441,7 +461,7 @@ mod tests { ); // works without idempotency key - let response = settlement_call(&api, &id, 400, OUR_SCALE, None); + let response = settlement_call(&api, &id, 400, OUR_SCALE, None).await; assert_eq!(response.body(), &Bytes::from("RECEIVED")); assert_eq!(response.status(), StatusCode::CREATED); @@ -452,19 +472,19 @@ mod tests { let cache_hits = s.cache_hits.read(); assert_eq!(*cache_hits, 4); assert_eq!(cached_data.status, StatusCode::CREATED); - assert_eq!(cached_data.body, &Bytes::from("RECEIVED")); + assert_eq!(cached_data.body, 
&bytes::Bytes::from("RECEIVED")); } - #[test] + #[tokio::test] // The connector must save the difference each time there's precision // loss and try to add it the amount it's being notified to settle for the next time. - fn settlement_leftovers() { + async fn settlement_leftovers() { let id = TEST_ACCOUNT_0.clone().id.to_string(); let store = test_store(false, true); let api = test_api(store.clone(), false); // Send 205 with scale 11, 2 decimals lost -> 0.05 leftovers - let response = settlement_call(&api, &id, 205, 11, None); + let response = settlement_call(&api, &id, 205, 11, None).await; assert_eq!(response.body(), &Bytes::from("RECEIVED")); // balance should be 2 @@ -472,75 +492,75 @@ mod tests { assert_eq!( store .get_uncredited_settlement_amount(TEST_ACCOUNT_0.id) - .wait() + .await .unwrap(), (BigUint::from(5u32), 11) ); // Send 855 with scale 12, 3 decimals lost -> 0.855 leftovers, - let response = settlement_call(&api, &id, 855, 12, None); + let response = settlement_call(&api, &id, 855, 12, None).await; assert_eq!(response.body(), &Bytes::from("RECEIVED")); assert_eq!(store.get_balance(TEST_ACCOUNT_0.id), 2); // total leftover: 0.905 = 0.05 + 0.855 assert_eq!( store .get_uncredited_settlement_amount(TEST_ACCOUNT_0.id) - .wait() + .await .unwrap(), (BigUint::from(905u32), 12) ); // send 110 with scale 11, 2 decimals lost -> 0.1 leftover - let response = settlement_call(&api, &id, 110, 11, None); + let response = settlement_call(&api, &id, 110, 11, None).await; assert_eq!(response.body(), &Bytes::from("RECEIVED")); assert_eq!(store.get_balance(TEST_ACCOUNT_0.id), 3); assert_eq!( store .get_uncredited_settlement_amount(TEST_ACCOUNT_0.id) - .wait() + .await .unwrap(), (BigUint::from(1005u32), 12) ); // send 5 with scale 9, will consume the leftovers and increase // total balance by 6 while updating the rest of the leftovers - let response = settlement_call(&api, &id, 5, 9, None); + let response = settlement_call(&api, &id, 5, 9, None).await; 
assert_eq!(response.body(), &Bytes::from("RECEIVED")); assert_eq!(store.get_balance(TEST_ACCOUNT_0.id), 9); assert_eq!( store .get_uncredited_settlement_amount(TEST_ACCOUNT_0.id) - .wait() + .await .unwrap(), (BigUint::from(5u32), 12) ); // we send a payment with a smaller scale than the account now - let response = settlement_call(&api, &id, 2, 7, None); + let response = settlement_call(&api, &id, 2, 7, None).await; assert_eq!(response.body(), &Bytes::from("RECEIVED")); assert_eq!(store.get_balance(TEST_ACCOUNT_0.id), 209); // leftovers are still the same assert_eq!( store .get_uncredited_settlement_amount(TEST_ACCOUNT_0.id) - .wait() + .await .unwrap(), (BigUint::from(5u32), 12) ); } - #[test] - fn account_has_no_engine_configured() { + #[tokio::test] + async fn account_has_no_engine_configured() { let id = TEST_ACCOUNT_0.clone().id.to_string(); let store = test_store(false, false); let api = test_api(store.clone(), false); - let response = settlement_call(&api, &id, 100, 18, Some(IDEMPOTENCY)); - check_error_status_and_message(response, 404, "Account 00000000-0000-0000-0000-000000000000 does not have settlement engine details configured. Cannot handle incoming settlement"); + let response = settlement_call(&api, &id, 100, 18, Some(IDEMPOTENCY)).await; + check_error_status_and_message(response, 404, "Account 00000000-0000-0000-0000-000000000000 has no settlement engine details configured, cannot send a settlement engine message to that account"); // check that it's idempotent - let response = settlement_call(&api, &id, 100, 18, Some(IDEMPOTENCY)); - check_error_status_and_message(response, 404, "Account 00000000-0000-0000-0000-000000000000 does not have settlement engine details configured. 
Cannot handle incoming settlement"); + let response = settlement_call(&api, &id, 100, 18, Some(IDEMPOTENCY)).await; + check_error_status_and_message(response, 404, "Account 00000000-0000-0000-0000-000000000000 has no settlement engine details configured, cannot send a settlement engine message to that account"); let s = store.clone(); let cache = s.cache.read(); @@ -549,34 +569,34 @@ mod tests { let cache_hits = s.cache_hits.read(); assert_eq!(*cache_hits, 1); assert_eq!(cached_data.status, 404); - assert_eq!(cached_data.body, &Bytes::from("Account 00000000-0000-0000-0000-000000000000 does not have settlement engine details configured. Cannot handle incoming settlement")); + assert_eq!(cached_data.body, &bytes::Bytes::from("Account 00000000-0000-0000-0000-000000000000 has no settlement engine details configured, cannot send a settlement engine message to that account")); } - #[test] - fn update_balance_for_incoming_settlement_fails() { + #[tokio::test] + async fn update_balance_for_incoming_settlement_fails() { let id = TEST_ACCOUNT_0.clone().id.to_string(); let store = test_store(true, true); let api = test_api(store, false); - let response = settlement_call(&api, &id, 100, 18, None); + let response = settlement_call(&api, &id, 100, 18, None).await; assert_eq!(response.status().as_u16(), 500); } - #[test] - fn invalid_account_id() { + #[tokio::test] + async fn invalid_account_id() { // the api is configured to take an accountId type // supplying an id that cannot be parsed to that type must fail let id = "a".to_string(); let store = test_store(false, true); let api = test_api(store.clone(), false); - let ret = settlement_call(&api, &id, 100, 18, Some(IDEMPOTENCY)); + let ret = settlement_call(&api, &id, 100, 18, Some(IDEMPOTENCY)).await; check_error_status_and_message(ret, 400, "a is an invalid account ID"); // check that it's idempotent - let ret = settlement_call(&api, &id, 100, 18, Some(IDEMPOTENCY)); + let ret = settlement_call(&api, &id, 100, 18, 
Some(IDEMPOTENCY)).await; check_error_status_and_message(ret, 400, "a is an invalid account ID"); - let _ret = settlement_call(&api, &id, 100, 18, Some(IDEMPOTENCY)); + let _ret = settlement_call(&api, &id, 100, 18, Some(IDEMPOTENCY)).await; let s = store.clone(); let cache = s.cache.read(); @@ -585,23 +605,26 @@ mod tests { let cache_hits = s.cache_hits.read(); assert_eq!(*cache_hits, 2); assert_eq!(cached_data.status, 400); - assert_eq!(cached_data.body, &Bytes::from("a is an invalid account ID")); + assert_eq!( + cached_data.body, + &bytes::Bytes::from("a is an invalid account ID") + ); } - #[test] - fn account_not_in_store() { + #[tokio::test] + async fn account_not_in_store() { let id = TEST_ACCOUNT_0.clone().id.to_string(); let store = TestStore::new(vec![], false); let api = test_api(store.clone(), false); - let ret = settlement_call(&api, &id, 100, 18, Some(IDEMPOTENCY)); + let ret = settlement_call(&api, &id, 100, 18, Some(IDEMPOTENCY)).await; check_error_status_and_message( ret, 404, "Account 00000000-0000-0000-0000-000000000000 was not found", ); - let ret = settlement_call(&api, &id, 100, 18, Some(IDEMPOTENCY)); + let ret = settlement_call(&api, &id, 100, 18, Some(IDEMPOTENCY)).await; check_error_status_and_message( ret, 404, @@ -616,20 +639,21 @@ mod tests { assert_eq!(cached_data.status, 404); assert_eq!( cached_data.body, - &Bytes::from("Account 00000000-0000-0000-0000-000000000000 was not found") + &bytes::Bytes::from("Account 00000000-0000-0000-0000-000000000000 was not found") ); } } mod message_tests { use super::*; + use bytes05::Bytes; - fn messages_call( + async fn messages_call( api: &F, id: &str, message: &[u8], idempotency_key: Option<&str>, - ) -> Response + ) -> Response where F: warp::Filter + 'static, F::Extract: warp::Reply, @@ -642,26 +666,26 @@ mod tests { if let Some(idempotency_key) = idempotency_key { response = response.header("Idempotency-Key", idempotency_key); } - response.reply(api) + response.reply(api).await } - #[test] - 
fn message_ok() { + #[tokio::test] + async fn message_ok() { let id = TEST_ACCOUNT_0.clone().id.to_string(); let store = test_store(false, true); let api = test_api(store.clone(), true); - let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)); + let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)).await; assert_eq!(ret.status(), StatusCode::CREATED); assert_eq!(ret.body(), &Bytes::from("hello!")); - let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)); + let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)).await; assert_eq!(ret.status(), StatusCode::CREATED); assert_eq!(ret.body(), &Bytes::from("hello!")); // Using the same idempotency key with different arguments MUST // fail. - let ret = messages_call(&api, "1", &[], Some(IDEMPOTENCY)); + let ret = messages_call(&api, "1", &[], Some(IDEMPOTENCY)).await; check_error_status_and_message( ret, 409, @@ -670,7 +694,7 @@ mod tests { let data = [0, 1, 2]; // fails with different account id and data - let ret = messages_call(&api, "1", &data[..], Some(IDEMPOTENCY)); + let ret = messages_call(&api, "1", &data[..], Some(IDEMPOTENCY)).await; check_error_status_and_message( ret, 409, @@ -678,7 +702,7 @@ mod tests { ); // fails for same account id but different data - let ret = messages_call(&api, &id, &data[..], Some(IDEMPOTENCY)); + let ret = messages_call(&api, &id, &data[..], Some(IDEMPOTENCY)).await; check_error_status_and_message( ret, 409, @@ -691,19 +715,19 @@ mod tests { let cache_hits = s.cache_hits.read(); assert_eq!(*cache_hits, 4); assert_eq!(cached_data.status, StatusCode::CREATED); - assert_eq!(cached_data.body, &Bytes::from("hello!")); + assert_eq!(cached_data.body, &bytes::Bytes::from("hello!")); } - #[test] - fn message_gets_rejected() { + #[tokio::test] + async fn message_gets_rejected() { let id = TEST_ACCOUNT_0.clone().id.to_string(); let store = test_store(false, true); let api = test_api(store.clone(), false); - let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)); + let ret 
= messages_call(&api, &id, &[], Some(IDEMPOTENCY)).await; check_error_status_and_message(ret, 502, "Error sending message to peer settlement engine. Packet rejected with code: F02, message: No other outgoing handler!"); - let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)); + let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)).await; check_error_status_and_message(ret, 502, "Error sending message to peer settlement engine. Packet rejected with code: F02, message: No other outgoing handler!"); let s = store.clone(); @@ -712,24 +736,24 @@ mod tests { let cache_hits = s.cache_hits.read(); assert_eq!(*cache_hits, 1); assert_eq!(cached_data.status, 502); - assert_eq!(cached_data.body, &Bytes::from("Error sending message to peer settlement engine. Packet rejected with code: F02, message: No other outgoing handler!")); + assert_eq!(cached_data.body, &bytes::Bytes::from("Error sending message to peer settlement engine. Packet rejected with code: F02, message: No other outgoing handler!")); } - #[test] - fn invalid_account_id() { + #[tokio::test] + async fn invalid_account_id() { // the api is configured to take an accountId type // supplying an id that cannot be parsed to that type must fail let id = "a".to_string(); let store = test_store(false, true); let api = test_api(store.clone(), true); - let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)); + let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)).await; check_error_status_and_message(ret, 400, "a is an invalid account ID"); - let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)); + let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)).await; check_error_status_and_message(ret, 400, "a is an invalid account ID"); - let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)); + let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)).await; check_error_status_and_message(ret, 400, "a is an invalid account ID"); let s = store.clone(); @@ -739,23 +763,26 @@ mod tests { let cache_hits 
= s.cache_hits.read(); assert_eq!(*cache_hits, 2); assert_eq!(cached_data.status, 400); - assert_eq!(cached_data.body, &Bytes::from("a is an invalid account ID")); + assert_eq!( + cached_data.body, + &bytes::Bytes::from("a is an invalid account ID") + ); } - #[test] - fn account_not_in_store() { + #[tokio::test] + async fn account_not_in_store() { let id = TEST_ACCOUNT_0.clone().id.to_string(); let store = TestStore::new(vec![], false); let api = test_api(store.clone(), true); - let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)); + let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)).await; check_error_status_and_message( ret, 404, "Account 00000000-0000-0000-0000-000000000000 was not found", ); - let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)); + let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)).await; check_error_status_and_message( ret, 404, @@ -771,7 +798,7 @@ mod tests { assert_eq!(cached_data.status, 404); assert_eq!( cached_data.body, - &Bytes::from("Account 00000000-0000-0000-0000-000000000000 was not found") + &bytes::Bytes::from("Account 00000000-0000-0000-0000-000000000000 was not found") ); } } diff --git a/crates/interledger-settlement/src/api/test_helpers.rs b/crates/interledger-settlement/src/api/test_helpers.rs index ac207ab03..c3abe8363 100644 --- a/crates/interledger-settlement/src/api/test_helpers.rs +++ b/crates/interledger-settlement/src/api/test_helpers.rs @@ -8,10 +8,6 @@ use crate::core::{ }, }; use bytes::Bytes; -use futures::{ - future::{err, ok}, - Future, -}; use hyper::StatusCode; use interledger_packet::{Address, ErrorCode, FulfillBuilder, RejectBuilder}; use interledger_service::{ @@ -22,12 +18,13 @@ use num_bigint::BigUint; use uuid::Uuid; use super::fixtures::{BODY, MESSAGES_API, SERVICE_ADDRESS, SETTLEMENT_API, TEST_ACCOUNT_0}; +use async_trait::async_trait; use lazy_static::lazy_static; use parking_lot::RwLock; +use std::cmp::Ordering; use std::collections::HashMap; use std::str::FromStr; use 
std::sync::Arc; -use tokio::runtime::Runtime; use url::Url; #[derive(Debug, Clone)] @@ -86,73 +83,75 @@ pub struct TestStore { pub uncredited_settlement_amount: Arc>>, } +#[async_trait] impl SettlementStore for TestStore { type Account = TestAccount; - fn update_balance_for_incoming_settlement( + async fn update_balance_for_incoming_settlement( &self, account_id: Uuid, amount: u64, _idempotency_key: Option, - ) -> Box + Send> { + ) -> Result<(), ()> { let mut accounts = self.accounts.write(); for mut a in &mut *accounts { if a.id() == account_id { a.balance += amount as i64; } } - let ret = if self.should_fail { err(()) } else { ok(()) }; - Box::new(ret) + if self.should_fail { + Err(()) + } else { + Ok(()) + } } - fn refund_settlement( - &self, - _account_id: Uuid, - _settle_amount: u64, - ) -> Box + Send> { - let ret = if self.should_fail { err(()) } else { ok(()) }; - Box::new(ret) + async fn refund_settlement(&self, _account_id: Uuid, _settle_amount: u64) -> Result<(), ()> { + if self.should_fail { + Err(()) + } else { + Ok(()) + } } } +#[async_trait] impl IdempotentStore for TestStore { - fn load_idempotent_data( + async fn load_idempotent_data( &self, idempotency_key: String, - ) -> Box, Error = ()> + Send> { + ) -> Result, ()> { let cache = self.cache.read(); if let Some(data) = cache.get(&idempotency_key) { let mut guard = self.cache_hits.write(); *guard += 1; // used to test how many times this branch gets executed - Box::new(ok(Some(data.clone()))) + Ok(Some(data.clone())) } else { - Box::new(ok(None)) + Ok(None) } } - fn save_idempotent_data( + async fn save_idempotent_data( &self, idempotency_key: String, input_hash: [u8; 32], status_code: StatusCode, data: Bytes, - ) -> Box + Send> { + ) -> Result<(), ()> { let mut cache = self.cache.write(); cache.insert( idempotency_key, IdempotentData::new(status_code, data, input_hash), ); - Box::new(ok(())) + Ok(()) } } +#[async_trait] impl AccountStore for TestStore { type Account = TestAccount; - fn 
get_accounts( - &self, - account_ids: Vec, - ) -> Box, Error = ()> + Send> { + async fn get_accounts(&self, account_ids: Vec) -> Result, ()> { let accounts: Vec = self .accounts .read() @@ -166,75 +165,77 @@ impl AccountStore for TestStore { }) .collect(); if accounts.len() == account_ids.len() { - Box::new(ok(accounts)) + Ok(accounts) } else { - Box::new(err(())) + Err(()) } } // stub implementation (not used in these tests) - fn get_account_id_from_username( - &self, - _username: &Username, - ) -> Box + Send> { - Box::new(ok(Uuid::new_v4())) + async fn get_account_id_from_username(&self, _username: &Username) -> Result { + Ok(Uuid::new_v4()) } } +#[async_trait] impl LeftoversStore for TestStore { type AccountId = Uuid; type AssetType = BigUint; - fn save_uncredited_settlement_amount( + async fn save_uncredited_settlement_amount( &self, account_id: Uuid, uncredited_settlement_amount: (Self::AssetType, u8), - ) -> Box + Send> { + ) -> Result<(), ()> { let mut guard = self.uncredited_settlement_amount.write(); if let Some(leftovers) = (*guard).get_mut(&account_id) { - if leftovers.1 > uncredited_settlement_amount.1 { - // the current leftovers maintain the scale so we just need to - // upscale the provided leftovers to the existing leftovers' scale - let scaled = uncredited_settlement_amount - .0 - .normalize_scale(ConvertDetails { - from: uncredited_settlement_amount.1, - to: leftovers.1, - }) - .unwrap(); - *leftovers = (leftovers.0.clone() + scaled, leftovers.1); - } else if leftovers.1 == uncredited_settlement_amount.1 { - *leftovers = ( - leftovers.0.clone() + uncredited_settlement_amount.0, - leftovers.1, - ); - } else { - // if the scale of the provided leftovers is bigger than - // existing scale then we update the scale of the leftovers' - // scale - let scaled = leftovers - .0 - .normalize_scale(ConvertDetails { - from: leftovers.1, - to: uncredited_settlement_amount.1, - }) - .unwrap(); - *leftovers = ( - uncredited_settlement_amount.0 + scaled, - 
uncredited_settlement_amount.1, - ); + match leftovers.1.cmp(&uncredited_settlement_amount.1) { + Ordering::Greater => { + // the current leftovers maintain the scale so we just need to + // upscale the provided leftovers to the existing leftovers' scale + let scaled = uncredited_settlement_amount + .0 + .normalize_scale(ConvertDetails { + from: uncredited_settlement_amount.1, + to: leftovers.1, + }) + .unwrap(); + *leftovers = (leftovers.0.clone() + scaled, leftovers.1); + } + Ordering::Equal => { + *leftovers = ( + leftovers.0.clone() + uncredited_settlement_amount.0, + leftovers.1, + ); + } + _ => { + // if the scale of the provided leftovers is bigger than + // existing scale then we update the scale of the leftovers' + // scale + let scaled = leftovers + .0 + .normalize_scale(ConvertDetails { + from: leftovers.1, + to: uncredited_settlement_amount.1, + }) + .unwrap(); + *leftovers = ( + uncredited_settlement_amount.0 + scaled, + uncredited_settlement_amount.1, + ); + } } } else { (*guard).insert(account_id, uncredited_settlement_amount); } - Box::new(ok(())) + Ok(()) } - fn load_uncredited_settlement_amount( + async fn load_uncredited_settlement_amount( &self, account_id: Uuid, local_scale: u8, - ) -> Box + Send> { + ) -> Result { let mut guard = self.uncredited_settlement_amount.write(); if let Some(l) = guard.get_mut(&account_id) { let ret = l.clone(); @@ -242,28 +243,25 @@ impl LeftoversStore for TestStore { scale_with_precision_loss(ret.0, local_scale, ret.1); // save the new leftovers *l = (leftover_precision_loss, std::cmp::max(local_scale, ret.1)); - Box::new(ok(scaled_leftover_amount)) + Ok(scaled_leftover_amount) } else { - Box::new(ok(BigUint::from(0u32))) + Ok(BigUint::from(0u32)) } } - fn get_uncredited_settlement_amount( + async fn get_uncredited_settlement_amount( &self, account_id: Uuid, - ) -> Box + Send> { + ) -> Result<(Self::AssetType, u8), ()> { let leftovers = self.uncredited_settlement_amount.read(); - Box::new(ok(if let Some(a) = 
leftovers.get(&account_id) { + Ok(if let Some(a) = leftovers.get(&account_id) { a.clone() } else { (BigUint::from(0u32), 1) - })) + }) } - fn clear_uncredited_settlement_amount( - &self, - _account_id: Uuid, - ) -> Box + Send> { + async fn clear_uncredited_settlement_amount(&self, _account_id: Uuid) -> Result<(), ()> { unreachable!() } } @@ -321,29 +319,16 @@ pub fn mock_message(status_code: usize) -> mockito::Mock { .with_body(BODY) } -// Futures helper taken from the store_helpers in interledger-store-redis. -pub fn block_on(f: F) -> Result -where - F: Future + Send + 'static, - F::Item: Send, - F::Error: Send, -{ - // Only run one test at a time - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - runtime.block_on(f) -} - pub fn test_service( ) -> SettlementMessageService + Clone, TestAccount> { SettlementMessageService::new(incoming_service_fn(|_request| { - Box::new(err(RejectBuilder { + Err(RejectBuilder { code: ErrorCode::F02_UNREACHABLE, message: b"No other incoming handler!", data: &[], triggered_by: Some(&SERVICE_ADDRESS), } - .build())) + .build()) })) } @@ -359,21 +344,21 @@ pub fn test_api( should_fulfill: bool, ) -> warp::filters::BoxedFilter<(impl warp::Reply,)> { let outgoing = outgoing_service_fn(move |_| { - Box::new(if should_fulfill { - ok(FulfillBuilder { + if should_fulfill { + Ok(FulfillBuilder { fulfillment: &[0; 32], data: b"hello!", } .build()) } else { - err(RejectBuilder { + Err(RejectBuilder { code: ErrorCode::F02_UNREACHABLE, message: b"No other outgoing handler!", data: &[], triggered_by: Some(&SERVICE_ADDRESS), } .build()) - }) + } }); create_settlements_filter(test_store, outgoing) } diff --git a/crates/interledger-settlement/src/core/backends_common/redis/mod.rs b/crates/interledger-settlement/src/core/backends_common/redis/mod.rs index 172afc96e..c663486ac 100644 --- a/crates/interledger-settlement/src/core/backends_common/redis/mod.rs +++ 
b/crates/interledger-settlement/src/core/backends_common/redis/mod.rs @@ -4,18 +4,20 @@ use crate::core::{ types::{Convert, ConvertDetails, LeftoversStore}, }; use bytes::Bytes; -use futures::{future::result, Future}; +use futures::TryFutureExt; use http::StatusCode; use num_bigint::BigUint; use redis_crate::{ - self, aio::SharedConnection, cmd, Client, ConnectionInfo, ErrorKind, FromRedisValue, - PipelineCommands, RedisError, RedisWrite, ToRedisArgs, Value, + self, aio::MultiplexedConnection, AsyncCommands, Client, ConnectionInfo, ErrorKind, + FromRedisValue, RedisError, RedisWrite, ToRedisArgs, Value, }; -use std::collections::HashMap as SlowHashMap; +use std::collections::HashMap; use std::str::FromStr; use log::{debug, error, trace}; +use async_trait::async_trait; + #[cfg(test)] mod test_helpers; @@ -33,16 +35,22 @@ impl EngineRedisStoreBuilder { EngineRedisStoreBuilder { redis_url } } - pub fn connect(&self) -> impl Future { - result(Client::open(self.redis_url.clone())) - .map_err(|err| error!("Error creating Redis client: {:?}", err)) - .and_then(|client| { - debug!("Connected to redis: {:?}", client); - client - .get_shared_async_connection() - .map_err(|err| error!("Error connecting to Redis: {:?}", err)) - }) - .and_then(move |connection| Ok(EngineRedisStore { connection })) + pub async fn connect(&self) -> Result { + let client = match Client::open(self.redis_url.clone()) { + Ok(c) => c, + Err(err) => { + error!("Error creating Redis client: {:?}", err); + return Err(()); + } + }; + + let connection = client + .get_multiplexed_tokio_connection() + .map_err(|err| error!("Error connecting to Redis: {:?}", err)) + .await?; + debug!("Connected to redis: {:?}", client); + + Ok(EngineRedisStore { connection }) } } @@ -52,56 +60,54 @@ impl EngineRedisStoreBuilder { /// composed in the stores of other Settlement Engines. 
#[derive(Clone)] pub struct EngineRedisStore { - pub connection: SharedConnection, + pub connection: MultiplexedConnection, } +#[async_trait] impl IdempotentStore for EngineRedisStore { - fn load_idempotent_data( + async fn load_idempotent_data( &self, idempotency_key: String, - ) -> Box, Error = ()> + Send> { + ) -> Result, ()> { let idempotency_key_clone = idempotency_key.clone(); - Box::new( - cmd("HGETALL") - .arg(idempotency_key.clone()) - .query_async(self.connection.clone()) - .map_err(move |err| { - error!( - "Error loading idempotency key {}: {:?}", - idempotency_key_clone, err - ) - }) - .and_then( - move |(_connection, ret): (_, SlowHashMap)| { - if let (Some(status_code), Some(data), Some(input_hash_slice)) = ( - ret.get("status_code"), - ret.get("data"), - ret.get("input_hash"), - ) { - trace!("Loaded idempotency key {:?} - {:?}", idempotency_key, ret); - let mut input_hash: [u8; 32] = Default::default(); - input_hash.copy_from_slice(input_hash_slice.as_ref()); - Ok(Some(IdempotentData::new( - StatusCode::from_str(status_code).unwrap(), - Bytes::from(data.clone()), - input_hash, - ))) - } else { - Ok(None) - } - }, - ), - ) + let mut connection = self.connection.clone(); + let ret: HashMap = connection + .hgetall(idempotency_key.clone()) + .map_err(move |err| { + error!( + "Error loading idempotency key {}: {:?}", + idempotency_key_clone, err + ) + }) + .await?; + + if let (Some(status_code), Some(data), Some(input_hash_slice)) = ( + ret.get("status_code"), + ret.get("data"), + ret.get("input_hash"), + ) { + trace!("Loaded idempotency key {:?} - {:?}", idempotency_key, ret); + let mut input_hash: [u8; 32] = Default::default(); + input_hash.copy_from_slice(input_hash_slice.as_ref()); + Ok(Some(IdempotentData::new( + StatusCode::from_str(status_code).unwrap(), + Bytes::from(data.clone()), + input_hash, + ))) + } else { + Ok(None) + } } - fn save_idempotent_data( + async fn save_idempotent_data( &self, idempotency_key: String, input_hash: [u8; 32], 
status_code: StatusCode, data: Bytes, - ) -> Box + Send> { + ) -> Result<(), ()> { let mut pipe = redis_crate::pipe(); + let mut connection = self.connection.clone(); pipe.atomic() .cmd("HMSET") // cannot use hset_multiple since data and status_code have different types .arg(&idempotency_key) @@ -114,19 +120,16 @@ impl IdempotentStore for EngineRedisStore { .ignore() .expire(&idempotency_key, 86400) .ignore(); - Box::new( - pipe.query_async(self.connection.clone()) - .map_err(|err| error!("Error caching: {:?}", err)) - .and_then(move |(_connection, _): (_, Vec)| { - trace!( - "Cached {:?}: {:?}, {:?}", - idempotency_key, - status_code, - data, - ); - Ok(()) - }), - ) + pipe.query_async(&mut connection) + .map_err(|err| error!("Error caching: {:?}", err)) + .await?; + trace!( + "Cached {:?}: {:?}, {:?}", + idempotency_key, + status_code, + data, + ); + Ok(()) } } @@ -216,151 +219,138 @@ impl FromRedisValue for AmountWithScale { } } +#[async_trait] impl LeftoversStore for EngineRedisStore { type AccountId = String; type AssetType = BigUint; - fn get_uncredited_settlement_amount( + async fn get_uncredited_settlement_amount( &self, account_id: Self::AccountId, - ) -> Box + Send> { - Box::new( - cmd("LRANGE") - .arg(uncredited_amount_key(&account_id)) - .arg(0) - .arg(-1) - .query_async(self.connection.clone()) - .map_err(move |err| error!("Error getting uncredited_settlement_amount {:?}", err)) - .and_then(move |(_, amount): (_, AmountWithScale)| Ok((amount.num, amount.scale))), - ) + ) -> Result<(Self::AssetType, u8), ()> { + let mut connection = self.connection.clone(); + let amount: AmountWithScale = connection + .lrange(uncredited_amount_key(&account_id), 0, -1) + .map_err(move |err| error!("Error getting uncredited_settlement_amount {:?}", err)) + .await?; + Ok((amount.num, amount.scale)) } - fn save_uncredited_settlement_amount( + async fn save_uncredited_settlement_amount( &self, account_id: Self::AccountId, uncredited_settlement_amount: (Self::AssetType, u8), - 
) -> Box + Send> { + ) -> Result<(), ()> { trace!( "Saving uncredited_settlement_amount {:?} {:?}", account_id, uncredited_settlement_amount ); - Box::new( - // We store these amounts as lists of strings - // because we cannot do BigNumber arithmetic in the store - // When loading the amounts, we convert them to the appropriate data - // type and sum them up. - cmd("RPUSH") - .arg(uncredited_amount_key(&account_id)) - .arg(AmountWithScale { + // We store these amounts as lists of strings + // because we cannot do BigNumber arithmetic in the store + // When loading the amounts, we convert them to the appropriate data + // type and sum them up. + let mut connection = self.connection.clone(); + connection + .rpush( + uncredited_amount_key(&account_id), + AmountWithScale { num: uncredited_settlement_amount.0, scale: uncredited_settlement_amount.1, - }) - .query_async(self.connection.clone()) - .map_err(move |err| error!("Error saving uncredited_settlement_amount: {:?}", err)) - .and_then(move |(_conn, _ret): (_, Value)| Ok(())), - ) + }, + ) + .map_err(move |err| error!("Error saving uncredited_settlement_amount: {:?}", err)) + .await?; + + Ok(()) } - fn load_uncredited_settlement_amount( + async fn load_uncredited_settlement_amount( &self, account_id: Self::AccountId, local_scale: u8, - ) -> Box + Send> { + ) -> Result { let connection = self.connection.clone(); trace!("Loading uncredited_settlement_amount {:?}", account_id); - Box::new( - self.get_uncredited_settlement_amount(account_id.clone()) - .and_then(move |amount| { - // scale the amount from the max scale to the local scale, and then - // save any potential leftovers to the store - let (scaled_amount, precision_loss) = - scale_with_precision_loss(amount.0, local_scale, amount.1); - let mut pipe = redis_crate::pipe(); - pipe.atomic(); - pipe.del(uncredited_amount_key(&account_id)).ignore(); - pipe.rpush( - uncredited_amount_key(&account_id), - AmountWithScale { - num: precision_loss, - scale: 
std::cmp::max(local_scale, amount.1), - }, - ) - .ignore(); - - pipe.query_async(connection.clone()) - .map_err(move |err| { - error!("Error saving uncredited_settlement_amount: {:?}", err) - }) - .and_then(move |(_conn, _ret): (_, Value)| Ok(scaled_amount)) - }), + let amount = self + .get_uncredited_settlement_amount(account_id.clone()) + .await?; + // scale the amount from the max scale to the local scale, and then + // save any potential leftovers to the store + let (scaled_amount, precision_loss) = + scale_with_precision_loss(amount.0, local_scale, amount.1); + + let mut pipe = redis_crate::pipe(); + pipe.atomic(); + pipe.del(uncredited_amount_key(&account_id)).ignore(); + pipe.rpush( + uncredited_amount_key(&account_id), + AmountWithScale { + num: precision_loss, + scale: std::cmp::max(local_scale, amount.1), + }, ) + .ignore(); + + pipe.query_async(&mut connection.clone()) + .map_err(move |err| error!("Error saving uncredited_settlement_amount: {:?}", err)) + .await?; + + Ok(scaled_amount) } - fn clear_uncredited_settlement_amount( + async fn clear_uncredited_settlement_amount( &self, account_id: Self::AccountId, - ) -> Box + Send> { + ) -> Result<(), ()> { trace!("Clearing uncredited_settlement_amount {:?}", account_id,); - Box::new( - cmd("DEL") - .arg(uncredited_amount_key(&account_id)) - .query_async(self.connection.clone()) - .map_err(move |err| { - error!("Error clearing uncredited_settlement_amount: {:?}", err) - }) - .and_then(move |(_conn, _ret): (_, Value)| Ok(())), - ) + let mut connection = self.connection.clone(); + connection + .del(uncredited_amount_key(&account_id)) + .map_err(move |err| error!("Error clearing uncredited_settlement_amount: {:?}", err)) + .await?; + + Ok(()) } } #[cfg(test)] mod tests { use super::*; - use test_helpers::{block_on, test_store, IDEMPOTENCY_KEY}; + use test_helpers::{test_store, IDEMPOTENCY_KEY}; mod idempotency { use super::*; - #[test] - fn saves_and_loads_idempotency_key_data_properly() { - 
block_on(test_store().and_then(|(store, context)| { - let input_hash: [u8; 32] = Default::default(); - store - .save_idempotent_data( - IDEMPOTENCY_KEY.clone(), - input_hash, - StatusCode::OK, - Bytes::from("TEST"), - ) - .map_err(|err| eprintln!("Redis error: {:?}", err)) - .and_then(move |_| { - store - .load_idempotent_data(IDEMPOTENCY_KEY.clone()) - .map_err(|err| eprintln!("Redis error: {:?}", err)) - .and_then(move |data1| { - assert_eq!( - data1.unwrap(), - IdempotentData::new( - StatusCode::OK, - Bytes::from("TEST"), - input_hash - ) - ); - let _ = context; - - store - .load_idempotent_data("asdf".to_string()) - .map_err(|err| eprintln!("Redis error: {:?}", err)) - .and_then(move |data2| { - assert!(data2.is_none()); - let _ = context; - Ok(()) - }) - }) - }) - })) - .unwrap() + #[tokio::test] + async fn saves_and_loads_idempotency_key_data_properly() { + // The context must be loaded into scope + let (store, _context) = test_store().await.unwrap(); + let input_hash: [u8; 32] = Default::default(); + store + .save_idempotent_data( + IDEMPOTENCY_KEY.clone(), + input_hash, + StatusCode::OK, + Bytes::from("TEST"), + ) + .await + .unwrap(); + + let data1 = store + .load_idempotent_data(IDEMPOTENCY_KEY.clone()) + .await + .unwrap(); + assert_eq!( + data1.unwrap(), + IdempotentData::new(StatusCode::OK, Bytes::from("TEST"), input_hash) + ); + + let data2 = store + .load_idempotent_data("asdf".to_string()) + .await + .unwrap(); + assert!(data2.is_none()); } } } diff --git a/crates/interledger-settlement/src/core/backends_common/redis/test_helpers/mod.rs b/crates/interledger-settlement/src/core/backends_common/redis/test_helpers/mod.rs index df4750134..473ea26d8 100644 --- a/crates/interledger-settlement/src/core/backends_common/redis/test_helpers/mod.rs +++ b/crates/interledger-settlement/src/core/backends_common/redis/test_helpers/mod.rs @@ -3,4 +3,4 @@ mod redis_helpers; #[cfg(test)] mod store_helpers; #[cfg(test)] -pub use store_helpers::{block_on, test_store, 
IDEMPOTENCY_KEY}; +pub use store_helpers::{test_store, IDEMPOTENCY_KEY}; diff --git a/crates/interledger-settlement/src/core/backends_common/redis/test_helpers/redis_helpers.rs b/crates/interledger-settlement/src/core/backends_common/redis/test_helpers/redis_helpers.rs index 44ae632ef..4bdb63bba 100644 --- a/crates/interledger-settlement/src/core/backends_common/redis/test_helpers/redis_helpers.rs +++ b/crates/interledger-settlement/src/core/backends_common/redis/test_helpers/redis_helpers.rs @@ -1,7 +1,7 @@ // Copied from https://github.com/mitsuhiko/redis-rs/blob/9a1777e8a90c82c315a481cdf66beb7d69e681a2/tests/support/mod.rs #![allow(dead_code)] -use futures::Future; +use futures::TryFutureExt; use redis_crate::{self as redis, RedisError}; use std::{env, fs, path::PathBuf, process, thread::sleep, time::Duration}; @@ -155,19 +155,20 @@ impl TestContext { self.client.get_connection().unwrap() } - pub fn async_connection(&self) -> impl Future { + pub async fn async_connection(&self) -> Result { self.client .get_async_connection() .map_err(|err| panic!(err)) + .await } pub fn stop_server(&mut self) { self.server.stop(); } - pub fn shared_async_connection( + pub async fn shared_async_connection( &self, - ) -> impl Future { - self.client.get_shared_async_connection() + ) -> Result { + self.client.get_multiplexed_tokio_connection().await } } diff --git a/crates/interledger-settlement/src/core/backends_common/redis/test_helpers/store_helpers.rs b/crates/interledger-settlement/src/core/backends_common/redis/test_helpers/store_helpers.rs index f4305d907..ba3d96ebe 100644 --- a/crates/interledger-settlement/src/core/backends_common/redis/test_helpers/store_helpers.rs +++ b/crates/interledger-settlement/src/core/backends_common/redis/test_helpers/store_helpers.rs @@ -1,31 +1,16 @@ use super::redis_helpers::TestContext; use crate::core::backends_common::redis::{EngineRedisStore, EngineRedisStoreBuilder}; -use env_logger; -use futures::Future; -use 
tokio::runtime::Runtime; - use lazy_static::lazy_static; lazy_static! { pub static ref IDEMPOTENCY_KEY: String = String::from("abcd"); } -pub fn test_store() -> impl Future { +pub async fn test_store() -> Result<(EngineRedisStore, TestContext), ()> { let context = TestContext::new(); - EngineRedisStoreBuilder::new(context.get_client_connection_info()) + let store = EngineRedisStoreBuilder::new(context.get_client_connection_info()) .connect() - .and_then(|store| Ok((store, context))) -} - -pub fn block_on(f: F) -> Result -where - F: Future + Send + 'static, - F::Item: Send, - F::Error: Send, -{ - // Only run one test at a time - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - runtime.block_on(f) + .await?; + Ok((store, context)) } diff --git a/crates/interledger-settlement/src/core/engines_api.rs b/crates/interledger-settlement/src/core/engines_api.rs index f6e57db73..b03838f9e 100644 --- a/crates/interledger-settlement/src/core/engines_api.rs +++ b/crates/interledger-settlement/src/core/engines_api.rs @@ -7,22 +7,138 @@ use super::{ idempotency::{make_idempotent_call, IdempotentStore}, types::{Quantity, SettlementEngine}, }; -use bytes::buf::FromBuf; use bytes::Bytes; -use futures::Future; use http::StatusCode; use hyper::Response; use interledger_http::error::default_rejection_handler; - use serde::{Deserialize, Serialize}; - -use warp::{self, reject::Rejection, Filter}; +use warp::Filter; #[derive(Serialize, Deserialize, Debug, Clone, Hash)] pub struct CreateAccount { id: String, } +async fn create_engine_account( + idempotency_key: Option, + account_id: CreateAccount, + engine: E, + store: S, +) -> Result +where + E: SettlementEngine + Clone + Send + Sync + 'static, + S: IdempotentStore + Clone + Send + Sync + 'static, +{ + let input_hash = get_hash_of(account_id.id.as_ref()); + let (status_code, message) = make_idempotent_call( + store, + || engine.create_account(account_id.id), + input_hash, + idempotency_key, + 
StatusCode::CREATED, + Bytes::from("CREATED"), + ) + .await?; + + Ok(Response::builder() + .header("Content-Type", "application/json") + .status(status_code) + // Convert Bytes 0.4 to 0.5 + .body(message.to_vec()) + .unwrap()) +} + +async fn delete_engine_account( + account_id: String, + idempotency_key: Option, + engine: E, + store: S, +) -> Result +where + E: SettlementEngine + Clone + Send + Sync + 'static, + S: IdempotentStore + Clone + Send + Sync + 'static, +{ + let input_hash = get_hash_of(account_id.as_ref()); + let (status_code, message) = make_idempotent_call( + store, + || engine.delete_account(account_id), + input_hash, + idempotency_key, + StatusCode::NO_CONTENT, + Bytes::from("DELETED"), + ) + .await?; + + Ok(Response::builder() + .header("Content-Type", "application/json") + .status(status_code) + // Convert Bytes 0.4 to 0.5 + .body(message.to_vec()) + .unwrap()) +} + +async fn engine_send_money( + id: String, + idempotency_key: Option, + quantity: Quantity, + engine: E, + store: S, +) -> Result +where + E: SettlementEngine + Clone + Send + Sync + 'static, + S: IdempotentStore + Clone + Send + Sync + 'static, +{ + let input = format!("{}{:?}", id, quantity); + let input_hash = get_hash_of(input.as_ref()); + let (status_code, message) = make_idempotent_call( + store, + || engine.send_money(id, quantity), + input_hash, + idempotency_key, + StatusCode::CREATED, + Bytes::from("EXECUTED"), + ) + .await?; + + Ok(Response::builder() + .header("Content-Type", "application/json") + .status(status_code) + // Convert Bytes 0.4 to 0.5 + .body(message.to_vec()) + .unwrap()) +} + +async fn engine_receive_message( + id: String, + idempotency_key: Option, + message: bytes05::Bytes, + engine: E, + store: S, +) -> Result +where + E: SettlementEngine + Clone + Send + Sync + 'static, + S: IdempotentStore + Clone + Send + Sync + 'static, +{ + let input = format!("{}{:?}", id, message); + let input_hash = get_hash_of(input.as_ref()); + let (status_code, message) = 
make_idempotent_call( + store, + || engine.receive_message(id, message.to_vec()), + input_hash, + idempotency_key, + StatusCode::CREATED, + Bytes::from("RECEIVED"), + ) + .await?; + + Ok(Response::builder() + .header("Content-Type", "application/json") + .status(status_code) + // Convert Bytes 0.4 to 0.5 + .body(message.to_vec()) + .unwrap()) +} + /// Returns a Settlement Engine filter which exposes a Warp-compatible /// idempotent API which forwards calls to the provided settlement engine which /// uses the underlying store for idempotency. @@ -37,161 +153,51 @@ where let with_store = warp::any().map(move || store.clone()).boxed(); let with_engine = warp::any().map(move || engine.clone()).boxed(); let idempotency = warp::header::optional::("idempotency-key"); - let account_id = warp::path("accounts").and(warp::path::param2::()); // account_id + let account_id = warp::path("accounts").and(warp::path::param::()); // account_id // POST /accounts/ (optional idempotency-key header) // Body is a Vec object - let accounts = warp::post2() + let accounts = warp::post() .and(warp::path("accounts")) .and(warp::path::end()) .and(idempotency) .and(warp::body::json()) .and(with_engine.clone()) .and(with_store.clone()) - .and_then( - move |idempotency_key: Option, - account_id: CreateAccount, - engine: E, - store: S| { - let account_id = account_id.id; - let input_hash = get_hash_of(account_id.as_ref()); - - // Wrap do_send_outgoing_message in a closure to be invoked by - // the idempotency wrapper - let create_account_fn = move || engine.create_account(account_id); - make_idempotent_call( - store, - create_account_fn, - input_hash, - idempotency_key, - StatusCode::CREATED, - Bytes::from("CREATED"), - ) - .map_err::<_, Rejection>(move |err| err.into()) - .and_then(move |(status_code, message)| { - Ok(Response::builder() - .header("Content-Type", "application/json") - .status(status_code) - .body(message) - .unwrap()) - }) - }, - ); - - // DELETE /accounts/:id (optional 
idempotency-key header) - let del_account = warp::delete2() + .and_then(create_engine_account); + + // DELETE /accounts/:id (optional idempotency-key header) + let del_account = warp::delete() .and(account_id) .and(warp::path::end()) .and(idempotency) .and(with_engine.clone()) .and(with_store.clone()) - .and_then( - move |id: String, idempotency_key: Option, engine: E, store: S| { - let input_hash = get_hash_of(id.as_ref()); - - // Wrap do_send_outgoing_message in a closure to be invoked by - // the idempotency wrapper - let delete_account_fn = move || engine.delete_account(id); - make_idempotent_call( - store, - delete_account_fn, - input_hash, - idempotency_key, - StatusCode::NO_CONTENT, - Bytes::from("DELETED"), - ) - .map_err::<_, Rejection>(move |err| err.into()) - .and_then(move |(status_code, message)| { - Ok(Response::builder() - .header("Content-Type", "application/json") - .status(status_code) - .body(message) - .unwrap()) - }) - }, - ); - - // POST /accounts/:account_id/settlements (optional idempotency-key header) + .and_then(delete_engine_account); + + // POST /accounts/:account_id/settlements (optional idempotency-key header) // Body is a Quantity object let settlement_endpoint = account_id.and(warp::path("settlements")); - let settlements = warp::post2() + let settlements = warp::post() .and(settlement_endpoint) .and(warp::path::end()) .and(idempotency) .and(warp::body::json()) .and(with_engine.clone()) .and(with_store.clone()) - .and_then( - move |id: String, - idempotency_key: Option, - quantity: Quantity, - engine: E, - store: S| { - let input = format!("{}{:?}", id, quantity); - let input_hash = get_hash_of(input.as_ref()); - let send_money_fn = move || engine.send_money(id, quantity); - make_idempotent_call( - store, - send_money_fn, - input_hash, - idempotency_key, - StatusCode::CREATED, - Bytes::from("EXECUTED"), - ) - .map_err::<_, Rejection>(move |err| err.into()) - .and_then(move |(status_code, message)| { - Ok(Response::builder() - 
.header("Content-Type", "application/json") - .status(status_code) - .body(message) - .unwrap()) - }) - }, - ); + .and_then(engine_send_money); // POST /accounts/:account_id/messages (optional idempotency-key header) // Body is a Vec object let messages_endpoint = account_id.and(warp::path("messages")); - let messages = warp::post2() + let messages = warp::post() .and(messages_endpoint) .and(warp::path::end()) .and(idempotency) - .and(warp::body::concat()) - .and(with_engine.clone()) - .and(with_store.clone()) - .and_then( - move |id: String, - idempotency_key: Option, - body: warp::body::FullBody, - engine: E, - store: S| { - // Gets called by our settlement engine, forwards the request outwards - // until it reaches the peer's settlement engine. - let message = Vec::from_buf(body); - let input = format!("{}{:?}", id, message); - let input_hash = get_hash_of(input.as_ref()); - - // Wrap do_send_outgoing_message in a closure to be invoked by - // the idempotency wrapper - let receive_message_fn = move || engine.receive_message(id, message); - make_idempotent_call( - store, - receive_message_fn, - input_hash, - idempotency_key, - StatusCode::CREATED, - Bytes::from("RECEIVED"), - ) - .map_err::<_, Rejection>(move |err| err.into()) - .and_then(move |(status_code, message)| { - Ok(Response::builder() - .header("Content-Type", "application/json") - .status(status_code) - .body(message) - .unwrap()) - }) - }, - ); + .and(warp::body::bytes()) + .and(with_engine) + .and(with_store) + .and_then(engine_receive_message); accounts .or(del_account) @@ -205,17 +211,20 @@ where mod tests { use super::*; use crate::core::idempotency::IdempotentData; - use crate::core::types::ApiResponse; + use crate::core::types::{ApiResponse, ApiResult}; + use async_trait::async_trait; use bytes::Bytes; - use futures::future::ok; use http::StatusCode; - use interledger_http::error::ApiError; use parking_lot::RwLock; use serde_json::{json, Value}; use std::collections::HashMap; use std::sync::Arc; 
- fn check_error_status_and_message(response: Response, status_code: u16, message: &str) { + fn check_error_status_and_message( + response: Response, + status_code: u16, + message: &str, + ) { let err: Value = serde_json::from_slice(response.body()).unwrap(); assert_eq!(response.status().as_u16(), status_code); assert_eq!(err.get("status").unwrap(), status_code); @@ -242,78 +251,66 @@ mod tests { } } + #[async_trait] impl IdempotentStore for TestStore { - fn load_idempotent_data( + async fn load_idempotent_data( &self, idempotency_key: String, - ) -> Box, Error = ()> + Send> { + ) -> Result, ()> { let cache = self.cache.read(); if let Some(data) = cache.get(&idempotency_key) { let mut guard = self.cache_hits.write(); *guard += 1; // used to test how many times this branch gets executed - Box::new(ok(Some(data.clone()))) + Ok(Some(data.clone())) } else { - Box::new(ok(None)) + Ok(None) } } - fn save_idempotent_data( + async fn save_idempotent_data( &self, idempotency_key: String, input_hash: [u8; 32], status_code: StatusCode, data: Bytes, - ) -> Box + Send> { + ) -> Result<(), ()> { let mut cache = self.cache.write(); cache.insert( idempotency_key, IdempotentData::new(status_code, data, input_hash), ); - Box::new(ok(())) + Ok(()) } } pub static IDEMPOTENCY: &str = "abcd01234"; + #[async_trait] impl SettlementEngine for TestEngine { - fn send_money( - &self, - _account_id: String, - _money: Quantity, - ) -> Box + Send> { - Box::new(ok(ApiResponse::Default)) + async fn send_money(&self, _account_id: String, _money: Quantity) -> ApiResult { + Ok(ApiResponse::Default) } - fn receive_message( - &self, - _account_id: String, - _message: Vec, - ) -> Box + Send> { - Box::new(ok(ApiResponse::Default)) + async fn receive_message(&self, _account_id: String, _message: Vec) -> ApiResult { + Ok(ApiResponse::Default) } - fn create_account( - &self, - _account_id: String, - ) -> Box + Send> { - Box::new(ok(ApiResponse::Default)) + async fn create_account(&self, _account_id: String) 
-> ApiResult { + Ok(ApiResponse::Default) } - fn delete_account( - &self, - _account_id: String, - ) -> Box + Send> { - Box::new(ok(ApiResponse::Default)) + async fn delete_account(&self, _account_id: String) -> ApiResult { + Ok(ApiResponse::Default) } } - #[test] - fn idempotent_execute_settlement() { + #[tokio::test] + async fn idempotent_execute_settlement() { let store = test_store(); let engine = TestEngine; let api = create_settlement_engine_filter(engine, store.clone()); - let settlement_call = move |id, amount, scale| { + let settlement_call = |id, amount, scale| { warp::test::request() .method("POST") .path(&format!("/accounts/{}/settlements", id)) @@ -322,25 +319,25 @@ mod tests { .reply(&api) }; - let ret = settlement_call("1".to_owned(), 100, 6); + let ret = settlement_call("1".to_owned(), 100, 6).await; assert_eq!(ret.status(), StatusCode::CREATED); assert_eq!(ret.body(), "EXECUTED"); // is idempotent - let ret = settlement_call("1".to_owned(), 100, 6); + let ret = settlement_call("1".to_owned(), 100, 6).await; assert_eq!(ret.status(), StatusCode::CREATED); assert_eq!(ret.body(), "EXECUTED"); // // fails with different id and same data - let ret = settlement_call("42".to_owned(), 100, 6); + let ret = settlement_call("42".to_owned(), 100, 6).await; check_error_status_and_message(ret, 409, "Provided idempotency key is tied to other input"); // fails with same id and different data - let ret = settlement_call("1".to_owned(), 42, 6); + let ret = settlement_call("1".to_owned(), 42, 6).await; check_error_status_and_message(ret, 409, "Provided idempotency key is tied to other input"); // fails with different id and different data - let ret = settlement_call("42".to_owned(), 42, 6); + let ret = settlement_call("42".to_owned(), 42, 6).await; check_error_status_and_message(ret, 409, "Provided idempotency key is tied to other input"); let cache = store.cache.read(); @@ -352,13 +349,13 @@ mod tests { assert_eq!(cached_data.body, "EXECUTED".to_string()); } - 
#[test] - fn idempotent_receive_message() { + #[tokio::test] + async fn idempotent_receive_message() { let store = test_store(); let engine = TestEngine; let api = create_settlement_engine_filter(engine, store.clone()); - let messages_call = move |id, msg| { + let messages_call = |id, msg| { warp::test::request() .method("POST") .path(&format!("/accounts/{}/messages", id)) @@ -367,25 +364,25 @@ mod tests { .reply(&api) }; - let ret = messages_call("1", vec![0]); + let ret = messages_call("1", vec![0]).await; assert_eq!(ret.status().as_u16(), StatusCode::CREATED); assert_eq!(ret.body(), "RECEIVED"); // is idempotent - let ret = messages_call("1", vec![0]); + let ret = messages_call("1", vec![0]).await; assert_eq!(ret.status().as_u16(), StatusCode::CREATED); assert_eq!(ret.body(), "RECEIVED"); // // fails with different id and same data - let ret = messages_call("42", vec![0]); + let ret = messages_call("42", vec![0]).await; check_error_status_and_message(ret, 409, "Provided idempotency key is tied to other input"); // fails with same id and different data - let ret = messages_call("1", vec![42]); + let ret = messages_call("1", vec![42]).await; check_error_status_and_message(ret, 409, "Provided idempotency key is tied to other input"); // fails with different id and different data - let ret = messages_call("42", vec![42]); + let ret = messages_call("42", vec![42]).await; check_error_status_and_message(ret, 409, "Provided idempotency key is tied to other input"); let cache = store.cache.read(); @@ -397,13 +394,13 @@ mod tests { assert_eq!(cached_data.body, "RECEIVED".to_string()); } - #[test] - fn idempotent_create_account() { + #[tokio::test] + async fn idempotent_create_account() { let store = test_store(); let engine = TestEngine; let api = create_settlement_engine_filter(engine, store.clone()); - let create_account_call = move |id: &str| { + let create_account_call = |id: &str| { warp::test::request() .method("POST") .path("/accounts") @@ -412,17 +409,17 @@ mod 
tests { .reply(&api) }; - let ret = create_account_call("1"); + let ret = create_account_call("1").await; assert_eq!(ret.status().as_u16(), StatusCode::CREATED); assert_eq!(ret.body(), "CREATED"); // is idempotent - let ret = create_account_call("1"); + let ret = create_account_call("1").await; assert_eq!(ret.status().as_u16(), StatusCode::CREATED); assert_eq!(ret.body(), "CREATED"); // fails with different id - let ret = create_account_call("42"); + let ret = create_account_call("42").await; check_error_status_and_message(ret, 409, "Provided idempotency key is tied to other input"); let cache = store.cache.read(); @@ -434,13 +431,13 @@ mod tests { assert_eq!(cached_data.body, "CREATED".to_string()); } - #[test] - fn idempotent_delete_account() { + #[tokio::test] + async fn idempotent_delete_account() { let store = test_store(); let engine = TestEngine; let api = create_settlement_engine_filter(engine, store.clone()); - let delete_account_call = move |id: &str| { + let delete_account_call = |id: &str| { warp::test::request() .method("DELETE") .path(&format!("/accounts/{}", id)) @@ -448,17 +445,17 @@ mod tests { .reply(&api) }; - let ret = delete_account_call("1"); + let ret = delete_account_call("1").await; assert_eq!(ret.status(), StatusCode::NO_CONTENT); assert_eq!(ret.body(), "DELETED"); // is idempotent - let ret = delete_account_call("1"); + let ret = delete_account_call("1").await; assert_eq!(ret.status(), StatusCode::NO_CONTENT); assert_eq!(ret.body(), "DELETED"); // fails with different id - let ret = delete_account_call("42"); + let ret = delete_account_call("42").await; check_error_status_and_message(ret, 409, "Provided idempotency key is tied to other input"); let cache = store.cache.read(); diff --git a/crates/interledger-settlement/src/core/idempotency.rs b/crates/interledger-settlement/src/core/idempotency.rs index f79972577..909e9fe9f 100644 --- a/crates/interledger-settlement/src/core/idempotency.rs +++ 
b/crates/interledger-settlement/src/core/idempotency.rs @@ -1,10 +1,8 @@ -use super::types::ApiResponse; +use super::types::{ApiResponse, ApiResult}; +use async_trait::async_trait; use bytes::Bytes; -use futures::executor::spawn; -use futures::{ - future::{err, ok, Either}, - Future, -}; +use futures::Future; +use futures::TryFutureExt; use http::StatusCode; use interledger_http::error::*; use log::error; @@ -26,64 +24,65 @@ impl IdempotentData { } } +#[async_trait] pub trait IdempotentStore { /// Returns the API response that was saved when the idempotency key was used /// Also returns a hash of the input data which resulted in the response - fn load_idempotent_data( + async fn load_idempotent_data( &self, idempotency_key: String, - ) -> Box, Error = ()> + Send>; + ) -> Result, ()>; /// Saves the data that was passed along with the api request for later /// The store MUST also save a hash of the input, so that it errors out on requests - fn save_idempotent_data( + async fn save_idempotent_data( &self, idempotency_key: String, input_hash: [u8; 32], status_code: StatusCode, data: Bytes, - ) -> Box + Send>; + ) -> Result<(), ()>; } // Helper function that returns any idempotent data that corresponds to a // provided idempotency key. It fails if the hash of the input that // generated the idempotent data does not match the hash of the provided input. -fn check_idempotency( +async fn check_idempotency( store: S, idempotency_key: String, input_hash: [u8; 32], -) -> impl Future, Error = ApiError> +) -> Result, ApiError> where S: IdempotentStore + Clone + Send + Sync + 'static, { - store + let ret: Option = store .load_idempotent_data(idempotency_key.clone()) .map_err(move |_| IDEMPOTENT_STORE_CALL_ERROR.clone()) - .and_then(move |ret: Option| { - if let Some(ret) = ret { - // Check if the hash (ret.2) of the loaded idempotent data matches the hash - // of the provided input data. 
If not, we should error out since - // the caller provided an idempotency key that was used for a - // different input. - if ret.input_hash == input_hash { - Ok(Some((ret.status, ret.body))) - } else { - Ok(Some(( - StatusCode::from_u16(409).unwrap(), - Bytes::from(IDEMPOTENCY_CONFLICT_ERR), - ))) - } - } else { - Ok(None) - } - }) + .await?; + + if let Some(ret) = ret { + // Check if the hash (ret.2) of the loaded idempotent data matches the hash + // of the provided input data. If not, we should error out since + // the caller provided an idempotency key that was used for a + // different input. + if ret.input_hash == input_hash { + Ok(Some((ret.status, ret.body))) + } else { + Ok(Some(( + StatusCode::from_u16(409).unwrap(), + Bytes::from(IDEMPOTENCY_CONFLICT_ERR), + ))) + } + } else { + Ok(None) + } } // make_idempotent_call takes a function instead of direct arguments so that we // can reuse it for both the messages and the settlements calls -pub fn make_idempotent_call( +pub async fn make_idempotent_call( store: S, - non_idempotent_function: F, + non_idempotent_function: impl FnOnce() -> F, input_hash: [u8; 32], idempotency_key: Option, // As per the spec, the success status code is independent of the @@ -91,88 +90,78 @@ pub fn make_idempotent_call( status_code: StatusCode, // The default value is used when the engine returns a default return type default_return_value: Bytes, -) -> impl Future +) -> Result<(StatusCode, Bytes), ApiError> where - F: FnOnce() -> Box + Send>, + F: Future, S: IdempotentStore + Clone + Send + Sync + 'static, { if let Some(idempotency_key) = idempotency_key { // If there an idempotency key was provided, check idempotency - // and the key was not present or conflicting with an existing - // key, perform the call and save the idempotent return data - Either::A( - check_idempotency(store.clone(), idempotency_key.clone(), input_hash).and_then( - move |ret: Option<(StatusCode, Bytes)>| { - if let Some(ret) = ret { - if ret.0.is_success() 
{ - Either::A(Either::A(ok((ret.0, ret.1)))) - } else { - let err_msg = ApiErrorType { - r#type: &ProblemType::Default, - status: ret.0, - title: "Idempotency Error", - }; - // if check_idempotency returns an error, then it - // has to be an idempotency error - let ret_error = ApiError::from_api_error_type(&err_msg) - .detail(String::from_utf8_lossy(&ret.1).to_string()); - Either::A(Either::B(err(ret_error))) + match check_idempotency(store.clone(), idempotency_key.clone(), input_hash).await? { + Some(ret) => { + if ret.0.is_success() { + // Return an OK response if the idempotent call was successful + Ok((ret.0, ret.1)) + } else { + // Return an HTTP Error otherwise + let err_msg = ApiErrorType { + r#type: &ProblemType::Default, + status: ret.0, + title: "Idempotency Error", + }; + // if check_idempotency returns an error, then it + // has to be an idempotency error + let ret_error = ApiError::from_api_error_type(&err_msg) + .detail(String::from_utf8_lossy(&ret.1).to_string()); + Err(ret_error) + } + } + None => { + // If there was no previous entry, make the idempotent call and save it + // Note: The error is also saved idempotently + let ret = match non_idempotent_function().await { + Ok(r) => r, + Err(ret) => { + let status_code = ret.status; + let data = Bytes::from(ret.detail.clone().unwrap_or_default()); + if store + .save_idempotent_data(idempotency_key, input_hash, status_code, data) + .await + .is_err() + { + // Should we be panicking here instead? + error!("Failed to connect to the store! The request will not be idempotent if retried.") } - } else { - Either::B( - non_idempotent_function().map_err({ - let store = store.clone(); - let idempotency_key = idempotency_key.clone(); - move |ret: ApiError| { - let status_code = ret.status; - let data = Bytes::from(ret.detail.clone().unwrap_or_default()); - spawn(store.save_idempotent_data( - idempotency_key, - input_hash, - status_code, - data, - ).map_err(move |_| error!("Failed to connect to the store! 
The request will not be idempotent if retried."))); - ret - }}) - .map(move |ret| { - let data = match ret { - ApiResponse::Default => default_return_value, - ApiResponse::Data(d) => d, - }; - (status_code, data) - }).and_then( - move |ret: (StatusCode, Bytes)| { - store - .save_idempotent_data( - idempotency_key, - input_hash, - ret.0, - ret.1.clone(), - ) - .map_err(move |_| { - error!("Failed to connect to the store! The request will not be idempotent if retried."); - IDEMPOTENT_STORE_CALL_ERROR.clone() - }) - .and_then(move |_| Ok((ret.0, ret.1))) - }, - ), - ) + return Err(ret); } - }, - ), - ) + }; + + // NOTE: This is bytes 0.4.12. API Response is defined over it. + let data = match ret { + ApiResponse::Default => default_return_value, + ApiResponse::Data(d) => d, + }; + store + .save_idempotent_data( + idempotency_key, + input_hash, + status_code, + data.clone(), + ) + .map_err(move |_| { + error!("Failed to connect to the store! The request will not be idempotent if retried."); + IDEMPOTENT_STORE_CALL_ERROR.clone() + }).await?; + + Ok((status_code, data)) + } + } } else { // otherwise just make the call w/o any idempotency saves - Either::B( - non_idempotent_function() - .map(move |ret| { - let data = match ret { - ApiResponse::Default => default_return_value, - ApiResponse::Data(d) => d, - }; - (status_code, data) - }) - .and_then(move |ret: (StatusCode, Bytes)| Ok((ret.0, ret.1))), - ) + let data = match non_idempotent_function().await? 
{ + ApiResponse::Default => default_return_value, + ApiResponse::Data(d) => d, + }; + Ok((status_code, data)) } } diff --git a/crates/interledger-settlement/src/core/types.rs b/crates/interledger-settlement/src/core/types.rs index 4168a0805..240edda7b 100644 --- a/crates/interledger-settlement/src/core/types.rs +++ b/crates/interledger-settlement/src/core/types.rs @@ -1,5 +1,5 @@ +use async_trait::async_trait; use bytes::Bytes; -use futures::Future; use http::StatusCode; use interledger_http::error::{ApiError, ApiErrorType, ProblemType}; use interledger_packet::Address; @@ -51,30 +51,19 @@ pub enum ApiResponse { Data(Bytes), } +pub type ApiResult = Result; + /// Trait consumed by the Settlement Engine HTTP API. Every settlement engine /// MUST implement this trait, so that it can be then be exposed over the API. +#[async_trait] pub trait SettlementEngine { - fn create_account( - &self, - account_id: String, - ) -> Box + Send>; + async fn create_account(&self, account_id: String) -> ApiResult; - fn delete_account( - &self, - account_id: String, - ) -> Box + Send>; + async fn delete_account(&self, account_id: String) -> ApiResult; - fn send_money( - &self, - account_id: String, - money: Quantity, - ) -> Box + Send>; + async fn send_money(&self, account_id: String, money: Quantity) -> ApiResult; - fn receive_message( - &self, - account_id: String, - message: Vec, - ) -> Box + Send>; + async fn receive_message(&self, account_id: String, message: Vec) -> ApiResult; } // TODO: Since we still haven't finalized all the settlement details, we might @@ -92,56 +81,54 @@ pub trait SettlementAccount: Account { } } +#[async_trait] pub trait SettlementStore { type Account: Account; - fn update_balance_for_incoming_settlement( + async fn update_balance_for_incoming_settlement( &self, account_id: Uuid, amount: u64, idempotency_key: Option, - ) -> Box + Send>; + ) -> Result<(), ()>; - fn refund_settlement( - &self, - account_id: Uuid, - settle_amount: u64, - ) -> Box + Send>; + 
async fn refund_settlement(&self, account_id: Uuid, settle_amount: u64) -> Result<(), ()>; } +#[async_trait] pub trait LeftoversStore { type AccountId: ToString; type AssetType: ToString; /// Saves the leftover data - fn save_uncredited_settlement_amount( + async fn save_uncredited_settlement_amount( &self, // The account id that for which there was a precision loss account_id: Self::AccountId, // The amount for which precision loss occurred, along with their scale uncredited_settlement_amount: (Self::AssetType, u8), - ) -> Box + Send>; + ) -> Result<(), ()>; /// Returns the leftover data scaled to `local_scale` from the saved scale. /// If any precision loss occurs during the scaling, it should be saved as /// the new leftover value. - fn load_uncredited_settlement_amount( + async fn load_uncredited_settlement_amount( &self, account_id: Self::AccountId, local_scale: u8, - ) -> Box + Send>; + ) -> Result; /// Clears any uncredited settlement amount associated with the account - fn clear_uncredited_settlement_amount( + async fn clear_uncredited_settlement_amount( &self, account_id: Self::AccountId, - ) -> Box + Send>; + ) -> Result<(), ()>; // Gets the current amount of leftovers in the store - fn get_uncredited_settlement_amount( + async fn get_uncredited_settlement_amount( &self, account_id: Self::AccountId, - ) -> Box + Send>; + ) -> Result<(Self::AssetType, u8), ()>; } #[derive(Debug)] @@ -234,12 +221,9 @@ mod tests { fn u64_test() { // overflows let huge_number = std::u64::MAX / 10; - assert_eq!( - huge_number - .normalize_scale(ConvertDetails { from: 1, to: 18 }) - .unwrap_err(), - (), - ); + assert!(huge_number + .normalize_scale(ConvertDetails { from: 1, to: 18 }) + .is_err(),); // 1 unit with scale 1, is 1 unit with scale 1 assert_eq!( 1u64.normalize_scale(ConvertDetails { from: 1, to: 1 }) @@ -310,15 +294,12 @@ mod tests { #[test] fn f64_test() { // overflow - assert_eq!( - std::f64::MAX - .normalize_scale(ConvertDetails { - from: 1, - to: std::u8::MAX, - 
}) - .unwrap_err(), - () - ); + assert!(std::f64::MAX + .normalize_scale(ConvertDetails { + from: 1, + to: std::u8::MAX, + }) + .is_err(),); // 1 unit with base 1, is 1 unit with base 1 assert_eq!( diff --git a/crates/interledger-spsp/Cargo.toml b/crates/interledger-spsp/Cargo.toml index c810b46e3..69d83547d 100644 --- a/crates/interledger-spsp/Cargo.toml +++ b/crates/interledger-spsp/Cargo.toml @@ -11,12 +11,15 @@ repository = "https://github.com/interledger-rs/interledger-rs" base64 = { version = "0.10.1", default-features = false } bytes = { version = "0.4.12", default-features = false } failure = { version = "0.1.5", default-features = false } -futures = { version = "0.1.29", default-features = false } -hyper = { version = "0.12.35", default-features = false } +futures = { version = "0.3.1", default-features = false } +hyper = { version = "0.13.1", default-features = false } interledger-packet = { path = "../interledger-packet", version = "^0.4.0", features = ["serde"], default-features = false } interledger-service = { path = "../interledger-service", version = "^0.4.0", default-features = false } interledger-stream = { path = "../interledger-stream", version = "^0.4.0", default-features = false } log = { version = "0.4.8", default-features = false } -reqwest = { version = "0.9.22", default-features = false, features = ["default-tls"] } +reqwest = { version = "0.10", default-features = false, features = ["default-tls", "json"] } serde = { version = "1.0.101", default-features = false } serde_json = { version = "1.0.41", default-features = false } + +[dev-dependencies] +tokio = { version = "0.2.8", features = ["macros"] } diff --git a/crates/interledger-spsp/src/client.rs b/crates/interledger-spsp/src/client.rs index 0eaf396b3..4b26b8df6 100644 --- a/crates/interledger-spsp/src/client.rs +++ b/crates/interledger-spsp/src/client.rs @@ -1,66 +1,65 @@ use super::{Error, SpspResponse}; -use futures::{future::result, Future}; +use futures::TryFutureExt; use 
interledger_packet::Address; use interledger_service::{Account, IncomingService}; use interledger_stream::{send_money, StreamDelivery}; use log::{debug, error, trace}; -use reqwest::r#async::Client; +use reqwest::Client; use std::convert::TryFrom; -pub fn query(server: &str) -> impl Future { +pub async fn query(server: &str) -> Result { let server = payment_pointer_to_url(server); trace!("Querying receiver: {}", server); let client = Client::new(); - client + let res = client .get(&server) .header("Accept", "application/spsp4+json") .send() .map_err(|err| Error::HttpError(format!("Error querying SPSP receiver: {:?}", err))) - .and_then(|res| { - res.error_for_status() - .map_err(|err| Error::HttpError(format!("Error querying SPSP receiver: {:?}", err))) - }) - .and_then(|mut res| { - res.json::() - .map_err(|err| Error::InvalidSpspServerResponseError(format!("{:?}", err))) - }) + .await?; + + let res = res + .error_for_status() + .map_err(|err| Error::HttpError(format!("Error querying SPSP receiver: {:?}", err)))?; + + res.json::() + .map_err(|err| Error::InvalidSpspServerResponseError(format!("{:?}", err))) + .await } /// Query the details of the given Payment Pointer and send a payment using the STREAM protocol. /// /// This returns the amount delivered, as reported by the receiver and in the receiver's asset's units. 
-pub fn pay( +pub async fn pay( service: S, from_account: A, receiver: &str, source_amount: u64, -) -> impl Future +) -> Result where S: IncomingService + Clone, A: Account, { - query(receiver).and_then(move |spsp| { - let shared_secret = spsp.shared_secret; - let dest = spsp.destination_account; - result(Address::try_from(dest).map_err(move |err| { - error!("Error parsing address"); - Error::InvalidSpspServerResponseError(err.to_string()) - })) - .and_then(move |addr| { - debug!("Sending SPSP payment to address: {}", addr); + let spsp = query(receiver).await?; + let shared_secret = spsp.shared_secret; + let dest = spsp.destination_account; + let addr = Address::try_from(dest).map_err(move |err| { + error!("Error parsing address"); + Error::InvalidSpspServerResponseError(err.to_string()) + })?; + debug!("Sending SPSP payment to address: {}", addr); + + let (receipt, _plugin) = + send_money(service, &from_account, addr, &shared_secret, source_amount) + .map_err(move |err| { + error!("Error sending payment: {:?}", err); + Error::SendMoneyError(source_amount) + }) + .await?; - send_money(service, &from_account, addr, &shared_secret, source_amount) - .map(move |(receipt, _plugin)| { - debug!("Sent SPSP payment. StreamDelivery: {:?}", receipt); - receipt - }) - .map_err(move |err| { - error!("Error sending payment: {:?}", err); - Error::SendMoneyError(source_amount) - }) - }) - }) + debug!("Sent SPSP payment. 
StreamDelivery: {:?}", receipt); + Ok(receipt) } fn payment_pointer_to_url(payment_pointer: &str) -> String { diff --git a/crates/interledger-spsp/src/server.rs b/crates/interledger-spsp/src/server.rs index 65541ceae..06f62d9b9 100644 --- a/crates/interledger-spsp/src/server.rs +++ b/crates/interledger-spsp/src/server.rs @@ -1,12 +1,14 @@ use super::SpspResponse; use bytes::Bytes; -use futures::future::{ok, FutureResult, IntoFuture}; use hyper::{service::Service as HttpService, Body, Error, Request, Response}; use interledger_packet::Address; use interledger_stream::ConnectionGenerator; use log::debug; use std::error::Error as StdError; -use std::{fmt, str}; +use std::{ + fmt, str, + task::{Context, Poll}, +}; /// A Hyper::Service that responds to incoming SPSP Query requests with newly generated /// details for a STREAM connection. @@ -47,24 +49,17 @@ impl SpspResponder { } } -impl HttpService for SpspResponder { - type ReqBody = Body; - type ResBody = Body; +impl HttpService> for SpspResponder { + type Response = Response; type Error = Error; - type Future = FutureResult, Error>; + type Future = futures::future::Ready>; - fn call(&mut self, _request: Request) -> Self::Future { - ok(self.generate_http_response()) + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + Ok(()).into() } -} - -impl IntoFuture for SpspResponder { - type Item = Self; - type Error = Never; - type Future = FutureResult; - fn into_future(self) -> Self::Future { - ok(self) + fn call(&mut self, _request: Request) -> Self::Future { + futures::future::ok(self.generate_http_response()) } } @@ -87,11 +82,10 @@ impl StdError for Never { #[cfg(test)] mod spsp_server_test { use super::*; - use futures::Future; use std::str::FromStr; - #[test] - fn spsp_response_headers() { + #[tokio::test] + async fn spsp_response_headers() { let addr = Address::from_str("example.receiver").unwrap(); let mut responder = SpspResponder::new(addr, Bytes::from(&[0; 32][..])); let response = responder @@ -103,7 
+97,7 @@ mod spsp_server_test { .body(Body::empty()) .unwrap(), ) - .wait() + .await .unwrap(); assert_eq!( response.headers().get("Content-Type").unwrap(), diff --git a/crates/interledger-store/Cargo.toml b/crates/interledger-store/Cargo.toml index 083f1d0ff..a147d1a99 100644 --- a/crates/interledger-store/Cargo.toml +++ b/crates/interledger-store/Cargo.toml @@ -22,7 +22,8 @@ required-features = ["redis"] [dependencies] bytes = { version = "0.4.12", default-features = false } -futures = { version = "0.1.29", default-features = false } +futures01 = { version = "0.1.29", default-features = false } +futures = { version = "0.3", default-features = false, features = ["compat"] } interledger-api = { path = "../interledger-api", version = "^0.3.0", default-features = false } interledger-packet = { path = "../interledger-packet", version = "^0.4.0", default-features = false } interledger-btp = { path = "../interledger-btp", version = "^0.4.0", default-features = false } @@ -39,21 +40,22 @@ parking_lot = { version = "0.9.0", default-features = false } ring = { version = "0.16.9", default-features = false } serde = { version = "1.0.101", default-features = false, features = ["derive"] } serde_json = { version = "1.0.41", default-features = false } -tokio-executor = { version = "0.1.8", default-features = false } -tokio-timer = { version = "0.2.11", default-features = false } +tokio = { version = "0.2.6", default-features = false, features = ["macros", "rt-core"] } url = { version = "2.1.0", default-features = false, features = ["serde"] } -http = { version = "0.1.18", default-features = false } +http = { version = "0.2", default-features = false } secrecy = { version = "0.5.1", default-features = false, features = ["serde", "bytes"] } zeroize = { version = "1.0.0", default-features = false, features = ["bytes"] } num-bigint = { version = "0.2.3", default-features = false, features = ["std"]} uuid = { version = "0.8.1", default-features = false, features = ["serde"] } # 
redis feature -redis_crate = { package = "redis", version = "0.13.0", default-features = false, features = ["executor"], optional = true } +# redis_crate = { package = "redis", version = "0.13.0", default-features = false, features = ["executor"], optional = true } +redis_crate = { package = "redis", git = "https://github.com/mitsuhiko/redis-rs", branch = "master", default-features = false, features = ["tokio-rt-core"], optional = true } +runtime = "0.0.0" +async-trait = "0.1.22" [dev-dependencies] env_logger = { version = "0.7.0", default-features = false } net2 = { version = "0.2.33", default-features = false } rand = { version = "0.7.2", default-features = false } -tokio = { version = "0.1.22", default-features = false } os_type = { version = "2.2", default-features = false } diff --git a/crates/interledger-store/src/redis/mod.rs b/crates/interledger-store/src/redis/mod.rs index c8290dc9c..6340516b8 100644 --- a/crates/interledger-store/src/redis/mod.rs +++ b/crates/interledger-store/src/redis/mod.rs @@ -19,12 +19,9 @@ use reconnect::RedisReconnect; use super::account::{Account, AccountWithEncryptedTokens}; use super::crypto::{encrypt_token, generate_keys, DecryptionKey, EncryptionKey}; +use async_trait::async_trait; use bytes::Bytes; -use futures::{ - future::{err, ok, result, Either}, - sync::mpsc::UnboundedSender, - Future, Stream, -}; +use futures::{channel::mpsc::UnboundedSender, FutureExt}; use http::StatusCode; use interledger_api::{AccountDetails, AccountSettings, EncryptedAccountSettings, NodeStore}; use interledger_btp::BtpStore; @@ -48,7 +45,7 @@ use num_bigint::BigUint; use parking_lot::RwLock; use redis_crate::{ self, cmd, from_redis_value, Client, ConnectionInfo, ControlFlow, ErrorKind, FromRedisValue, - PipelineCommands, PubSubCommands, RedisError, RedisWrite, Script, ToRedisArgs, Value, + PubSubCommands, RedisError, RedisWrite, Script, ToRedisArgs, Value, }; use secrecy::{ExposeSecret, Secret, SecretBytes}; use serde::{Deserialize, 
Serialize}; @@ -64,8 +61,6 @@ use std::{ sync::Arc, time::{Duration, Instant}, }; -use tokio_executor::spawn; -use tokio_timer::Interval; use url::Url; use uuid::Uuid; use zeroize::Zeroize; @@ -128,6 +123,9 @@ pub struct RedisStoreBuilder { node_ilp_address: Address, } +use futures::stream::{StreamExt, TryStreamExt}; +use redis_crate::AsyncCommands; + impl RedisStoreBuilder { pub fn new(redis_url: ConnectionInfo, secret: [u8; 32]) -> Self { RedisStoreBuilder { @@ -148,126 +146,121 @@ impl RedisStoreBuilder { self } - pub fn connect(&mut self) -> impl Future { + pub async fn connect(&mut self) -> Result { let redis_info = self.redis_url.clone(); let (encryption_key, decryption_key) = generate_keys(&self.secret[..]); self.secret.zeroize(); // clear the secret after it has been used for key generation let poll_interval = self.poll_interval; let ilp_address = self.node_ilp_address.clone(); - RedisReconnect::connect(redis_info.clone()) + let client = Client::open(redis_info.clone()) + .map_err(|err| error!("Error creating subscription Redis client: {:?}", err))?; + debug!("Connected subscription client to redis: {:?}", client); + let mut connection = RedisReconnect::connect(redis_info.clone()) .map_err(|_| ()) - .join( - result(Client::open(redis_info.clone())) - .map_err(|err| error!("Error creating subscription Redis client: {:?}", err)) - .and_then(|client| { - debug!("Connected subscription client to redis: {:?}", client); - client.get_connection().map_err(|err| { - error!("Error connecting subscription client to Redis: {:?}", err) - }) - }), - ) - .and_then(move |(connection, mut sub_connection)| { - // Before initializing the store, check if we have an address - // that was configured due to adding a parent. 
If no parent was - // found, use the builder's provided address (local.host) or the - // one we decided to override it with - redis_crate::cmd("GET") - .arg(PARENT_ILP_KEY) - .query_async(connection.clone()) - .map_err(|err| { - error!( - "Error checking whether we have a parent configured: {:?}", - err - ) - }) - .and_then(move |(_, address): (RedisReconnect, Option)| { - Ok(if let Some(address) = address { - Address::from_str(&address).unwrap() - } else { - ilp_address - }) - }) - .and_then(move |node_ilp_address| { - let store = RedisStore { - ilp_address: Arc::new(RwLock::new(node_ilp_address)), - connection, - subscriptions: Arc::new(RwLock::new(HashMap::new())), - exchange_rates: Arc::new(RwLock::new(HashMap::new())), - routes: Arc::new(RwLock::new(Arc::new(HashMap::new()))), - encryption_key: Arc::new(encryption_key), - decryption_key: Arc::new(decryption_key), - }; - - // Poll for routing table updates - // Note: if this behavior changes, make sure to update the Drop implementation - let connection_clone = Arc::downgrade(&store.connection.conn); - let redis_info = store.connection.redis_info.clone(); - let routing_table = store.routes.clone(); - let poll_routes = - Interval::new(Instant::now(), Duration::from_millis(poll_interval)) - .map_err(|err| error!("Interval error: {:?}", err)) - .for_each(move |_| { - if let Some(conn) = connection_clone.upgrade() { - Either::A(update_routes( - RedisReconnect { - conn, - redis_info: redis_info.clone(), - }, - routing_table.clone(), - )) - } else { - debug!("Not polling routes anymore because connection was closed"); - // TODO make sure the interval stops - Either::B(err(())) - } - }); - spawn(poll_routes); - - // Here we spawn a worker thread to listen for incoming messages on Redis pub/sub, - // running a callback for each message received. 
- // This currently must be a thread rather than a task due to the redis-rs driver - // not yet supporting asynchronous subscriptions (see https://github.com/mitsuhiko/redis-rs/issues/183). - let subscriptions_clone = store.subscriptions.clone(); - std::thread::spawn(move || { - let sub_status = - sub_connection.psubscribe::<_, _, Vec>(&["*"], move |msg| { - let channel_name = msg.get_channel_name(); - if channel_name.starts_with(STREAM_NOTIFICATIONS_PREFIX) { - if let Ok(account_id) = Uuid::from_str(&channel_name[STREAM_NOTIFICATIONS_PREFIX.len()..]) { - let message: PaymentNotification = match serde_json::from_slice(msg.get_payload_bytes()) { - Ok(s) => s, - Err(e) => { - error!("Failed to get payload from subscription: {}", e); - return ControlFlow::Continue; - } - }; - trace!("Subscribed message received for account {}: {:?}", account_id, message); - match subscriptions_clone.read().get(&account_id) { - Some(sender) => { - if let Err(err) = sender.unbounded_send(message) { - error!("Failed to send message: {}", err); - } - } - None => trace!("Ignoring message for account {} because there were no open subscriptions", account_id), - } - } else { - error!("Invalid Uuid in channel name: {}", channel_name); - } - } else { - warn!("Ignoring unexpected message from Redis subscription for channel: {}", channel_name); + .await?; + let mut sub_connection = client + .get_connection() + .map_err(|err| error!("Error connecting subscription client to Redis: {:?}", err))?; + // Before initializing the store, check if we have an address + // that was configured due to adding a parent. 
If no parent was + // found, use the builder's provided address (local.host) or the + // one we decided to override it with + let address: Option = connection + .get(PARENT_ILP_KEY) + .map_err(|err| { + error!( + "Error checking whether we have a parent configured: {:?}", + err + ) + }) + .await?; + let node_ilp_address = if let Some(address) = address { + Address::from_str(&address).unwrap() + } else { + ilp_address + }; + + let store = RedisStore { + ilp_address: Arc::new(RwLock::new(node_ilp_address)), + connection, + subscriptions: Arc::new(RwLock::new(HashMap::new())), + exchange_rates: Arc::new(RwLock::new(HashMap::new())), + routes: Arc::new(RwLock::new(Arc::new(HashMap::new()))), + encryption_key: Arc::new(encryption_key), + decryption_key: Arc::new(decryption_key), + }; + + // Poll for routing table updates + // Note: if this behavior changes, make sure to update the Drop implementation + let connection_clone = Arc::downgrade(&store.connection.conn); + let redis_info = store.connection.redis_info.clone(); + let routing_table = store.routes.clone(); + + let poll_routes = async move { + let mut interval = tokio::time::interval(Duration::from_millis(poll_interval)); + // Irrefutable while pattern, can we do something here? + while let _ = interval.tick().await { + if let Some(conn) = connection_clone.upgrade() { + update_routes( + RedisReconnect { + conn, + redis_info: redis_info.clone(), + }, + routing_table.clone(), + ) + .await; + } else { + debug!("Not polling routes anymore because connection was closed"); + break; + } + } + Ok::<(), ()>(()) + }; + tokio::spawn(poll_routes); + + // Here we spawn a worker thread to listen for incoming messages on Redis pub/sub, + // running a callback for each message received. + // This currently must be a thread rather than a task due to the redis-rs driver + // not yet supporting asynchronous subscriptions (see https://github.com/mitsuhiko/redis-rs/issues/183). 
+ let subscriptions_clone = store.subscriptions.clone(); + std::thread::spawn(move || { + let sub_status = + sub_connection.psubscribe::<_, _, Vec>(&["*"], move |msg| { + let channel_name = msg.get_channel_name(); + if channel_name.starts_with(STREAM_NOTIFICATIONS_PREFIX) { + if let Ok(account_id) = Uuid::from_str(&channel_name[STREAM_NOTIFICATIONS_PREFIX.len()..]) { + let message: PaymentNotification = match serde_json::from_slice(msg.get_payload_bytes()) { + Ok(s) => s, + Err(e) => { + error!("Failed to get payload from subscription: {}", e); + return ControlFlow::Continue; + } + }; + trace!("Subscribed message received for account {}: {:?}", account_id, message); + match subscriptions_clone.read().get(&account_id) { + Some(sender) => { + if let Err(err) = sender.unbounded_send(message) { + error!("Failed to send message: {}", err); } - ControlFlow::Continue - }); - match sub_status { - Err(e) => warn!("Could not issue psubscribe to Redis: {}", e), - Ok(_) => debug!("Successfully subscribed to Redis pubsub"), + } + None => trace!("Ignoring message for account {} because there were no open subscriptions", account_id), } - }); + } else { + error!("Invalid Uuid in channel name: {}", channel_name); + } + } else { + warn!("Ignoring unexpected message from Redis subscription for channel: {}", channel_name); + } + ControlFlow::Continue + }); + match sub_status { + Err(e) => warn!("Could not issue psubscribe to Redis: {}", e), + Ok(_) => debug!("Successfully subscribed to Redis pubsub"), + } + }); - Ok(store) - }) - }) + Ok(store) } } @@ -295,24 +288,22 @@ pub struct RedisStore { } impl RedisStore { - fn get_all_accounts_ids(&self) -> impl Future, Error = ()> { - let mut pipe = redis_crate::pipe(); - pipe.smembers("accounts"); - pipe.query_async(self.connection.clone()) + async fn get_all_accounts_ids(&self) -> Result, ()> { + let mut connection = self.connection.clone(); + let account_ids: Vec = connection + .smembers("accounts") .map_err(|err| error!("Error getting 
account IDs: {:?}", err)) - .and_then(|(_conn, account_ids): (_, Vec>)| { - let account_ids: Vec = account_ids[0].iter().map(|rid| rid.0).collect(); - Ok(account_ids) - }) + .await?; + Ok(account_ids.iter().map(|rid| rid.0).collect()) } - fn redis_insert_account( - &self, + async fn redis_insert_account( + &mut self, encrypted: AccountWithEncryptedTokens, - ) -> Box + Send> { + ) -> Result { let account = encrypted.account.clone(); let ret = encrypted.clone(); - let connection = self.connection.clone(); + let mut connection = self.connection.clone(); let routing_table = self.routes.clone(); // Check that there isn't already an account with values that MUST be unique let mut pipe = redis_crate::pipe(); @@ -322,153 +313,168 @@ impl RedisStore { pipe.exists(PARENT_ILP_KEY); } - Box::new(pipe.query_async(connection.clone()) + let results: Vec = pipe + .query_async(&mut connection.clone()) .map_err(|err| { - error!("Error checking whether account details already exist: {:?}", err) - }) - .and_then( - move |(connection, results): (RedisReconnect, Vec)| { - if results.iter().any(|val| *val) { - warn!("An account already exists with the same {}. Cannot insert account: {:?}", account.id, account); - Err(()) - } else { - Ok((connection, account)) - } + error!( + "Error checking whether account details already exist: {:?}", + err + ) }) - .and_then(move |(connection, account)| { - let mut pipe = redis_crate::pipe(); - pipe.atomic(); + .await?; + if results.iter().any(|val| *val) { + warn!( + "An account already exists with the same {}. 
Cannot insert account: {:?}", + account.id, account + ); + return Err(()); + } - // Add the account key to the list of accounts - pipe.sadd("accounts", RedisAccountId(account.id)).ignore(); + let mut pipe = redis_crate::pipe(); + pipe.atomic(); - // Save map for Username -> Account ID - pipe.hset("usernames", account.username().as_ref(), RedisAccountId(account.id)).ignore(); + // Add the account key to the list of accounts + pipe.sadd("accounts", RedisAccountId(account.id)).ignore(); - // Set account details - pipe.cmd("HMSET") - .arg(accounts_key(account.id)) - .arg(encrypted).ignore(); + // Save map for Username -> Account ID + pipe.hset( + "usernames", + account.username().as_ref(), + RedisAccountId(account.id), + ) + .ignore(); - // Set balance-related details - pipe.hset_multiple(accounts_key(account.id), &[("balance", 0), ("prepaid_amount", 0)]).ignore(); + // Set account details + pipe.cmd("HMSET") + .arg(accounts_key(account.id)) + .arg(encrypted) + .ignore(); - if account.should_send_routes() { - pipe.sadd("send_routes_to", RedisAccountId(account.id)).ignore(); - } + // Set balance-related details + pipe.hset_multiple( + accounts_key(account.id), + &[("balance", 0), ("prepaid_amount", 0)], + ) + .ignore(); - if account.should_receive_routes() { - pipe.sadd("receive_routes_from", RedisAccountId(account.id)).ignore(); - } + if account.should_send_routes() { + pipe.sadd("send_routes_to", RedisAccountId(account.id)) + .ignore(); + } - if account.ilp_over_btp_url.is_some() { - pipe.sadd("btp_outgoing", RedisAccountId(account.id)).ignore(); - } + if account.should_receive_routes() { + pipe.sadd("receive_routes_from", RedisAccountId(account.id)) + .ignore(); + } - // Add route to routing table - pipe.hset(ROUTES_KEY, account.ilp_address.to_bytes().to_vec(), RedisAccountId(account.id)) - .ignore(); - - // The parent account settings are done via the API. 
We just - // had to check for the existence of a parent - pipe.query_async(connection) - .map_err(|err| error!("Error inserting account into DB: {:?}", err)) - .and_then(move |(connection, _ret): (RedisReconnect, Value)| { - update_routes(connection, routing_table) - }) - .and_then(move |_| { - debug!("Inserted account {} (ILP address: {})", account.id, account.ilp_address); - Ok(ret) - }) - })) - } - - fn redis_update_account( + if account.ilp_over_btp_url.is_some() { + pipe.sadd("btp_outgoing", RedisAccountId(account.id)) + .ignore(); + } + + // Add route to routing table + pipe.hset( + ROUTES_KEY, + account.ilp_address.to_bytes().to_vec(), + RedisAccountId(account.id), + ) + .ignore(); + + // The parent account settings are done via the API. We just + // had to check for the existence of a parent + let _ = pipe + .query_async(&mut connection) + .map_err(|err| error!("Error inserting account into DB: {:?}", err)) + .await?; + + let _ = update_routes(connection, routing_table).await?; + debug!( + "Inserted account {} (ILP address: {})", + account.id, account.ilp_address + ); + Ok(ret) + } + + async fn redis_update_account( &self, encrypted: AccountWithEncryptedTokens, - ) -> Box + Send> { + ) -> Result { let account = encrypted.account.clone(); - let connection = self.connection.clone(); + let mut connection = self.connection.clone(); let routing_table = self.routes.clone(); - Box::new( - // Check to make sure an account with this ID already exists - redis_crate::cmd("EXISTS") - .arg(accounts_key(account.id)) - // TODO this needs to be atomic with the insertions later, - // waiting on #186 - // TODO: Do not allow this update to happen if - // AccountDetails.RoutingRelation == Parent and parent is - // already set - .query_async(connection.clone()) - .map_err(|err| error!("Error checking whether ID exists: {:?}", err)) - .and_then(move |(connection, exists): (RedisReconnect, bool)| { - if !exists { - warn!( - "No account exists with ID {}, cannot update account 
{:?}", - account.id, account - ); - return Either::A(err(())); - } - let mut pipe = redis_crate::pipe(); - pipe.atomic(); - // Add the account key to the list of accounts - pipe.sadd("accounts", RedisAccountId(account.id)).ignore(); + // Check to make sure an account with this ID already exists + // TODO this needs to be atomic with the insertions later, + // waiting on #186 + // TODO: Do not allow this update to happen if + // AccountDetails.RoutingRelation == Parent and parent is + // already set + let exists: bool = connection + .exists(accounts_key(account.id)) + .map_err(|err| error!("Error checking whether ID exists: {:?}", err)) + .await?; + + if !exists { + warn!( + "No account exists with ID {}, cannot update account {:?}", + account.id, account + ); + return Err(()); + } + let mut pipe = redis_crate::pipe(); + pipe.atomic(); - // Set account details - pipe.cmd("HMSET") - .arg(accounts_key(account.id)) - .arg(encrypted.clone()) - .ignore(); + // Add the account key to the list of accounts + pipe.sadd("accounts", RedisAccountId(account.id)).ignore(); - if account.should_send_routes() { - pipe.sadd("send_routes_to", RedisAccountId(account.id)) - .ignore(); - } + // Set account details + pipe.cmd("HMSET") + .arg(accounts_key(account.id)) + .arg(encrypted.clone()) + .ignore(); - if account.should_receive_routes() { - pipe.sadd("receive_routes_from", RedisAccountId(account.id)) - .ignore(); - } + if account.should_send_routes() { + pipe.sadd("send_routes_to", RedisAccountId(account.id)) + .ignore(); + } - if account.ilp_over_btp_url.is_some() { - pipe.sadd("btp_outgoing", RedisAccountId(account.id)) - .ignore(); - } + if account.should_receive_routes() { + pipe.sadd("receive_routes_from", RedisAccountId(account.id)) + .ignore(); + } - // Add route to routing table - pipe.hset( - ROUTES_KEY, - account.ilp_address.to_bytes().to_vec(), - RedisAccountId(account.id), - ) - .ignore(); - - Either::B( - pipe.query_async(connection) - .map_err(|err| error!("Error 
inserting account into DB: {:?}", err)) - .and_then(move |(connection, _ret): (RedisReconnect, Value)| { - update_routes(connection, routing_table) - }) - .and_then(move |_| { - debug!( - "Inserted account {} (id: {}, ILP address: {})", - account.username, account.id, account.ilp_address - ); - Ok(encrypted) - }), - ) - }), + if account.ilp_over_btp_url.is_some() { + pipe.sadd("btp_outgoing", RedisAccountId(account.id)) + .ignore(); + } + + // Add route to routing table + pipe.hset( + ROUTES_KEY, + account.ilp_address.to_bytes().to_vec(), + RedisAccountId(account.id), ) + .ignore(); + + let _ = pipe + .query_async(&mut connection) + .map_err(|err| error!("Error inserting account into DB: {:?}", err)) + .await?; + let _ = update_routes(connection, routing_table).await?; + debug!( + "Inserted account {} (id: {}, ILP address: {})", + account.username, account.id, account.ilp_address + ); + Ok(encrypted) } - fn redis_modify_account( + async fn redis_modify_account( &self, id: Uuid, settings: EncryptedAccountSettings, - ) -> Box + Send> { + ) -> Result { let connection = self.connection.clone(); - let self_clone = self.clone(); + let mut self_clone = self.clone(); let mut pipe = redis_crate::pipe(); pipe.atomic(); @@ -521,135 +527,114 @@ impl RedisStore { pipe.hset(accounts_key(id), "settle_to", settle_to); } - Box::new( - pipe.query_async(connection.clone()) - .map_err(|err| error!("Error modifying user account: {:?}", err)) - .and_then(move |(_connection, _ret): (RedisReconnect, Value)| { - // return the updated account - self_clone.redis_get_account(id) - }), - ) + let _ = pipe + .query_async(&mut connection.clone()) + .map_err(|err| error!("Error modifying user account: {:?}", err)) + .await?; + + // return the updated account + self_clone.redis_get_account(id).await } - fn redis_get_account( - &self, - id: Uuid, - ) -> Box + Send> { - Box::new( - LOAD_ACCOUNTS - .arg(id.to_string()) - .invoke_async(self.connection.clone()) - .map_err(|err| error!("Error loading 
accounts: {:?}", err)) - .and_then(|(_, mut accounts): (_, Vec)| { - accounts.pop().ok_or(()) - }), - ) + async fn redis_get_account(&mut self, id: Uuid) -> Result { + let mut accounts: Vec = LOAD_ACCOUNTS + .arg(id.to_string()) + .invoke_async(&mut self.connection.clone()) + .map_err(|err| error!("Error loading accounts: {:?}", err)) + .await?; + accounts.pop().ok_or(()) } - fn redis_delete_account( - &self, - id: Uuid, - ) -> Box + Send> { - let connection = self.connection.clone(); + async fn redis_delete_account(&mut self, id: Uuid) -> Result { + let mut connection = self.connection.clone(); let routing_table = self.routes.clone(); - Box::new(self.redis_get_account(id).and_then(move |encrypted| { - let account = encrypted.account.clone(); - let mut pipe = redis_crate::pipe(); - pipe.atomic(); - - pipe.srem("accounts", RedisAccountId(account.id)).ignore(); + let encrypted = self.redis_get_account(id).await?; + let account = encrypted.account.clone(); + let mut pipe = redis_crate::pipe(); + pipe.atomic(); - pipe.del(accounts_key(account.id)).ignore(); - pipe.hdel("usernames", account.username().as_ref()).ignore(); + pipe.srem("accounts", RedisAccountId(account.id)).ignore(); - if account.should_send_routes() { - pipe.srem("send_routes_to", RedisAccountId(account.id)) - .ignore(); - } + pipe.del(accounts_key(account.id)).ignore(); + pipe.hdel("usernames", account.username().as_ref()).ignore(); - if account.should_receive_routes() { - pipe.srem("receive_routes_from", RedisAccountId(account.id)) - .ignore(); - } + if account.should_send_routes() { + pipe.srem("send_routes_to", RedisAccountId(account.id)) + .ignore(); + } - if account.ilp_over_btp_url.is_some() { - pipe.srem("btp_outgoing", RedisAccountId(account.id)) - .ignore(); - } + if account.should_receive_routes() { + pipe.srem("receive_routes_from", RedisAccountId(account.id)) + .ignore(); + } - pipe.hdel(ROUTES_KEY, account.ilp_address.to_bytes().to_vec()) + if account.ilp_over_btp_url.is_some() { + 
pipe.srem("btp_outgoing", RedisAccountId(account.id)) .ignore(); + } + + pipe.hdel(ROUTES_KEY, account.ilp_address.to_bytes().to_vec()) + .ignore(); - pipe.del(uncredited_amount_key(id)); + pipe.del(uncredited_amount_key(id)); - pipe.query_async(connection) - .map_err(|err| error!("Error deleting account from DB: {:?}", err)) - .and_then(move |(connection, _ret): (RedisReconnect, Value)| { - update_routes(connection, routing_table) - }) - .and_then(move |_| { - debug!("Deleted account {}", account.id); - Ok(encrypted) - }) - })) + pipe.query_async(&mut connection) + .map_err(|err| error!("Error deleting account from DB: {:?}", err)) + .await?; + + update_routes(connection, routing_table).await?; + debug!("Deleted account {}", account.id); + Ok(encrypted) } } +#[async_trait] impl AccountStore for RedisStore { type Account = Account; // TODO cache results to avoid hitting Redis for each packet - fn get_accounts( - &self, - account_ids: Vec, - ) -> Box, Error = ()> + Send> { + async fn get_accounts(&self, account_ids: Vec) -> Result, ()> { let decryption_key = self.decryption_key.clone(); let num_accounts = account_ids.len(); let mut script = LOAD_ACCOUNTS.prepare_invoke(); for id in account_ids.iter() { script.arg(id.to_string()); } - Box::new( - script - .invoke_async(self.connection.clone()) - .map_err(|err| error!("Error loading accounts: {:?}", err)) - .and_then(move |(_, accounts): (_, Vec)| { - if accounts.len() == num_accounts { - let accounts = accounts - .into_iter() - .map(|account| { - account.decrypt_tokens(&decryption_key.expose_secret().0) - }) - .collect(); - Ok(accounts) - } else { - Err(()) - } - }), - ) + + // Need to clone the connection here to avoid lifetime errors + let connection = self.connection.clone(); + let accounts: Vec = script + .invoke_async(&mut connection.clone()) + .map_err(|err| error!("Error loading accounts: {:?}", err)) + .await?; + + // Decrypt the accounts. 
TODO: This functionality should be + // decoupled from redis so that it gets reused by the other backends + if accounts.len() == num_accounts { + let accounts = accounts + .into_iter() + .map(|account| account.decrypt_tokens(&decryption_key.expose_secret().0)) + .collect(); + Ok(accounts) + } else { + Err(()) + } } - fn get_account_id_from_username( - &self, - username: &Username, - ) -> Box + Send> { + async fn get_account_id_from_username(&self, username: &Username) -> Result { let username = username.clone(); - Box::new( - cmd("HGET") - .arg("usernames") - .arg(username.as_ref()) - .query_async(self.connection.clone()) - .map_err(move |err| error!("Error getting account id: {:?}", err)) - .and_then( - move |(_connection, id): (_, Option)| match id { - Some(rid) => Ok(rid.0), - None => { - debug!("Username not found: {}", username); - Err(()) - } - }, - ), - ) + let mut connection = self.connection.clone(); + let id: Option = connection + .hget("usernames", username.as_ref()) + .map_err(move |err| error!("Error getting account id: {:?}", err)) + .await?; + match id { + Some(rid) => Ok(rid.0), + None => { + debug!("Username not found: {}", username); + Err(()) + } + } } } @@ -668,147 +653,146 @@ impl StreamNotificationsStore for RedisStore { fn publish_payment_notification(&self, payment: PaymentNotification) { let username = payment.to_username.clone(); let message = serde_json::to_string(&payment).unwrap(); - let connection = self.connection.clone(); - spawn( - self.get_account_id_from_username(&username) - .map_err(move |_| { + let mut connection = self.connection.clone(); + let self_clone = self.clone(); + tokio::spawn(async move { + let account_id = self_clone + .get_account_id_from_username(&username) + .map_err(|_| { error!( "Failed to find account ID corresponding to username: {}", username ) }) - .and_then(move |account_id| { - debug!( - "Publishing payment notification {} for account {}", - message, account_id - ); - redis_crate::cmd("PUBLISH") - 
.arg(format!("{}{}", STREAM_NOTIFICATIONS_PREFIX, account_id)) - .arg(message) - .query_async(connection) - .map_err(move |err| error!("Error publish message to Redis: {:?}", err)) - .and_then(move |(_, _): (_, i32)| Ok(())) - }), - ); + .await?; + + debug!( + "Publishing payment notification {} for account {}", + message, account_id + ); + // https://github.com/rust-lang/rust/issues/64960#issuecomment-544219926 + let published_args = format!("{}{}", STREAM_NOTIFICATIONS_PREFIX, account_id.clone()); + redis_crate::cmd("PUBLISH") + .arg(published_args) + .arg(message) + .query_async(&mut connection) + .map_err(move |err| error!("Error publish message to Redis: {:?}", err)) + .await?; + + Ok::<(), ()>(()) + }); } } +#[async_trait] impl BalanceStore for RedisStore { /// Returns the balance **from the account holder's perspective**, meaning the sum of /// the Payable Balance and Pending Outgoing minus the Receivable Balance and the Pending Incoming. - fn get_balance(&self, account: Account) -> Box + Send> { - Box::new( - cmd("HMGET") - .arg(accounts_key(account.id)) - .arg(&["balance", "prepaid_amount"]) - .query_async(self.connection.clone()) - .map_err(move |err| { - error!( - "Error getting balance for account: {} {:?}", - account.id, err - ) - }) - .and_then(|(_connection, values): (_, Vec)| { - let balance = values[0]; - let prepaid_amount = values[1]; - Ok(balance + prepaid_amount) - }), - ) + async fn get_balance(&self, account: Account) -> Result { + let mut connection = self.connection.clone(); + let values: Vec = connection + .hget(accounts_key(account.id), &["balance", "prepaid_amount"]) + .map_err(move |err| { + error!( + "Error getting balance for account: {} {:?}", + account.id, err + ) + }) + .await?; + + let balance = values[0]; + let prepaid_amount = values[1]; + Ok(balance + prepaid_amount) } - fn update_balances_for_prepare( + async fn update_balances_for_prepare( &self, from_account: Account, // TODO: Make this take only the id 
incoming_amount: u64, - ) -> Box + Send> { - if incoming_amount > 0 { - let from_account_id = from_account.id; - Box::new( - PROCESS_PREPARE - .arg(RedisAccountId(from_account_id)) - .arg(incoming_amount) - .invoke_async(self.connection.clone()) - .map_err(move |err| { - warn!( - "Error handling prepare from account: {}: {:?}", - from_account_id, err - ) - }) - .and_then(move |(_connection, balance): (_, i64)| { - trace!( - "Processed prepare with incoming amount: {}. Account {} has balance (including prepaid amount): {} ", - incoming_amount, from_account_id, balance - ); - Ok(()) - }), - ) - } else { - Box::new(ok(())) + ) -> Result<(), ()> { + // Don't do anything if the amount was 0 + if incoming_amount == 0 { + return Ok(()); } + + let from_account_id = from_account.id; + let balance: i64 = PROCESS_PREPARE + .arg(RedisAccountId(from_account_id)) + .arg(incoming_amount) + .invoke_async(&mut self.connection.clone()) + .map_err(move |err| { + warn!( + "Error handling prepare from account: {}: {:?}", + from_account_id, err + ) + }) + .await?; + + trace!( + "Processed prepare with incoming amount: {}. Account {} has balance (including prepaid amount): {} ", + incoming_amount, from_account_id, balance + ); + Ok(()) } - fn update_balances_for_fulfill( + async fn update_balances_for_fulfill( &self, to_account: Account, // TODO: Make this take only the id outgoing_amount: u64, - ) -> Box + Send> { - if outgoing_amount > 0 { - let to_account_id = to_account.id; - Box::new( - PROCESS_FULFILL - .arg(RedisAccountId(to_account_id)) - .arg(outgoing_amount) - .invoke_async(self.connection.clone()) - .map_err(move |err| { - error!( - "Error handling Fulfill received from account: {}: {:?}", - to_account_id, err - ) - }) - .and_then(move |(_connection, (balance, amount_to_settle)): (_, (i64, u64))| { - trace!("Processed fulfill for account {} for outgoing amount {}. 
Fulfill call result: {} {}", - to_account_id, - outgoing_amount, - balance, - amount_to_settle, - ); - Ok((balance, amount_to_settle)) - }) - ) - } else { - Box::new(ok((0, 0))) + ) -> Result<(i64, u64), ()> { + if outgoing_amount == 0 { + return Ok((0, 0)); } + let to_account_id = to_account.id; + let (balance, amount_to_settle): (i64, u64) = PROCESS_FULFILL + .arg(RedisAccountId(to_account_id)) + .arg(outgoing_amount) + .invoke_async(&mut self.connection.clone()) + .map_err(move |err| { + error!( + "Error handling Fulfill received from account: {}: {:?}", + to_account_id, err + ) + }) + .await?; + + trace!( + "Processed fulfill for account {} for outgoing amount {}. Fulfill call result: {} {}", + to_account_id, + outgoing_amount, + balance, + amount_to_settle, + ); + Ok((balance, amount_to_settle)) } - fn update_balances_for_reject( + async fn update_balances_for_reject( &self, from_account: Account, // TODO: Make this take only the id incoming_amount: u64, - ) -> Box + Send> { - if incoming_amount > 0 { - let from_account_id = from_account.id; - Box::new( - PROCESS_REJECT - .arg(RedisAccountId(from_account_id)) - .arg(incoming_amount) - .invoke_async(self.connection.clone()) - .map_err(move |err| { - warn!( - "Error handling reject for packet from account: {}: {:?}", - from_account_id, err - ) - }) - .and_then(move |(_connection, balance): (_, i64)| { - trace!( - "Processed reject for incoming amount: {}. 
Account {} has balance (including prepaid amount): {}", - incoming_amount, from_account_id, balance - ); - Ok(()) - }), - ) - } else { - Box::new(ok(())) + ) -> Result<(), ()> { + if incoming_amount == 0 { + return Ok(()); } + + let from_account_id = from_account.id; + let balance: i64 = PROCESS_REJECT + .arg(RedisAccountId(from_account_id)) + .arg(incoming_amount) + .invoke_async(&mut self.connection.clone()) + .map_err(move |err| { + warn!( + "Error handling reject for packet from account: {}: {:?}", + from_account_id, err + ) + }) + .await?; + + trace!( + "Processed reject for incoming amount: {}. Account {} has balance (including prepaid amount): {}", + incoming_amount, from_account_id, balance + ); + Ok(()) } } @@ -840,145 +824,110 @@ impl ExchangeRateStore for RedisStore { } } +#[async_trait] impl BtpStore for RedisStore { type Account = Account; - fn get_account_from_btp_auth( + async fn get_account_from_btp_auth( &self, username: &Username, token: &str, - ) -> Box + Send> { + ) -> Result { // TODO make sure it can't do script injection! // TODO cache the result so we don't hit redis for every packet (is that // necessary if redis is often used as a cache?) 
let decryption_key = self.decryption_key.clone(); + let mut connection = self.connection.clone(); let token = token.to_owned(); - Box::new( - ACCOUNT_FROM_USERNAME - .arg(username.as_ref()) - .invoke_async(self.connection.clone()) - .map_err(|err| error!("Error getting account from BTP token: {:?}", err)) - .and_then( - move |(_connection, account): (_, Option)| { - if let Some(account) = account { - let account = account.decrypt_tokens(&decryption_key.expose_secret().0); - if let Some(t) = account.ilp_over_btp_incoming_token.clone() { - let t = t.expose_secret().clone(); - if t == Bytes::from(token) { - Ok(account) - } else { - debug!( - "Found account {} but BTP auth token was wrong", - account.username - ); - Err(()) - } - } else { - debug!( - "Account {} does not have an incoming btp token configured", - account.username - ); - Err(()) - } - } else { - warn!("No account found with BTP token"); - Err(()) - } - }, - ), - ) + let username = username.to_owned(); // TODO: Can we avoid taking ownership? 
+ + let account: Option = ACCOUNT_FROM_USERNAME + .arg(username.as_ref()) + .invoke_async(&mut connection) + .map_err(|err| error!("Error getting account from BTP token: {:?}", err)) + .await?; + + if let Some(account) = account { + let account = account.decrypt_tokens(&decryption_key.expose_secret().0); + if let Some(t) = account.ilp_over_btp_incoming_token.clone() { + let t = t.expose_secret().clone(); + if t == Bytes::from(token) { + Ok(account) + } else { + debug!( + "Found account {} but BTP auth token was wrong", + account.username + ); + Err(()) + } + } else { + debug!( + "Account {} does not have an incoming btp token configured", + account.username + ); + Err(()) + } + } else { + warn!("No account found with BTP token"); + Err(()) + } } - fn get_btp_outgoing_accounts( - &self, - ) -> Box, Error = ()> + Send> { + async fn get_btp_outgoing_accounts(&self) -> Result, ()> { let decryption_key = self.decryption_key.clone(); - Box::new( - cmd("SMEMBERS") - .arg("btp_outgoing") - .query_async(self.connection.clone()) - .map_err(|err| error!("Error getting members of set btp_outgoing: {:?}", err)) - .and_then( - move |(connection, account_ids): (RedisReconnect, Vec)| { - if account_ids.is_empty() { - Either::A(ok(Vec::new())) - } else { - let mut script = LOAD_ACCOUNTS.prepare_invoke(); - for id in account_ids.iter() { - script.arg(id.to_string()); - } - Either::B( - script - .invoke_async(connection.clone()) - .map_err(|err| { - error!( - "Error getting accounts with outgoing BTP details: {:?}", - err - ) - }) - .and_then( - move |(_connection, accounts): ( - RedisReconnect, - Vec, - )| { - let accounts: Vec = accounts - .into_iter() - .map(|account| { - account.decrypt_tokens( - &decryption_key.expose_secret().0, - ) - }) - .collect(); - Ok(accounts) - }, - ), - ) - } - }, - ), - ) + let mut connection = self.connection.clone(); + + let account_ids: Vec = connection + .smembers("btp_outgoing") + .map_err(|err| error!("Error getting members of set btp_outgoing: 
{:?}", err)) + .await?; + let account_ids: Vec = account_ids.into_iter().map(|id| id.0).collect(); + + if account_ids.is_empty() { + return Ok(Vec::new()); + } + + let accounts = self.get_accounts(account_ids).await?; + Ok(accounts) } } +#[async_trait] impl HttpStore for RedisStore { type Account = Account; /// Checks if the stored token for the provided account id matches the /// provided token, and if so, returns the account associated with that token - fn get_account_from_http_auth( + async fn get_account_from_http_auth( &self, username: &Username, token: &str, - ) -> Box + Send> { + ) -> Result { // TODO make sure it can't do script injection! let decryption_key = self.decryption_key.clone(); let token = token.to_owned(); - Box::new( - ACCOUNT_FROM_USERNAME - .arg(username.as_ref()) - .invoke_async(self.connection.clone()) - .map_err(|err| error!("Error getting account from HTTP auth: {:?}", err)) - .and_then( - move |(_connection, account): (_, Option)| { - if let Some(account) = account { - let account = account.decrypt_tokens(&decryption_key.expose_secret().0); - if let Some(t) = account.ilp_over_http_incoming_token.clone() { - let t = t.expose_secret().clone(); - if t == Bytes::from(token) { - Ok(account) - } else { - Err(()) - } - } else { - Err(()) - } - } else { - warn!("No account found with given HTTP auth"); - Err(()) - } - }, - ), - ) + let account: Option = ACCOUNT_FROM_USERNAME + .arg(username.as_ref()) + .invoke_async(&mut self.connection.clone()) + .map_err(|err| error!("Error getting account from HTTP auth: {:?}", err)) + .await?; + + if let Some(account) = account { + let account = account.decrypt_tokens(&decryption_key.expose_secret().0); + if let Some(t) = account.ilp_over_http_incoming_token.clone() { + let t = t.expose_secret().clone(); + if t == Bytes::from(token) { + Ok(account) + } else { + Err(()) + } + } else { + Err(()) + } + } else { + warn!("No account found with given HTTP auth"); + Err(()) + } } } @@ -988,19 +937,14 @@ impl 
RouterStore for RedisStore { } } +#[async_trait] impl NodeStore for RedisStore { type Account = Account; - fn insert_account( - &self, - account: AccountDetails, - ) -> Box + Send> { + async fn insert_account(&self, account: AccountDetails) -> Result { let encryption_key = self.encryption_key.clone(); let id = Uuid::new_v4(); - let account = match Account::try_from(id, account, self.get_ilp_address()) { - Ok(account) => account, - Err(_) => return Box::new(err(())), - }; + let account = Account::try_from(id, account, self.get_ilp_address())?; debug!( "Generated account id for {}: {}", account.username.clone(), @@ -1009,32 +953,24 @@ impl NodeStore for RedisStore { let encrypted = account .clone() .encrypt_tokens(&encryption_key.expose_secret().0); - Box::new( - self.redis_insert_account(encrypted) - .and_then(move |_| Ok(account)), - ) + let mut self_clone = self.clone(); + + self_clone.redis_insert_account(encrypted).await?; + Ok(account) } - fn delete_account(&self, id: Uuid) -> Box + Send> { + async fn delete_account(&self, id: Uuid) -> Result { let decryption_key = self.decryption_key.clone(); - Box::new( - self.redis_delete_account(id).and_then(move |account| { - Ok(account.decrypt_tokens(&decryption_key.expose_secret().0)) - }), - ) + let mut self_clone = self.clone(); + let account = self_clone.redis_delete_account(id).await?; + Ok(account.decrypt_tokens(&decryption_key.expose_secret().0)) } - fn update_account( - &self, - id: Uuid, - account: AccountDetails, - ) -> Box + Send> { + async fn update_account(&self, id: Uuid, account: AccountDetails) -> Result { let encryption_key = self.encryption_key.clone(); let decryption_key = self.decryption_key.clone(); - let account = match Account::try_from(id, account, self.get_ilp_address()) { - Ok(account) => account, - Err(_) => return Box::new(err(())), - }; + let account = Account::try_from(id, account, self.get_ilp_address())?; + debug!( "Generated account id for {}: {}", account.username.clone(), @@ -1043,19 
+979,16 @@ impl NodeStore for RedisStore { let encrypted = account .clone() .encrypt_tokens(&encryption_key.expose_secret().0); - Box::new( - self.redis_update_account(encrypted) - .and_then(move |account| { - Ok(account.decrypt_tokens(&decryption_key.expose_secret().0)) - }), - ) + + let account = self.redis_update_account(encrypted).await?; + Ok(account.decrypt_tokens(&decryption_key.expose_secret().0)) } - fn modify_account_settings( + async fn modify_account_settings( &self, id: Uuid, settings: AccountSettings, - ) -> Box + Send> { + ) -> Result { let encryption_key = self.encryption_key.clone(); let decryption_key = self.decryption_key.clone(); let settings = EncryptedAccountSettings { @@ -1089,42 +1022,41 @@ impl NodeStore for RedisStore { }), }; - Box::new( - self.redis_modify_account(id, settings) - .and_then(move |account| { - Ok(account.decrypt_tokens(&decryption_key.expose_secret().0)) - }), - ) + let account = self.redis_modify_account(id, settings).await?; + Ok(account.decrypt_tokens(&decryption_key.expose_secret().0)) } // TODO limit the number of results and page through them - fn get_all_accounts(&self) -> Box, Error = ()> + Send> { + async fn get_all_accounts(&self) -> Result, ()> { let decryption_key = self.decryption_key.clone(); - let mut pipe = redis_crate::pipe(); - let connection = self.connection.clone(); - pipe.smembers("accounts"); - Box::new(self.get_all_accounts_ids().and_then(move |account_ids| { - let mut script = LOAD_ACCOUNTS.prepare_invoke(); - for id in account_ids.iter() { - script.arg(id.to_string()); - } - script - .invoke_async(connection.clone()) - .map_err(|err| error!("Error getting account ids: {:?}", err)) - .and_then(move |(_, accounts): (_, Vec)| { - let accounts: Vec = accounts - .into_iter() - .map(|account| account.decrypt_tokens(&decryption_key.expose_secret().0)) - .collect(); - Ok(accounts) - }) - })) + let mut connection = self.connection.clone(); + + let account_ids = self.get_all_accounts_ids().await?; + + let 
mut script = LOAD_ACCOUNTS.prepare_invoke(); + for id in account_ids.iter() { + script.arg(id.to_string()); + } + + let accounts: Vec = script + .invoke_async(&mut connection) + .map_err(|err| error!("Error getting account ids: {:?}", err)) + .await?; + + // TODO this should be refactored so that it gets reused in multiple backends + let accounts: Vec = accounts + .into_iter() + .map(|account| account.decrypt_tokens(&decryption_key.expose_secret().0)) + .collect(); + + Ok(accounts) } - fn set_static_routes(&self, routes: R) -> Box + Send> + async fn set_static_routes(&self, routes: R) -> Result<(), ()> where - R: IntoIterator, + R: IntoIterator + Send + 'async_trait, { + let mut connection = self.connection.clone(); let routes: Vec<(String, RedisAccountId)> = routes .into_iter() .map(|(s, id)| (s, RedisAccountId(id))) @@ -1137,246 +1069,229 @@ impl NodeStore for RedisStore { } let routing_table = self.routes.clone(); - Box::new(pipe.query_async(self.connection.clone()) - .map_err(|err| error!("Error checking if accounts exist while setting static routes: {:?}", err)) - .and_then(|(connection, accounts_exist): (RedisReconnect, Vec)| { - if accounts_exist.iter().all(|a| *a) { - Ok(connection) - } else { - error!("Error setting static routes because not all of the given accounts exist"); - Err(()) - } + + let accounts_exist: Vec = pipe + .query_async(&mut connection) + .map_err(|err| { + error!( + "Error checking if accounts exist while setting static routes: {:?}", + err + ) }) - .and_then(move |connection| { + .await?; + + if !accounts_exist.iter().all(|a| *a) { + error!("Error setting static routes because not all of the given accounts exist"); + return Err(()); + } + let mut pipe = redis_crate::pipe(); pipe.atomic() .del(STATIC_ROUTES_KEY) .ignore() .hset_multiple(STATIC_ROUTES_KEY, &routes) .ignore(); - pipe.query_async(connection) - .map_err(|err| error!("Error setting static routes: {:?}", err)) - .and_then(move |(connection, _): (RedisReconnect, Value)| { - 
update_routes(connection, routing_table) - }) - })) + + pipe.query_async(&mut connection) + .map_err(|err| error!("Error setting static routes: {:?}", err)) + .await?; + + update_routes(connection, routing_table).await?; + Ok(()) } - fn set_static_route( - &self, - prefix: String, - account_id: Uuid, - ) -> Box + Send> { + async fn set_static_route(&self, prefix: String, account_id: Uuid) -> Result<(), ()> { let routing_table = self.routes.clone(); let prefix_clone = prefix.clone(); - Box::new( - cmd("EXISTS") - .arg(accounts_key(account_id)) - .query_async(self.connection.clone()) - .map_err(|err| error!("Error checking if account exists before setting static route: {:?}", err)) - .and_then(move |(connection, exists): (RedisReconnect, bool)| { - if exists { - Ok(connection) - } else { - error!("Cannot set static route for prefix: {} because account {} does not exist", prefix_clone, account_id); - Err(()) - } - }) - .and_then(move |connection| { - cmd("HSET") - .arg(STATIC_ROUTES_KEY) - .arg(prefix) - .arg(RedisAccountId(account_id)) - .query_async(connection) - .map_err(|err| error!("Error setting static route: {:?}", err)) - .and_then(move |(connection, _): (RedisReconnect, Value)| { - update_routes(connection, routing_table) - }) + let mut connection = self.connection.clone(); + + let exists: bool = connection + .exists(accounts_key(account_id)) + .map_err(|err| { + error!( + "Error checking if account exists before setting static route: {:?}", + err + ) }) - ) + .await?; + if !exists { + error!( + "Cannot set static route for prefix: {} because account {} does not exist", + prefix_clone, account_id + ); + return Err(()); + } + + connection + .hset(STATIC_ROUTES_KEY, prefix, RedisAccountId(account_id)) + .map_err(|err| error!("Error setting static route: {:?}", err)) + .await?; + + update_routes(connection, routing_table).await?; + + Ok(()) } - fn set_default_route(&self, account_id: Uuid) -> Box + Send> { + async fn set_default_route(&self, account_id: Uuid) -> 
Result<(), ()> { let routing_table = self.routes.clone(); // TODO replace this with a lua script to do both calls at once - Box::new( - cmd("EXISTS") - .arg(accounts_key(account_id)) - .query_async(self.connection.clone()) - .map_err(|err| { - error!( - "Error checking if account exists before setting default route: {:?}", - err - ) - }) - .and_then(move |(connection, exists): (RedisReconnect, bool)| { - if exists { - Ok(connection) - } else { - error!( - "Cannot set default route because account {} does not exist", - account_id - ); - Err(()) - } - }) - .and_then(move |connection| { - cmd("SET") - .arg(DEFAULT_ROUTE_KEY) - .arg(RedisAccountId(account_id)) - .query_async(connection) - .map_err(|err| error!("Error setting default route: {:?}", err)) - .and_then(move |(connection, _): (RedisReconnect, Value)| { - debug!("Set default route to account id: {}", account_id); - update_routes(connection, routing_table) - }) - }), - ) + let mut connection = self.connection.clone(); + let exists: bool = connection + .exists(accounts_key(account_id)) + .map_err(|err| { + error!( + "Error checking if account exists before setting default route: {:?}", + err + ) + }) + .await?; + if !exists { + error!( + "Cannot set default route because account {} does not exist", + account_id + ); + return Err(()); + } + + connection + .set(DEFAULT_ROUTE_KEY, RedisAccountId(account_id)) + .map_err(|err| error!("Error setting default route: {:?}", err)) + .await?; + debug!("Set default route to account id: {}", account_id); + update_routes(connection, routing_table).await?; + Ok(()) } - fn set_settlement_engines( + async fn set_settlement_engines( &self, - asset_to_url_map: impl IntoIterator, - ) -> Box + Send> { + asset_to_url_map: impl IntoIterator + Send + 'async_trait, + ) -> Result<(), ()> { + let mut connection = self.connection.clone(); let asset_to_url_map: Vec<(String, String)> = asset_to_url_map .into_iter() .map(|(asset_code, url)| (asset_code, url.to_string())) .collect(); 
debug!("Setting settlement engines to {:?}", asset_to_url_map); - Box::new( - cmd("HMSET") - .arg(SETTLEMENT_ENGINES_KEY) - .arg(asset_to_url_map) - .query_async(self.connection.clone()) - .map_err(|err| error!("Error setting settlement engines: {:?}", err)) - .and_then(|(_, _): (RedisReconnect, Value)| Ok(())), - ) + connection + .hset_multiple(SETTLEMENT_ENGINES_KEY, &asset_to_url_map) + .map_err(|err| error!("Error setting settlement engines: {:?}", err)) + .await?; + Ok(()) } - fn get_asset_settlement_engine( - &self, - asset_code: &str, - ) -> Box, Error = ()> + Send> { - Box::new( - cmd("HGET") - .arg(SETTLEMENT_ENGINES_KEY) - .arg(asset_code) - .query_async(self.connection.clone()) - .map_err(|err| error!("Error getting settlement engine: {:?}", err)) - .map(|(_, url): (_, Option)| { - if let Some(url) = url { - Url::parse(url.as_str()) - .map_err(|err| { - error!( - "Settlement engine URL loaded from Redis was not a valid URL: {:?}", - err - ) - }) - .ok() - } else { - None - } - }), - ) + async fn get_asset_settlement_engine(&self, asset_code: &str) -> Result, ()> { + let mut connection = self.connection.clone(); + let asset_code = asset_code.to_owned(); + + let url: Option = connection + .hget(SETTLEMENT_ENGINES_KEY, asset_code) + .map_err(|err| error!("Error getting settlement engine: {:?}", err)) + .await?; + if let Some(url) = url { + match Url::parse(url.as_str()) { + Ok(url) => Ok(Some(url)), + Err(err) => { + error!( + "Settlement engine URL loaded from Redis was not a valid URL: {:?}", + err + ); + return Err(()); + } + } + } else { + Ok(None) + } } } +#[async_trait] impl AddressStore for RedisStore { // Updates the ILP address of the store & iterates over all children and // updates their ILP Address to match the new address. 
- fn set_ilp_address( - &self, - ilp_address: Address, - ) -> Box + Send> { + async fn set_ilp_address(&self, ilp_address: Address) -> Result<(), ()> { debug!("Setting ILP address to: {}", ilp_address); + let self_clone = self.clone(); let routing_table = self.routes.clone(); - let connection = self.connection.clone(); + let mut connection = self.connection.clone(); let ilp_address_clone = ilp_address.clone(); // Set the ILP address we have in memory (*self.ilp_address.write()) = ilp_address.clone(); // Save it to Redis - Box::new( - cmd("SET") - .arg(PARENT_ILP_KEY) - .arg(ilp_address.as_bytes()) - .query_async(self.connection.clone()) - .map_err(|err| error!("Error setting ILP address {:?}", err)) - .and_then(move |(_, _): (RedisReconnect, Value)| Ok(())) - .join(self.get_all_accounts().and_then(move |accounts| { - // TODO: This can be an expensive operation if this function - // gets called often. This currently only gets called when - // inserting a new parent account in the API. It'd be nice - // if we could generate a child's ILP address on the fly, - // instead of having to store the username appended to the - // node's ilp address. Currently this is not possible, as - // account.ilp_address() cannot access any state that exists - // on the store. - let mut pipe = redis_crate::pipe(); - for account in accounts { - // Update the address and routes of all children and non-routing accounts. - if account.routing_relation() != RoutingRelation::Parent - && account.routing_relation() != RoutingRelation::Peer - { - // remove the old route - pipe.hdel(ROUTES_KEY, &account.ilp_address as &str).ignore(); - - // if the username of the account ends with the - // node's address, we're already configured so no - // need to append anything. - let ilp_address_clone2 = ilp_address_clone.clone(); - // Note: We are assuming that if the node's address - // ends with the account's username, then this - // account represents the node's non routing - // account. 
Is this a reasonable assumption to make? - let new_ilp_address = - if ilp_address_clone2.segments().rev().next().unwrap() - == account.username().to_string() - { - ilp_address_clone2 - } else { - ilp_address_clone - .with_suffix(account.username().as_bytes()) - .unwrap() - }; - pipe.hset( - accounts_key(account.id()), - "ilp_address", - new_ilp_address.as_bytes(), - ) - .ignore(); - - pipe.hset( - ROUTES_KEY, - new_ilp_address.as_bytes(), - RedisAccountId(account.id()), - ) - .ignore(); - } - } - pipe.query_async(connection.clone()) - .map_err(|err| error!("Error updating children: {:?}", err)) - .and_then(move |(connection, _): (RedisReconnect, Value)| { - update_routes(connection, routing_table) - }) - })) - .and_then(move |_| Ok(())), - ) + connection + .set(PARENT_ILP_KEY, ilp_address.as_bytes()) + .map_err(|err| error!("Error setting ILP address {:?}", err)) + .await?; + + let accounts = self.get_all_accounts().await?; + // TODO: This can be an expensive operation if this function + // gets called often. This currently only gets called when + // inserting a new parent account in the API. It'd be nice + // if we could generate a child's ILP address on the fly, + // instead of having to store the username appended to the + // node's ilp address. Currently this is not possible, as + // account.ilp_address() cannot access any state that exists + // on the store. + let mut pipe = redis_crate::pipe(); + for account in accounts { + // Update the address and routes of all children and non-routing accounts. + if account.routing_relation() != RoutingRelation::Parent + && account.routing_relation() != RoutingRelation::Peer + { + // remove the old route + pipe.hdel(ROUTES_KEY, &account.ilp_address as &str).ignore(); + + // if the username of the account ends with the + // node's address, we're already configured so no + // need to append anything. 
+ let ilp_address_clone2 = ilp_address_clone.clone(); + // Note: We are assuming that if the node's address + // ends with the account's username, then this + // account represents the node's non routing + // account. Is this a reasonable assumption to make? + let new_ilp_address = if ilp_address_clone2.segments().rev().next().unwrap() + == account.username().to_string() + { + ilp_address_clone2 + } else { + ilp_address_clone + .with_suffix(account.username().as_bytes()) + .unwrap() + }; + pipe.hset( + accounts_key(account.id()), + "ilp_address", + new_ilp_address.as_bytes(), + ) + .ignore(); + + pipe.hset( + ROUTES_KEY, + new_ilp_address.as_bytes(), + RedisAccountId(account.id()), + ) + .ignore(); + } + } + + pipe.query_async(&mut connection.clone()) + .map_err(|err| error!("Error updating children: {:?}", err)) + .await?; + update_routes(connection, routing_table).await?; + Ok(()) } - fn clear_ilp_address(&self) -> Box + Send> { - let self_clone = self.clone(); - Box::new( - cmd("DEL") - .arg(PARENT_ILP_KEY) - .query_async(self.connection.clone()) - .map_err(|err| error!("Error removing parent address: {:?}", err)) - .and_then(move |(_, _): (RedisReconnect, Value)| { - *(self_clone.ilp_address.write()) = DEFAULT_ILP_ADDRESS.clone(); - Ok(()) - }), - ) + async fn clear_ilp_address(&self) -> Result<(), ()> { + let mut connection = self.connection.clone(); + connection + .del(PARENT_ILP_KEY) + .map_err(|err| error!("Error removing parent address: {:?}", err)) + .await?; + + // overwrite the ilp address with the default value + *(self.ilp_address.write()) = DEFAULT_ILP_ADDRESS.clone(); + Ok(()) } fn get_ilp_address(&self) -> Address { @@ -1387,163 +1302,105 @@ impl AddressStore for RedisStore { type RoutingTable = HashMap; +#[async_trait] impl RouteManagerStore for RedisStore { type Account = Account; - fn get_accounts_to_send_routes_to( + async fn get_accounts_to_send_routes_to( &self, ignore_accounts: Vec, - ) -> Box, Error = ()> + Send> { + ) -> Result, ()> { let 
decryption_key = self.decryption_key.clone(); - Box::new( - cmd("SMEMBERS") - .arg("send_routes_to") - .query_async(self.connection.clone()) - .map_err(|err| error!("Error getting members of set send_routes_to: {:?}", err)) - .and_then( - move |(connection, account_ids): (RedisReconnect, Vec)| { - if account_ids.is_empty() { - Either::A(ok(Vec::new())) - } else { - let mut script = LOAD_ACCOUNTS.prepare_invoke(); - for id in account_ids.iter() { - if !ignore_accounts.contains(&id.0) { - script.arg(id.to_string()); - } - } - Either::B( - script - .invoke_async(connection.clone()) - .map_err(|err| { - error!( - "Error getting accounts to send routes to: {:?}", - err - ) - }) - .and_then( - move |(_connection, accounts): ( - RedisReconnect, - Vec, - )| { - let accounts: Vec = accounts - .into_iter() - .map(|account| { - account.decrypt_tokens( - &decryption_key.expose_secret().0, - ) - }) - .collect(); - Ok(accounts) - }, - ), - ) - } - }, - ), - ) + let mut connection = self.connection.clone(); + + let account_ids: Vec = connection + .smembers("send_routes_to") + .map_err(|err| error!("Error getting members of set send_routes_to: {:?}", err)) + .await?; + let account_ids: Vec = account_ids + .into_iter() + .map(|id| id.0) + .filter(|id| !ignore_accounts.contains(&id)) + .collect(); + if account_ids.is_empty() { + return Ok(Vec::new()); + } + + let accounts = self.get_accounts(account_ids).await?; + Ok(accounts) } - fn get_accounts_to_receive_routes_from( - &self, - ) -> Box, Error = ()> + Send> { + async fn get_accounts_to_receive_routes_from(&self) -> Result, ()> { let decryption_key = self.decryption_key.clone(); - Box::new( - cmd("SMEMBERS") - .arg("receive_routes_from") - .query_async(self.connection.clone()) - .map_err(|err| { - error!( - "Error getting members of set receive_routes_from: {:?}", - err - ) - }) - .and_then( - |(connection, account_ids): (RedisReconnect, Vec)| { - if account_ids.is_empty() { - Either::A(ok(Vec::new())) - } else { - let mut script 
= LOAD_ACCOUNTS.prepare_invoke(); - for id in account_ids.iter() { - script.arg(id.to_string()); - } - Either::B( - script - .invoke_async(connection.clone()) - .map_err(|err| { - error!( - "Error getting accounts to receive routes from: {:?}", - err - ) - }) - .and_then( - move |(_connection, accounts): ( - RedisReconnect, - Vec, - )| { - let accounts: Vec = accounts - .into_iter() - .map(|account| { - account.decrypt_tokens( - &decryption_key.expose_secret().0, - ) - }) - .collect(); - Ok(accounts) - }, - ), - ) - } - }, - ), - ) + let mut connection = self.connection.clone(); + let account_ids: Vec = connection + .smembers("receive_routes_from") + .map_err(|err| { + error!( + "Error getting members of set receive_routes_from: {:?}", + err + ) + }) + .await?; + let account_ids: Vec = account_ids.into_iter().map(|id| id.0).collect(); + + if account_ids.is_empty() { + return Ok(Vec::new()); + } + + let accounts = self.get_accounts(account_ids).await?; + Ok(accounts) } - fn get_local_and_configured_routes( + async fn get_local_and_configured_routes( &self, - ) -> Box, RoutingTable), Error = ()> + Send> - { - let get_static_routes = cmd("HGETALL") + ) -> Result<(RoutingTable, RoutingTable), ()> { + let mut connection = self.connection.clone(); + let static_routes: Vec<(String, RedisAccountId)> = cmd("HGETALL") .arg(STATIC_ROUTES_KEY) - .query_async(self.connection.clone()) + .query_async(&mut connection) .map_err(|err| error!("Error getting static routes: {:?}", err)) - .and_then( - |(_, static_routes): (RedisReconnect, Vec<(String, RedisAccountId)>)| { - Ok(static_routes) - }, - ); - Box::new(self.get_all_accounts().join(get_static_routes).and_then( - |(accounts, static_routes)| { - let local_table = HashMap::from_iter( - accounts - .iter() - .map(|account| (account.ilp_address.to_string(), account.clone())), - ); + .await?; - let account_map: HashMap = HashMap::from_iter(accounts.iter().map(|account| (account.id, account))); - let configured_table: HashMap = 
HashMap::from_iter(static_routes.into_iter() - .filter_map(|(prefix, account_id)| { - if let Some(account) = account_map.get(&account_id.0) { - Some((prefix, (*account).clone())) - } else { - warn!("No account for ID: {}, ignoring configured route for prefix: {}", account_id, prefix); - None - } - })); + let accounts = self.get_all_accounts().await?; - Ok((local_table, configured_table)) - }, - )) + let local_table = HashMap::from_iter( + accounts + .iter() + .map(|account| (account.ilp_address.to_string(), account.clone())), + ); + + let account_map: HashMap = + HashMap::from_iter(accounts.iter().map(|account| (account.id, account))); + let configured_table: HashMap = HashMap::from_iter( + static_routes + .into_iter() + .filter_map(|(prefix, account_id)| { + if let Some(account) = account_map.get(&account_id.0) { + Some((prefix, (*account).clone())) + } else { + warn!( + "No account for ID: {}, ignoring configured route for prefix: {}", + account_id, prefix + ); + None + } + }), + ); + + Ok((local_table, configured_table)) } - fn set_routes( + async fn set_routes( &mut self, - routes: impl IntoIterator, - ) -> Box + Send> { + routes: impl IntoIterator + Send + 'async_trait, + ) -> Result<(), ()> { let routes: Vec<(String, RedisAccountId)> = routes .into_iter() .map(|(prefix, account)| (prefix, RedisAccountId(account.id))) .collect(); let num_routes = routes.len(); + let mut connection = self.connection.clone(); // Save routes to Redis let routing_tale = self.routes.clone(); @@ -1553,28 +1410,28 @@ impl RouteManagerStore for RedisStore { .ignore() .hset_multiple(ROUTES_KEY, &routes) .ignore(); - Box::new( - pipe.query_async(self.connection.clone()) - .map_err(|err| error!("Error setting routes: {:?}", err)) - .and_then(move |(connection, _): (RedisReconnect, Value)| { - trace!("Saved {} routes to Redis", num_routes); - update_routes(connection, routing_tale) - }), - ) + + pipe.query_async(&mut connection) + .map_err(|err| error!("Error setting routes: {:?}", err)) 
+ .await?; + trace!("Saved {} routes to Redis", num_routes); + + update_routes(connection, routing_tale).await } } +#[async_trait] impl RateLimitStore for RedisStore { type Account = Account; /// Apply rate limits for number of packets per minute and amount of money per minute /// /// This uses https://github.com/brandur/redis-cell so the redis-cell module MUST be loaded into redis before this is run - fn apply_rate_limits( + async fn apply_rate_limits( &self, account: Account, prepare_amount: u64, - ) -> Box + Send> { + ) -> Result<(), RateLimitError> { if account.amount_per_minute_limit.is_some() || account.packets_per_minute_limit.is_some() { let mut pipe = redis_crate::pipe(); let packet_limit = account.packets_per_minute_limit.is_some(); @@ -1582,8 +1439,9 @@ impl RateLimitStore for RedisStore { if let Some(limit) = account.packets_per_minute_limit { let limit = limit - 1; + let packets_limit = format!("limit:packets:{}", account.id); pipe.cmd("CL.THROTTLE") - .arg(format!("limit:packets:{}", account.id)) + .arg(packets_limit) .arg(limit) .arg(limit) .arg(60) @@ -1592,113 +1450,115 @@ impl RateLimitStore for RedisStore { if let Some(limit) = account.amount_per_minute_limit { let limit = limit - 1; + let throughput_limit = format!("limit:throughput:{}", account.id); pipe.cmd("CL.THROTTLE") - .arg(format!("limit:throughput:{}", account.id)) + .arg(throughput_limit) // TODO allow separate configuration for burst limit .arg(limit) .arg(limit) .arg(60) .arg(prepare_amount); } - Box::new( - pipe.query_async(self.connection.clone()) - .map_err(|err| { - error!("Error applying rate limits: {:?}", err); - RateLimitError::StoreError - }) - .and_then(move |(_, results): (_, Vec>)| { - if packet_limit && amount_limit { - if results[0][0] == 1 { - Err(RateLimitError::PacketLimitExceeded) - } else if results[1][0] == 1 { - Err(RateLimitError::ThroughputLimitExceeded) - } else { - Ok(()) - } - } else if packet_limit && results[0][0] == 1 { - 
Err(RateLimitError::PacketLimitExceeded) - } else if amount_limit && results[0][0] == 1 { - Err(RateLimitError::ThroughputLimitExceeded) - } else { - Ok(()) - } - }), - ) + + let mut connection = self.connection.clone(); + let results: Vec> = pipe + .query_async(&mut connection) + .map_err(|err| { + error!("Error applying rate limits: {:?}", err); + RateLimitError::StoreError + }) + .await?; + + if packet_limit && amount_limit { + if results[0][0] == 1 { + Err(RateLimitError::PacketLimitExceeded) + } else if results[1][0] == 1 { + Err(RateLimitError::ThroughputLimitExceeded) + } else { + Ok(()) + } + } else if packet_limit && results[0][0] == 1 { + Err(RateLimitError::PacketLimitExceeded) + } else if amount_limit && results[0][0] == 1 { + Err(RateLimitError::ThroughputLimitExceeded) + } else { + Ok(()) + } } else { - Box::new(ok(())) + Ok(()) } } - fn refund_throughput_limit( + async fn refund_throughput_limit( &self, account: Account, prepare_amount: u64, - ) -> Box + Send> { + ) -> Result<(), ()> { if let Some(limit) = account.amount_per_minute_limit { + let mut connection = self.connection.clone(); let limit = limit - 1; - Box::new( - cmd("CL.THROTTLE") - .arg(format!("limit:throughput:{}", account.id)) - .arg(limit) - .arg(limit) - .arg(60) - // TODO make sure this doesn't overflow - .arg(0i64 - (prepare_amount as i64)) - .query_async(self.connection.clone()) - .map_err(|err| error!("Error refunding throughput limit: {:?}", err)) - .and_then(|(_, _): (_, Value)| Ok(())), - ) - } else { - Box::new(ok(())) + let throughput_limit = format!("limit:throughput:{}", account.id); + cmd("CL.THROTTLE") + .arg(throughput_limit) + .arg(limit) + .arg(limit) + .arg(60) + // TODO make sure this doesn't overflow + .arg(0i64 - (prepare_amount as i64)) + .query_async(&mut connection) + .map_err(|err| error!("Error refunding throughput limit: {:?}", err)) + .await?; } + + Ok(()) } } +#[async_trait] impl IdempotentStore for RedisStore { - fn load_idempotent_data( + async fn 
load_idempotent_data( &self, idempotency_key: String, - ) -> Box, Error = ()> + Send> { + ) -> Result, ()> { let idempotency_key_clone = idempotency_key.clone(); - Box::new( - cmd("HGETALL") - .arg(prefixed_idempotency_key(idempotency_key.clone())) - .query_async(self.connection.clone()) - .map_err(move |err| { - error!( - "Error loading idempotency key {}: {:?}", - idempotency_key_clone, err - ) - }) - .and_then(move |(_connection, ret): (_, HashMap)| { - if let (Some(status_code), Some(data), Some(input_hash_slice)) = ( - ret.get("status_code"), - ret.get("data"), - ret.get("input_hash"), - ) { - trace!("Loaded idempotency key {:?} - {:?}", idempotency_key, ret); - let mut input_hash: [u8; 32] = Default::default(); - input_hash.copy_from_slice(input_hash_slice.as_ref()); - Ok(Some(IdempotentData::new( - StatusCode::from_str(status_code).unwrap(), - Bytes::from(data.clone()), - input_hash, - ))) - } else { - Ok(None) - } - }), - ) + let mut connection = self.connection.clone(); + let ret: HashMap = connection + .hgetall(prefixed_idempotency_key(idempotency_key.clone())) + .map_err(move |err| { + error!( + "Error loading idempotency key {}: {:?}", + idempotency_key_clone, err + ) + }) + .await?; + + if let (Some(status_code), Some(data), Some(input_hash_slice)) = ( + ret.get("status_code"), + ret.get("data"), + ret.get("input_hash"), + ) { + trace!("Loaded idempotency key {:?} - {:?}", idempotency_key, ret); + let mut input_hash: [u8; 32] = Default::default(); + input_hash.copy_from_slice(input_hash_slice.as_ref()); + Ok(Some(IdempotentData::new( + StatusCode::from_str(status_code).unwrap(), + Bytes::from(data.clone()), + input_hash, + ))) + } else { + Ok(None) + } } - fn save_idempotent_data( + async fn save_idempotent_data( &self, idempotency_key: String, input_hash: [u8; 32], status_code: StatusCode, data: Bytes, - ) -> Box + Send> { + ) -> Result<(), ()> { let mut pipe = redis_crate::pipe(); + let mut connection = self.connection.clone(); pipe.atomic() 
.cmd("HMSET") // cannot use hset_multiple since data and status_code have different types .arg(&prefixed_idempotency_key(idempotency_key.clone())) @@ -1711,76 +1571,77 @@ impl IdempotentStore for RedisStore { .ignore() .expire(&prefixed_idempotency_key(idempotency_key.clone()), 86400) .ignore(); - Box::new( - pipe.query_async(self.connection.clone()) - .map_err(|err| error!("Error caching: {:?}", err)) - .and_then(move |(_connection, _): (_, Vec)| { - trace!( - "Cached {:?}: {:?}, {:?}", - idempotency_key, - status_code, - data, - ); - Ok(()) - }), - ) + pipe.query_async(&mut connection) + .map_err(|err| error!("Error caching: {:?}", err)) + .await?; + + trace!( + "Cached {:?}: {:?}, {:?}", + idempotency_key, + status_code, + data, + ); + Ok(()) } } +#[async_trait] impl SettlementStore for RedisStore { type Account = Account; - fn update_balance_for_incoming_settlement( + async fn update_balance_for_incoming_settlement( &self, account_id: Uuid, amount: u64, idempotency_key: Option, - ) -> Box + Send> { + ) -> Result<(), ()> { let idempotency_key = idempotency_key.unwrap(); - Box::new( - PROCESS_INCOMING_SETTLEMENT + let balance: i64 = PROCESS_INCOMING_SETTLEMENT .arg(RedisAccountId(account_id)) .arg(amount) .arg(idempotency_key) - .invoke_async(self.connection.clone()) - .map_err(move |err| error!("Error processing incoming settlement from account: {} for amount: {}: {:?}", account_id, amount, err)) - .and_then(move |(_connection, balance): (_, i64)| { - trace!("Processed incoming settlement from account: {} for amount: {}. Balance is now: {}", account_id, amount, balance); - Ok(()) - })) + .invoke_async(&mut self.connection.clone()) + .map_err(move |err| { + error!( + "Error processing incoming settlement from account: {} for amount: {}: {:?}", + account_id, amount, err + ) + }) + .await?; + trace!( + "Processed incoming settlement from account: {} for amount: {}. 
Balance is now: {}", + account_id, + amount, + balance + ); + Ok(()) } - fn refund_settlement( - &self, - account_id: Uuid, - settle_amount: u64, - ) -> Box + Send> { + async fn refund_settlement(&self, account_id: Uuid, settle_amount: u64) -> Result<(), ()> { trace!( "Refunding settlement for account: {} of amount: {}", account_id, settle_amount ); - Box::new( - REFUND_SETTLEMENT - .arg(RedisAccountId(account_id)) - .arg(settle_amount) - .invoke_async(self.connection.clone()) - .map_err(move |err| { - error!( - "Error refunding settlement for account: {} of amount: {}: {:?}", - account_id, settle_amount, err - ) - }) - .and_then(move |(_connection, balance): (_, i64)| { - trace!( - "Refunded settlement for account: {} of amount: {}. Balance is now: {}", - account_id, - settle_amount, - balance - ); - Ok(()) - }), - ) + let balance: i64 = REFUND_SETTLEMENT + .arg(RedisAccountId(account_id)) + .arg(settle_amount) + .invoke_async(&mut self.connection.clone()) + .map_err(move |err| { + error!( + "Error refunding settlement for account: {} of amount: {}: {:?}", + account_id, settle_amount, err + ) + }) + .await?; + + trace!( + "Refunded settlement for account: {} of amount: {}. 
Balance is now: {}", + account_id, + settle_amount, + balance + ); + Ok(()) } } @@ -1870,155 +1731,144 @@ impl FromRedisValue for AmountWithScale { } } +#[async_trait] impl LeftoversStore for RedisStore { type AccountId = Uuid; type AssetType = BigUint; - fn get_uncredited_settlement_amount( + async fn get_uncredited_settlement_amount( &self, account_id: Uuid, - ) -> Box + Send> { + ) -> Result<(Self::AssetType, u8), ()> { let mut pipe = redis_crate::pipe(); pipe.atomic(); // get the amounts and instantly delete them pipe.lrange(uncredited_amount_key(account_id.to_string()), 0, -1); pipe.del(uncredited_amount_key(account_id.to_string())) .ignore(); - Box::new( - pipe.query_async(self.connection.clone()) - .map_err(move |err| error!("Error getting uncredited_settlement_amount {:?}", err)) - .and_then(move |(_, amounts): (_, Vec)| { - // this call will only return 1 element - let amount = amounts[0].clone(); - Ok((amount.num, amount.scale)) - }), - ) + + let amounts: Vec = pipe + .query_async(&mut self.connection.clone()) + .map_err(move |err| error!("Error getting uncredited_settlement_amount {:?}", err)) + .await?; + + // this call will only return 1 element + let amount = amounts[0].clone(); + Ok((amount.num, amount.scale)) } - fn save_uncredited_settlement_amount( + async fn save_uncredited_settlement_amount( &self, account_id: Uuid, uncredited_settlement_amount: (Self::AssetType, u8), - ) -> Box + Send> { + ) -> Result<(), ()> { trace!( "Saving uncredited_settlement_amount {:?} {:?}", account_id, uncredited_settlement_amount ); - Box::new( - // We store these amounts as lists of strings - // because we cannot do BigNumber arithmetic in the store - // When loading the amounts, we convert them to the appropriate data - // type and sum them up. 
- cmd("RPUSH") - .arg(uncredited_amount_key(account_id)) - .arg(AmountWithScale { + // We store these amounts as lists of strings + // because we cannot do BigNumber arithmetic in the store + // When loading the amounts, we convert them to the appropriate data + // type and sum them up. + let mut connection = self.connection.clone(); + connection + .rpush( + uncredited_amount_key(account_id), + AmountWithScale { num: uncredited_settlement_amount.0, scale: uncredited_settlement_amount.1, - }) - .query_async(self.connection.clone()) - .map_err(move |err| error!("Error saving uncredited_settlement_amount: {:?}", err)) - .and_then(move |(_conn, _ret): (_, Value)| Ok(())), - ) + }, + ) + .map_err(move |err| error!("Error saving uncredited_settlement_amount: {:?}", err)) + .await?; + + Ok(()) } - fn load_uncredited_settlement_amount( + async fn load_uncredited_settlement_amount( &self, account_id: Uuid, local_scale: u8, - ) -> Box + Send> { - let connection = self.connection.clone(); + ) -> Result { + let mut connection = self.connection.clone(); trace!("Loading uncredited_settlement_amount {:?}", account_id); - Box::new( - self.get_uncredited_settlement_amount(account_id) - .and_then(move |amount| { - // scale the amount from the max scale to the local scale, and then - // save any potential leftovers to the store - let (scaled_amount, precision_loss) = - scale_with_precision_loss(amount.0, local_scale, amount.1); - if precision_loss > BigUint::from(0u32) { - Either::A( - cmd("RPUSH") - .arg(uncredited_amount_key(account_id)) - .arg(AmountWithScale { - num: precision_loss, - scale: std::cmp::max(local_scale, amount.1), - }) - .query_async(connection.clone()) - .map_err(move |err| { - error!("Error saving uncredited_settlement_amount: {:?}", err) - }) - .and_then(move |(_conn, _ret): (_, Value)| Ok(scaled_amount)), - ) - } else { - Either::B(ok(scaled_amount)) - } - }), - ) + let amount = self.get_uncredited_settlement_amount(account_id).await?; + // scale the amount 
from the max scale to the local scale, and then + // save any potential leftovers to the store + let (scaled_amount, precision_loss) = + scale_with_precision_loss(amount.0, local_scale, amount.1); + + if precision_loss > BigUint::from(0u32) { + connection + .rpush( + uncredited_amount_key(account_id), + AmountWithScale { + num: precision_loss, + scale: std::cmp::max(local_scale, amount.1), + }, + ) + .map_err(move |err| error!("Error saving uncredited_settlement_amount: {:?}", err)) + .await?; + } + + Ok(scaled_amount) } - fn clear_uncredited_settlement_amount( - &self, - account_id: Uuid, - ) -> Box + Send> { - trace!("Clearing uncredited_settlement_amount {:?}", account_id,); - Box::new( - cmd("DEL") - .arg(uncredited_amount_key(account_id)) - .query_async(self.connection.clone()) - .map_err(move |err| { - error!("Error clearing uncredited_settlement_amount: {:?}", err) - }) - .and_then(move |(_conn, _ret): (_, Value)| Ok(())), - ) + async fn clear_uncredited_settlement_amount(&self, account_id: Uuid) -> Result<(), ()> { + trace!("Clearing uncredited_settlement_amount {:?}", account_id); + let mut connection = self.connection.clone(); + connection + .del(uncredited_amount_key(account_id)) + .map_err(move |err| error!("Error clearing uncredited_settlement_amount: {:?}", err)) + .await?; + Ok(()) } } type RouteVec = Vec<(String, RedisAccountId)>; +use futures::future::TryFutureExt; + // TODO replace this with pubsub when async pubsub is added upstream: https://github.com/mitsuhiko/redis-rs/issues/183 -fn update_routes( - connection: RedisReconnect, +async fn update_routes( + mut connection: RedisReconnect, routing_table: Arc>>>, -) -> impl Future { +) -> Result<(), ()> { let mut pipe = redis_crate::pipe(); pipe.hgetall(ROUTES_KEY) .hgetall(STATIC_ROUTES_KEY) .get(DEFAULT_ROUTE_KEY); - pipe.query_async(connection) + let (routes, static_routes, default_route): (RouteVec, RouteVec, Option) = pipe + .query_async(&mut connection) .map_err(|err| error!("Error 
polling for routing table updates: {:?}", err)) - .and_then( - move |(_connection, (routes, static_routes, default_route)): ( - _, - (RouteVec, RouteVec, Option), - )| { - trace!( - "Loaded routes from redis. Static routes: {:?}, default route: {:?}, other routes: {:?}", - static_routes, - default_route, - routes - ); - // If there is a default route set in the db, - // set the entry for "" in the routing table to route to that account - let default_route_iter = iter::once(default_route) - .filter_map(|r| r) - .map(|rid| (String::new(), rid.0)); - let routes = HashMap::from_iter( - routes - .into_iter().map(|(s, rid)| (s, rid.0)) - // Include the default route if there is one - .chain(default_route_iter) - // Having the static_routes inserted after ensures that they will overwrite - // any routes with the same prefix from the first set - .chain(static_routes.into_iter().map(|(s, rid)| (s, rid.0))) - ); - // TODO we may not want to print this because the routing table will be very big - // if the node has a lot of local accounts - trace!("Routing table is: {:?}", routes); - *routing_table.write() = Arc::new(routes); - Ok(()) - }, - ) + .await?; + trace!( + "Loaded routes from redis. 
Static routes: {:?}, default route: {:?}, other routes: {:?}", + static_routes, + default_route, + routes + ); + // If there is a default route set in the db, + // set the entry for "" in the routing table to route to that account + let default_route_iter = iter::once(default_route) + .filter_map(|r| r) + .map(|rid| (String::new(), rid.0)); + let routes = HashMap::from_iter( + routes + .into_iter() + .map(|(s, rid)| (s, rid.0)) + // Include the default route if there is one + .chain(default_route_iter) + // Having the static_routes inserted after ensures that they will overwrite + // any routes with the same prefix from the first set + .chain(static_routes.into_iter().map(|(s, rid)| (s, rid.0))), + ); + // TODO we may not want to print this because the routing table will be very big + // if the node has a lot of local accounts + trace!("Routing table is: {:?}", routes); + *routing_table.write() = Arc::new(routes); + Ok(()) } // Uuid does not implement ToRedisArgs and FromRedisValue. @@ -2279,25 +2129,14 @@ mod tests { use redis_crate::IntoConnectionInfo; use tokio::runtime::Runtime; - #[test] - fn connect_fails_if_db_unavailable() { - let mut runtime = Runtime::new().unwrap(); - runtime - .block_on(future::lazy( - || -> Box + Send> { - Box::new( - RedisStoreBuilder::new( - "redis://127.0.0.1:0".into_connection_info().unwrap() as ConnectionInfo, - [0; 32], - ) - .connect() - .then(|result| { - assert!(result.is_err()); - Ok(()) - }), - ) - }, - )) - .unwrap(); + #[tokio::test] + async fn connect_fails_if_db_unavailable() { + let result = RedisStoreBuilder::new( + "redis://127.0.0.1:0".into_connection_info().unwrap() as ConnectionInfo, + [0; 32], + ) + .connect() + .await; + assert!(result.is_err()); } } diff --git a/crates/interledger-store/src/redis/reconnect.rs b/crates/interledger-store/src/redis/reconnect.rs index ed9680927..712d782ab 100644 --- a/crates/interledger-store/src/redis/reconnect.rs +++ b/crates/interledger-store/src/redis/reconnect.rs @@ -1,43 +1,55 
@@ -use futures::{ - future::{err, result, Either}, - Future, -}; +use futures::future::{FutureExt, TryFutureExt}; +use futures01::future::{err, Either, Future as Future01}; use log::{debug, error}; use parking_lot::RwLock; use redis_crate::{ - aio::{ConnectionLike, SharedConnection}, - Client, ConnectionInfo, RedisError, Value, + aio::{ConnectionLike, MultiplexedConnection}, + AsyncCommands, Client, Cmd, ConnectionInfo, Pipeline, RedisError, RedisFuture, Value, }; +use std::future::Future; use std::sync::Arc; -/// Wrapper around a Redis SharedConnection that automatically +type Result = std::result::Result; + +/// Wrapper around a Redis MultiplexedConnection that automatically /// attempts to reconnect to the DB if the connection is dropped #[derive(Clone)] pub struct RedisReconnect { pub(crate) redis_info: Arc, - pub(crate) conn: Arc>, + pub(crate) conn: Arc>, +} + +async fn get_shared_connection(redis_info: Arc) -> Result { + let client = Client::open((*redis_info).clone())?; + client + .get_multiplexed_tokio_connection() + .map_err(|e| { + error!("Error connecting to Redis: {:?}", e); + e + }) + .await } +use futures::compat::Compat01As03; + impl RedisReconnect { - pub fn connect( - redis_info: ConnectionInfo, - ) -> impl Future { + pub async fn connect(redis_info: ConnectionInfo) -> Result { let redis_info = Arc::new(redis_info); - get_shared_connection(redis_info.clone()).map(move |conn| RedisReconnect { + let conn = get_shared_connection(redis_info.clone()).await?; + Ok(RedisReconnect { conn: Arc::new(RwLock::new(conn)), redis_info, }) } - pub fn reconnect(self) -> impl Future { - get_shared_connection(self.redis_info.clone()).and_then(move |shared_connection| { - (*self.conn.write()) = shared_connection; - debug!("Reconnected to Redis"); - Ok(self) - }) + pub async fn reconnect(self) -> Result { + let shared_connection = get_shared_connection(self.redis_info.clone()).await?; + (*self.conn.write()) = shared_connection; + debug!("Reconnected to Redis"); + 
Ok(self) } - fn get_shared_connection(&self) -> SharedConnection { + fn get_shared_connection(&self) -> MultiplexedConnection { self.conn.read().clone() } } @@ -47,56 +59,46 @@ impl ConnectionLike for RedisReconnect { self.conn.read().get_db() } - fn req_packed_command( - self, - cmd: Vec, - ) -> Box + Send> { - let clone = self.clone(); - Box::new( - self.get_shared_connection() - .req_packed_command(cmd) - .or_else(move |error| { + fn req_packed_command<'a>(&'a mut self, cmd: &'a Cmd) -> RedisFuture<'a, Value> { + // This is how it is implemented in the redis-rs repository + (async move { + let mut connection = self.get_shared_connection(); + match connection.req_packed_command(cmd).await { + Ok(res) => Ok(res), + Err(error) => { if error.is_connection_dropped() { debug!("Redis connection was dropped, attempting to reconnect"); - Either::A(clone.reconnect().then(|_| Err(error))) - } else { - Either::B(err(error)) + // TODO: Is this correct syntax? Otherwise we get an unused result warning + let _ = self.clone().reconnect().await; } - }) - .map(move |(_conn, res)| (self, res)), - ) + Err(error) + } + } + }) + .boxed() } - fn req_packed_commands( - self, - cmd: Vec, + fn req_packed_commands<'a>( + &'a mut self, + cmd: &'a Pipeline, offset: usize, count: usize, - ) -> Box), Error = RedisError> + Send> { - let clone = self.clone(); - Box::new( - self.get_shared_connection() - .req_packed_commands(cmd, offset, count) - .or_else(move |error| { + ) -> RedisFuture<'a, Vec> { + // This is how it is implemented in the redis-rs repository + (async move { + let mut connection = self.get_shared_connection(); + match connection.req_packed_commands(cmd, offset, count).await { + Ok(res) => Ok(res), + Err(error) => { if error.is_connection_dropped() { debug!("Redis connection was dropped, attempting to reconnect"); - Either::A(clone.reconnect().then(|_| Err(error))) - } else { - Either::B(err(error)) + // TODO: Is this correct syntax? 
Otherwise we get an unused result warning + let _ = self.clone().reconnect().await; } - }) - .map(|(_conn, res)| (self, res)), - ) - } -} - -fn get_shared_connection( - redis_info: Arc, -) -> impl Future { - result(Client::open((*redis_info).clone())).and_then(|client| { - client.get_shared_async_connection().map_err(|e| { - error!("Error connecting to Redis: {:?}", e); - e + Err(error) + } + } }) - }) + .boxed() + } } diff --git a/crates/interledger-store/tests/redis/accounts_test.rs b/crates/interledger-store/tests/redis/accounts_test.rs index 31e4d25e4..84ecba963 100644 --- a/crates/interledger-store/tests/redis/accounts_test.rs +++ b/crates/interledger-store/tests/redis/accounts_test.rs @@ -1,5 +1,6 @@ use super::{fixtures::*, redis_helpers::*, store_helpers::*}; -use futures::future::{result, Either, Future}; +use futures::future::{Either, Future}; +use futures::TryFutureExt; use interledger_api::{AccountSettings, NodeStore}; use interledger_btp::{BtpAccount, BtpStore}; use interledger_ccp::{CcpRoutingAccount, RoutingRelation}; @@ -9,481 +10,273 @@ use interledger_service::Account as AccountTrait; use interledger_service::{AccountStore, AddressStore, Username}; use interledger_service_util::BalanceStore; use interledger_store::redis::RedisStoreBuilder; -use log::{debug, error}; use redis_crate::Client; use secrecy::ExposeSecret; use secrecy::SecretString; +use std::default::Default; use std::str::FromStr; use uuid::Uuid; -#[test] -fn picks_up_parent_during_initialization() { +#[tokio::test] +async fn picks_up_parent_during_initialization() { let context = TestContext::new(); - block_on( - result(Client::open(context.get_client_connection_info())) - .map_err(|err| error!("Error creating Redis client: {:?}", err)) - .and_then(|client| { - debug!("Connected to redis: {:?}", client); - client - .get_shared_async_connection() - .map_err(|err| error!("Error connecting to Redis: {:?}", err)) - }) - .and_then(move |connection| { - // we set a parent that was already 
configured via perhaps a - // previous account insertion. that means that when we connect - // to the store we will always get the configured parent (if - // there was one)) - redis_crate::cmd("SET") - .arg("parent_node_account_address") - .arg("example.bob.node") - .query_async(connection) - .map_err(|err| panic!(err)) - .and_then(move |(_, _): (_, redis_crate::Value)| { - RedisStoreBuilder::new(context.get_client_connection_info(), [0; 32]) - .connect() - .and_then(move |store| { - // the store's ilp address is the store's - // username appended to the parent's address - assert_eq!( - store.get_ilp_address(), - Address::from_str("example.bob.node").unwrap() - ); - let _ = context; - Ok(()) - }) - }) - }), - ) - .unwrap(); + let client = Client::open(context.get_client_connection_info()).unwrap(); + let mut connection = client.get_multiplexed_tokio_connection().await.unwrap(); + + // we set a parent that was already configured via perhaps a + // previous account insertion. that means that when we connect + // to the store we will always get the configured parent (if + // there was one)) + let _: redis_crate::Value = redis_crate::cmd("SET") + .arg("parent_node_account_address") + .arg("example.bob.node") + .query_async(&mut connection) + .await + .unwrap(); + + let store = RedisStoreBuilder::new(context.get_client_connection_info(), [0; 32]) + .connect() + .await + .unwrap(); + // the store's ilp address is the store's + // username appended to the parent's address + assert_eq!( + store.get_ilp_address(), + Address::from_str("example.bob.node").unwrap() + ); } -#[test] -fn insert_accounts() { - block_on(test_store().and_then(|(store, context, _accs)| { - store - .insert_account(ACCOUNT_DETAILS_2.clone()) - .and_then(move |account| { - assert_eq!( - *account.ilp_address(), - Address::from_str("example.alice.user1.charlie").unwrap() - ); - let _ = context; - Ok(()) - }) - })) - .unwrap(); +#[tokio::test] +async fn insert_accounts() { + let (store, _context, _) = 
test_store().await.unwrap(); + let account = store + .insert_account(ACCOUNT_DETAILS_2.clone()) + .await + .unwrap(); + assert_eq!( + *account.ilp_address(), + Address::from_str("example.alice.user1.charlie").unwrap() + ); } -#[test] -fn update_ilp_and_children_addresses() { - block_on(test_store().and_then(|(store, context, accs)| { - store - // Add a NonRoutingAccount to make sure its address - // gets updated as well - .insert_account(ACCOUNT_DETAILS_2.clone()) - .and_then(move |acc2| { - let mut accs = accs.clone(); - accs.push(acc2); - accs.sort_by_key(|a| a.username().clone()); - let ilp_address = Address::from_str("test.parent.our_address").unwrap(); - store - .set_ilp_address(ilp_address.clone()) - .and_then(move |_| { - let ret = store.get_ilp_address(); - assert_eq!(ilp_address, ret); - store.get_all_accounts().and_then(move |accounts: Vec<_>| { - let mut accounts = accounts.clone(); - accounts.sort_by(|a, b| { - a.username() - .as_bytes() - .partial_cmp(b.username().as_bytes()) - .unwrap() - }); - for (a, b) in accounts.into_iter().zip(&accs) { - if a.routing_relation() == RoutingRelation::Child - || a.routing_relation() == RoutingRelation::NonRoutingAccount - { - assert_eq!( - *a.ilp_address(), - ilp_address.with_suffix(a.username().as_bytes()).unwrap() - ); - } else { - assert_eq!(a.ilp_address(), b.ilp_address()); - } - } - let _ = context; - Ok(()) - }) - }) - }) - })) - .unwrap(); +#[tokio::test] +async fn update_ilp_and_children_addresses() { + let (store, _context, accs) = test_store().await.unwrap(); + // Add a NonRoutingAccount to make sure its address + // gets updated as well + let acc2 = store + .insert_account(ACCOUNT_DETAILS_2.clone()) + .await + .unwrap(); + let mut accs = accs.clone(); + accs.push(acc2); + accs.sort_by_key(|a| a.username().clone()); + let ilp_address = Address::from_str("test.parent.our_address").unwrap(); + + store.set_ilp_address(ilp_address.clone()).await.unwrap(); + let ret = store.get_ilp_address(); + 
assert_eq!(ilp_address, ret); + + let accounts = store.get_all_accounts().await.unwrap(); + let mut accounts = accounts.clone(); + accounts.sort_by(|a, b| { + a.username() + .as_bytes() + .partial_cmp(b.username().as_bytes()) + .unwrap() + }); + for (a, b) in accounts.into_iter().zip(&accs) { + if a.routing_relation() == RoutingRelation::Child + || a.routing_relation() == RoutingRelation::NonRoutingAccount + { + assert_eq!( + *a.ilp_address(), + ilp_address.with_suffix(a.username().as_bytes()).unwrap() + ); + } else { + assert_eq!(a.ilp_address(), b.ilp_address()); + } + } } -#[test] -fn only_one_parent_allowed() { +#[tokio::test] +async fn only_one_parent_allowed() { let mut acc = ACCOUNT_DETAILS_2.clone(); acc.routing_relation = Some("Parent".to_owned()); acc.username = Username::from_str("another_name").unwrap(); acc.ilp_address = Some(Address::from_str("example.another_name").unwrap()); - block_on(test_store().and_then(|(store, context, accs)| { - store.insert_account(acc.clone()).then(move |res| { - // This should fail - assert!(res.is_err()); - futures::future::join_all(vec![ - Either::A(store.delete_account(accs[0].id()).and_then(|_| Ok(()))), - // must also clear the ILP Address to indicate that we no longer - // have a parent account configured - Either::B(store.clear_ilp_address()), - ]) - .and_then(move |_| { - store.insert_account(acc).and_then(move |_| { - // the call was successful, so the parent was succesfully added - let _ = context; - Ok(()) - }) - }) - }) - })) - .unwrap(); + let (store, _context, accs) = test_store().await.unwrap(); + let res = store.insert_account(acc.clone()).await; + // This should fail + assert!(res.is_err()); + futures::future::join_all(vec![ + Either::Left(store.delete_account(accs[0].id()).map_ok(|_| ())), + // must also clear the ILP Address to indicate that we no longer + // have a parent account configured + Either::Right(store.clear_ilp_address()), + ]) + .await; + let res = store.insert_account(acc).await; + 
assert!(res.is_ok()); } -#[test] -fn delete_accounts() { - block_on(test_store().and_then(|(store, context, _accs)| { - store.get_all_accounts().and_then(move |accounts| { - let id = accounts[0].id(); - store.delete_account(id).and_then(move |_| { - store.get_all_accounts().and_then(move |accounts| { - for a in accounts { - assert_ne!(id, a.id()); - } - let _ = context; - Ok(()) - }) - }) - }) - })) - .unwrap(); +#[tokio::test] +async fn delete_accounts() { + let (store, _context, _) = test_store().await.unwrap(); + let accounts = store.get_all_accounts().await.unwrap(); + let id = accounts[0].id(); + store.delete_account(id).await.unwrap(); + let accounts = store.get_all_accounts().await.unwrap(); + for a in accounts { + assert_ne!(id, a.id()); + } } -#[test] -fn update_accounts() { - block_on(test_store().and_then(|(store, context, accounts)| { - context - .async_connection() - .map_err(|err| panic!(err)) - .and_then(move |connection| { - let id = accounts[0].id(); - redis_crate::cmd("HMSET") - .arg(format!("accounts:{}", id)) - .arg("balance") - .arg(600) - .arg("prepaid_amount") - .arg(400) - .query_async(connection) - .map_err(|err| panic!(err)) - .and_then(move |(_, _): (_, redis_crate::Value)| { - let mut new = ACCOUNT_DETAILS_0.clone(); - new.asset_code = String::from("TUV"); - store.update_account(id, new).and_then(move |account| { - assert_eq!(account.asset_code(), "TUV"); - store.get_balance(account).and_then(move |balance| { - assert_eq!(balance, 1000); - let _ = context; - Ok(()) - }) - }) - }) - }) - })) - .unwrap(); +#[tokio::test] +async fn update_accounts() { + let (store, context, accounts) = test_store().await.unwrap(); + let mut connection = context.async_connection().await.unwrap(); + let id = accounts[0].id(); + let _: redis_crate::Value = redis_crate::cmd("HMSET") + .arg(format!("accounts:{}", id)) + .arg("balance") + .arg(600u64) + .arg("prepaid_amount") + .arg(400u64) + .query_async(&mut connection) + .await + .unwrap(); + let mut new = 
ACCOUNT_DETAILS_0.clone(); + new.asset_code = String::from("TUV"); + let account = store.update_account(id, new).await.unwrap(); + assert_eq!(account.asset_code(), "TUV"); + let balance = store.get_balance(account).await.unwrap(); + assert_eq!(balance, 1000); } -#[test] -fn modify_account_settings_settle_to_overflow() { - block_on(test_store().and_then(|(store, context, accounts)| { - let mut settings = AccountSettings::default(); - // Redis.rs cannot save a value larger than i64::MAX - settings.settle_to = Some(std::i64::MAX as u64 + 1); - let account = accounts[0].clone(); - let id = account.id(); - store - .modify_account_settings(id, settings) - .then(move |ret| { - assert!(ret.is_err()); - let _ = context; - Ok(()) - }) - })) - .unwrap(); +#[tokio::test] +async fn modify_account_settings_settle_to_overflow() { + let (store, _context, accounts) = test_store().await.unwrap(); + let mut settings = AccountSettings::default(); + // Redis.rs cannot save a value larger than i64::MAX + settings.settle_to = Some(std::i64::MAX as u64 + 1); + let account = accounts[0].clone(); + let id = account.id(); + let ret = store.modify_account_settings(id, settings).await; + assert!(ret.is_err()); } -use std::default::Default; -#[test] -fn modify_account_settings_unchanged() { - block_on(test_store().and_then(|(store, context, accounts)| { - let settings = AccountSettings::default(); - let account = accounts[0].clone(); +#[tokio::test] +async fn modify_account_settings_unchanged() { + let (store, _context, accounts) = test_store().await.unwrap(); + let settings = AccountSettings::default(); + let account = accounts[0].clone(); - let id = account.id(); - store - .modify_account_settings(id, settings) - .and_then(move |ret| { - assert_eq!( - account.get_http_auth_token().unwrap().expose_secret(), - ret.get_http_auth_token().unwrap().expose_secret(), - ); - assert_eq!( - account.get_ilp_over_btp_outgoing_token().unwrap(), - ret.get_ilp_over_btp_outgoing_token().unwrap() - ); - // 
Cannot check other parameters since they are only pub(crate). - let _ = context; - Ok(()) - }) - })) - .unwrap(); -} + let id = account.id(); + let ret = store.modify_account_settings(id, settings).await.unwrap(); -#[test] -fn modify_account_settings() { - block_on(test_store().and_then(|(store, context, accounts)| { - let settings = AccountSettings { - ilp_over_http_outgoing_token: Some(SecretString::new("test_token".to_owned())), - ilp_over_http_incoming_token: Some(SecretString::new("http_in_new".to_owned())), - ilp_over_btp_outgoing_token: Some(SecretString::new("dylan:test".to_owned())), - ilp_over_btp_incoming_token: Some(SecretString::new("btp_in_new".to_owned())), - ilp_over_http_url: Some("http://example.com/accounts/dylan/ilp".to_owned()), - ilp_over_btp_url: Some("http://example.com/accounts/dylan/ilp/btp".to_owned()), - settle_threshold: Some(-50), - settle_to: Some(100), - }; - let account = accounts[0].clone(); - - let id = account.id(); - store - .modify_account_settings(id, settings) - .and_then(move |ret| { - assert_eq!( - ret.get_http_auth_token().unwrap().expose_secret(), - "test_token", - ); - assert_eq!( - ret.get_ilp_over_btp_outgoing_token().unwrap(), - &b"dylan:test"[..], - ); - // Cannot check other parameters since they are only pub(crate). 
- let _ = context; - Ok(()) - }) - })) - .unwrap(); -} - -#[test] -fn starts_with_zero_balance() { - block_on(test_store().and_then(|(store, context, accs)| { - let account0 = accs[0].clone(); - store.get_balance(account0).and_then(move |balance| { - assert_eq!(balance, 0); - let _ = context; - Ok(()) - }) - })) - .unwrap(); + assert_eq!( + account.get_http_auth_token().unwrap().expose_secret(), + ret.get_http_auth_token().unwrap().expose_secret(), + ); + assert_eq!( + account.get_ilp_over_btp_outgoing_token().unwrap(), + ret.get_ilp_over_btp_outgoing_token().unwrap() + ); } -#[test] -fn fetches_account_from_username() { - block_on(test_store().and_then(|(store, context, accs)| { - store - .get_account_id_from_username(&Username::from_str("alice").unwrap()) - .and_then(move |account_id| { - assert_eq!(account_id, accs[0].id()); - let _ = context; - Ok(()) - }) - })) - .unwrap(); -} - -#[test] -fn duplicate_http_incoming_auth_works() { - let mut duplicate = ACCOUNT_DETAILS_2.clone(); - duplicate.ilp_over_http_incoming_token = - Some(SecretString::new("incoming_auth_token".to_string())); - block_on(test_store().and_then(|(store, context, accs)| { - let original = accs[0].clone(); - let original_id = original.id(); - store.insert_account(duplicate).and_then(move |duplicate| { - let duplicate_id = duplicate.id(); - assert_ne!(original_id, duplicate_id); - futures::future::join_all(vec![ - store.get_account_from_http_auth( - &Username::from_str("alice").unwrap(), - "incoming_auth_token", - ), - store.get_account_from_http_auth( - &Username::from_str("charlie").unwrap(), - "incoming_auth_token", - ), - ]) - .and_then(move |accs| { - // Alice and Charlie had the same auth token, but they had a - // different username/account id, so no problem. 
- assert_ne!(accs[0].id(), accs[1].id()); - assert_eq!(accs[0].id(), original_id); - assert_eq!(accs[1].id(), duplicate_id); - let _ = context; - Ok(()) - }) - }) - })) - .unwrap(); -} +#[tokio::test] +async fn modify_account_settings() { + let (store, _context, accounts) = test_store().await.unwrap(); + let settings = AccountSettings { + ilp_over_http_outgoing_token: Some(SecretString::new("test_token".to_owned())), + ilp_over_http_incoming_token: Some(SecretString::new("http_in_new".to_owned())), + ilp_over_btp_outgoing_token: Some(SecretString::new("dylan:test".to_owned())), + ilp_over_btp_incoming_token: Some(SecretString::new("btp_in_new".to_owned())), + ilp_over_http_url: Some("http://example.com/accounts/dylan/ilp".to_owned()), + ilp_over_btp_url: Some("http://example.com/accounts/dylan/ilp/btp".to_owned()), + settle_threshold: Some(-50), + settle_to: Some(100), + }; + let account = accounts[0].clone(); -#[test] -fn gets_account_from_btp_auth() { - block_on(test_store().and_then(|(store, context, accs)| { - // alice's incoming btp token is the username/password to get her - // account's information - store - .get_account_from_btp_auth(&Username::from_str("alice").unwrap(), "btp_token") - .and_then(move |acc| { - assert_eq!(acc.id(), accs[0].id()); - let _ = context; - Ok(()) - }) - })) - .unwrap(); + let id = account.id(); + let ret = store.modify_account_settings(id, settings).await.unwrap(); + assert_eq!( + ret.get_http_auth_token().unwrap().expose_secret(), + "test_token", + ); + assert_eq!( + ret.get_ilp_over_btp_outgoing_token().unwrap(), + &b"dylan:test"[..], + ); } -#[test] -fn gets_account_from_http_auth() { - block_on(test_store().and_then(|(store, context, accs)| { - store - .get_account_from_http_auth( - &Username::from_str("alice").unwrap(), - "incoming_auth_token", - ) - .and_then(move |acc| { - assert_eq!(acc.id(), accs[0].id()); - let _ = context; - Ok(()) - }) - })) - .unwrap(); +#[tokio::test] +async fn starts_with_zero_balance() { + let 
(store, _context, accs) = test_store().await.unwrap(); + let account0 = accs[0].clone(); + let balance = store.get_balance(account0).await.unwrap(); + assert_eq!(balance, 0); } -#[test] -fn duplicate_btp_incoming_auth_works() { - let mut charlie = ACCOUNT_DETAILS_2.clone(); - charlie.ilp_over_btp_incoming_token = Some(SecretString::new("btp_token".to_string())); - block_on(test_store().and_then(|(store, context, accs)| { - let alice = accs[0].clone(); - let alice_id = alice.id(); - store.insert_account(charlie).and_then(move |charlie| { - let charlie_id = charlie.id(); - assert_ne!(alice_id, charlie_id); - futures::future::join_all(vec![ - store.get_account_from_btp_auth(&Username::from_str("alice").unwrap(), "btp_token"), - store.get_account_from_btp_auth( - &Username::from_str("charlie").unwrap(), - "btp_token", - ), - ]) - .and_then(move |accs| { - assert_ne!(accs[0].id(), accs[1].id()); - assert_eq!(accs[0].id(), alice_id); - assert_eq!(accs[1].id(), charlie_id); - let _ = context; - Ok(()) - }) - }) - })) - .unwrap(); +#[tokio::test] +async fn fetches_account_from_username() { + let (store, _context, accs) = test_store().await.unwrap(); + let account_id = store + .get_account_id_from_username(&Username::from_str("alice").unwrap()) + .await + .unwrap(); + assert_eq!(account_id, accs[0].id()); } -#[test] -fn get_all_accounts() { - block_on(test_store().and_then(|(store, context, _accs)| { - store.get_all_accounts().and_then(move |accounts| { - assert_eq!(accounts.len(), 2); - let _ = context; - Ok(()) - }) - })) - .unwrap(); +#[tokio::test] +async fn get_all_accounts() { + let (store, _context, _) = test_store().await.unwrap(); + let accounts = store.get_all_accounts().await.unwrap(); + assert_eq!(accounts.len(), 2); } -#[test] -fn gets_single_account() { - block_on(test_store().and_then(|(store, context, accs)| { - let store_clone = store.clone(); - let acc = accs[0].clone(); - store_clone - .get_accounts(vec![acc.id()]) - .and_then(move |accounts| { - 
assert_eq!(accounts[0].ilp_address(), acc.ilp_address()); - let _ = context; - Ok(()) - }) - })) - .unwrap(); +#[tokio::test] +async fn gets_single_account() { + let (store, _context, accs) = test_store().await.unwrap(); + let acc = accs[0].clone(); + let accounts = store.get_accounts(vec![acc.id()]).await.unwrap(); + assert_eq!(accounts[0].ilp_address(), acc.ilp_address()); } -#[test] -fn gets_multiple() { - block_on(test_store().and_then(|(store, context, accs)| { - let store_clone = store.clone(); - // set account ids in reverse order - let account_ids: Vec = accs.iter().rev().map(|a| a.id()).collect::<_>(); - store_clone - .get_accounts(account_ids) - .and_then(move |accounts| { - // note reverse order is intentional - assert_eq!(accounts[0].ilp_address(), accs[1].ilp_address()); - assert_eq!(accounts[1].ilp_address(), accs[0].ilp_address()); - let _ = context; - Ok(()) - }) - })) - .unwrap(); +#[tokio::test] +async fn gets_multiple() { + let (store, _context, accs) = test_store().await.unwrap(); + // set account ids in reverse order + let account_ids: Vec = accs.iter().rev().map(|a| a.id()).collect::<_>(); + let accounts = store.get_accounts(account_ids).await.unwrap(); + // note reverse order is intentional + assert_eq!(accounts[0].ilp_address(), accs[1].ilp_address()); + assert_eq!(accounts[1].ilp_address(), accs[0].ilp_address()); } -#[test] -fn decrypts_outgoing_tokens_acc() { - block_on(test_store().and_then(|(store, context, accs)| { - let acc = accs[0].clone(); - store - .get_accounts(vec![acc.id()]) - .and_then(move |accounts| { - let account = accounts[0].clone(); - assert_eq!( - account.get_http_auth_token().unwrap().expose_secret(), - acc.get_http_auth_token().unwrap().expose_secret(), - ); - assert_eq!( - account.get_ilp_over_btp_outgoing_token().unwrap(), - acc.get_ilp_over_btp_outgoing_token().unwrap(), - ); - let _ = context; - Ok(()) - }) - })) - .unwrap() +#[tokio::test] +async fn decrypts_outgoing_tokens_acc() { + let (store, _context, accs) 
= test_store().await.unwrap(); + let acc = accs[0].clone(); + let accounts = store.get_accounts(vec![acc.id()]).await.unwrap(); + let account = accounts[0].clone(); + assert_eq!( + account.get_http_auth_token().unwrap().expose_secret(), + acc.get_http_auth_token().unwrap().expose_secret(), + ); + assert_eq!( + account.get_ilp_over_btp_outgoing_token().unwrap(), + acc.get_ilp_over_btp_outgoing_token().unwrap(), + ); } -#[test] -fn errors_for_unknown_accounts() { - let result = block_on(test_store().and_then(|(store, context, _accs)| { - store - .get_accounts(vec![Uuid::new_v4(), Uuid::new_v4()]) - .then(move |result| { - let _ = context; - result - }) - })); +#[tokio::test] +async fn errors_for_unknown_accounts() { + let (store, _context, _) = test_store().await.unwrap(); + let result = store + .get_accounts(vec![Uuid::new_v4(), Uuid::new_v4()]) + .await; assert!(result.is_err()); } diff --git a/crates/interledger-store/tests/redis/balances_test.rs b/crates/interledger-store/tests/redis/balances_test.rs index 20f3fece5..0be0c13df 100644 --- a/crates/interledger-store/tests/redis/balances_test.rs +++ b/crates/interledger-store/tests/redis/balances_test.rs @@ -9,90 +9,64 @@ use interledger_store::account::Account; use std::str::FromStr; use uuid::Uuid; -#[test] -fn get_balance() { - block_on(test_store().and_then(|(store, context, _accs)| { - let account_id = Uuid::new_v4(); - context - .async_connection() - .map_err(move |err| panic!(err)) - .and_then(move |connection| { - redis_crate::cmd("HMSET") - .arg(format!("accounts:{}", account_id)) - .arg("balance") - .arg(600) - .arg("prepaid_amount") - .arg(400) - .query_async(connection) - .map_err(|err| panic!(err)) - .and_then(move |(_, _): (_, redis_crate::Value)| { - let account = Account::try_from( - account_id, - ACCOUNT_DETAILS_0.clone(), - store.get_ilp_address(), - ) - .unwrap(); - store.get_balance(account).and_then(move |balance| { - assert_eq!(balance, 1000); - let _ = context; - Ok(()) - }) - }) - }) - })) 
+#[tokio::test] +async fn get_balance() { + let (store, context, _accs) = test_store().await.unwrap(); + let account_id = Uuid::new_v4(); + let mut connection = context.async_connection().await.unwrap(); + let _: redis_crate::Value = redis_crate::cmd("HMSET") + .arg(format!("accounts:{}", account_id)) + .arg("balance") + .arg(600u64) + .arg("prepaid_amount") + .arg(400u64) + .query_async(&mut connection) + .await + .unwrap(); + let account = Account::try_from( + account_id, + ACCOUNT_DETAILS_0.clone(), + store.get_ilp_address(), + ) .unwrap(); + let balance = store.get_balance(account).await.unwrap(); + assert_eq!(balance, 1000); } -#[test] -fn prepare_then_fulfill_with_settlement() { - block_on(test_store().and_then(|(store, context, accs)| { - let store_clone_1 = store.clone(); - let store_clone_2 = store.clone(); - store - .clone() - .get_accounts(vec![accs[0].id(), accs[1].id()]) - .map_err(|_err| panic!("Unable to get accounts")) - .and_then(move |accounts| { - let account0 = accounts[0].clone(); - let account1 = accounts[1].clone(); - store - // reduce account 0's balance by 100 - .update_balances_for_prepare(accounts[0].clone(), 100) - .and_then(move |_| { - store_clone_1 - .clone() - .get_balance(accounts[0].clone()) - .join(store_clone_1.clone().get_balance(accounts[1].clone())) - .and_then(|(balance0, balance1)| { - assert_eq!(balance0, -100); - assert_eq!(balance1, 0); - Ok(()) - }) - }) - .and_then(move |_| { - store_clone_2 - .clone() - .update_balances_for_fulfill(account1.clone(), 100) - .and_then(move |_| { - store_clone_2 - .clone() - .get_balance(account0.clone()) - .join(store_clone_2.clone().get_balance(account1.clone())) - .and_then(move |(balance0, balance1)| { - assert_eq!(balance0, -100); - assert_eq!(balance1, -1000); // the account must be settled down to -1000 - let _ = context; - Ok(()) - }) - }) - }) - }) - })) - .unwrap(); +#[tokio::test] +async fn prepare_then_fulfill_with_settlement() { + let (store, _context, accs) = 
test_store().await.unwrap(); + let accounts = store + .get_accounts(vec![accs[0].id(), accs[1].id()]) + .await + .unwrap(); + let account0 = accounts[0].clone(); + let account1 = accounts[1].clone(); + // reduce account 0's balance by 100 + store + .update_balances_for_prepare(account0.clone(), 100) + .await + .unwrap(); + // TODO:Can we make get_balance take a reference to the account? + // Even better, we should make it just take the account uid/username! + let balance0 = store.get_balance(account0.clone()).await.unwrap(); + let balance1 = store.get_balance(account1.clone()).await.unwrap(); + assert_eq!(balance0, -100); + assert_eq!(balance1, 0); + + // Account 1 hits the settlement limit (?) TODO + store + .update_balances_for_fulfill(account1.clone(), 100) + .await + .unwrap(); + let balance0 = store.get_balance(account0).await.unwrap(); + let balance1 = store.get_balance(account1).await.unwrap(); + assert_eq!(balance0, -100); + assert_eq!(balance1, -1000); } -#[test] -fn process_fulfill_no_settle_to() { +#[tokio::test] +async fn process_fulfill_no_settle_to() { // account without a settle_to let acc = { let mut acc = ACCOUNT_DETAILS_1.clone(); @@ -104,31 +78,21 @@ fn process_fulfill_no_settle_to() { acc.settle_to = None; acc }; - block_on(test_store().and_then(|(store, context, _accs)| { - let store_clone = store.clone(); - store.clone().insert_account(acc).and_then(move |account| { - let id = account.id(); - store_clone - .get_accounts(vec![id]) - .and_then(move |accounts| { - let acc = accounts[0].clone(); - store_clone - .clone() - .update_balances_for_fulfill(acc.clone(), 100) - .and_then(move |(balance, amount_to_settle)| { - assert_eq!(balance, 100); - assert_eq!(amount_to_settle, 0); - let _ = context; - Ok(()) - }) - }) - }) - })) - .unwrap(); + let (store, _context, _accs) = test_store().await.unwrap(); + let account = store.insert_account(acc).await.unwrap(); + let id = account.id(); + let accounts = store.get_accounts(vec![id]).await.unwrap(); + let 
acc = accounts[0].clone(); + let (balance, amount_to_settle) = store + .update_balances_for_fulfill(acc.clone(), 100) + .await + .unwrap(); + assert_eq!(balance, 100); + assert_eq!(amount_to_settle, 0); } -#[test] -fn process_fulfill_settle_to_over_threshold() { +#[tokio::test] +async fn process_fulfill_settle_to_over_threshold() { // account misconfigured with settle_to >= settle_threshold does not get settlements let acc = { let mut acc = ACCOUNT_DETAILS_1.clone(); @@ -141,31 +105,21 @@ fn process_fulfill_settle_to_over_threshold() { acc.ilp_over_btp_incoming_token = None; acc }; - block_on(test_store().and_then(|(store, context, _accs)| { - let store_clone = store.clone(); - store.clone().insert_account(acc).and_then(move |acc| { - let id = acc.id(); - store_clone - .get_accounts(vec![id]) - .and_then(move |accounts| { - let acc = accounts[0].clone(); - store_clone - .clone() - .update_balances_for_fulfill(acc.clone(), 1000) - .and_then(move |(balance, amount_to_settle)| { - assert_eq!(balance, 1000); - assert_eq!(amount_to_settle, 0); - let _ = context; - Ok(()) - }) - }) - }) - })) - .unwrap(); + let (store, _context, _accs) = test_store().await.unwrap(); + let acc = store.insert_account(acc).await.unwrap(); + let id = acc.id(); + let accounts = store.get_accounts(vec![id]).await.unwrap(); + let acc = accounts[0].clone(); + let (balance, amount_to_settle) = store + .update_balances_for_fulfill(acc.clone(), 1000) + .await + .unwrap(); + assert_eq!(balance, 1000); + assert_eq!(amount_to_settle, 0); } -#[test] -fn process_fulfill_ok() { +#[tokio::test] +async fn process_fulfill_ok() { // account with settle to = 0 (not falsy) with settle_threshold > 0, gets settlements let acc = { let mut acc = ACCOUNT_DETAILS_1.clone(); @@ -178,160 +132,99 @@ fn process_fulfill_ok() { acc.ilp_over_btp_incoming_token = None; acc }; - block_on(test_store().and_then(|(store, context, _accs)| { - let store_clone = store.clone(); - store.clone().insert_account(acc).and_then(move 
|account| { - let id = account.id(); - store_clone - .get_accounts(vec![id]) - .and_then(move |accounts| { - let acc = accounts[0].clone(); - store_clone - .clone() - .update_balances_for_fulfill(acc.clone(), 101) - .and_then(move |(balance, amount_to_settle)| { - assert_eq!(balance, 0); - assert_eq!(amount_to_settle, 101); - let _ = context; - Ok(()) - }) - }) - }) - })) - .unwrap(); + let (store, _context, _accs) = test_store().await.unwrap(); + let account = store.insert_account(acc).await.unwrap(); + let id = account.id(); + let accounts = store.get_accounts(vec![id]).await.unwrap(); + let acc = accounts[0].clone(); + let (balance, amount_to_settle) = store + .update_balances_for_fulfill(acc.clone(), 101) + .await + .unwrap(); + assert_eq!(balance, 0); + assert_eq!(amount_to_settle, 101); } -#[test] -fn prepare_then_reject() { - block_on(test_store().and_then(|(store, context, accs)| { - let store_clone_1 = store.clone(); - let store_clone_2 = store.clone(); - store - .clone() - .get_accounts(vec![accs[0].id(), accs[1].id()]) - .map_err(|_err| panic!("Unable to get accounts")) - .and_then(move |accounts| { - let account0 = accounts[0].clone(); - let account1 = accounts[1].clone(); - store - .update_balances_for_prepare(accounts[0].clone(), 100) - .and_then(move |_| { - store_clone_1 - .clone() - .get_balance(accounts[0].clone()) - .join(store_clone_1.clone().get_balance(accounts[1].clone())) - .and_then(|(balance0, balance1)| { - assert_eq!(balance0, -100); - assert_eq!(balance1, 0); - Ok(()) - }) - }) - .and_then(move |_| { - store_clone_2 - .clone() - .update_balances_for_reject(account0.clone(), 100) - .and_then(move |_| { - store_clone_2 - .clone() - .get_balance(account0.clone()) - .join(store_clone_2.clone().get_balance(account1.clone())) - .and_then(move |(balance0, balance1)| { - assert_eq!(balance0, 0); - assert_eq!(balance1, 0); - let _ = context; - Ok(()) - }) - }) - }) - }) - })) - .unwrap(); +#[tokio::test] +async fn prepare_then_reject() { + let 
(store, _context, accs) = test_store().await.unwrap(); + let accounts = store + .get_accounts(vec![accs[0].id(), accs[1].id()]) + .await + .unwrap(); + let account0 = accounts[0].clone(); + let account1 = accounts[1].clone(); + store + .update_balances_for_prepare(accounts[0].clone(), 100) + .await + .unwrap(); + let balance0 = store.get_balance(accounts[0].clone()).await.unwrap(); + let balance1 = store.get_balance(accounts[1].clone()).await.unwrap(); + assert_eq!(balance0, -100); + assert_eq!(balance1, 0); + store + .update_balances_for_reject(account0.clone(), 100) + .await + .unwrap(); + let balance0 = store.get_balance(accounts[0].clone()).await.unwrap(); + let balance1 = store.get_balance(accounts[1].clone()).await.unwrap(); + assert_eq!(balance0, 0); + assert_eq!(balance1, 0); } -#[test] -fn enforces_minimum_balance() { - block_on(test_store().and_then(|(store, context, accs)| { - store - .clone() - .get_accounts(vec![accs[0].id(), accs[1].id()]) - .map_err(|_err| panic!("Unable to get accounts")) - .and_then(move |accounts| { - store - .update_balances_for_prepare(accounts[0].clone(), 10000) - .then(move |result| { - assert!(result.is_err()); - let _ = context; - Ok(()) - }) - }) - })) - .unwrap() +#[tokio::test] +async fn enforces_minimum_balance() { + let (store, _context, accs) = test_store().await.unwrap(); + let accounts = store + .get_accounts(vec![accs[0].id(), accs[1].id()]) + .await + .unwrap(); + let result = store + .update_balances_for_prepare(accounts[0].clone(), 10000) + .await; + assert!(result.is_err()); } -#[test] +#[tokio::test] // Prepare and Fulfill a packet for 100 units from Account 0 to Account 1 // Then, Prepare and Fulfill a packet for 80 units from Account 1 to Account 0 -fn netting_fulfilled_balances() { - block_on(test_store().and_then(|(store, context, accs)| { - let store_clone1 = store.clone(); - let store_clone2 = store.clone(); - store - .clone() - .insert_account(ACCOUNT_DETAILS_2.clone()) - .and_then(move |acc| { - store - 
.clone() - .get_accounts(vec![accs[0].id(), acc.id()]) - .map_err(|_err| panic!("Unable to get accounts")) - .and_then(move |accounts| { - let account0 = accounts[0].clone(); - let account1 = accounts[1].clone(); - let account0_clone = account0.clone(); - let account1_clone = account1.clone(); - future::join_all(vec![ - Either::A(store.clone().update_balances_for_prepare( - account0.clone(), - 100, // decrement account 0 by 100 - )), - Either::B( - store - .clone() - .update_balances_for_fulfill( - account1.clone(), // increment account 1 by 100 - 100, - ) - .and_then(|_| Ok(())), - ), - ]) - .and_then(move |_| { - future::join_all(vec![ - Either::A( - store_clone1 - .clone() - .update_balances_for_prepare(account1.clone(), 80), - ), - Either::B( - store_clone1 - .clone() - .update_balances_for_fulfill(account0.clone(), 80) - .and_then(|_| Ok(())), - ), - ]) - }) - .and_then(move |_| { - store_clone2 - .clone() - .get_balance(account0_clone) - .join(store_clone2.get_balance(account1_clone)) - .and_then(move |(balance0, balance1)| { - assert_eq!(balance0, -20); - assert_eq!(balance1, 20); - let _ = context; - Ok(()) - }) - }) - }) - }) - })) - .unwrap(); +async fn netting_fulfilled_balances() { + let (store, _context, accs) = test_store().await.unwrap(); + let acc = store + .insert_account(ACCOUNT_DETAILS_2.clone()) + .await + .unwrap(); + let accounts = store + .get_accounts(vec![accs[0].id(), acc.id()]) + .await + .unwrap(); + let account0 = accounts[0].clone(); + let account1 = accounts[1].clone(); + + // decrement account 0 by 100 + store + .update_balances_for_prepare(account0.clone(), 100) + .await + .unwrap(); + // increment account 1 by 100 + store + .update_balances_for_fulfill(account1.clone(), 100) + .await + .unwrap(); + + // decrement account 1 by 80 + store + .update_balances_for_prepare(account1.clone(), 80) + .await + .unwrap(); + // increment account 0 by 80 + store + .update_balances_for_fulfill(account0.clone(), 80) + .await + .unwrap(); + + let 
balance0 = store.get_balance(accounts[0].clone()).await.unwrap(); + let balance1 = store.get_balance(accounts[1].clone()).await.unwrap(); + assert_eq!(balance0, -20); + assert_eq!(balance1, 20); } diff --git a/crates/interledger-store/tests/redis/btp_test.rs b/crates/interledger-store/tests/redis/btp_test.rs index 94af16200..5e5c938f6 100644 --- a/crates/interledger-store/tests/redis/btp_test.rs +++ b/crates/interledger-store/tests/redis/btp_test.rs @@ -1,63 +1,81 @@ +use super::fixtures::*; +use super::redis_helpers::TestContext; use super::store_helpers::*; +use futures::compat::Future01CompatExt; use futures::future::Future; +use futures01::future::Future as Future01; +use interledger_api::NodeStore; use interledger_btp::{BtpAccount, BtpStore}; use interledger_http::HttpAccount; use interledger_packet::Address; -use interledger_service::{Account, Username}; -use secrecy::ExposeSecret; +use interledger_service::{Account as AccountTrait, Username}; +use interledger_store::{account::Account, redis::RedisStore}; +use secrecy::{ExposeSecret, SecretString}; use std::str::FromStr; -#[test] -fn gets_account_from_btp_auth() { - block_on(test_store().and_then(|(store, context, _accs)| { - store - .get_account_from_btp_auth(&Username::from_str("bob").unwrap(), "other_btp_token") - .and_then(move |account| { - assert_eq!( - *account.ilp_address(), - Address::from_str("example.alice.user1.bob").unwrap() - ); - let _ = context; - Ok(()) - }) - })) - .unwrap() +#[tokio::test] +async fn gets_account_from_btp_auth() { + let (store, _context, _) = test_store().await.unwrap(); + let account = store + .get_account_from_btp_auth(&Username::from_str("bob").unwrap(), "other_btp_token") + .await + .unwrap(); + assert_eq!( + *account.ilp_address(), + Address::from_str("example.alice.user1.bob").unwrap() + ); } -#[test] -fn decrypts_outgoing_tokens_btp() { - block_on(test_store().and_then(|(store, context, _accs)| { - store - 
.get_account_from_btp_auth(&Username::from_str("bob").unwrap(), "other_btp_token") - .and_then(move |account| { - // the account is created on Dylan's connector - assert_eq!( - account.get_http_auth_token().unwrap().expose_secret(), - "outgoing_auth_token", - ); - assert_eq!( - &account.get_ilp_over_btp_outgoing_token().unwrap(), - b"btp_token" - ); - let _ = context; - Ok(()) - }) - })) - .unwrap() +#[tokio::test] +async fn decrypts_outgoing_tokens_btp() { + let (store, _context, _) = test_store().await.unwrap(); + let account = store + .get_account_from_btp_auth(&Username::from_str("bob").unwrap(), "other_btp_token") + .await + .unwrap(); + + // the account is created on Dylan's connector + assert_eq!( + account.get_http_auth_token().unwrap().expose_secret(), + "outgoing_auth_token", + ); + assert_eq!( + &account.get_ilp_over_btp_outgoing_token().unwrap(), + b"btp_token" + ); } -#[test] -fn errors_on_unknown_btp_token() { - let result = block_on(test_store().and_then(|(store, context, _accs)| { - store - .get_account_from_btp_auth( - &Username::from_str("someuser").unwrap(), - "unknown_btp_token", - ) - .then(move |result| { - let _ = context; - result - }) - })); +#[tokio::test] +async fn errors_on_unknown_user_or_wrong_btp_token() { + let (store, _context, _) = test_store().await.unwrap(); + let result = store + .get_account_from_btp_auth(&Username::from_str("asdf").unwrap(), "other_btp_token") + .await; + assert!(result.is_err()); + + let result = store + .get_account_from_btp_auth(&Username::from_str("bob").unwrap(), "wrong_token") + .await; assert!(result.is_err()); } + +#[tokio::test] +async fn duplicate_btp_incoming_auth_works() { + let mut charlie = ACCOUNT_DETAILS_2.clone(); + charlie.ilp_over_btp_incoming_token = Some(SecretString::new("btp_token".to_string())); + let (store, _context, accs) = test_store().await.unwrap(); + let alice = accs[0].clone(); + let alice_id = alice.id(); + let charlie = store.insert_account(charlie).await.unwrap(); + let 
charlie_id = charlie.id(); + assert_ne!(alice_id, charlie_id); + let result = futures::future::join_all(vec![ + store.get_account_from_btp_auth(&Username::from_str("alice").unwrap(), "btp_token"), + store.get_account_from_btp_auth(&Username::from_str("charlie").unwrap(), "btp_token"), + ]) + .await; + let accs: Vec<_> = result.into_iter().map(|r| r.unwrap()).collect(); + assert_ne!(accs[0].id(), accs[1].id()); + assert_eq!(accs[0].id(), alice_id); + assert_eq!(accs[1].id(), charlie_id); +} diff --git a/crates/interledger-store/tests/redis/http_test.rs b/crates/interledger-store/tests/redis/http_test.rs index 3e3c2350f..fe4c3c14b 100644 --- a/crates/interledger-store/tests/redis/http_test.rs +++ b/crates/interledger-store/tests/redis/http_test.rs @@ -1,74 +1,77 @@ +use super::fixtures::*; use super::store_helpers::*; use futures::future::Future; +use interledger_api::NodeStore; use interledger_btp::BtpAccount; use interledger_http::{HttpAccount, HttpStore}; use interledger_packet::Address; use interledger_service::{Account, Username}; -use secrecy::ExposeSecret; +use secrecy::{ExposeSecret, SecretString}; use std::str::FromStr; -#[test] -fn gets_account_from_http_bearer_token() { - block_on(test_store().and_then(|(store, context, _accs)| { - store - .get_account_from_http_auth( - &Username::from_str("alice").unwrap(), - "incoming_auth_token", - ) - .and_then(move |account| { - assert_eq!( - *account.ilp_address(), - Address::from_str("example.alice").unwrap() - ); - // this account is in Dylan's connector - assert_eq!( - account.get_http_auth_token().unwrap().expose_secret(), - "outgoing_auth_token", - ); - assert_eq!( - &account.get_ilp_over_btp_outgoing_token().unwrap(), - b"btp_token", - ); - let _ = context; - Ok(()) - }) - })) - .unwrap() +#[tokio::test] +async fn gets_account_from_http_bearer_token() { + let (store, _context, _) = test_store().await.unwrap(); + let account = store + .get_account_from_http_auth(&Username::from_str("alice").unwrap(), 
"incoming_auth_token") + .await + .unwrap(); + assert_eq!( + *account.ilp_address(), + Address::from_str("example.alice").unwrap() + ); + assert_eq!( + account.get_http_auth_token().unwrap().expose_secret(), + "outgoing_auth_token", + ); + assert_eq!( + &account.get_ilp_over_btp_outgoing_token().unwrap(), + b"btp_token", + ); } -#[test] -fn decrypts_outgoing_tokens_http() { - block_on(test_store().and_then(|(store, context, _accs)| { - store - .get_account_from_http_auth( - &Username::from_str("alice").unwrap(), - "incoming_auth_token", - ) - .and_then(move |account| { - assert_eq!( - account.get_http_auth_token().unwrap().expose_secret(), - "outgoing_auth_token", - ); - assert_eq!( - &account.get_ilp_over_btp_outgoing_token().unwrap(), - b"btp_token", - ); - let _ = context; - Ok(()) - }) - })) - .unwrap() -} +#[tokio::test] +async fn errors_on_unknown_user_or_wrong_http_token() { + let (store, _context, _) = test_store().await.unwrap(); + // wrong password + let result = store + .get_account_from_http_auth(&Username::from_str("alice").unwrap(), "unknown_token") + .await; + assert!(result.is_err()); -#[test] -fn errors_on_unknown_http_auth() { - let result = block_on(test_store().and_then(|(store, context, _accs)| { - store - .get_account_from_http_auth(&Username::from_str("someuser").unwrap(), "unknown_token") - .then(move |result| { - let _ = context; - result - }) - })); + // wrong user + let result = store + .get_account_from_http_auth(&Username::from_str("asdf").unwrap(), "incoming_auth_token") + .await; assert!(result.is_err()); } + +#[tokio::test] +async fn duplicate_http_incoming_auth_works() { + let mut duplicate = ACCOUNT_DETAILS_2.clone(); + duplicate.ilp_over_http_incoming_token = + Some(SecretString::new("incoming_auth_token".to_string())); + let (store, _context, accs) = test_store().await.unwrap(); + let original = accs[0].clone(); + let original_id = original.id(); + let duplicate = store.insert_account(duplicate).await.unwrap(); + let duplicate_id 
= duplicate.id(); + assert_ne!(original_id, duplicate_id); + let result = futures::future::join_all(vec![ + store.get_account_from_http_auth( + &Username::from_str("alice").unwrap(), + "incoming_auth_token", + ), + store.get_account_from_http_auth( + &Username::from_str("charlie").unwrap(), + "incoming_auth_token", + ), + ]) + .await; + let accs: Vec<_> = result.into_iter().map(|r| r.unwrap()).collect(); + // Alice and Charlie had the same auth token, but they had a + // different username/account id, so no problem. + assert_ne!(accs[0].id(), accs[1].id()); + assert_eq!(accs[0].id(), original_id); + assert_eq!(accs[1].id(), duplicate_id); +} diff --git a/crates/interledger-store/tests/redis/rate_limiting_test.rs b/crates/interledger-store/tests/redis/rate_limiting_test.rs index 178e4f0e0..6a2cc7969 100644 --- a/crates/interledger-store/tests/redis/rate_limiting_test.rs +++ b/crates/interledger-store/tests/redis/rate_limiting_test.rs @@ -5,94 +5,76 @@ use interledger_service_util::{RateLimitError, RateLimitStore}; use interledger_store::account::Account; use uuid::Uuid; -#[test] -fn rate_limits_number_of_packets() { - block_on(test_store().and_then(|(store, context, _accs)| { - let account = Account::try_from( - Uuid::new_v4(), - ACCOUNT_DETAILS_0.clone(), - store.get_ilp_address(), - ) - .unwrap(); - join_all(vec![ - store.clone().apply_rate_limits(account.clone(), 10), - store.clone().apply_rate_limits(account.clone(), 10), - store.clone().apply_rate_limits(account.clone(), 10), - ]) - .then(move |result| { - assert!(result.is_err()); - assert_eq!(result.unwrap_err(), RateLimitError::PacketLimitExceeded); - let _ = context; - Ok(()) - }) - })) - .unwrap() +#[tokio::test] +async fn rate_limits_number_of_packets() { + let (store, _context, _) = test_store().await.unwrap(); + let account = Account::try_from( + Uuid::new_v4(), + ACCOUNT_DETAILS_0.clone(), + store.get_ilp_address(), + ) + .unwrap(); + let results = join_all(vec![ + 
store.clone().apply_rate_limits(account.clone(), 10), + store.clone().apply_rate_limits(account.clone(), 10), + store.clone().apply_rate_limits(account.clone(), 10), + ]) + .await; + // The first 2 calls succeed, while the 3rd one hits the rate limit error + // because the account is only allowed 2 packets per minute + assert_eq!( + results, + vec![Ok(()), Ok(()), Err(RateLimitError::PacketLimitExceeded)] + ); } -#[test] -fn limits_amount_throughput() { - block_on(test_store().and_then(|(store, context, _accs)| { - let account = Account::try_from( - Uuid::new_v4(), - ACCOUNT_DETAILS_1.clone(), - store.get_ilp_address(), - ) - .unwrap(); - join_all(vec![ - store.clone().apply_rate_limits(account.clone(), 500), - store.clone().apply_rate_limits(account.clone(), 500), - store.clone().apply_rate_limits(account.clone(), 1), - ]) - .then(move |result| { - assert!(result.is_err()); - assert_eq!(result.unwrap_err(), RateLimitError::ThroughputLimitExceeded); - let _ = context; - Ok(()) - }) - })) - .unwrap() +#[tokio::test] +async fn limits_amount_throughput() { + let (store, _context, _) = test_store().await.unwrap(); + let account = Account::try_from( + Uuid::new_v4(), + ACCOUNT_DETAILS_1.clone(), + store.get_ilp_address(), + ) + .unwrap(); + let results = join_all(vec![ + store.clone().apply_rate_limits(account.clone(), 500), + store.clone().apply_rate_limits(account.clone(), 500), + store.clone().apply_rate_limits(account.clone(), 1), + ]) + .await; + // The first 2 calls succeed, while the 3rd one hits the rate limit error + // because the account is only allowed 1000 units of currency per minute + assert_eq!( + results, + vec![Ok(()), Ok(()), Err(RateLimitError::ThroughputLimitExceeded)] + ); } -#[test] -fn refunds_throughput_limit_for_rejected_packets() { - block_on(test_store().and_then(|(store, context, _accs)| { - let account = Account::try_from( - Uuid::new_v4(), - ACCOUNT_DETAILS_1.clone(), - store.get_ilp_address(), - ) +#[tokio::test] +async fn 
refunds_throughput_limit_for_rejected_packets() { + let (store, _context, _) = test_store().await.unwrap(); + let account = Account::try_from( + Uuid::new_v4(), + ACCOUNT_DETAILS_1.clone(), + store.get_ilp_address(), + ) + .unwrap(); + + join_all(vec![ + store.clone().apply_rate_limits(account.clone(), 500), + store.clone().apply_rate_limits(account.clone(), 500), + ]) + .await; + + // We refund the throughput limit once, meaning we can do 1 more call before + // the error + store + .refund_throughput_limit(account.clone(), 500) + .await .unwrap(); - join_all(vec![ - store.clone().apply_rate_limits(account.clone(), 500), - store.clone().apply_rate_limits(account.clone(), 500), - ]) - .map_err(|err| panic!(err)) - .and_then(move |_| { - let store_clone = store.clone(); - let account_clone = account.clone(); - store - .clone() - .refund_throughput_limit(account.clone(), 500) - .and_then(move |_| { - store - .clone() - .apply_rate_limits(account.clone(), 500) - .map_err(|err| panic!(err)) - }) - .and_then(move |_| { - store_clone - .apply_rate_limits(account_clone, 1) - .then(move |result| { - assert!(result.is_err()); - assert_eq!( - result.unwrap_err(), - RateLimitError::ThroughputLimitExceeded - ); - let _ = context; - Ok(()) - }) - }) - }) - })) - .unwrap() + store.apply_rate_limits(account.clone(), 500).await.unwrap(); + + let result = store.apply_rate_limits(account.clone(), 1).await; + assert_eq!(result.unwrap_err(), RateLimitError::ThroughputLimitExceeded); } diff --git a/crates/interledger-store/tests/redis/rates_test.rs b/crates/interledger-store/tests/redis/rates_test.rs index ce7a99d65..c3dd7f700 100644 --- a/crates/interledger-store/tests/redis/rates_test.rs +++ b/crates/interledger-store/tests/redis/rates_test.rs @@ -2,26 +2,21 @@ use super::store_helpers::*; use futures::future::Future; use interledger_service_util::ExchangeRateStore; -#[test] -fn set_rates() { - block_on(test_store().and_then(|(store, context, _accs)| { - let store_clone = 
store.clone(); - let rates = store.get_exchange_rates(&["ABC", "XYZ"]); - assert!(rates.is_err()); - store - .set_exchange_rates( - [("ABC".to_string(), 500.0), ("XYZ".to_string(), 0.005)] - .iter() - .cloned() - .collect(), - ) - .and_then(move |_| { - let rates = store_clone.get_exchange_rates(&["XYZ", "ABC"]).unwrap(); - assert_eq!(rates[0].to_string(), "0.005"); - assert_eq!(rates[1].to_string(), "500"); - let _ = context; - Ok(()) - }) - })) - .unwrap(); +#[tokio::test] +async fn set_rates() { + let (store, _context, _) = test_store().await.unwrap(); + let rates = store.get_exchange_rates(&["ABC", "XYZ"]); + assert!(rates.is_err()); + store + .set_exchange_rates( + [("ABC".to_string(), 500.0), ("XYZ".to_string(), 0.005)] + .iter() + .cloned() + .collect(), + ) + .unwrap(); + + let rates = store.get_exchange_rates(&["XYZ", "ABC"]).unwrap(); + assert_eq!(rates[0].to_string(), "0.005"); + assert_eq!(rates[1].to_string(), "500"); } diff --git a/crates/interledger-store/tests/redis/redis_tests.rs b/crates/interledger-store/tests/redis/redis_tests.rs index c0910f487..f450763de 100644 --- a/crates/interledger-store/tests/redis/redis_tests.rs +++ b/crates/interledger-store/tests/redis/redis_tests.rs @@ -8,6 +8,7 @@ mod routing_test; mod settlement_test; mod fixtures { + use interledger_api::AccountDetails; use interledger_packet::Address; use interledger_service::Username; @@ -97,6 +98,8 @@ mod redis_helpers { use std::thread::sleep; use std::time::Duration; + use futures::future::TryFutureExt; + #[derive(PartialEq)] enum ServerType { Tcp, @@ -247,22 +250,21 @@ mod redis_helpers { self.client.get_connection().unwrap() } - pub fn async_connection( - &self, - ) -> impl Future { + pub async fn async_connection(&self) -> Result { self.client .get_async_connection() .map_err(|err| panic!(err)) + .await } pub fn stop_server(&mut self) { self.server.stop(); } - pub fn shared_async_connection( + pub async fn shared_async_connection( &self, - ) -> impl Future { - 
self.client.get_shared_async_connection() + ) -> Result { + self.client.get_multiplexed_tokio_connection().await } } } @@ -271,7 +273,9 @@ mod store_helpers { use super::fixtures::*; use super::redis_helpers::*; use env_logger; + use futures::compat::Future01CompatExt; use futures::Future; + use futures::TryFutureExt; use interledger_api::NodeStore; use interledger_packet::Address; use interledger_service::{Account as AccountTrait, AddressStore}; @@ -288,49 +292,32 @@ mod store_helpers { static ref TEST_MUTEX: Mutex<()> = Mutex::new(()); } - pub fn test_store() -> impl Future), Error = ()> { + pub async fn test_store() -> Result<(RedisStore, TestContext, Vec), ()> { let context = TestContext::new(); - RedisStoreBuilder::new(context.get_client_connection_info(), [0; 32]) + let store = RedisStoreBuilder::new(context.get_client_connection_info(), [0; 32]) .node_ilp_address(Address::from_str("example.node").unwrap()) .connect() - .and_then(|store| { - let store_clone = store.clone(); - let mut accs = Vec::new(); - store - .clone() - .insert_account(ACCOUNT_DETAILS_0.clone()) - .and_then(move |acc| { - accs.push(acc.clone()); - // alice is a Parent, so the store's ilp address is updated to - // the value that would be received by the ILDCP request. here, - // we just assume alice appended some data to her address - store - .clone() - .set_ilp_address(acc.ilp_address().with_suffix(b"user1").unwrap()) - .and_then(move |_| { - store_clone - .insert_account(ACCOUNT_DETAILS_1.clone()) - .and_then(move |acc| { - accs.push(acc.clone()); - Ok((store, context, accs)) - }) - }) - }) - }) - } + .await + .unwrap(); + let mut accs = Vec::new(); + let acc = store + .insert_account(ACCOUNT_DETAILS_0.clone()) + .await + .unwrap(); + accs.push(acc.clone()); + // alice is a Parent, so the store's ilp address is updated to + // the value that would be received by the ILDCP request. 
here, + // we just assume alice appended some data to her address + store + .set_ilp_address(acc.ilp_address().with_suffix(b"user1").unwrap()) + .await + .unwrap(); - pub fn block_on(f: F) -> Result - where - F: Future + Send + 'static, - F::Item: Send, - F::Error: Send, - { - // Only run one test at a time - let _ = env_logger::try_init(); - let lock = TEST_MUTEX.lock(); - let mut runtime = Runtime::new().unwrap(); - let result = runtime.block_on(f); - drop(lock); - result + let acc = store + .insert_account(ACCOUNT_DETAILS_1.clone()) + .await + .unwrap(); + accs.push(acc.clone()); + Ok((store, context, accs)) } } diff --git a/crates/interledger-store/tests/redis/routing_test.rs b/crates/interledger-store/tests/redis/routing_test.rs index 46cadc1eb..af0c6b2b0 100644 --- a/crates/interledger-store/tests/redis/routing_test.rs +++ b/crates/interledger-store/tests/redis/routing_test.rs @@ -8,378 +8,266 @@ use interledger_service::{Account as AccountTrait, AddressStore, Username}; use interledger_store::{account::Account, redis::RedisStoreBuilder}; use std::str::FromStr; use std::{collections::HashMap, time::Duration}; -use tokio_timer::sleep; use uuid::Uuid; -#[test] -fn polls_for_route_updates() { +#[tokio::test] +async fn polls_for_route_updates() { let context = TestContext::new(); - block_on( - RedisStoreBuilder::new(context.get_client_connection_info(), [0; 32]) - .poll_interval(1) - .node_ilp_address(Address::from_str("example.node").unwrap()) - .connect() - .and_then(|store| { - let connection = context.async_connection(); - assert_eq!(store.routing_table().len(), 0); - let store_clone_1 = store.clone(); - let store_clone_2 = store.clone(); - store - .clone() - .insert_account(ACCOUNT_DETAILS_0.clone()) - .and_then(move |alice| { - let routing_table = store_clone_1.routing_table(); - assert_eq!(routing_table.len(), 1); - assert_eq!(*routing_table.get("example.alice").unwrap(), alice.id()); - store_clone_1 - .insert_account(AccountDetails { - ilp_address: 
Some(Address::from_str("example.bob").unwrap()), - username: Username::from_str("bob").unwrap(), - asset_scale: 6, - asset_code: "XYZ".to_string(), - max_packet_amount: 1000, - min_balance: Some(-1000), - ilp_over_http_url: None, - ilp_over_http_incoming_token: None, - ilp_over_http_outgoing_token: None, - ilp_over_btp_url: None, - ilp_over_btp_outgoing_token: None, - ilp_over_btp_incoming_token: None, - settle_threshold: None, - settle_to: None, - routing_relation: Some("Peer".to_owned()), - round_trip_time: None, - amount_per_minute_limit: None, - packets_per_minute_limit: None, - settlement_engine_url: None, - }) - .and_then(move |bob| { - let routing_table = store_clone_2.routing_table(); - assert_eq!(routing_table.len(), 2); - assert_eq!(*routing_table.get("example.bob").unwrap(), bob.id(),); - let alice_id = alice.id(); - let bob_id = bob.id(); - connection - .map_err(|err| panic!(err)) - .and_then(move |connection| { - redis_crate::cmd("HMSET") - .arg("routes:current") - .arg("example.alice") - .arg(bob_id.to_string()) - .arg("example.charlie") - .arg(alice_id.to_string()) - .query_async(connection) - .and_then( - |(_connection, _result): ( - _, - redis_crate::Value, - )| { - Ok(()) - }, - ) - .map_err(|err| panic!(err)) - .and_then(|_| { - sleep(Duration::from_millis(10)).then(|_| Ok(())) - }) - }) - .and_then(move |_| { - let routing_table = store_clone_2.routing_table(); - assert_eq!(routing_table.len(), 3); - assert_eq!( - *routing_table.get("example.alice").unwrap(), - bob_id - ); - assert_eq!( - *routing_table.get("example.bob").unwrap(), - bob.id(), - ); - assert_eq!( - *routing_table.get("example.charlie").unwrap(), - alice_id, - ); - assert!(routing_table.get("example.other").is_none()); - let _ = context; - Ok(()) - }) - }) - }) - }), - ) - .unwrap(); + let store = RedisStoreBuilder::new(context.get_client_connection_info(), [0; 32]) + .poll_interval(1) + .node_ilp_address(Address::from_str("example.node").unwrap()) + .connect() + .await + 
.unwrap(); + + let connection = context.async_connection(); + assert_eq!(store.routing_table().len(), 0); + let store_clone_1 = store.clone(); + let store_clone_2 = store.clone(); + let alice = store + .insert_account(ACCOUNT_DETAILS_0.clone()) + .await + .unwrap(); + let routing_table = store_clone_1.routing_table(); + assert_eq!(routing_table.len(), 1); + assert_eq!(*routing_table.get("example.alice").unwrap(), alice.id()); + let bob = store_clone_1 + .insert_account(AccountDetails { + ilp_address: Some(Address::from_str("example.bob").unwrap()), + username: Username::from_str("bob").unwrap(), + asset_scale: 6, + asset_code: "XYZ".to_string(), + max_packet_amount: 1000, + min_balance: Some(-1000), + ilp_over_http_url: None, + ilp_over_http_incoming_token: None, + ilp_over_http_outgoing_token: None, + ilp_over_btp_url: None, + ilp_over_btp_outgoing_token: None, + ilp_over_btp_incoming_token: None, + settle_threshold: None, + settle_to: None, + routing_relation: Some("Peer".to_owned()), + round_trip_time: None, + amount_per_minute_limit: None, + packets_per_minute_limit: None, + settlement_engine_url: None, + }) + .await + .unwrap(); + + let routing_table = store_clone_2.routing_table(); + assert_eq!(routing_table.len(), 2); + assert_eq!(*routing_table.get("example.bob").unwrap(), bob.id(),); + let alice_id = alice.id(); + let bob_id = bob.id(); + let mut connection = connection.await.unwrap(); + let _: redis_crate::Value = redis_crate::cmd("HMSET") + .arg("routes:current") + .arg("example.alice") + .arg(bob_id.to_string()) + .arg("example.charlie") + .arg(alice_id.to_string()) + .query_async(&mut connection) + .await + .unwrap(); + + tokio::time::delay_for(Duration::from_millis(10)).await; + let routing_table = store_clone_2.routing_table(); + assert_eq!(routing_table.len(), 3); + assert_eq!(*routing_table.get("example.alice").unwrap(), bob_id); + assert_eq!(*routing_table.get("example.bob").unwrap(), bob.id(),); + 
assert_eq!(*routing_table.get("example.charlie").unwrap(), alice_id,); + assert!(routing_table.get("example.other").is_none()); } -#[test] -fn gets_accounts_to_send_routes_to() { - block_on(test_store().and_then(|(store, context, _accs)| { - store - .get_accounts_to_send_routes_to(Vec::new()) - .and_then(move |accounts| { - // We send to child accounts but not parents - assert_eq!(accounts[0].username().as_ref(), "bob"); - assert_eq!(accounts.len(), 1); - let _ = context; - Ok(()) - }) - })) - .unwrap() +#[tokio::test] +async fn gets_accounts_to_send_routes_to() { + let (store, _context, _) = test_store().await.unwrap(); + let accounts = store + .get_accounts_to_send_routes_to(Vec::new()) + .await + .unwrap(); + // We send to child accounts but not parents + assert_eq!(accounts[0].username().as_ref(), "bob"); + assert_eq!(accounts.len(), 1); } -#[test] -fn gets_accounts_to_send_routes_to_and_skips_ignored() { - block_on(test_store().and_then(|(store, context, accs)| { - store - .get_accounts_to_send_routes_to(vec![accs[1].id()]) - .and_then(move |accounts| { - assert!(accounts.is_empty()); - let _ = context; - Ok(()) - }) - })) - .unwrap() +#[tokio::test] +async fn gets_accounts_to_send_routes_to_and_skips_ignored() { + let (store, _context, accs) = test_store().await.unwrap(); + let accounts = store + .get_accounts_to_send_routes_to(vec![accs[1].id()]) + .await + .unwrap(); + assert!(accounts.is_empty()); } -#[test] -fn gets_accounts_to_receive_routes_from() { - block_on(test_store().and_then(|(store, context, _accs)| { - store - .get_accounts_to_receive_routes_from() - .and_then(move |accounts| { - assert_eq!( - *accounts[0].ilp_address(), - Address::from_str("example.alice").unwrap() - ); - assert_eq!(accounts.len(), 1); - let _ = context; - Ok(()) - }) - })) - .unwrap() +#[tokio::test] +async fn gets_accounts_to_receive_routes_from() { + let (store, _context, _) = test_store().await.unwrap(); + let accounts = 
store.get_accounts_to_receive_routes_from().await.unwrap(); + assert_eq!( + *accounts[0].ilp_address(), + Address::from_str("example.alice").unwrap() + ); } -#[test] -fn gets_local_and_configured_routes() { - block_on(test_store().and_then(|(store, context, _accs)| { - store - .get_local_and_configured_routes() - .and_then(move |(local, configured)| { - assert_eq!(local.len(), 2); - assert!(configured.is_empty()); - let _ = context; - Ok(()) - }) - })) - .unwrap() +#[tokio::test] +async fn gets_local_and_configured_routes() { + let (store, _context, _) = test_store().await.unwrap(); + let (local, configured) = store.get_local_and_configured_routes().await.unwrap(); + assert_eq!(local.len(), 2); + assert!(configured.is_empty()); } -#[test] -fn saves_routes_to_db() { - block_on(test_store().and_then(|(mut store, context, _accs)| { - let get_connection = context.async_connection(); - let account0_id = Uuid::new_v4(); - let account1_id = Uuid::new_v4(); - let account0 = Account::try_from( - account0_id, - ACCOUNT_DETAILS_0.clone(), - store.get_ilp_address(), - ) +#[tokio::test] +async fn saves_routes_to_db() { + let (store, context, _) = test_store().await.unwrap(); + let get_connection = context.async_connection(); + let account0_id = Uuid::new_v4(); + let account1_id = Uuid::new_v4(); + let account0 = Account::try_from( + account0_id, + ACCOUNT_DETAILS_0.clone(), + store.get_ilp_address(), + ) + .unwrap(); + + let account1 = Account::try_from( + account1_id, + ACCOUNT_DETAILS_1.clone(), + store.get_ilp_address(), + ) + .unwrap(); + + store + .clone() + .set_routes(vec![ + ("example.a".to_string(), account0.clone()), + ("example.b".to_string(), account0.clone()), + ("example.c".to_string(), account1.clone()), + ]) + .await .unwrap(); - let account1 = Account::try_from( - account1_id, - ACCOUNT_DETAILS_1.clone(), - store.get_ilp_address(), - ) + let mut connection = get_connection.await.unwrap(); + let routes: HashMap = redis_crate::cmd("HGETALL") + 
.arg("routes:current") + .query_async(&mut connection) + .await .unwrap(); + assert_eq!(routes["example.a"], account0_id.to_string()); + assert_eq!(routes["example.b"], account0_id.to_string()); + assert_eq!(routes["example.c"], account1_id.to_string()); + assert_eq!(routes.len(), 3); - store - .set_routes(vec![ - ("example.a".to_string(), account0.clone()), - ("example.b".to_string(), account0.clone()), - ("example.c".to_string(), account1.clone()), - ]) - .and_then(move |_| { - get_connection.and_then(move |connection| { - redis_crate::cmd("HGETALL") - .arg("routes:current") - .query_async(connection) - .map_err(|err| panic!(err)) - .and_then(move |(_conn, routes): (_, HashMap)| { - assert_eq!(routes["example.a"], account0_id.to_string()); - assert_eq!(routes["example.b"], account0_id.to_string()); - assert_eq!(routes["example.c"], account1_id.to_string()); - assert_eq!(routes.len(), 3); - Ok(()) - }) - }) - }) - .and_then(move |_| { - let _ = context; - Ok(()) - }) - })) - .unwrap() + // local routing table routes are also updated + let routes = store.routing_table(); + assert_eq!(routes["example.a"], account0_id); + assert_eq!(routes["example.b"], account0_id); + assert_eq!(routes["example.c"], account1_id); + assert_eq!(routes.len(), 3); } -#[test] -fn updates_local_routes() { - block_on(test_store().and_then(|(store, context, _accs)| { - let account0_id = Uuid::new_v4(); - let account1_id = Uuid::new_v4(); - let account0 = Account::try_from( - account0_id, - ACCOUNT_DETAILS_0.clone(), - store.get_ilp_address(), - ) +#[tokio::test] +async fn adds_static_routes_to_redis() { + let (store, context, accs) = test_store().await.unwrap(); + let get_connection = context.async_connection(); + store + .set_static_routes(vec![ + ("example.a".to_string(), accs[0].id()), + ("example.b".to_string(), accs[0].id()), + ("example.c".to_string(), accs[1].id()), + ]) + .await .unwrap(); - let account1 = Account::try_from( - account1_id, - ACCOUNT_DETAILS_1.clone(), - 
store.get_ilp_address(), - ) + let mut connection = get_connection.await.unwrap(); + let routes: HashMap = redis_crate::cmd("HGETALL") + .arg("routes:static") + .query_async(&mut connection) + .await .unwrap(); - store - .clone() - .set_routes(vec![ - ("example.a".to_string(), account0.clone()), - ("example.b".to_string(), account0.clone()), - ("example.c".to_string(), account1.clone()), - ]) - .and_then(move |_| { - let routes = store.routing_table(); - assert_eq!(routes["example.a"], account0_id); - assert_eq!(routes["example.b"], account0_id); - assert_eq!(routes["example.c"], account1_id); - assert_eq!(routes.len(), 3); - Ok(()) - }) - .and_then(move |_| { - let _ = context; - Ok(()) - }) - })) - .unwrap() + assert_eq!(routes["example.a"], accs[0].id().to_string()); + assert_eq!(routes["example.b"], accs[0].id().to_string()); + assert_eq!(routes["example.c"], accs[1].id().to_string()); + assert_eq!(routes.len(), 3); } -#[test] -fn adds_static_routes_to_redis() { - block_on(test_store().and_then(|(store, context, accs)| { - let get_connection = context.async_connection(); - store - .clone() - .set_static_routes(vec![ - ("example.a".to_string(), accs[0].id()), - ("example.b".to_string(), accs[0].id()), - ("example.c".to_string(), accs[1].id()), - ]) - .and_then(move |_| { - get_connection.and_then(|connection| { - redis_crate::cmd("HGETALL") - .arg("routes:static") - .query_async(connection) - .map_err(|err| panic!(err)) - .and_then(move |(_, routes): (_, HashMap)| { - assert_eq!(routes["example.a"], accs[0].id().to_string()); - assert_eq!(routes["example.b"], accs[0].id().to_string()); - assert_eq!(routes["example.c"], accs[1].id().to_string()); - assert_eq!(routes.len(), 3); - let _ = context; - Ok(()) - }) - }) - }) - })) - .unwrap() -} +#[tokio::test] +async fn static_routes_override_others() { + let (store, _context, accs) = test_store().await.unwrap(); + store + .set_static_routes(vec![ + ("example.a".to_string(), accs[0].id()), + ("example.b".to_string(), 
accs[0].id()), + ]) + .await + .unwrap(); + + let account1_id = Uuid::new_v4(); + let account1 = Account::try_from( + account1_id, + ACCOUNT_DETAILS_1.clone(), + store.get_ilp_address(), + ) + .unwrap(); + store + .clone() + .set_routes(vec![ + ("example.a".to_string(), account1.clone()), + ("example.b".to_string(), account1.clone()), + ("example.c".to_string(), account1), + ]) + .await + .unwrap(); -#[test] -fn static_routes_override_others() { - block_on(test_store().and_then(|(store, context, accs)| { - let mut store_clone = store.clone(); - store - .clone() - .set_static_routes(vec![ - ("example.a".to_string(), accs[0].id()), - ("example.b".to_string(), accs[0].id()), - ]) - .and_then(move |_| { - let account1_id = Uuid::new_v4(); - let account1 = Account::try_from( - account1_id, - ACCOUNT_DETAILS_1.clone(), - store.get_ilp_address(), - ) - .unwrap(); - store_clone - .set_routes(vec![ - ("example.a".to_string(), account1.clone()), - ("example.b".to_string(), account1.clone()), - ("example.c".to_string(), account1), - ]) - .and_then(move |_| { - let routes = store.routing_table(); - assert_eq!(routes["example.a"], accs[0].id()); - assert_eq!(routes["example.b"], accs[0].id()); - assert_eq!(routes["example.c"], account1_id); - assert_eq!(routes.len(), 3); - let _ = context; - Ok(()) - }) - }) - })) - .unwrap() + let routes = store.routing_table(); + assert_eq!(routes["example.a"], accs[0].id()); + assert_eq!(routes["example.b"], accs[0].id()); + assert_eq!(routes["example.c"], account1_id); + assert_eq!(routes.len(), 3); } -#[test] -fn default_route() { - block_on(test_store().and_then(|(store, context, accs)| { - let mut store_clone = store.clone(); - store - .clone() - .set_default_route(accs[0].id()) - .and_then(move |_| { - let account1_id = Uuid::new_v4(); - let account1 = Account::try_from( - account1_id, - ACCOUNT_DETAILS_1.clone(), - store.get_ilp_address(), - ) - .unwrap(); - store_clone - .set_routes(vec![ - ("example.a".to_string(), account1.clone()), 
- ("example.b".to_string(), account1.clone()), - ]) - .and_then(move |_| { - let routes = store.routing_table(); - assert_eq!(routes[""], accs[0].id()); - assert_eq!(routes["example.a"], account1_id); - assert_eq!(routes["example.b"], account1_id); - assert_eq!(routes.len(), 3); - let _ = context; - Ok(()) - }) - }) - })) - .unwrap() +#[tokio::test] +async fn default_route() { + let (store, _context, accs) = test_store().await.unwrap(); + store.set_default_route(accs[0].id()).await.unwrap(); + let account1_id = Uuid::new_v4(); + let account1 = Account::try_from( + account1_id, + ACCOUNT_DETAILS_1.clone(), + store.get_ilp_address(), + ) + .unwrap(); + store + .clone() + .set_routes(vec![ + ("example.a".to_string(), account1.clone()), + ("example.b".to_string(), account1.clone()), + ]) + .await + .unwrap(); + + let routes = store.routing_table(); + assert_eq!(routes[""], accs[0].id()); + assert_eq!(routes["example.a"], account1_id); + assert_eq!(routes["example.b"], account1_id); + assert_eq!(routes.len(), 3); } -#[test] -fn returns_configured_routes_for_route_manager() { - block_on(test_store().and_then(|(store, context, accs)| { - store - .clone() - .set_static_routes(vec![ - ("example.a".to_string(), accs[0].id()), - ("example.b".to_string(), accs[1].id()), - ]) - .and_then(move |_| store.get_local_and_configured_routes()) - .and_then(move |(_local, configured)| { - assert_eq!(configured.len(), 2); - assert_eq!(configured["example.a"].id(), accs[0].id()); - assert_eq!(configured["example.b"].id(), accs[1].id()); - let _ = context; - Ok(()) - }) - })) - .unwrap() +#[tokio::test] +async fn returns_configured_routes_for_route_manager() { + let (store, context, accs) = test_store().await.unwrap(); + store + .set_static_routes(vec![ + ("example.a".to_string(), accs[0].id()), + ("example.b".to_string(), accs[1].id()), + ]) + .await + .unwrap(); + let (_, configured) = store.get_local_and_configured_routes().await.unwrap(); + assert_eq!(configured.len(), 2); + 
assert_eq!(configured["example.a"].id(), accs[0].id()); + assert_eq!(configured["example.b"].id(), accs[1].id()); } diff --git a/crates/interledger-store/tests/redis/settlement_test.rs b/crates/interledger-store/tests/redis/settlement_test.rs index 0d1112215..0977fd725 100644 --- a/crates/interledger-store/tests/redis/settlement_test.rs +++ b/crates/interledger-store/tests/redis/settlement_test.rs @@ -10,7 +10,7 @@ use interledger_settlement::core::{ }; use lazy_static::lazy_static; use num_bigint::BigUint; -use redis_crate::{aio::SharedConnection, cmd}; +use redis_crate::{aio::MultiplexedConnection, cmd}; use url::Url; use uuid::Uuid; @@ -18,387 +18,250 @@ lazy_static! { static ref IDEMPOTENCY_KEY: String = String::from("AJKJNUjM0oyiAN46"); } -#[test] -fn saves_and_gets_uncredited_settlement_amount_properly() { - block_on(test_store().and_then(|(store, context, _accs)| { - let amounts = vec![ - (BigUint::from(5u32), 11), // 5 - (BigUint::from(855u32), 12), // 905 - (BigUint::from(1u32), 10), // 1005 total - ]; - let acc = Uuid::new_v4(); - let mut f = Vec::new(); - for a in amounts { - let s = store.clone(); - f.push(s.save_uncredited_settlement_amount(acc, a)); - } - join_all(f) - .map_err(|err| eprintln!("Redis error: {:?}", err)) - .and_then(move |_| { - store - .load_uncredited_settlement_amount(acc, 9) - .map_err(|err| eprintln!("Redis error: {:?}", err)) - .and_then(move |ret| { - // 1 uncredited unit for scale 9 - assert_eq!(ret, BigUint::from(1u32)); - // rest should be in the leftovers store - store - .get_uncredited_settlement_amount(acc) - .map_err(|err| eprintln!("Redis error: {:?}", err)) - .and_then(move |ret| { - // 1 uncredited unit for scale 9 - assert_eq!(ret, (BigUint::from(5u32), 12)); - let _ = context; - Ok(()) - }) - }) - }) - })) - .unwrap() -} +#[tokio::test] +async fn saves_gets_clears_uncredited_settlement_amount_properly() { + let (store, _context, accs) = test_store().await.unwrap(); + let amounts: Vec<(BigUint, u8)> = vec![ + 
(BigUint::from(5u32), 11), // 5 + (BigUint::from(855u32), 12), // 905 + (BigUint::from(1u32), 10), // 1005 total + ]; + let acc = Uuid::new_v4(); + for a in amounts { + let s = store.clone(); + s.save_uncredited_settlement_amount(acc, a).await.unwrap(); + } + let ret = store + .load_uncredited_settlement_amount(acc, 9u8) + .await + .unwrap(); + // 1 uncredited unit for scale 9 + assert_eq!(ret, BigUint::from(1u32)); + // rest should be in the leftovers store + let ret = store.get_uncredited_settlement_amount(acc).await.unwrap(); + // 1 uncredited unit for scale 9 + assert_eq!(ret, (BigUint::from(5u32), 12)); -#[test] -fn clears_uncredited_settlement_amount_properly() { - block_on(test_store().and_then(|(store, context, _accs)| { - let amounts = vec![ - (BigUint::from(5u32), 11), // 5 - (BigUint::from(855u32), 12), // 905 - (BigUint::from(1u32), 10), // 1005 total - ]; - let acc = Uuid::new_v4(); - let mut f = Vec::new(); - for a in amounts { - let s = store.clone(); - f.push(s.save_uncredited_settlement_amount(acc, a)); - } - join_all(f) - .map_err(|err| eprintln!("Redis error: {:?}", err)) - .and_then(move |_| { - store - .clear_uncredited_settlement_amount(acc) - .map_err(|err| eprintln!("Redis error: {:?}", err)) - .and_then(move |_| { - store - .get_uncredited_settlement_amount(acc) - .map_err(|err| eprintln!("Redis error: {:?}", err)) - .and_then(move |amount| { - assert_eq!(amount, (BigUint::from(0u32), 0)); - let _ = context; - Ok(()) - }) - }) - }) - })) - .unwrap() + // clears uncredited amount + store.clear_uncredited_settlement_amount(acc).await.unwrap(); + let ret = store.get_uncredited_settlement_amount(acc).await.unwrap(); + assert_eq!(ret, (BigUint::from(0u32), 0)); } -#[test] -fn credits_prepaid_amount() { - block_on(test_store().and_then(|(store, context, accs)| { - let id = accs[0].id(); - context.async_connection().and_then(move |conn| { - store - .update_balance_for_incoming_settlement(id, 100, Some(IDEMPOTENCY_KEY.clone())) - .and_then(move |_| 
{ - cmd("HMGET") - .arg(format!("accounts:{}", id)) - .arg("balance") - .arg("prepaid_amount") - .query_async(conn) - .map_err(|err| eprintln!("Redis error: {:?}", err)) - .and_then(move |(_conn, (balance, prepaid_amount)): (_, (i64, i64))| { - assert_eq!(balance, 0); - assert_eq!(prepaid_amount, 100); - let _ = context; - Ok(()) - }) - }) - }) - })) - .unwrap() +#[tokio::test] +async fn credits_prepaid_amount() { + let (store, context, accs) = test_store().await.unwrap(); + let id = accs[0].id(); + let mut conn = context.async_connection().await.unwrap(); + store + .update_balance_for_incoming_settlement(id, 100, Some(IDEMPOTENCY_KEY.clone())) + .await + .unwrap(); + let (balance, prepaid_amount): (i64, i64) = cmd("HMGET") + .arg(format!("accounts:{}", id)) + .arg("balance") + .arg("prepaid_amount") + .query_async(&mut conn) + .await + .unwrap(); + assert_eq!(balance, 0); + assert_eq!(prepaid_amount, 100); } -#[test] -fn saves_and_loads_idempotency_key_data_properly() { - block_on(test_store().and_then(|(store, context, _accs)| { - let input_hash: [u8; 32] = Default::default(); - store - .save_idempotent_data( - IDEMPOTENCY_KEY.clone(), - input_hash, - StatusCode::OK, - Bytes::from("TEST"), - ) - .map_err(|err| eprintln!("Redis error: {:?}", err)) - .and_then(move |_| { - store - .load_idempotent_data(IDEMPOTENCY_KEY.clone()) - .map_err(|err| eprintln!("Redis error: {:?}", err)) - .and_then(move |data1| { - assert_eq!( - data1.unwrap(), - IdempotentData::new(StatusCode::OK, Bytes::from("TEST"), input_hash) - ); - let _ = context; +#[tokio::test] +async fn saves_and_loads_idempotency_key_data_properly() { + let (store, _context, _) = test_store().await.unwrap(); + let input_hash: [u8; 32] = Default::default(); + store + .save_idempotent_data( + IDEMPOTENCY_KEY.clone(), + input_hash, + StatusCode::OK, + Bytes::from("TEST"), + ) + .await + .unwrap(); + let data1 = store + .load_idempotent_data(IDEMPOTENCY_KEY.clone()) + .await + .unwrap(); + assert_eq!( + 
data1.unwrap(), + IdempotentData::new(StatusCode::OK, Bytes::from("TEST"), input_hash) + ); - store - .load_idempotent_data("asdf".to_string()) - .map_err(|err| eprintln!("Redis error: {:?}", err)) - .and_then(move |data2| { - assert!(data2.is_none()); - let _ = context; - Ok(()) - }) - }) - }) - })) - .unwrap(); + let data2 = store + .load_idempotent_data("asdf".to_string()) + .await + .unwrap(); + assert!(data2.is_none()); } -#[test] -fn idempotent_settlement_calls() { - block_on(test_store().and_then(|(store, context, accs)| { - let id = accs[0].id(); - context.async_connection().and_then(move |conn| { - store - .update_balance_for_incoming_settlement(id, 100, Some(IDEMPOTENCY_KEY.clone())) - .and_then(move |_| { - cmd("HMGET") - .arg(format!("accounts:{}", id)) - .arg("balance") - .arg("prepaid_amount") - .query_async(conn) - .map_err(|err| eprintln!("Redis error: {:?}", err)) - .and_then(move |(conn, (balance, prepaid_amount)): (_, (i64, i64))| { - assert_eq!(balance, 0); - assert_eq!(prepaid_amount, 100); +#[tokio::test] +async fn idempotent_settlement_calls() { + let (store, context, accs) = test_store().await.unwrap(); + let id = accs[0].id(); + let mut conn = context.async_connection().await.unwrap(); + store + .update_balance_for_incoming_settlement(id, 100, Some(IDEMPOTENCY_KEY.clone())) + .await + .unwrap(); + let (balance, prepaid_amount): (i64, i64) = cmd("HMGET") + .arg(format!("accounts:{}", id)) + .arg("balance") + .arg("prepaid_amount") + .query_async(&mut conn) + .await + .unwrap(); + assert_eq!(balance, 0); + assert_eq!(prepaid_amount, 100); - store - .update_balance_for_incoming_settlement( - id, - 100, - Some(IDEMPOTENCY_KEY.clone()), // Reuse key to make idempotent request. 
- ) - .and_then(move |_| { - cmd("HMGET") - .arg(format!("accounts:{}", id)) - .arg("balance") - .arg("prepaid_amount") - .query_async(conn) - .map_err(|err| eprintln!("Redis error: {:?}", err)) - .and_then( - move |(_conn, (balance, prepaid_amount)): ( - _, - (i64, i64), - )| { - // Since it's idempotent there - // will be no state update. - // Otherwise it'd be 200 (100 + 100) - assert_eq!(balance, 0); - assert_eq!(prepaid_amount, 100); - let _ = context; - Ok(()) - }, - ) - }) - }) - }) - }) - })) - .unwrap() + store + .update_balance_for_incoming_settlement( + id, + 100, + Some(IDEMPOTENCY_KEY.clone()), // Reuse key to make idempotent request. + ) + .await + .unwrap(); + let (balance, prepaid_amount): (i64, i64) = cmd("HMGET") + .arg(format!("accounts:{}", id)) + .arg("balance") + .arg("prepaid_amount") + .query_async(&mut conn) + .await + .unwrap(); + // Since it's idempotent there + // will be no state update. + // Otherwise it'd be 200 (100 + 100) + assert_eq!(balance, 0); + assert_eq!(prepaid_amount, 100); } -#[test] -fn credits_balance_owed() { - block_on(test_store().and_then(|(store, context, accs)| { - let id = accs[0].id(); - context - .shared_async_connection() - .map_err(|err| panic!(err)) - .and_then(move |conn| { - cmd("HSET") - .arg(format!("accounts:{}", id)) - .arg("balance") - .arg(-200) - .query_async(conn) - .map_err(|err| panic!(err)) - .and_then(move |(conn, _balance): (SharedConnection, i64)| { - store - .update_balance_for_incoming_settlement( - id, - 100, - Some(IDEMPOTENCY_KEY.clone()), - ) - .and_then(move |_| { - cmd("HMGET") - .arg(format!("accounts:{}", id)) - .arg("balance") - .arg("prepaid_amount") - .query_async(conn) - .map_err(|err| panic!(err)) - .and_then( - move |(_conn, (balance, prepaid_amount)): ( - _, - (i64, i64), - )| { - assert_eq!(balance, -100); - assert_eq!(prepaid_amount, 0); - let _ = context; - Ok(()) - }, - ) - }) - }) - }) - })) - .unwrap() +#[tokio::test] +async fn credits_balance_owed() { + let (store, 
context, accs) = test_store().await.unwrap(); + let id = accs[0].id(); + let mut conn = context.shared_async_connection().await.unwrap(); + let _balance: i64 = cmd("HSET") + .arg(format!("accounts:{}", id)) + .arg("balance") + .arg(-200i64) + .query_async(&mut conn) + .await + .unwrap(); + store + .update_balance_for_incoming_settlement(id, 100, Some(IDEMPOTENCY_KEY.clone())) + .await + .unwrap(); + let (balance, prepaid_amount): (i64, i64) = cmd("HMGET") + .arg(format!("accounts:{}", id)) + .arg("balance") + .arg("prepaid_amount") + .query_async(&mut conn) + .await + .unwrap(); + assert_eq!(balance, -100); + assert_eq!(prepaid_amount, 0); } -#[test] -fn clears_balance_owed() { - block_on(test_store().and_then(|(store, context, accs)| { - let id = accs[0].id(); - context - .shared_async_connection() - .map_err(|err| panic!(err)) - .and_then(move |conn| { - cmd("HSET") - .arg(format!("accounts:{}", id)) - .arg("balance") - .arg(-100) - .query_async(conn) - .map_err(|err| panic!(err)) - .and_then(move |(conn, _balance): (SharedConnection, i64)| { - store - .update_balance_for_incoming_settlement( - id, - 100, - Some(IDEMPOTENCY_KEY.clone()), - ) - .and_then(move |_| { - cmd("HMGET") - .arg(format!("accounts:{}", id)) - .arg("balance") - .arg("prepaid_amount") - .query_async(conn) - .map_err(|err| panic!(err)) - .and_then( - move |(_conn, (balance, prepaid_amount)): ( - _, - (i64, i64), - )| { - assert_eq!(balance, 0); - assert_eq!(prepaid_amount, 0); - let _ = context; - Ok(()) - }, - ) - }) - }) - }) - })) - .unwrap() +#[tokio::test] +async fn clears_balance_owed() { + let (store, context, accs) = test_store().await.unwrap(); + let id = accs[0].id(); + let mut conn = context.shared_async_connection().await.unwrap(); + let _balance: i64 = cmd("HSET") + .arg(format!("accounts:{}", id)) + .arg("balance") + .arg(-100i64) + .query_async(&mut conn) + .await + .unwrap(); + store + .update_balance_for_incoming_settlement(id, 100, Some(IDEMPOTENCY_KEY.clone())) + .await + 
.unwrap(); + let (balance, prepaid_amount): (i64, i64) = cmd("HMGET") + .arg(format!("accounts:{}", id)) + .arg("balance") + .arg("prepaid_amount") + .query_async(&mut conn) + .await + .unwrap(); + assert_eq!(balance, 0); + assert_eq!(prepaid_amount, 0); } -#[test] -fn clears_balance_owed_and_puts_remainder_as_prepaid() { - block_on(test_store().and_then(|(store, context, accs)| { - let id = accs[0].id(); - context - .shared_async_connection() - .map_err(|err| panic!(err)) - .and_then(move |conn| { - cmd("HSET") - .arg(format!("accounts:{}", id)) - .arg("balance") - .arg(-40) - .query_async(conn) - .map_err(|err| panic!(err)) - .and_then(move |(conn, _balance): (SharedConnection, i64)| { - store - .update_balance_for_incoming_settlement( - id, - 100, - Some(IDEMPOTENCY_KEY.clone()), - ) - .and_then(move |_| { - cmd("HMGET") - .arg(format!("accounts:{}", id)) - .arg("balance") - .arg("prepaid_amount") - .query_async(conn) - .map_err(|err| panic!(err)) - .and_then( - move |(_conn, (balance, prepaid_amount)): ( - _, - (i64, i64), - )| { - assert_eq!(balance, 0); - assert_eq!(prepaid_amount, 60); - let _ = context; - Ok(()) - }, - ) - }) - }) - }) - })) - .unwrap() +#[tokio::test] +async fn clears_balance_owed_and_puts_remainder_as_prepaid() { + let (store, context, accs) = test_store().await.unwrap(); + let id = accs[0].id(); + let mut conn = context.shared_async_connection().await.unwrap(); + let _balance: i64 = cmd("HSET") + .arg(format!("accounts:{}", id)) + .arg("balance") + .arg(-40) + .query_async(&mut conn) + .await + .unwrap(); + store + .update_balance_for_incoming_settlement(id, 100, Some(IDEMPOTENCY_KEY.clone())) + .await + .unwrap(); + let (balance, prepaid_amount): (i64, i64) = cmd("HMGET") + .arg(format!("accounts:{}", id)) + .arg("balance") + .arg("prepaid_amount") + .query_async(&mut conn) + .await + .unwrap(); + assert_eq!(balance, 0); + assert_eq!(prepaid_amount, 60); } -#[test] -fn loads_globally_configured_settlement_engine_url() { - 
block_on(test_store().and_then(|(store, context, accs)| { - assert!(accs[0].settlement_engine_details().is_some()); - assert!(accs[1].settlement_engine_details().is_none()); - let account_ids = vec![accs[0].id(), accs[1].id()]; - store - .clone() - .get_accounts(account_ids.clone()) - .and_then(move |accounts| { - assert!(accounts[0].settlement_engine_details().is_some()); - assert!(accounts[1].settlement_engine_details().is_none()); +#[tokio::test] +async fn loads_globally_configured_settlement_engine_url() { + let (store, _context, accs) = test_store().await.unwrap(); + assert!(accs[0].settlement_engine_details().is_some()); + assert!(accs[1].settlement_engine_details().is_none()); + let account_ids = vec![accs[0].id(), accs[1].id()]; + let accounts = store.get_accounts(account_ids.clone()).await.unwrap(); + assert!(accounts[0].settlement_engine_details().is_some()); + assert!(accounts[1].settlement_engine_details().is_none()); - store - .clone() - .set_settlement_engines(vec![ - ( - "ABC".to_string(), - Url::parse("http://settle-abc.example").unwrap(), - ), - ( - "XYZ".to_string(), - Url::parse("http://settle-xyz.example").unwrap(), - ), - ]) - .and_then(move |_| { - store.get_accounts(account_ids).and_then(move |accounts| { - // It should not overwrite the one that was individually configured - assert_eq!( - accounts[0] - .settlement_engine_details() - .unwrap() - .url - .as_str(), - "http://settlement.example/" - ); + store + .clone() + .set_settlement_engines(vec![ + ( + "ABC".to_string(), + Url::parse("http://settle-abc.example").unwrap(), + ), + ( + "XYZ".to_string(), + Url::parse("http://settle-xyz.example").unwrap(), + ), + ]) + .await + .unwrap(); + let accounts = store.get_accounts(account_ids).await.unwrap(); + // It should not overwrite the one that was individually configured + assert_eq!( + accounts[0] + .settlement_engine_details() + .unwrap() + .url + .as_str(), + "http://settlement.example/" + ); - // It should set the URL for the account that 
did not have one configured - assert!(accounts[1].settlement_engine_details().is_some()); - assert_eq!( - accounts[1] - .settlement_engine_details() - .unwrap() - .url - .as_str(), - "http://settle-abc.example/" - ); - let _ = context; - Ok(()) - }) - }) - // store.set_settlement_engines - }) - })) - .unwrap() + // It should set the URL for the account that did not have one configured + assert!(accounts[1].settlement_engine_details().is_some()); + assert_eq!( + accounts[1] + .settlement_engine_details() + .unwrap() + .url + .as_str(), + "http://settle-abc.example/" + ); } diff --git a/crates/interledger-stream/Cargo.toml b/crates/interledger-stream/Cargo.toml index 62c8fb064..d54cc2f69 100644 --- a/crates/interledger-stream/Cargo.toml +++ b/crates/interledger-stream/Cargo.toml @@ -18,7 +18,7 @@ byteorder = { version = "1.3.2", default-features = false } chrono = { version = "0.4.9", default-features = false, features = ["clock"] } csv = { version = "1.1.1", default-features = false, optional = true } failure = { version = "0.1.5", default-features = false, features = ["derive"] } -futures = { version = "0.1.29", default-features = false } +futures = { version = "0.3.1", default-features = false } hex = { version = "0.4.0", default-features = false } interledger-ildcp = { path = "../interledger-ildcp", version = "^0.4.0", default-features = false } interledger-packet = { path = "../interledger-packet", version = "^0.4.0", features = ["serde"], default-features = false } @@ -27,8 +27,9 @@ log = { version = "0.4.8", default-features = false } parking_lot = { version = "0.9.0", default-features = false } ring = { version = "0.16.9", default-features = false } serde = { version = "1.0.101", default-features = false } -tokio = { version = "0.1.22", default-features = false, features = ["rt-full"] } +tokio = { version = "^0.2.6", default-features = false, features = ["rt-core", "macros"] } uuid = { version = "0.8.1", default-features = false, features = ["v4"] } 
+async-trait = "0.1.22" [dev-dependencies] interledger-router = { path = "../interledger-router", version = "^0.4.0", default-features = false } diff --git a/crates/interledger-stream/src/client.rs b/crates/interledger-stream/src/client.rs index 4a5502316..aee13d286 100644 --- a/crates/interledger-stream/src/client.rs +++ b/crates/interledger-stream/src/client.rs @@ -4,7 +4,7 @@ use super::error::Error; use super::packet::*; use bytes::Bytes; use bytes::BytesMut; -use futures::{Async, Future, Poll}; +use futures::TryFutureExt; use interledger_ildcp::get_ildcp_info; use interledger_packet::{ Address, ErrorClass, ErrorCode as IlpErrorCode, Fulfill, PacketType as IlpPacketType, @@ -14,7 +14,6 @@ use interledger_service::*; use log::{debug, error, warn}; use serde::{Deserialize, Serialize}; use std::{ - cell::Cell, cmp::min, str, time::{Duration, Instant, SystemTime}, @@ -48,13 +47,13 @@ impl StreamDelivery { /// Send a given amount of money using the STREAM transport protocol. /// /// This returns the amount delivered, as reported by the receiver and in the receiver's asset's units. -pub fn send_money( +pub async fn send_money( service: S, from_account: &A, destination_account: Address, shared_secret: &[u8], source_amount: u64, -) -> impl Future +) -> Result<(StreamDelivery, S), Error> where S: IncomingService + Clone, A: Account, @@ -62,58 +61,99 @@ where let shared_secret = Bytes::from(shared_secret); let from_account = from_account.clone(); // TODO can/should we avoid cloning the account? 
- get_ildcp_info(&mut service.clone(), from_account.clone()) + let account_details = get_ildcp_info(&mut service.clone(), from_account.clone()) .map_err(|_err| Error::ConnectionError("Unable to get ILDCP info: {:?}".to_string())) - .and_then(move |account_details| { - let source_account = account_details.ilp_address(); - if source_account.scheme() != destination_account.scheme() { - warn!("Destination ILP address starts with a different scheme prefix (\"{}\') than ours (\"{}\'), this probably isn't going to work", - destination_account.scheme(), - source_account.scheme()); - } + .await?; + + let source_account = account_details.ilp_address(); + if source_account.scheme() != destination_account.scheme() { + warn!("Destination ILP address starts with a different scheme prefix (\"{}\') than ours (\"{}\'), this probably isn't going to work", + destination_account.scheme(), + source_account.scheme()); + } + + let mut sender = SendMoneyFuture { + state: SendMoneyFutureState::SendMoney, + next: service.clone(), + from_account: from_account.clone(), + source_account, + destination_account: destination_account.clone(), + shared_secret, + source_amount, + // Try sending the full amount first + // TODO make this configurable -- in different scenarios you might prioritize + // sending as much as possible per packet vs getting money flowing ASAP differently + congestion_controller: CongestionController::new(source_amount, source_amount / 10, 2.0), + receipt: StreamDelivery { + from: from_account.ilp_address().clone(), + to: destination_account, + sent_amount: source_amount, + sent_asset_scale: from_account.asset_scale(), + sent_asset_code: from_account.asset_code().to_string(), + delivered_asset_scale: None, + delivered_asset_code: None, + delivered_amount: 0, + }, + should_send_source_account: true, + sequence: 1, + rejected_packets: 0, + error: None, + last_fulfill_time: Instant::now(), + }; + + loop { + if let Some(error) = sender.error.take() { + error!("Send money stopped 
because of error: {:?}", error); + return Err(error); + } - SendMoneyFuture { - state: SendMoneyFutureState::SendMoney, - next: Some(service), - from_account: from_account.clone(), - source_account, - destination_account: destination_account.clone(), - shared_secret, - source_amount, - // Try sending the full amount first - // TODO make this configurable -- in different scenarios you might prioritize - // sending as much as possible per packet vs getting money flowing ASAP differently - congestion_controller: CongestionController::new(source_amount, source_amount / 10, 2.0), - pending_requests: Cell::new(Vec::new()), - receipt: StreamDelivery { - from: from_account.ilp_address().clone(), - to: destination_account, - sent_amount: source_amount, - sent_asset_scale: from_account.asset_scale(), - sent_asset_code: from_account.asset_code().to_string(), - delivered_asset_scale: None, - delivered_asset_code: None, - delivered_amount: 0, - }, - should_send_source_account: true, - sequence: 1, - rejected_packets: 0, - error: None, - last_fulfill_time: Instant::now(), + // Error if we haven't received a fulfill over a timeout period + if sender.last_fulfill_time.elapsed() >= MAX_TIME_SINCE_LAST_FULFILL { + return Err(Error::TimeoutError(format!( + "Time since last fulfill exceeded the maximum time limit of {:?} secs", + sender.last_fulfill_time.elapsed().as_secs() + ))); + } + + // a. If we've sent everything and there's no pending requests coose the connection + if sender.source_amount == 0 { + // Try closing the connection if it still thinks it's sending + if sender.state == SendMoneyFutureState::SendMoney { + sender.state = SendMoneyFutureState::Closing; + sender.try_send_connection_close().await?; + } else { + sender.state = SendMoneyFutureState::Closed; + debug!( + "Send money future finished. 
Delivered: {} ({} packets fulfilled, {} packets rejected)", sender.receipt.delivered_amount, sender.sequence - 1, sender.rejected_packets, + ); + + // Connection is finally closed, we can now return the receipt and the next service + return Ok((sender.receipt, service)); } - }) + // b. We still need to send more packets! + } else { + sender.try_send_money().await? + } + } +} + +#[derive(PartialEq)] +enum SendMoneyFutureState { + SendMoney, + Closing, + // RemoteClosed, + Closed, } struct SendMoneyFuture, A: Account> { state: SendMoneyFutureState, - next: Option, + next: S, from_account: A, source_account: Address, destination_account: Address, shared_secret: Bytes, source_amount: u64, congestion_controller: CongestionController, - pending_requests: Cell>, receipt: StreamDelivery, should_send_source_account: bool, sequence: u64, @@ -122,96 +162,82 @@ struct SendMoneyFuture, A: Account> { last_fulfill_time: Instant, } -struct PendingRequest { - sequence: u64, - amount: u64, - future: BoxedIlpFuture, -} - -#[derive(PartialEq)] -enum SendMoneyFutureState { - SendMoney, - Closing, - // RemoteClosed, - Closed, -} - impl SendMoneyFuture where S: IncomingService, A: Account, { - fn try_send_money(&mut self) -> Result { - // Fire off requests until the congestion controller tells us to stop or we've sent the total amount or maximum time since last fulfill has elapsed - let mut sent_packets = false; - loop { - let amount = min( - self.source_amount, - self.congestion_controller.get_max_amount(), - ); - if amount == 0 { - break; - } - self.source_amount -= amount; - - // Load up the STREAM packet - let sequence = self.next_sequence(); - let mut frames = vec![Frame::StreamMoney(StreamMoneyFrame { - stream_id: 1, - shares: 1, - })]; - if self.should_send_source_account { - frames.push(Frame::ConnectionNewAddress(ConnectionNewAddressFrame { - source_account: self.source_account.clone(), - })); - } - let stream_packet = StreamPacketBuilder { - ilp_packet_type: 
IlpPacketType::Prepare, - // TODO enforce min exchange rate - prepare_amount: 0, - sequence, - frames: &frames, - } - .build(); + #[inline] + // Fire off requests until the congestion controller tells us to stop or we've sent the total amount or maximum time since last fulfill has elapsed + async fn try_send_money(&mut self) -> Result<(), Error> { + let amount = min( + self.source_amount, + self.congestion_controller.get_max_amount(), + ); + if amount == 0 { + return Ok(()); + } + self.source_amount -= amount; - // Create the ILP Prepare packet - debug!( - "Sending packet {} with amount: {} and encrypted STREAM packet: {:?}", - sequence, amount, stream_packet - ); - let data = stream_packet.into_encrypted(&self.shared_secret); - let execution_condition = generate_condition(&self.shared_secret, &data); - let prepare = PrepareBuilder { - destination: self.destination_account.clone(), - amount, - execution_condition: &execution_condition, - expires_at: SystemTime::now() + Duration::from_secs(30), - // TODO don't copy the data - data: &data[..], - } - .build(); - - // Send it! 
- self.congestion_controller.prepare(amount); - if let Some(ref mut next) = self.next { - let send_request = next.handle_request(IncomingRequest { - from: self.from_account.clone(), - prepare, - }); - self.pending_requests.get_mut().push(PendingRequest { - sequence, - amount, - future: Box::new(send_request), - }); - sent_packets = true; - } else { - panic!("Polled after finish"); - } + // Load up the STREAM packet + let sequence = self.next_sequence(); + let mut frames = vec![Frame::StreamMoney(StreamMoneyFrame { + stream_id: 1, + shares: 1, + })]; + + if self.should_send_source_account { + frames.push(Frame::ConnectionNewAddress(ConnectionNewAddressFrame { + source_account: self.source_account.clone(), + })); + } + let stream_packet = StreamPacketBuilder { + ilp_packet_type: IlpPacketType::Prepare, + // TODO enforce min exchange rate + prepare_amount: 0, + sequence, + frames: &frames, } - Ok(sent_packets) + .build(); + + // Create the ILP Prepare packet + debug!( + "Sending packet {} with amount: {} and encrypted STREAM packet: {:?}", + sequence, amount, stream_packet + ); + let data = stream_packet.into_encrypted(&self.shared_secret); + let execution_condition = generate_condition(&self.shared_secret, &data); + let prepare = PrepareBuilder { + destination: self.destination_account.clone(), + amount, + execution_condition: &execution_condition, + expires_at: SystemTime::now() + Duration::from_secs(30), + // TODO don't copy the data + data: &data[..], + } + .build(); + + // Send it! 
+ self.congestion_controller.prepare(amount); + let result = self + .next + .handle_request(IncomingRequest { + from: self.from_account.clone(), + prepare, + }) + .await; + + // Handle the response + match result { + Ok(fulfill) => self.handle_fulfill(sequence, amount, fulfill), + Err(reject) => self.handle_reject(sequence, amount, reject), + } + + Ok(()) } - fn try_send_connection_close(&mut self) -> Result<(), Error> { + #[inline] + async fn try_send_connection_close(&mut self) -> Result<(), Error> { let sequence = self.next_sequence(); let stream_packet = StreamPacketBuilder { ilp_packet_type: IlpPacketType::Prepare, @@ -236,48 +262,19 @@ where // Send it! debug!("Closing connection"); - if let Some(ref mut next) = self.next { - let send_request = next.handle_request(IncomingRequest { + let result = self + .next + .handle_request(IncomingRequest { from: self.from_account.clone(), prepare, - }); - self.pending_requests.get_mut().push(PendingRequest { - sequence, - amount: 0, - future: Box::new(send_request), - }); - } else { - panic!("Polled after finish"); - } - Ok(()) - } - - fn poll_pending_requests(&mut self) -> Poll<(), Error> { - let pending_requests = self.pending_requests.take(); - let pending_requests = pending_requests - .into_iter() - .filter_map(|mut pending_request| match pending_request.future.poll() { - Ok(Async::NotReady) => Some(pending_request), - Ok(Async::Ready(fulfill)) => { - self.handle_fulfill(pending_request.sequence, pending_request.amount, fulfill); - None - } - Err(reject) => { - self.handle_reject(pending_request.sequence, pending_request.amount, reject); - None - } }) - .collect(); - self.pending_requests.set(pending_requests); - - if let Some(error) = self.error.take() { - error!("Send money stopped because of error: {:?}", error); - Err(error) - } else if self.pending_requests.get_mut().is_empty() { - Ok(Async::Ready(())) - } else { - Ok(Async::NotReady) + .await; + match result { + Ok(fulfill) => self.handle_fulfill(sequence, 0, 
fulfill), + Err(reject) => self.handle_reject(sequence, 0, reject), } + + Ok(()) } fn handle_fulfill(&mut self, sequence: u64, amount: u64, fulfill: Fulfill) { @@ -379,46 +376,6 @@ where } } -impl Future for SendMoneyFuture -where - S: IncomingService, - A: Account, -{ - type Item = (StreamDelivery, S); - type Error = Error; - - fn poll(&mut self) -> Poll { - // TODO maybe don't have loops here and in try_send_money - loop { - self.poll_pending_requests()?; - if self.last_fulfill_time.elapsed() >= MAX_TIME_SINCE_LAST_FULFILL { - return Err(Error::TimeoutError(format!( - "Time since last fulfill exceeded the maximum time limit of {:?} secs", - self.last_fulfill_time.elapsed().as_secs() - ))); - } - - if self.source_amount == 0 && self.pending_requests.get_mut().is_empty() { - if self.state == SendMoneyFutureState::SendMoney { - self.state = SendMoneyFutureState::Closing; - self.try_send_connection_close()?; - } else { - self.state = SendMoneyFutureState::Closed; - debug!( - "Send money future finished. Delivered: {} ({} packets fulfilled, {} packets rejected)", self.receipt.delivered_amount, self.sequence - 1, self.rejected_packets, - ); - return Ok(Async::Ready(( - self.receipt.clone(), - self.next.take().unwrap(), - ))); - } - } else if !self.try_send_money()? 
{ - return Ok(Async::NotReady); - } - } - } -} - #[cfg(test)] mod send_money_tests { use super::*; @@ -431,8 +388,8 @@ mod send_money_tests { use std::sync::Arc; use uuid::Uuid; - #[test] - fn stops_at_final_errors() { + #[tokio::test] + async fn stops_at_final_errors() { let account = TestAccount { id: Uuid::new_v4(), asset_code: "XYZ".to_string(), @@ -457,7 +414,7 @@ mod send_money_tests { &[0; 32][..], 100, ) - .wait(); + .await; assert!(result.is_err()); assert_eq!(requests.lock().len(), 1); } diff --git a/crates/interledger-stream/src/lib.rs b/crates/interledger-stream/src/lib.rs index 7e0c048ac..e71619f11 100644 --- a/crates/interledger-stream/src/lib.rs +++ b/crates/interledger-stream/src/lib.rs @@ -20,7 +20,8 @@ pub use server::{ #[cfg(test)] pub mod test_helpers { use super::*; - use futures::{future::ok, sync::mpsc::UnboundedSender, Future}; + use async_trait::async_trait; + use futures::channel::mpsc::UnboundedSender; use interledger_packet::Address; use interledger_router::RouterStore; use interledger_service::{Account, AccountStore, AddressStore, Username}; @@ -88,22 +89,17 @@ pub mod test_helpers { pub route: (String, TestAccount), } + #[async_trait] impl AccountStore for TestStore { type Account = TestAccount; - fn get_accounts( - &self, - _account_ids: Vec, - ) -> Box, Error = ()> + Send> { - Box::new(ok(vec![self.route.1.clone()])) + async fn get_accounts(&self, _account_ids: Vec) -> Result, ()> { + Ok(vec![self.route.1.clone()]) } // stub implementation (not used in these tests) - fn get_account_id_from_username( - &self, - _username: &Username, - ) -> Box + Send> { - Box::new(ok(Uuid::new_v4())) + async fn get_account_id_from_username(&self, _username: &Username) -> Result { + Ok(Uuid::new_v4()) } } @@ -115,16 +111,14 @@ pub mod test_helpers { } } + #[async_trait] impl AddressStore for TestStore { /// Saves the ILP Address in the store's memory and database - fn set_ilp_address( - &self, - _ilp_address: Address, - ) -> Box + Send> { + async fn 
set_ilp_address(&self, _ilp_address: Address) -> Result<(), ()> { unimplemented!() } - fn clear_ilp_address(&self) -> Box + Send> { + async fn clear_ilp_address(&self) -> Result<(), ()> { unimplemented!() } @@ -140,18 +134,16 @@ mod send_money_to_receiver { use super::test_helpers::*; use super::*; use bytes::Bytes; - use futures::Future; use interledger_ildcp::IldcpService; use interledger_packet::Address; use interledger_packet::{ErrorCode, RejectBuilder}; use interledger_router::Router; use interledger_service::outgoing_service_fn; use std::str::FromStr; - use tokio::runtime::Runtime; use uuid::Uuid; - #[test] - fn send_money_test() { + #[tokio::test] + async fn send_money_test() { let server_secret = Bytes::from(&[0; 32][..]); let destination_address = Address::from_str("example.receiver").unwrap(); let account = TestAccount { @@ -184,7 +176,7 @@ mod send_money_to_receiver { connection_generator.generate_address_and_secret(&destination_address); let destination_address = Address::from_str("example.receiver").unwrap(); - let run = send_money( + let (receipt, _service) = send_money( server, &test_helpers::TestAccount { id: Uuid::new_v4(), @@ -196,12 +188,9 @@ mod send_money_to_receiver { &shared_secret[..], 100, ) - .and_then(|(receipt, _service)| { - assert_eq!(receipt.delivered_amount, 100); - Ok(()) - }) - .map_err(|err| panic!(err)); - let runtime = Runtime::new().unwrap(); - runtime.block_on_all(run).unwrap(); + .await + .unwrap(); + + assert_eq!(receipt.delivered_amount, 100); } } diff --git a/crates/interledger-stream/src/server.rs b/crates/interledger-stream/src/server.rs index 890489969..03c18eccd 100644 --- a/crates/interledger-stream/src/server.rs +++ b/crates/interledger-stream/src/server.rs @@ -1,15 +1,16 @@ use super::crypto::*; use super::packet::*; +use async_trait::async_trait; use base64; use bytes::{Bytes, BytesMut}; use chrono::{DateTime, Utc}; -use futures::{future::result, sync::mpsc::UnboundedSender}; +use 
futures::channel::mpsc::UnboundedSender; use hex; use interledger_packet::{ Address, ErrorCode, Fulfill, FulfillBuilder, PacketType as IlpPacketType, Prepare, Reject, RejectBuilder, }; -use interledger_service::{Account, BoxedIlpFuture, OutgoingRequest, OutgoingService, Username}; +use interledger_service::{Account, IlpResult, OutgoingRequest, OutgoingService, Username}; use log::debug; use serde::{Deserialize, Serialize}; use std::marker::PhantomData; @@ -137,17 +138,16 @@ where } } +#[async_trait] impl OutgoingService for StreamReceiverService where S: StreamNotificationsStore + Send + Sync + 'static + Clone, - O: OutgoingService, - A: Account, + O: OutgoingService + Send + Sync + Clone, + A: Account + Send + Sync + Clone, { - type Future = BoxedIlpFuture; - /// Try fulfilling the request if it is for this STREAM server or pass it to the next /// outgoing handler if not. - fn send_request(&mut self, request: OutgoingRequest) -> Self::Future { + async fn send_request(&mut self, request: OutgoingRequest) -> IlpResult { let to_username = request.to.username().clone(); let from_username = request.from.username().clone(); let amount = request.prepare.amount(); @@ -182,14 +182,14 @@ where // the sender will likely see an error like F02: Unavailable (this is // a bit confusing but the packet data should not be modified at all // under normal circumstances). 
- return Box::new(self.next.send_request(request)); + return self.next.send_request(request).await; } } }; - return Box::new(result(response)); + return response; } } - Box::new(self.next.send_request(request)) + self.next.send_request(request).await } } @@ -367,7 +367,7 @@ mod receiving_money { fn fulfills_valid_packet() { let ilp_address = Address::from_str("example.destination").unwrap(); let server_secret = Bytes::from(&[1; 32][..]); - let connection_generator = ConnectionGenerator::new(server_secret.clone()); + let connection_generator = ConnectionGenerator::new(server_secret); let (destination_account, shared_secret) = connection_generator.generate_address_and_secret(&ilp_address); let stream_packet = test_stream_packet(); @@ -395,7 +395,7 @@ mod receiving_money { fn fulfills_valid_packet_without_connection_tag() { let ilp_address = Address::from_str("example.destination").unwrap(); let server_secret = Bytes::from(&[1; 32][..]); - let connection_generator = ConnectionGenerator::new(server_secret.clone()); + let connection_generator = ConnectionGenerator::new(server_secret); let (destination_account, shared_secret) = connection_generator.generate_address_and_secret(&ilp_address); let stream_packet = test_stream_packet(); @@ -423,7 +423,7 @@ mod receiving_money { fn rejects_modified_data() { let ilp_address = Address::from_str("example.destination").unwrap(); let server_secret = Bytes::from(&[1; 32][..]); - let connection_generator = ConnectionGenerator::new(server_secret.clone()); + let connection_generator = ConnectionGenerator::new(server_secret); let (destination_account, shared_secret) = connection_generator.generate_address_and_secret(&ilp_address); let stream_packet = test_stream_packet(); @@ -452,7 +452,7 @@ mod receiving_money { fn rejects_too_little_money() { let ilp_address = Address::from_str("example.destination").unwrap(); let server_secret = Bytes::from(&[1; 32][..]); - let connection_generator = ConnectionGenerator::new(server_secret.clone()); + 
let connection_generator = ConnectionGenerator::new(server_secret); let (destination_account, shared_secret) = connection_generator.generate_address_and_secret(&ilp_address); @@ -519,15 +519,15 @@ mod receiving_money { mod stream_receiver_service { use super::*; use crate::test_helpers::*; - use futures::Future; use interledger_packet::PrepareBuilder; use interledger_service::outgoing_service_fn; use std::convert::TryFrom; use std::str::FromStr; use std::time::UNIX_EPOCH; - #[test] - fn fulfills_correct_packets() { + + #[tokio::test] + async fn fulfills_correct_packets() { let ilp_address = Address::from_str("example.destination").unwrap(); let server_secret = Bytes::from(&[1; 32][..]); let connection_generator = ConnectionGenerator::new(server_secret.clone()); @@ -550,7 +550,7 @@ mod stream_receiver_service { let mut service = StreamReceiverService::new( server_secret.clone(), DummyStore, - outgoing_service_fn(|_: OutgoingRequest| -> BoxedIlpFuture { + outgoing_service_fn(|_: OutgoingRequest| -> IlpResult { panic!("shouldn't get here") }), ); @@ -572,12 +572,12 @@ mod stream_receiver_service { original_amount: prepare.amount(), prepare, }) - .wait(); + .await; assert!(result.is_ok()); } - #[test] - fn rejects_invalid_packets() { + #[tokio::test] + async fn rejects_invalid_packets() { let ilp_address = Address::from_str("example.destination").unwrap(); let server_secret = Bytes::from(&[1; 32][..]); let connection_generator = ConnectionGenerator::new(server_secret.clone()); @@ -632,12 +632,12 @@ mod stream_receiver_service { original_amount: prepare.amount(), prepare, }) - .wait(); + .await; assert!(result.is_err()); } - #[test] - fn passes_on_packets_not_for_it() { + #[tokio::test] + async fn passes_on_packets_not_for_it() { let ilp_address = Address::from_str("example.destination").unwrap(); let server_secret = Bytes::from(&[1; 32][..]); let connection_generator = ConnectionGenerator::new(server_secret.clone()); @@ -690,7 +690,7 @@ mod stream_receiver_service { }, 
prepare, }) - .wait(); + .await; assert!(result.is_err()); assert_eq!( result.unwrap_err().triggered_by().unwrap(),