From 41800e780ab5f26bd7f9384ee128eea6bb4bec3e Mon Sep 17 00:00:00 2001
From: Florian Lemaitre
Date: Fri, 6 Dec 2024 17:21:07 +0100
Subject: [PATCH 01/12] Load Balancer Services

---
 Cargo.lock                                | 3165 +++++++++++++++++++++
 Cargo.toml                                |    3 +
 load-balancer/Cargo.toml                  |   14 +
 load-balancer/src/cluster.rs              |  166 ++
 load-balancer/src/main.rs                 |  179 ++
 load-balancer/src/ref_guard.rs            |  112 +
 load-balancer/src/service/applications.rs |   93 +
 load-balancer/src/service/auth.rs         |   52 +
 load-balancer/src/service/events.rs       |   59 +
 load-balancer/src/service/health_check.rs |   61 +
 load-balancer/src/service/mod.rs          |  323 +++
 load-balancer/src/service/partitions.rs   |  114 +
 load-balancer/src/service/results.rs      |  293 ++
 load-balancer/src/service/sessions.rs     |  276 ++
 load-balancer/src/service/submitter.rs    |  472 +++
 load-balancer/src/service/tasks.rs        |  260 ++
 load-balancer/src/service/versions.rs     |   52 +
 load-balancer/src/utils.rs                |  412 +++
 18 files changed, 6106 insertions(+)
 create mode 100644 Cargo.lock
 create mode 100644 Cargo.toml
 create mode 100644 load-balancer/Cargo.toml
 create mode 100644 load-balancer/src/cluster.rs
 create mode 100644 load-balancer/src/main.rs
 create mode 100644 load-balancer/src/ref_guard.rs
 create mode 100644 load-balancer/src/service/applications.rs
 create mode 100644 load-balancer/src/service/auth.rs
 create mode 100644 load-balancer/src/service/events.rs
 create mode 100644 load-balancer/src/service/health_check.rs
 create mode 100644 load-balancer/src/service/mod.rs
 create mode 100644 load-balancer/src/service/partitions.rs
 create mode 100644 load-balancer/src/service/results.rs
 create mode 100644 load-balancer/src/service/sessions.rs
 create mode 100644 load-balancer/src/service/submitter.rs
 create mode 100644 load-balancer/src/service/tasks.rs
 create mode 100644 load-balancer/src/service/versions.rs
 create mode 100644 load-balancer/src/utils.rs

diff --git a/Cargo.lock b/Cargo.lock
new file mode 100644
index 0000000..4a3868d
--- /dev/null
+++ b/Cargo.lock
@@ -0,0 +1,3165 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3 + +[[package]] +name = "addr2line" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" + +[[package]] +name = "ahash" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" +dependencies = [ + "cfg-if", + "getrandom", + "once_cell", + "version_check", + "zerocopy", +] + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "allocator-api2" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" + +[[package]] +name = "anstream" +version = "0.6.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" + +[[package]] +name = "anstyle-parse" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125" +dependencies = [ + "anstyle", + "windows-sys 0.59.0", +] + +[[package]] +name = "anyhow" +version = "1.0.94" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" + +[[package]] +name = "armonik" +version = "3.21.0-beta-3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a932e18538d27280291cd13a265be9f6a234abf290dd1349ed6ccfe67f1cce1d" +dependencies = [ + "futures", + "hyper", + "hyper-rustls", + "prost", + "prost-types", + "rustls", + "snafu", + "tokio", + "tokio-util", + "tonic", + "tonic-build", + "tracing", +] + +[[package]] +name = "arraydeque" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d902e3d592a523def97af8f317b08ce16b7ab854c1985a0c671e6f15cebc236" + +[[package]] +name = "async-broadcast" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"20cd0e2e25ea8e5f7e9df04578dc6cf5c83577fd09b1a46aaf5c85e1c33f2a7e" +dependencies = [ + "event-listener", + "event-listener-strategy", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "async-trait" +version = "0.1.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "autocfg" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" + +[[package]] +name = "aws-lc-rs" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f47bb8cc16b669d267eeccf585aea077d0882f4777b1c1f740217885d6e6e5a3" +dependencies = [ + "aws-lc-sys", + "paste", + "zeroize", +] + +[[package]] +name = "aws-lc-sys" +version = "0.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2101df3813227bbaaaa0b04cd61c534c7954b22bd68d399b440be937dc63ff7" +dependencies = [ + "bindgen", + "cc", + "cmake", + "dunce", + "fs_extra", + "libc", + "paste", +] + +[[package]] +name = "axum" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" +dependencies = [ + "async-trait", + "axum-core", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "sync_wrapper 1.0.2", + "tower 0.5.1", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "mime", + "pin-project-lite", + "rustversion", + "sync_wrapper 1.0.2", + "tower-layer", + "tower-service", +] + +[[package]] +name = "backoff" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" +dependencies = [ + "getrandom", + "instant", + "rand", +] + +[[package]] +name = "backtrace" +version = "0.3.74" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" +dependencies = [ + "addr2line", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", + 
"windows-targets", +] + +[[package]] +name = "base64" +version = "0.21.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "bindgen" +version = "0.69.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" +dependencies = [ + "bitflags", + "cexpr", + "clang-sys", + "itertools 0.12.1", + "lazy_static", + "lazycell", + "log", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash 1.1.0", + "shlex", + "syn 2.0.90", + "which", +] + +[[package]] +name = "bitflags" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" +dependencies = [ + "serde", +] + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "bumpalo" +version = "3.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" + +[[package]] +name = "cc" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f34d93e62b03caf570cccc334cbc6c2fceca82f39211051345108adcba3eebdc" +dependencies = [ + "jobserver", + "libc", + "shlex", +] + +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + +[[package]] +name = "chrono" +version = "0.4.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" +dependencies = [ + "num-traits", + "serde", +] + +[[package]] +name = "clang-sys" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +dependencies = [ + "glob", + "libc", + "libloading", +] + +[[package]] +name = "clap" +version = "4.5.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84" +dependencies = [ + "clap_builder", + "clap_derive", +] + +[[package]] +name = "clap_builder" +version = "4.5.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838" +dependencies = [ + "anstream", + "anstyle", + "clap_lex", + "strsim", +] + +[[package]] +name = "clap_derive" +version = "4.5.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "clap_lex" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" + +[[package]] +name = "cmake" +version = "0.1.52" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c682c223677e0e5b6b7f63a64b9351844c3f1b1678a68b7ee617e30fb082620e" +dependencies = [ + "cc", +] + +[[package]] +name = "colorchoice" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" + +[[package]] +name = "concurrent-queue" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "config" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68578f196d2a33ff61b27fae256c3164f65e36382648e30666dde05b8cc9dfdf" +dependencies = [ + "async-trait", + "convert_case", + "json5", + "nom", + "pathdiff", + "ron", + "rust-ini", + "serde", + "serde_json", + "toml", + "yaml-rust2", +] + +[[package]] +name = "const-random" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359" +dependencies = [ + "const-random-macro", +] + +[[package]] +name = "const-random-macro" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" +dependencies = [ + "getrandom", + "once_cell", + "tiny-keccak", +] + +[[package]] +name = "convert_case" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "core-foundation" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b55271e5c8c478ad3f38ad24ef34923091e0548492a266d19b3c0b4d82574c63" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "cpufeatures" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16b80225097f2e5ae4e7179dd2266824648f3e2f49d9134d584b76389d31c4c3" +dependencies = [ + "libc", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.20" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" + +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "crypto-common", +] + +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "dlv-list" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "442039f5147480ba31067cb00ada1adae6892028e40e45fc5de7b7df6dcc1b5f" +dependencies = [ + "const-random", +] + +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + +[[package]] +name = "either" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" + +[[package]] +name = "encoding_rs" +version = "0.8.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "env_filter" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f2c92ceda6ceec50f43169f9ee8424fe2db276791afde7b2cd8bc084cb376ab" +dependencies = [ + "log", + "regex", +] + +[[package]] +name = "env_logger" +version = "0.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13fa619b91fb2381732789fc5de83b45675e882f66623b7d8cb4f643017018d" +dependencies = [ + "anstream", + "anstyle", + "env_filter", + "humantime", + "log", +] + +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "errno" +version = "0.3.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" +dependencies = [ + "libc", + "windows-sys 0.59.0", +] + +[[package]] +name = "event-listener" +version = "5.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" 
+dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2" +dependencies = [ + "event-listener", + "pin-project-lite", +] + +[[package]] +name = "fastrand" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "486f806e73c5707928240ddc295403b1b93c96a02038563881c4a2fd84b81ac4" + +[[package]] +name = "fixedbitset" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "form_urlencoded" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + +[[package]] +name = "futures" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "futures-executor" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + +[[package]] +name = "futures-macro" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "futures-sink" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" + +[[package]] +name = "futures-task" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" + +[[package]] +name = "futures-util" +version = "0.3.31" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "wasi", + "wasm-bindgen", +] + +[[package]] +name = "gimli" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" + +[[package]] +name = "glob" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" + +[[package]] +name = "h2" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http", + "indexmap 2.7.0", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash", + "allocator-api2", +] + +[[package]] +name = "hashbrown" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" + +[[package]] +name = "hashlink" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" +dependencies = [ + "hashbrown 0.14.5", +] + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "home" +version = "0.5.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" +dependencies = [ + "windows-sys 0.52.0", +] + +[[package]] +name = "http" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http", +] + 
+[[package]] +name = "http-body-util" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" +dependencies = [ + "bytes", + "futures-util", + "http", + "http-body", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" + +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + +[[package]] +name = "hyper" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97818827ef4f364230e16705d4706e2897df2bb60617d6ca15d598025a3c481f" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "h2", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.27.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" +dependencies = [ + "futures-util", + "http", + "hyper", + "hyper-util", + "log", + "rustls", + "rustls-native-certs", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tower-service", + "webpki-roots", +] + +[[package]] +name = "hyper-timeout" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" +dependencies = [ + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + +[[package]] +name = "hyper-util" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http", + "http-body", + "hyper", + "pin-project-lite", + "socket2", + "tokio", + "tower-service", + "tracing", +] + +[[package]] +name = "icu_collections" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_locid_transform" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_locid_transform_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" + +[[package]] +name = "icu_normalizer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "utf16_iter", + "utf8_iter", + "write16", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" + +[[package]] +name = "icu_properties" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locid_transform", + "icu_properties_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" + +[[package]] +name = "icu_provider" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_provider_macros", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_provider_macros" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "idna" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", +] + +[[package]] +name = "indexmap" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" +dependencies = [ + "equivalent", + "hashbrown 0.15.2", +] + +[[package]] +name = "instant" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "ipnet" +version = "2.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" + +[[package]] +name = "is_terminal_polyfill" +version = "1.70.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" + +[[package]] +name = "itertools" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" + +[[package]] +name = "jobserver" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" +dependencies = [ + "libc", +] + +[[package]] +name = "js-sys" +version = "0.3.74" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a865e038f7f6ed956f788f0d7d60c541fff74c7bd74272c5d4cf15c63743e705" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "json-patch" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec9ad60d674508f3ca8f380a928cfe7b096bc729c4e2dbfe3852bc45da3ab30b" +dependencies = [ + "serde", + "serde_json", + "thiserror 1.0.69", +] + +[[package]] +name = "json5" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96b0db21af676c1ce64250b5f40f3ce2cf27e4e47cb91ed91eb6fe9350b430c1" +dependencies = [ + "pest", + "pest_derive", + "serde", +] + +[[package]] +name = "jsonpath-rust" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19d8fe85bd70ff715f31ce8c739194b423d79811a19602115d611a3ec85d6200" +dependencies = [ + "lazy_static", + "once_cell", + "pest", + "pest_derive", + "regex", + "serde_json", + "thiserror 1.0.69", +] + +[[package]] +name = "k8s-openapi" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19501afb943ae5806548bc3ebd7f3374153ca057a38f480ef30adfde5ef09755" +dependencies = [ + "base64 0.22.1", + "chrono", + "serde", + "serde-value", + "serde_json", +] + +[[package]] +name = "kube" +version = "0.91.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "264461a7ebf4fb0fcf23e4c7e4f9387c5696ee61d003de207d9b5a895ff37bfa" +dependencies = [ + "k8s-openapi", + "kube-client", + "kube-core", + "kube-runtime", +] + +[[package]] +name = "kube-client" +version = "0.91.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47164ad6c47398ee4bdf90509c7b44026229721cb1377eb4623a1ec2a00a85e9" +dependencies = [ + "base64 0.22.1", + "bytes", + "chrono", + "either", + "futures", + "home", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-rustls", + "hyper-timeout", + "hyper-util", + "jsonpath-rust", + "k8s-openapi", + "kube-core", + "pem", + "rustls", + "rustls-pemfile", + "secrecy", + "serde", + "serde_json", + "serde_yaml", + "thiserror 1.0.69", + "tokio", + "tokio-util", + "tower 0.4.13", + "tower-http", + "tracing", +] + +[[package]] +name = "kube-core" +version = "0.91.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "2797d3044a238825432129cd9537e12c2a6dacbbb5352381af5ea55e1505ed4f" +dependencies = [ + "chrono", + "form_urlencoded", + "http", + "json-patch", + "k8s-openapi", + "serde", + "serde_json", + "thiserror 1.0.69", +] + +[[package]] +name = "kube-runtime" +version = "0.91.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e463e89a1fb222c65a5469b568803153d1bf13d084a8dd42b659e6cca66edc6e" +dependencies = [ + "ahash", + "async-broadcast", + "async-stream", + "async-trait", + "backoff", + "derivative", + "futures", + "hashbrown 0.14.5", + "json-patch", + "k8s-openapi", + "kube-client", + "parking_lot", + "pin-project", + "serde", + "serde_json", + "smallvec", + "thiserror 1.0.69", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + +[[package]] +name = "libc" +version = "0.2.167" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09d6582e104315a817dff97f75133544b2e094ee22447d2acf4a74e189ba06fc" + +[[package]] +name = "libloading" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" +dependencies = [ + "cfg-if", + "windows-targets", +] + +[[package]] +name = "linux-raw-sys" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" + +[[package]] +name = "litemap" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" + +[[package]] +name = "load-balancer" +version = "0.1.0" +dependencies = [ + "armonik", + "async-stream", + "config", + "env_logger", + "futures", + "log", + "tokio", + "tower-http", +] + +[[package]] +name = "lock_api" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" + +[[package]] +name = "matchit" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + +[[package]] +name = "memchr" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "miniz_oxide" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" +dependencies = [ + "adler2", +] + +[[package]] +name = "mio" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" +dependencies = [ + "libc", + "wasi", + "windows-sys 0.52.0", +] + +[[package]] +name = "multimap" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" + +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "object" +version = "0.36.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" + +[[package]] +name = "openssl-probe" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" + +[[package]] +name = "ordered-float" +version = "2.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c" +dependencies = [ + "num-traits", +] + +[[package]] +name = "ordered-multimap" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49203cdcae0030493bad186b28da2fa25645fa276a51b6fec8010d281e02ef79" +dependencies = [ + "dlv-list", + "hashbrown 0.14.5", +] + +[[package]] +name = "parking" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" + +[[package]] +name = "parking_lot" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-targets", +] + +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + +[[package]] +name = "pathdiff" +version = "0.2.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "df94ce210e5bc13cb6651479fa48d14f601d9858cfe0467f43ae157023b938d3" + +[[package]] +name = "pdc-update" +version = "0.1.0" +dependencies = [ + "async-trait", + "clap", + "env_logger", + "futures", + "json-patch", + "k8s-openapi", + "kube", + "log", + "reqwest", + "serde", + "serde_json", + "tokio", +] + +[[package]] +name = "pem" +version = "3.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" +dependencies = [ + "base64 0.22.1", + "serde", +] + +[[package]] +name = "percent-encoding" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" + +[[package]] +name = "pest" +version = "2.7.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "879952a81a83930934cbf1786752d6dedc3b1f29e8f8fb2ad1d0a36f377cf442" +dependencies = [ + "memchr", + "thiserror 1.0.69", + "ucd-trie", +] + +[[package]] +name = "pest_derive" +version = "2.7.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d214365f632b123a47fd913301e14c946c61d1c183ee245fa76eb752e59a02dd" +dependencies = [ + "pest", + "pest_generator", +] + +[[package]] +name = "pest_generator" +version = "2.7.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb55586734301717aea2ac313f50b2eb8f60d2fc3dc01d190eefa2e625f60c4e" +dependencies = [ + "pest", + "pest_meta", + "proc-macro2", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "pest_meta" +version = "2.7.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b75da2a70cf4d9cb76833c990ac9cd3923c9a8905a8929789ce347c84564d03d" +dependencies = [ + "once_cell", + "pest", + "sha2", +] + +[[package]] +name = "petgraph" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" +dependencies = [ + "fixedbitset", + "indexmap 2.7.0", +] + +[[package]] +name = "pin-project" +version = "1.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "ppv-lite86" +version = "0.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "prettyplease" +version = "0.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" +dependencies = [ + "proc-macro2", + "syn 2.0.90", +] + +[[package]] +name = "proc-macro2" +version = "1.0.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "prost" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c0fef6c4230e4ccf618a35c59d7ede15dea37de8427500f50aff708806e42ec" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-build" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0f3e5beed80eb580c68e2c600937ac2c4eedabdfd5ef1e5b7ea4f3fba84497b" +dependencies = [ + "heck", + "itertools 0.13.0", + "log", + "multimap", + "once_cell", + "petgraph", + "prettyplease", + "prost", + "prost-types", + "regex", + "syn 2.0.90", + "tempfile", +] + +[[package]] +name = "prost-derive" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "157c5a9d7ea5c2ed2d9fb8f495b64759f7816c7eaea54ba3978f0d63000162e3" +dependencies = [ + "anyhow", + "itertools 0.13.0", + "proc-macro2", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "prost-types" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc2f1e56baa61e93533aebc21af4d2134b70f66275e0fcdf3cbe43d77ff7e8fc" +dependencies = [ + "prost", +] + +[[package]] +name = "quinn" +version = "0.11.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef" +dependencies = [ + "bytes", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash 2.1.0", + "rustls", + "socket2", + "thiserror 2.0.4", + "tokio", + "tracing", +] + +[[package]] +name = "quinn-proto" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" +dependencies = [ + "bytes", + "getrandom", + "rand", + "ring", + "rustc-hash 2.1.0", + "rustls", + "rustls-pki-types", + "slab", + "thiserror 2.0.4", + "tinyvec", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-udp" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d5a626c6807713b15cac82a6acaccd6043c9a5408c24baae07611fec3f243da" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2", + "tracing", + "windows-sys 0.59.0", +] + +[[package]] +name = "quote" +version = "1.0.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "redox_syscall" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" +dependencies = [ + "bitflags", +] + +[[package]] +name = "regex" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" + +[[package]] +name = "reqwest" +version = "0.12.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" +dependencies = [ + "base64 0.22.1", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-rustls", + "hyper-util", + "ipnet", + "js-sys", + "log", + "mime", + "once_cell", + "percent-encoding", + "pin-project-lite", + "quinn", + "rustls", + "rustls-pemfile", + "rustls-pki-types", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper 1.0.2", + "tokio", + "tokio-rustls", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "webpki-roots", + "windows-registry", +] + +[[package]] +name = "ring" +version = "0.17.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +dependencies = [ + "cc", + "cfg-if", + "getrandom", + "libc", + "spin", + "untrusted", + "windows-sys 0.52.0", +] + +[[package]] +name = "ron" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b91f7eff05f748767f183df4320a63d6936e9c6107d97c9e6bdd9784f4289c94" +dependencies = [ + "base64 0.21.7", + "bitflags", + "serde", + "serde_derive", +] + +[[package]] +name = "rust-ini" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e0698206bcb8882bf2a9ecb4c1e7785db57ff052297085a6efd4fe42302068a" +dependencies = [ + "cfg-if", + "ordered-multimap", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" + +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + +[[package]] +name = "rustc-hash" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497" + +[[package]] +name = "rustix" +version = "0.38.41" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustls" +version = "0.23.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "934b404430bb06b3fae2cba809eb45a1ab1aecd64491213d7c3301b88393f8d1" +dependencies = [ + "aws-lc-rs", + "log", + "once_cell", + "ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-native-certs" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" +dependencies = [ + "openssl-probe", + "rustls-pki-types", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pemfile" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" +dependencies = [ + "web-time", +] + +[[package]] +name = "rustls-webpki" +version = "0.102.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" +dependencies = [ + "aws-lc-rs", + "ring", + "rustls-pki-types", + "untrusted", +] + +[[package]] +name = "rustversion" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" + +[[package]] +name = "ryu" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" + +[[package]] +name = "schannel" +version = "0.1.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "secrecy" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" +dependencies = [ + "serde", + "zeroize", +] + +[[package]] +name = "security-framework" +version = "3.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1415a607e92bec364ea2cf9264646dcce0f91e6d65281bd6f2819cca3bf39c8" +dependencies = [ + "bitflags", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa39c7303dc58b5543c94d22c1766b0d31f2ee58306363ea622b10bbc075eaa2" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "serde" +version = "1.0.215" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde-value" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" +dependencies = [ + "ordered-float", + "serde", +] + +[[package]] +name = "serde_derive" +version = "1.0.215" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "serde_json" +version = "1.0.133" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "serde_spanned" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_yaml" +version = "0.9.34+deprecated" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" +dependencies = [ + "indexmap 2.7.0", + "itoa", + "ryu", + "serde", + "unsafe-libyaml", +] + +[[package]] +name = "sha2" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signal-hook-registry" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" +dependencies = [ + "libc", +] + +[[package]] +name = "slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] + +[[package]] +name = "smallvec" +version = "1.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" + +[[package]] +name = "snafu" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "223891c85e2a29c3fe8fb900c1fae5e69c2e42415e3177752e8718475efa5019" +dependencies = [ + "snafu-derive", +] + +[[package]] +name = "snafu-derive" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03c3c6b7927ffe7ecaa769ee0e3994da3b8cafc8f444578982c83ecb161af917" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "socket2" 
+version = "0.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.90" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + +[[package]] +name = "sync_wrapper" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] + +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "tempfile" +version = "3.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" +dependencies = [ + "cfg-if", + "fastrand", + "once_cell", + "rustix", + "windows-sys 0.59.0", +] + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f49a1853cf82743e3b7950f77e0f4d622ca36cf4317cba00c767838bac8d490" +dependencies = [ + "thiserror-impl 2.0.4", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "8381894bb3efe0c4acac3ded651301ceee58a15d47c2e34885ed1908ad667061" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + +[[package]] +name = "tinystr" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "tinyvec" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tokio" +version = "1.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551" +dependencies = [ + "backtrace", + "bytes", + "libc", + "mio", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "windows-sys 0.52.0", +] + +[[package]] +name = "tokio-macros" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "tokio-rustls" +version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37" +dependencies = [ + "rustls", + "tokio", +] + +[[package]] +name = "tokio-stream" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.7.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "slab", + "tokio", +] + +[[package]] +name = "toml" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.22.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" +dependencies = [ + "indexmap 2.7.0", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + +[[package]] +name = 
"tonic" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" +dependencies = [ + "async-stream", + "async-trait", + "axum", + "base64 0.22.1", + "bytes", + "h2", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-timeout", + "hyper-util", + "percent-encoding", + "pin-project", + "prost", + "socket2", + "tokio", + "tokio-stream", + "tower 0.4.13", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tonic-build" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9557ce109ea773b399c9b9e5dca39294110b74f1f342cb347a80d1fce8c26a11" +dependencies = [ + "prettyplease", + "proc-macro2", + "prost-build", + "prost-types", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 1.9.3", + "pin-project", + "pin-project-lite", + "rand", + "slab", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper 0.1.2", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-http" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" +dependencies = [ + "base64 0.21.7", + "bitflags", + "bytes", + "http", + "http-body", + "http-body-util", + "mime", + "pin-project-lite", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + +[[package]] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + +[[package]] +name = "tracing" +version = "0.1.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +dependencies = [ + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "tracing-core" +version = "0.1.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" +dependencies = [ + "once_cell", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "typenum" +version = "1.17.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" + +[[package]] +name = "ucd-trie" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" + +[[package]] +name = "unicode-ident" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" + +[[package]] +name = "unicode-segmentation" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" + +[[package]] +name = "unsafe-libyaml" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", +] + +[[package]] +name = "utf16_iter" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + +[[package]] +name = "utf8parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasm-bindgen" +version = "0.2.97" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d15e63b4482863c109d70a7b8706c1e364eb6ea449b201a76c5b89cedcec2d5c" +dependencies = [ + "cfg-if", + "once_cell", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.97" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d36ef12e3aaca16ddd3f67922bc63e48e953f126de60bd33ccc0101ef9998cd" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn 2.0.90", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.47" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9dfaf8f50e5f293737ee323940c7d8b08a66a95a419223d9f41610ca08b0833d" +dependencies = [ + "cfg-if", + "js-sys", + "once_cell", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.97" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "705440e08b42d3e4b36de7d66c944be628d579796b8090bfa3471478a2260051" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.97" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98c9ae5a76e46f4deecd0f0255cc223cfa18dc9b261213b8aa0c7b36f61b3f1d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.97" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ee99da9c5ba11bd675621338ef6fa52296b76b83305e9b6e5c77d4c286d6d49" + +[[package]] +name = "web-sys" +version = "0.3.74" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a98bc3c33f0fe7e59ad7cd041b89034fa82a7c2d4365ca538dda6cdaf513863c" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki-roots" +version = "0.26.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d642ff16b7e79272ae451b7322067cdc17cadf68c23264be9d94a32319efe7e" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "which" +version = "4.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" +dependencies = [ + "either", + "home", + "once_cell", + "rustix", +] + +[[package]] +name = "windows-registry" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +dependencies = [ + "windows-result", + "windows-strings", + "windows-targets", +] + +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result", + "windows-targets", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "winnow" +version = "0.6.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" +dependencies = [ + "memchr", +] + +[[package]] +name = "write16" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" + +[[package]] +name = "yaml-rust2" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8902160c4e6f2fb145dbe9d6760a75e3c9522d8bf796ed7047c85919ac7115f8" +dependencies = [ + "arraydeque", + "encoding_rs", + "hashlink", +] + +[[package]] +name = "yoke" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", + "synstructure", +] + 
+[[package]] +name = "zerocopy" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +dependencies = [ + "byteorder", + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "zerofrom" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", + "synstructure", +] + +[[package]] +name = "zeroize" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" + +[[package]] +name = "zerovec" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000..e67a557 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,3 @@ +[workspace] + +members = ["pdc-update", "load-balancer"] diff --git a/load-balancer/Cargo.toml b/load-balancer/Cargo.toml new file mode 100644 index 0000000..c76b398 --- /dev/null +++ b/load-balancer/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "load-balancer" +version = "0.1.0" +edition = "2021" + +[dependencies] +armonik = { version = "3.21.0-beta-3", features = ["client", "server"] } +async-stream = "0.3" +futures = "0.3" +config = "0.14" +log = "0.4" +env_logger = "0.11" +tokio = { version = "1.0", features = ["full"] } +tower-http = { version = "0.5", features = ["trace"] } diff --git a/load-balancer/src/cluster.rs b/load-balancer/src/cluster.rs new file mode 100644 index 0000000..daba5ad --- /dev/null +++ b/load-balancer/src/cluster.rs @@ -0,0 +1,166 @@ +use std::{hash::Hash, ops::Deref}; + +use armonik::reexports::{tokio_stream, tonic}; + +#[derive(Debug, Default, Clone)] +pub struct Cluster { + endpoint: armonik::ClientConfig, +} + +impl PartialEq for Cluster { + fn eq(&self, other: &Self) -> bool { + self.endpoint.endpoint == other.endpoint.endpoint + && self.endpoint.identity == other.endpoint.identity + && self.endpoint.override_target == other.endpoint.override_target + } +} + +impl Eq for Cluster {} + +impl Hash for Cluster { + fn hash(&self, state: &mut H) { + self.endpoint.endpoint.hash(state); + self.endpoint + .identity + .as_ref() + .map(|identity| identity.0.as_ref()) + .hash(state); + self.endpoint.override_target.hash(state); + } +} + +impl Cluster { + pub fn new(config: armonik::ClientConfig) -> Self { + Self { endpoint: config } + } + 
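+    /// Connect to this cluster and return a client built from the stored endpoint configuration.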
+ pub async fn client(&self) -> Result { + Ok(ClusterClient( + armonik::Client::with_config(self.endpoint.clone()).await?, + )) + } +} + +pub struct ClusterClient(armonik::Client); + +impl Deref for ClusterClient { + type Target = armonik::Client; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl AsRef for ClusterClient { + fn as_ref(&self) -> &armonik::Client { + &self.0 + } +} + +impl ClusterClient { + pub async fn get_all_sessions( + &self, + filters: armonik::sessions::filter::Or, + sort: armonik::sessions::Sort, + ) -> Result< + impl tokio_stream::Stream, tonic::Status>>, + tonic::Status, + > { + let mut client = self.sessions(); + let page_size = 1000; + let mut page_index = 0; + + Ok(async_stream::try_stream! { + loop { + let page = client + .list( + filters.clone(), + sort.clone(), + true, + page_index, + page_size, + ) + .await + .map_err(crate::utils::IntoStatus::into_status)?; + + if page.sessions.is_empty() { + break; + } + + page_index += 1; + + yield page.sessions; + } + }) + } + + pub async fn get_all_partitions( + &self, + filters: armonik::partitions::filter::Or, + sort: armonik::partitions::Sort, + ) -> Result< + impl tokio_stream::Stream, tonic::Status>>, + tonic::Status, + > { + let mut client = self.partitions(); + let page_size = 1000; + let mut page_index = 0; + + Ok(async_stream::try_stream! { + loop { + let page = client + .list( + filters.clone(), + sort.clone(), + page_index, + page_size, + ) + .await + .map_err(crate::utils::IntoStatus::into_status)?; + + if page.partitions.is_empty() { + break; + } + + page_index += 1; + + yield page.partitions; + } + }) + } + + pub async fn get_all_applications( + &self, + filters: armonik::applications::filter::Or, + sort: armonik::applications::Sort, + ) -> Result< + impl tokio_stream::Stream, tonic::Status>>, + tonic::Status, + > { + let mut client = self.applications(); + let page_size = 1000; + let mut page_index = 0; + + Ok(async_stream::try_stream! 
{ + loop { + let page = client + .list( + filters.clone(), + sort.clone(), + page_index, + page_size, + ) + .await + .map_err(crate::utils::IntoStatus::into_status)?; + + if page.applications.is_empty() { + break; + } + + page_index += 1; + + yield page.applications; + } + }) + } +} diff --git a/load-balancer/src/main.rs b/load-balancer/src/main.rs new file mode 100644 index 0000000..97d8e5f --- /dev/null +++ b/load-balancer/src/main.rs @@ -0,0 +1,179 @@ +use std::sync::Arc; + +use armonik::reexports::tonic; +use tower_http::trace::TraceLayer; + +pub mod cluster; +pub mod ref_guard; +pub mod service; +pub mod utils; + +/// Wait for termination signal (either SIGINT or SIGTERM) +#[cfg(unix)] +async fn wait_terminate() { + use futures::{stream::FuturesUnordered, StreamExt}; + use tokio::signal::unix::{signal, SignalKind}; + let mut signals = Vec::new(); + + // Register signal handlers + for sig in [SignalKind::terminate(), SignalKind::interrupt()] { + match signal(sig) { + Ok(sig) => signals.push(sig), + Err(err) => log::error!("Could not register signal handler: {err}"), + } + } + + // Wait for the first signal to trigger + let mut signals = signals + .iter_mut() + .map(|sig| sig.recv()) + .collect::>(); + + loop { + match signals.next().await { + // One of the signal triggered -> stop waiting + Some(Some(())) => break, + // One of the signal handler has been stopped -> continue waiting for the others + Some(None) => (), + // No more signal handlers are available, so wait indefinitely + None => futures::future::pending::<()>().await, + } + } +} + +#[cfg(windows)] +macro_rules! win_signal { + ($($sig:ident),*$(,)?) => { + $( + let $sig = async { + match tokio::signal::windows::$sig() { + Ok(mut $sig) => { + if $sig.recv().await.is_some() { + return; + } + } + Err(err) => log::error!( + "Could not register signal handler for {}: {err}", + stringify!($sig), + ), + } + futures::future::pending::<()>().await; + }; + )* + tokio::select! 
{ + $( + _ = $sig => {} + )* + } + } +} + +/// Wait for termination signal (either SIGINT or SIGTERM) +#[cfg(windows)] +async fn wait_terminate() { + win_signal!(ctrl_c, ctrl_close, ctrl_logoff, ctrl_shutdown); +} + +#[tokio::main] +async fn main() { + env_logger::init(); + + let service = Arc::new( + service::Service::new([( + String::from("A"), + cluster::Cluster::new(armonik::ClientConfig::from_env().unwrap()), + )]) + .await, + ); + + let router = tonic::transport::Server::builder() + .layer(TraceLayer::new_for_grpc()) + .add_service( + armonik::api::v3::applications::applications_server::ApplicationsServer::from_arc( + service.clone(), + ), + ) + .add_service( + armonik::api::v3::auth::authentication_server::AuthenticationServer::from_arc( + service.clone(), + ), + ) + .add_service( + armonik::api::v3::events::events_server::EventsServer::from_arc(service.clone()), + ) + .add_service( + armonik::api::v3::partitions::partitions_server::PartitionsServer::from_arc( + service.clone(), + ), + ) + .add_service( + armonik::api::v3::health_checks::health_checks_service_server::HealthChecksServiceServer::from_arc( + service.clone(), + ), + ) + .add_service( + armonik::api::v3::results::results_server::ResultsServer::from_arc(service.clone()), + ) + .add_service( + armonik::api::v3::sessions::sessions_server::SessionsServer::from_arc(service.clone()), + ) + .add_service( + armonik::api::v3::submitter::submitter_server::SubmitterServer::from_arc( + service.clone(), + ), + ) + .add_service(armonik::api::v3::tasks::tasks_server::TasksServer::from_arc(service.clone())) + .add_service( + armonik::api::v3::versions::versions_server::VersionsServer::from_arc(service.clone()), + ); + + let mut background_future = tokio::spawn({ + let service = service.clone(); + + async move { + let mut timer = tokio::time::interval(tokio::time::Duration::from_secs(10)); + timer.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay); + + loop { + timer.tick().await; + if let Err(err) = service.update_sessions().await { + log::error!("Error while fetching sessions from clusters:\n{err:?}"); + } + } + } + }); + + let mut service_future = tokio::spawn(router.serve("0.0.0.0:1337".parse().unwrap())); + + log::info!("Application running"); + + tokio::select! 
{ + output = &mut background_future => { + if let Err(err) = output { + log::error!("Background future had an error: {err:?}"); + } + } + output = &mut service_future => { + match output { + Ok(Ok(())) => (), + Ok(Err(err)) => { + log::error!("Service had an error: {err:?}"); + } + Err(err) => { + log::error!("Service future had an error: {err:?}"); + } + } + } + _ = wait_terminate() => { + log::info!("Application stopping"); + } + } + + background_future.abort(); + service_future.abort(); + + _ = background_future.await; + _ = service_future.await; + + log::info!("Application stopped"); +} diff --git a/load-balancer/src/ref_guard.rs b/load-balancer/src/ref_guard.rs new file mode 100644 index 0000000..d5a1e89 --- /dev/null +++ b/load-balancer/src/ref_guard.rs @@ -0,0 +1,112 @@ +use std::{ + mem::ManuallyDrop, + ops::{Deref, DerefMut}, +}; + +pub struct RefGuard +where + G: Unpin, + T: Unpin, +{ + guard: std::mem::ManuallyDrop, + reference: std::mem::ManuallyDrop, +} + +impl Drop for RefGuard { + fn drop(&mut self) { + unsafe { + // Order is crucial for safety + ManuallyDrop::drop(&mut self.reference); + ManuallyDrop::drop(&mut self.guard); + } + } +} + +impl RefGuard { + pub fn new_deref_mut(mut guard: G) -> Self + where + G: DerefMut, + { + // As G is Unpin, it is safe to move the guard after dereferencing it + let mut target = std::ptr::NonNull::from(guard.deref_mut()); + Self { + guard: ManuallyDrop::new(guard), + reference: ManuallyDrop::new(unsafe { target.as_mut() }), + } + } +} + +impl RefGuard { + pub fn new_deref(guard: G) -> Self + where + G: Deref, + { + // As G is Unpin, it is safe to move the guard after dereferencing it + let target = std::ptr::NonNull::from(guard.deref()); + Self { + guard: ManuallyDrop::new(guard), + reference: ManuallyDrop::new(unsafe { target.as_ref() }), + } + } +} + +impl RefGuard { + pub fn get(&self) -> &T { + let x = self.reference.deref(); + x + } + + pub fn get_mut(&mut self) -> &mut T { + self.reference.deref_mut() + } + + pub fn map(mut self, f: impl FnOnce(T) -> U) -> RefGuard { + let (guard, reference) = unsafe { + let ref_guard = ( + ManuallyDrop::take(&mut self.guard), + ManuallyDrop::take(&mut self.reference), + ); + std::mem::forget(self); + ref_guard + }; + RefGuard { + guard: ManuallyDrop::new(guard), + reference: ManuallyDrop::new(f(reference)), + } + } +} + +impl<'a, G: Unpin, T: Unpin + Deref> RefGuard { + pub fn map_deref(self) -> RefGuard::Target> { + self.map(Deref::deref) + } +} +impl<'a, G: Unpin, T: Unpin + DerefMut> RefGuard { + pub fn map_deref_mut(self) -> RefGuard::Target> { + self.map(DerefMut::deref_mut) + } + pub fn map_deref(self) -> RefGuard::Target> { + self.map(|r| r.deref_mut() as &_) + } +} + +impl Deref for RefGuard { + type Target = T; + + fn deref(&self) -> &Self::Target { + self.get() + } +} +impl Deref for RefGuard { + type Target = T; + + fn deref(&self) -> &Self::Target { + self.get() + } +} + +impl DerefMut for RefGuard { + fn deref_mut(&mut self) -> &mut Self::Target { + self.get_mut() + } +} diff --git a/load-balancer/src/service/applications.rs b/load-balancer/src/service/applications.rs new file mode 100644 index 0000000..935444b --- /dev/null +++ b/load-balancer/src/service/applications.rs @@ -0,0 +1,93 @@ +use std::sync::Arc; + +use armonik::{ + applications, + reexports::{tokio_stream::StreamExt, tokio_util, tonic}, + server::ApplicationsService, +}; + +use crate::utils::{run_with_cancellation, IntoStatus}; + +use super::Service; + +impl ApplicationsService for Service { + async fn list( + self: Arc, + 
request: applications::list::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + let Ok(page) = usize::try_from(request.page) else { + return Err(tonic::Status::invalid_argument("Page should be positive")); + }; + let Ok(page_size) = usize::try_from(request.page_size) else { + return Err(tonic::Status::invalid_argument( + "Page size should be positive", + )); + }; + + let mut applications = Vec::new(); + + run_with_cancellation! { + use cancellation_token; + + for cluster in self.clusters.values() { + let client = cluster.client().await.map_err(IntoStatus::into_status)?; + let stream = client + .get_all_applications(request.filters.clone(), request.sort.clone()) + .await?; + + let mut stream = std::pin::pin!(stream); + while let Some(chunk) = stream.try_next().await? { + applications.extend(chunk); + } + } + + if !request.sort.fields.is_empty() { + applications.sort_by(|a, b| { + for field in &request.sort.fields { + let ordering = match field { + applications::Field::Unspecified => a.name.cmp(&b.name), + applications::Field::Name => a.name.cmp(&b.name), + applications::Field::Version => a.version.cmp(&b.version), + applications::Field::Namespace => a.namespace.cmp(&b.namespace), + applications::Field::Service => a.service.cmp(&b.service), + }; + + match (ordering, &request.sort.direction) { + ( + std::cmp::Ordering::Less, + armonik::SortDirection::Unspecified | armonik::SortDirection::Asc, + ) => return std::cmp::Ordering::Less, + (std::cmp::Ordering::Less, armonik::SortDirection::Desc) => { + return std::cmp::Ordering::Greater + } + (std::cmp::Ordering::Equal, _) => (), + ( + std::cmp::Ordering::Greater, + armonik::SortDirection::Unspecified | armonik::SortDirection::Asc, + ) => return std::cmp::Ordering::Greater, + (std::cmp::Ordering::Greater, armonik::SortDirection::Desc) => { + return std::cmp::Ordering::Less + } + } + } + + std::cmp::Ordering::Equal + }); + } + + let total = applications.len() as i32; + + Ok(armonik::applications::list::Response { + applications: applications + .into_iter() + .skip(page * page_size) + .take(page_size) + .collect(), + page: request.page, + page_size: request.page_size, + total, + }) + } + } +} diff --git a/load-balancer/src/service/auth.rs b/load-balancer/src/service/auth.rs new file mode 100644 index 0000000..448df2b --- /dev/null +++ b/load-balancer/src/service/auth.rs @@ -0,0 +1,52 @@ +use std::sync::Arc; + +use armonik::{ + auth, + reexports::{tokio_util, tonic}, + server::AuthService, +}; + +use crate::utils::{run_with_cancellation, IntoStatus}; + +use super::Service; + +impl AuthService for Service { + async fn current_user( + self: Arc, + _request: auth::current_user::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + run_with_cancellation! { + use cancellation_token; + + let mut users = Vec::new(); + + for cluster in self.clusters.values() { + let user = cluster + .client() + .await + .map_err(IntoStatus::into_status)? 
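+                // Fetch the calling user's identity from this cluster; all clusters must report the same user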
+ .auth() + .current_user() + .await + .map_err(IntoStatus::into_status)?; + + users.push(user); + } + + let mut users = users.into_iter(); + + let Some(user) = users.next() else { + return Err(tonic::Status::internal("No cluster")); + }; + + for other in users { + if user != other { + return Err(tonic::Status::internal("Mismatch between clusters")); + } + } + + Ok(auth::current_user::Response { user }) + } + } +} diff --git a/load-balancer/src/service/events.rs b/load-balancer/src/service/events.rs new file mode 100644 index 0000000..93d0280 --- /dev/null +++ b/load-balancer/src/service/events.rs @@ -0,0 +1,59 @@ +use std::sync::Arc; + +use armonik::{ + events, + reexports::{tokio_stream::StreamExt, tokio_util, tonic}, + server::EventsService, +}; + +use crate::utils::{run_with_cancellation, IntoStatus}; + +use super::Service; + +impl EventsService for Service { + async fn subscribe( + self: Arc, + request: events::subscribe::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> Result< + impl tonic::codegen::tokio_stream::Stream< + Item = Result, + > + Send, + tonic::Status, + > { + run_with_cancellation! { + use cancellation_token.clone(); + + let events::subscribe::Request { + session_id, + task_filters, + result_filters, + returned_events, + } = request; + + let client = self + .get_cluster_from_session(&session_id) + .await? + .ok_or_else(|| tonic::Status::not_found(format!("Session {} was not found", session_id)))? + .client() + .await + .map_err(IntoStatus::into_status)?; + + let stream = client + .events() + .subscribe(session_id, task_filters, result_filters, returned_events) + .await + .map_err(IntoStatus::into_status)?; + + let stream = async_stream::try_stream! { + let mut stream = std::pin::pin!(stream); + + while let Some(Some(event)) = cancellation_token.run_until_cancelled(stream.next()).await { + yield event.map_err(IntoStatus::into_status)?; + } + }; + + Ok(stream) + } + } +} diff --git a/load-balancer/src/service/health_check.rs b/load-balancer/src/service/health_check.rs new file mode 100644 index 0000000..5d84db1 --- /dev/null +++ b/load-balancer/src/service/health_check.rs @@ -0,0 +1,61 @@ +use std::{collections::HashMap, sync::Arc}; + +use armonik::{ + health_checks, + reexports::{tokio_util, tonic}, + server::HealthChecksService, +}; + +use crate::utils::{run_with_cancellation, IntoStatus}; + +use super::Service; + +impl HealthChecksService for Service { + async fn check( + self: Arc, + _request: health_checks::check::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + run_with_cancellation! { + use cancellation_token; + + let mut services = HashMap::::new(); + + for cluster in self.clusters.values() { + let health = cluster + .client() + .await + .map_err(IntoStatus::into_status)? 
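+                // Collect this cluster's health report; entries are merged below, keeping the most severe status per service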
+ .health_checks() + .check() + .await + .map_err(IntoStatus::into_status)?; + + for service in health { + match services.entry(service.name) { + std::collections::hash_map::Entry::Occupied(mut occupied_entry) => { + let health = occupied_entry.get_mut(); + if health.0 < service.health { + *health = (service.health, service.message); + } + } + std::collections::hash_map::Entry::Vacant(vacant_entry) => { + vacant_entry.insert((service.health, service.message)); + } + } + } + } + + Ok(health_checks::check::Response { + services: services + .into_iter() + .map(|(name, (health, message))| health_checks::ServiceHealth { + name, + message, + health, + }) + .collect(), + }) + } + } +} diff --git a/load-balancer/src/service/mod.rs b/load-balancer/src/service/mod.rs new file mode 100644 index 0000000..769872c --- /dev/null +++ b/load-balancer/src/service/mod.rs @@ -0,0 +1,323 @@ +#![allow(clippy::mutable_key_type)] + +use std::{ + collections::HashMap, + sync::{atomic::AtomicUsize, Arc}, +}; + +use armonik::reexports::{tokio::sync::RwLock, tokio_stream::StreamExt, tonic::Status}; + +use crate::{cluster::Cluster, utils::IntoStatus}; + +mod applications; +mod auth; +mod events; +mod health_check; +mod partitions; +mod results; +mod sessions; +mod submitter; +mod tasks; +mod versions; + +pub struct Service { + clusters: HashMap>, + mapping_session: RwLock, armonik::sessions::Raw)>>, + mapping_result: RwLock>>, + mapping_task: RwLock>>, + counter: AtomicUsize, +} + +impl Service { + pub async fn new(clusters: impl IntoIterator) -> Self { + Self { + clusters: clusters + .into_iter() + .map(|(name, cluster)| (name, Arc::new(cluster))) + .collect(), + mapping_session: RwLock::new(Default::default()), + mapping_result: RwLock::new(Default::default()), + mapping_task: RwLock::new(Default::default()), + counter: AtomicUsize::new(0), + } + } + + pub async fn get_cluster_from_sessions<'a>( + &'a self, + session_ids: &[&str], + ) -> Result, Vec>, Status> { + let mut missing_ids = Vec::new(); + let mut mapping = HashMap::, Vec>::new(); + + { + let guard = self.mapping_session.read().await; + + for &session_id in session_ids { + if let Some(cluster) = guard.get(session_id) { + match mapping.entry(cluster.0.clone()) { + std::collections::hash_map::Entry::Occupied(mut occupied_entry) => { + occupied_entry.get_mut().push(String::from(session_id)); + } + std::collections::hash_map::Entry::Vacant(vacant_entry) => { + vacant_entry.insert(vec![String::from(session_id)]); + } + } + } else { + missing_ids.push(session_id); + } + } + } + + if !missing_ids.is_empty() { + let filter = missing_ids + .iter() + .map(|&session_id| { + [armonik::sessions::filter::Field { + field: armonik::sessions::Field::Raw( + armonik::sessions::RawField::SessionId, + ), + condition: armonik::sessions::filter::Condition::String( + armonik::FilterString { + value: String::from(session_id), + operator: armonik::FilterStringOperator::Equal, + }, + ), + }] + }) + .collect::>(); + + for cluster in self.clusters.values() { + let sessions = cluster + .client() + .await + .map_err(IntoStatus::into_status)? + .sessions() + .list( + filter.clone(), + Default::default(), + true, + 0, + filter.len() as i32, + ) + .await + .map_err(IntoStatus::into_status)? 
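+                    // Sessions found here belong to this cluster; they are cached below for future routing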
+ .sessions; + + if !sessions.is_empty() { + let cluster_mapping = mapping.entry(cluster.clone()).or_default(); + let mut guard = self.mapping_session.write().await; + for session in sessions { + let session_id = session.session_id.clone(); + guard + .entry(session_id.clone()) + .or_insert_with(|| (cluster.clone(), session)); + cluster_mapping.push(session_id); + } + } + } + } + + Ok(mapping) + } + + pub async fn get_cluster_from_session( + &self, + session_id: &str, + ) -> Result>, Status> { + let sessions = self.get_cluster_from_sessions(&[session_id]).await?; + + Ok(sessions.into_keys().next()) + } + + pub async fn get_cluster_from_results<'a>( + &'a self, + result_ids: &[&str], + ) -> Result, Vec>, Status> { + let mut missing_ids = Vec::new(); + let mut mapping = HashMap::, Vec>::new(); + + { + let guard = self.mapping_result.read().await; + + for &result_id in result_ids { + if let Some(cluster) = guard.get(result_id) { + match mapping.entry(cluster.clone()) { + std::collections::hash_map::Entry::Occupied(mut occupied_entry) => { + occupied_entry.get_mut().push(String::from(result_id)); + } + std::collections::hash_map::Entry::Vacant(vacant_entry) => { + vacant_entry.insert(vec![String::from(result_id)]); + } + } + } else { + missing_ids.push(result_id); + } + } + } + + if !missing_ids.is_empty() { + let filter = missing_ids + .iter() + .map(|&result_id| { + [armonik::results::filter::Field { + field: armonik::results::Field::ResultId, + condition: armonik::results::filter::Condition::String( + armonik::FilterString { + value: String::from(result_id), + operator: armonik::FilterStringOperator::Equal, + }, + ), + }] + }) + .collect::>(); + + for cluster in self.clusters.values() { + let results = cluster + .client() + .await + .map_err(IntoStatus::into_status)? + .results() + .list(filter.clone(), Default::default(), 0, filter.len() as i32) + .await + .map_err(IntoStatus::into_status)? 
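+                    // Results found here belong to this cluster; they are cached below for future routing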
+ .results; + + if !results.is_empty() { + let cluster_mapping = mapping.entry(cluster.clone()).or_default(); + let mut guard = self.mapping_result.write().await; + for result in results { + guard + .entry(result.result_id.clone()) + .or_insert_with(|| cluster.clone()); + cluster_mapping.push(result.result_id); + } + } + } + } + + Ok(mapping) + } + + pub async fn get_cluster_from_result( + &self, + result_id: &str, + ) -> Result>, Status> { + let results = self.get_cluster_from_results(&[result_id]).await?; + + Ok(results.into_keys().next()) + } + + pub async fn get_cluster_from_tasks<'a>( + &'a self, + task_ids: &[&str], + ) -> Result, Vec>, Status> { + let mut missing_ids = Vec::new(); + let mut mapping = HashMap::, Vec>::new(); + + { + let guard = self.mapping_task.read().await; + + for &task_id in task_ids { + if let Some(cluster) = guard.get(task_id) { + match mapping.entry(cluster.clone()) { + std::collections::hash_map::Entry::Occupied(mut occupied_entry) => { + occupied_entry.get_mut().push(String::from(task_id)); + } + std::collections::hash_map::Entry::Vacant(vacant_entry) => { + vacant_entry.insert(vec![String::from(task_id)]); + } + } + } else { + missing_ids.push(task_id); + } + } + } + + if !missing_ids.is_empty() { + let filter = missing_ids + .iter() + .map(|&result_id| { + [armonik::tasks::filter::Field { + field: armonik::tasks::Field::Summary(armonik::tasks::SummaryField::TaskId), + condition: armonik::tasks::filter::Condition::String( + armonik::FilterString { + value: String::from(result_id), + operator: armonik::FilterStringOperator::Equal, + }, + ), + }] + }) + .collect::>(); + + for cluster in self.clusters.values() { + let tasks = cluster + .client() + .await + .map_err(IntoStatus::into_status)? + .tasks() + .list( + filter.clone(), + Default::default(), + false, + 0, + filter.len() as i32, + ) + .await + .map_err(IntoStatus::into_status)? + .tasks; + + if !tasks.is_empty() { + let cluster_mapping = mapping.entry(cluster.clone()).or_default(); + let mut guard = self.mapping_task.write().await; + for task in tasks { + guard + .entry(task.task_id.clone()) + .or_insert_with(|| cluster.clone()); + cluster_mapping.push(task.task_id); + } + } + } + } + + Ok(mapping) + } + + pub async fn get_cluster_from_task( + &self, + task_id: &str, + ) -> Result>, Status> { + let results = self.get_cluster_from_tasks(&[task_id]).await?; + + Ok(results.into_keys().next()) + } + + pub async fn update_sessions(&self) -> Result<(), Status> { + for cluster in self.clusters.values() { + let mut stream = std::pin::pin!( + cluster + .client() + .await + .map_err(IntoStatus::into_status)? + .get_all_sessions(Default::default(), Default::default()) + .await? + ); + + while let Some(chunk) = stream.try_next().await? 
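+            // Merge each page of sessions into the session-to-cluster cache, refreshing entries that already exist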
{ + let mut guard = self.mapping_session.write().await; + + for session in chunk { + match guard.entry(session.session_id.clone()) { + std::collections::hash_map::Entry::Occupied(mut occupied_entry) => { + occupied_entry.get_mut().1 = session + } + std::collections::hash_map::Entry::Vacant(vacant_entry) => { + vacant_entry.insert((cluster.clone(), session)); + } + } + } + } + } + + Ok(()) + } +} diff --git a/load-balancer/src/service/partitions.rs b/load-balancer/src/service/partitions.rs new file mode 100644 index 0000000..fbcc1a5 --- /dev/null +++ b/load-balancer/src/service/partitions.rs @@ -0,0 +1,114 @@ +use std::sync::Arc; + +use armonik::{ + partitions, + reexports::{tokio_stream::StreamExt, tokio_util, tonic}, + server::PartitionsService, +}; + +use crate::utils::{run_with_cancellation, IntoStatus}; + +use super::Service; + +impl PartitionsService for Service { + async fn list( + self: Arc, + request: partitions::list::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + let Ok(page) = usize::try_from(request.page) else { + return Err(tonic::Status::invalid_argument("Page should be positive")); + }; + let Ok(page_size) = usize::try_from(request.page_size) else { + return Err(tonic::Status::invalid_argument( + "Page size should be positive", + )); + }; + + let mut partitions = Vec::new(); + + run_with_cancellation! { + use cancellation_token; + + for cluster in self.clusters.values() { + let client = cluster.client().await + .map_err(IntoStatus::into_status)?; + let stream = + client.get_all_partitions(request.filters.clone(), request.sort.clone()).await?; + + let mut stream = std::pin::pin!(stream); + while let Some(chunk) = stream.try_next().await? { + partitions.extend(chunk); + } + } + + match &request.sort.field { + partitions::Field::Unspecified => (), + partitions::Field::Id => partitions.sort_by(|a, b| a.partition_id.cmp(&b.partition_id)), + partitions::Field::ParentPartitionIds => { + partitions.sort_by(|a, b| a.parent_partition_ids.cmp(&b.parent_partition_ids)) + } + partitions::Field::PodReserved => { + partitions.sort_by(|a, b| a.pod_reserved.cmp(&b.pod_reserved)) + } + partitions::Field::PodMax => partitions.sort_by(|a, b| a.pod_max.cmp(&b.pod_max)), + partitions::Field::PreemptionPercentage => { + partitions.sort_by(|a, b| a.preemption_percentage.cmp(&b.preemption_percentage)) + } + partitions::Field::Priority => partitions.sort_by(|a, b| a.priority.cmp(&b.priority)), + } + + if matches!(&request.sort.direction, armonik::SortDirection::Desc) { + partitions.reverse(); + } + + let total = partitions.len() as i32; + + Ok(armonik::partitions::list::Response { + partitions: partitions + .into_iter() + .skip(page * page_size) + .take(page_size) + .collect(), + page: request.page, + page_size: request.page_size, + total, + }) + } + } + + async fn get( + self: Arc, + request: partitions::get::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + run_with_cancellation! 
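+        // Ask each cluster in turn for the partition and return the first successful response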
{ + use cancellation_token; + + let mut err = None; + + for cluster in self.clusters.values() { + let client = match cluster.client().await { + Ok(client) => client, + Err(error) => { + err = Some(error.into_status()); + continue; + } + }; + + match client.partitions().call(request.clone()).await { + Ok(response) => return Ok(response), + Err(error) => { + err = Some(error.into_status()); + continue; + } + }; + } + + match err { + Some(err) => Err(err), + None => Err(tonic::Status::internal("No cluster")), + } + } + } +} diff --git a/load-balancer/src/service/results.rs b/load-balancer/src/service/results.rs new file mode 100644 index 0000000..0bc3156 --- /dev/null +++ b/load-balancer/src/service/results.rs @@ -0,0 +1,293 @@ +use std::sync::Arc; + +use armonik::{ + reexports::{tokio, tokio_stream::StreamExt, tokio_util, tonic}, + results, + server::ResultsService, +}; + +use crate::utils::{run_with_cancellation, IntoStatus}; + +use super::Service; + +impl ResultsService for Service { + async fn list( + self: Arc, + request: results::list::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + run_with_cancellation! { + use cancellation_token; + + let mut requested_results = Vec::new(); + let mut requested_sessions = Vec::new(); + + for and in &request.filters.or { + let mut has_check = false; + + for field in and { + match field { + armonik::results::filter::Field { + field: armonik::results::Field::SessionId, + condition: + armonik::results::filter::Condition::String(armonik::FilterString { + value, + operator: armonik::FilterStringOperator::Equal, + }), + } => { + requested_sessions.push(value.as_str()); + has_check = true; + } + armonik::results::filter::Field { + field: armonik::results::Field::ResultId, + condition: + armonik::results::filter::Condition::String(armonik::FilterString { + value, + operator: armonik::FilterStringOperator::Equal, + }), + } => { + requested_results.push(value.as_str()); + has_check = true; + } + _ => {} + } + } + + if !has_check { + return Err(armonik::reexports::tonic::Status::invalid_argument(String::from("Cannot determine the cluster from the filter, missing condition on session_id"))); + } + } + + let (sessions, results) = tokio::join!( + self.get_cluster_from_sessions(&requested_sessions), + self.get_cluster_from_results(&requested_results) + ); + + let (mut sessions, mut results) = + (sessions?.into_iter(), results?.into_iter()); + + let cluster = match (sessions.next(), results.next()) { + (None, None) => { + return Ok(results::list::Response { + results: Vec::new(), + page: request.page, + page_size: request.page_size, + total: 0, + }); + } + (None, Some(res_cluster)) => res_cluster.0, + (Some(ses_cluster), None) => ses_cluster.0, + (Some(ses_cluster), Some(res_cluster)) => { + if res_cluster != ses_cluster { + return Err(tonic::Status::invalid_argument( + "Cannot determine the cluster from the filter, multiple clusters targeted", + )); + } + ses_cluster.0 + } + }; + match (sessions.next(), results.next()) { + (None, None) => {} + _ => { + return Err(tonic::Status::invalid_argument( + "Cannot determine the cluster from the filter, multiple clusters targeted", + )); + } + } + + match cluster + .client() + .await + .map_err(IntoStatus::into_status)? + .results() + .call(request) + .await + { + Ok(response) => Ok(response), + Err(err) => match err { + armonik::client::RequestError::Grpc { source, .. 
} => Err(*source), + err => Err(tonic::Status::internal(err.to_string())), + }, + } + } + } + + async fn get( + self: Arc, + request: results::get::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + crate::utils::impl_unary!(self.results, request, cancellation_token, {get_cluster_from_result, id, "Result {} was not found"}) + } + + async fn get_owner_task_id( + self: Arc, + request: results::get_owner_task_id::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + crate::utils::impl_unary!(self.results, request, cancellation_token, session) + } + + async fn create_metadata( + self: Arc, + request: results::create_metadata::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + crate::utils::impl_unary!(self.results, request, cancellation_token, session) + } + + async fn create( + self: Arc, + request: results::create::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + crate::utils::impl_unary!(self.results, request, cancellation_token, session) + } + + async fn delete_data( + self: Arc, + request: results::delete_data::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + crate::utils::impl_unary!(self.results, request, cancellation_token, session) + } + + async fn get_service_configuration( + self: Arc, + _request: results::get_service_configuration::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + crate::utils::run_with_cancellation! { + use cancellation_token; + + let mut min = 1 << 24; + + for (_, cluster) in self.clusters.iter() { + let conf = cluster + .client() + .await + .map_err(IntoStatus::into_status)? + .results() + .get_service_configuration() + .await + .map_err(IntoStatus::into_status)?; + + min = min.min(conf.data_chunk_max_size); + } + + Ok(results::get_service_configuration::Response { + data_chunk_max_size: min, + }) + } + } + + async fn download( + self: Arc, + request: results::download::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> Result< + impl tonic::codegen::tokio_stream::Stream< + Item = Result, + > + Send, + tonic::Status, + > { + crate::utils::run_with_cancellation! { + use cancellation_token.clone(); + + let Some(cluster) = self.get_cluster_from_session(&request.session_id).await? else { + return Err(tonic::Status::not_found(format!( + "Session {} was not found", + request.session_id + ))); + }; + + let mut stream = cluster + .client() + .await + .map_err(IntoStatus::into_status)? + .results() + .download(request.session_id, request.result_id) + .await + .map_err(IntoStatus::into_status)?; + + Ok(async_stream::try_stream! { + while let Some(Some(chunk)) = cancellation_token.run_until_cancelled(stream.next()).await { + let chunk = chunk.map_err(IntoStatus::into_status)?; + yield results::download::Response{ data_chunk: chunk }; + } + }) + } + } + + async fn upload( + self: Arc, + request: impl tonic::codegen::tokio_stream::Stream< + Item = Result, + > + Send + + 'static, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> Result { + let mut request = Box::pin(request); + + match crate::utils::run_with_cancellation!(cancellation_token, request.next()) { + Some(Ok(results::upload::Request::Identifier { + session_id, + result_id, + })) => { + let Some(cluster) = self.get_cluster_from_session(&session_id).await? 
else { + return Err(tonic::Status::not_found(format!( + "Session {} was not found", + session_id + ))); + }; + + let (tx, rx) = tokio::sync::oneshot::channel(); + let mut tx = Some(tx); + + let stream = request.map_while(move |r| match r { + Ok(results::upload::Request::DataChunk(vec)) => Some(vec), + invalid => { + if let Some(tx) = tx.take() { + _ = tx.send(invalid); + } + None + } + }); + + let mut result_client = cluster + .client() + .await + .map_err(IntoStatus::into_status)? + .results(); + + tokio::select! { + result = result_client.upload(session_id, result_id, stream) => { + match result { + Ok(result) => Ok(results::upload::Response { result }), + Err(err) => Err(err.into_status()) + } + } + Ok(invalid) = rx => { + match invalid { + Ok(results::upload::Request::DataChunk(_)) => unreachable!(), + Ok(results::upload::Request::Identifier { .. }) => { + Err(tonic::Status::invalid_argument("Invalid upload request, identifier sent multiple times")) + } + Err(err) => Err(err), + } + } + _ = cancellation_token.cancelled() => Err(tonic::Status::aborted("Cancellation token has been triggered")) + } + } + Some(Ok(results::upload::Request::DataChunk(_))) => { + Err(tonic::Status::invalid_argument( + "Could not upload result, data sent before identifier", + )) + } + Some(Err(err)) => Err(err), + None => Err(tonic::Status::invalid_argument( + "Could not upload result, no identifier nor data sent", + )), + } + } +} diff --git a/load-balancer/src/service/sessions.rs b/load-balancer/src/service/sessions.rs new file mode 100644 index 0000000..902ec03 --- /dev/null +++ b/load-balancer/src/service/sessions.rs @@ -0,0 +1,276 @@ +use std::sync::Arc; + +use armonik::{ + reexports::{tokio_util, tonic}, + server::SessionsService, + sessions, +}; + +use crate::utils::{impl_unary, run_with_cancellation, IntoStatus}; + +use super::Service; + +impl SessionsService for Service { + async fn list( + self: Arc, + request: sessions::list::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + let Ok(page) = usize::try_from(request.page) else { + return Err(tonic::Status::invalid_argument("Page should be positive")); + }; + let Ok(page_size) = usize::try_from(request.page_size) else { + return Err(tonic::Status::invalid_argument( + "Page size should be positive", + )); + }; + let guard = self.mapping_session.read().await; + + let mut sessions = Vec::new(); + + for (_, (_, session)) in guard.iter() { + if cancellation_token.is_cancelled() { + return Err(tonic::Status::aborted("Request aborted")); + } + for filter in &request.filters { + let mut ok = true; + + for filter in filter { + if !crate::utils::filter_match_session(session, filter)? 
{ + ok = false; + break; + } + } + + if ok { + sessions.push(session.clone()); + break; + } + } + } + + std::mem::drop(guard); + + match request.sort.field { + sessions::Field::Raw(raw_field) => { + match raw_field { + sessions::RawField::Unspecified => (), + sessions::RawField::SessionId => { + sessions.sort_by(|a, b| a.session_id.cmp(&b.session_id)) + } + sessions::RawField::Status => sessions.sort_by(|a, b| a.status.cmp(&b.status)), + sessions::RawField::ClientSubmission => { + sessions.sort_by(|a, b| a.client_submission.cmp(&b.client_submission)) + } + sessions::RawField::WorkerSubmission => { + sessions.sort_by(|a, b| a.worker_submission.cmp(&b.worker_submission)) + } + sessions::RawField::PartitionIds => { + sessions.sort_by(|a, b| a.partition_ids.cmp(&b.partition_ids)) + } + sessions::RawField::Options => { + return Err(tonic::Status::invalid_argument( + "Field Options is not sortable", + )); + } + sessions::RawField::CreatedAt => sessions + .sort_by(|a, b| crate::utils::cmp_timestamp(a.created_at, b.created_at)), + sessions::RawField::CancelledAt => sessions.sort_by(|a, b| { + crate::utils::cmp_timestamp(a.cancelled_at, b.cancelled_at) + }), + sessions::RawField::ClosedAt => sessions + .sort_by(|a, b| crate::utils::cmp_timestamp(a.closed_at, b.closed_at)), + sessions::RawField::PurgedAt => sessions + .sort_by(|a, b| crate::utils::cmp_timestamp(a.purged_at, b.purged_at)), + sessions::RawField::DeletedAt => sessions + .sort_by(|a, b| crate::utils::cmp_timestamp(a.deleted_at, b.deleted_at)), + sessions::RawField::Duration => { + sessions.sort_by(|a, b| crate::utils::cmp_duration(a.duration, b.duration)) + } + } + } + sessions::Field::TaskOption(task_option_field) => match task_option_field { + armonik::TaskOptionField::Unspecified => (), + armonik::TaskOptionField::MaxDuration => sessions.sort_by(|a, b| { + crate::utils::cmp_duration( + Some(a.default_task_options.max_duration), + Some(b.default_task_options.max_duration), + ) + }), + armonik::TaskOptionField::MaxRetries => sessions.sort_by(|a, b| { + a.default_task_options + .max_retries + .cmp(&b.default_task_options.max_retries) + }), + armonik::TaskOptionField::Priority => sessions.sort_by(|a, b| { + a.default_task_options + .priority + .cmp(&b.default_task_options.priority) + }), + armonik::TaskOptionField::PartitionId => sessions.sort_by(|a, b| { + a.default_task_options + .partition_id + .cmp(&b.default_task_options.partition_id) + }), + armonik::TaskOptionField::ApplicationName => sessions.sort_by(|a, b| { + a.default_task_options + .application_name + .cmp(&b.default_task_options.application_name) + }), + armonik::TaskOptionField::ApplicationVersion => sessions.sort_by(|a, b| { + a.default_task_options + .application_version + .cmp(&b.default_task_options.application_version) + }), + armonik::TaskOptionField::ApplicationNamespace => sessions.sort_by(|a, b| { + a.default_task_options + .application_namespace + .cmp(&b.default_task_options.application_namespace) + }), + armonik::TaskOptionField::ApplicationService => sessions.sort_by(|a, b| { + a.default_task_options + .application_service + .cmp(&b.default_task_options.application_service) + }), + armonik::TaskOptionField::ApplicationEngine => sessions.sort_by(|a, b| { + a.default_task_options + .engine_type + .cmp(&b.default_task_options.engine_type) + }), + }, + sessions::Field::TaskOptionGeneric(key) => { + sessions.sort_by(|a, b| { + a.default_task_options + .options + .get(&key) + .cmp(&b.default_task_options.options.get(&key)) + }); + } + } + + if 
matches!(request.sort.direction, armonik::SortDirection::Desc) { + sessions.reverse(); + } + + let total = sessions.len() as i32; + + Ok(armonik::sessions::list::Response { + sessions: sessions + .into_iter() + .skip(page * page_size) + .take(page_size) + .collect(), + page: request.page, + page_size: request.page_size, + total, + }) + } + + async fn get( + self: Arc, + request: sessions::get::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + impl_unary!(self.sessions, request, cancellation_token, session) + } + + async fn cancel( + self: Arc, + request: sessions::cancel::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + impl_unary!(self.sessions, request, cancellation_token, session) + } + + async fn create( + self: Arc, + request: sessions::create::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + let n = self.clusters.len(); + let i = self + .counter + .fetch_add(1, std::sync::atomic::Ordering::Relaxed); + + let mut err = None; + + for (_, cluster) in self.clusters.iter().cycle().skip(i % n).take(n) { + match run_with_cancellation!(cancellation_token, cluster.client()) { + Ok(client) => { + let response = run_with_cancellation!( + cancellation_token, + client.sessions().call(request.clone()) + ); + + match response { + Ok(response) => return Ok(response), + Err(error) => err = Some(error.into_status()), + } + } + Err(error) => err = Some(error.into_status()), + } + } + + match err { + Some(err) => Err(err), + None => Err(tonic::Status::internal("No cluster")), + } + } + + async fn pause( + self: Arc, + request: sessions::pause::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + impl_unary!(self.sessions, request, cancellation_token, session) + } + + async fn resume( + self: Arc, + request: sessions::resume::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + impl_unary!(self.sessions, request, cancellation_token, session) + } + + async fn close( + self: Arc, + request: sessions::close::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + impl_unary!(self.sessions, request, cancellation_token, session) + } + + async fn purge( + self: Arc, + request: sessions::purge::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + impl_unary!(self.sessions, request, cancellation_token, session) + } + + async fn delete( + self: Arc, + request: sessions::delete::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + let service = self.clone(); + let response = impl_unary!(service.sessions, request, cancellation_token, session)?; + + // If delete is successful, remove the session from the list + let mut guard = + crate::utils::run_with_cancellation!(cancellation_token, self.mapping_session.write()); + + guard.remove(&response.session.session_id); + + Ok(response) + } + + async fn stop_submission( + self: Arc, + request: sessions::stop_submission::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + impl_unary!(self.sessions, request, cancellation_token, session) + } +} diff --git a/load-balancer/src/service/submitter.rs b/load-balancer/src/service/submitter.rs new file mode 100644 index 0000000..521bd20 --- /dev/null +++ b/load-balancer/src/service/submitter.rs @@ -0,0 +1,472 @@ 
+#![allow(deprecated)] + +use std::{collections::HashMap, sync::Arc}; + +use armonik::{ + reexports::{tokio, tokio_util, tonic}, + server::SubmitterService, + submitter, +}; +use futures::StreamExt as _; + +use crate::utils::{impl_unary, run_with_cancellation, IntoStatus}; + +use super::Service; + +impl SubmitterService for Service { + async fn get_service_configuration( + self: Arc, + _request: submitter::get_service_configuration::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + log::warn!("SubmitterService::GetServiceConfiguration is deprecated, please use ResultsService::GetServiceConfiguration instead"); + + crate::utils::run_with_cancellation! { + use cancellation_token; + + let mut min = 1 << 24; + + for (_, cluster) in self.clusters.iter() { + let conf = cluster + .client() + .await + .map_err(IntoStatus::into_status)? + .results() + .get_service_configuration() + .await + .map_err(IntoStatus::into_status)?; + + min = min.min(conf.data_chunk_max_size); + } + + Ok(submitter::get_service_configuration::Response { + data_chunk_max_size: min, + }) + } + } + + async fn create_session( + self: Arc, + request: submitter::create_session::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + log::warn!("SubmitterService::CreateSession is deprecated, please use SessionsService::CreateSession instead"); + + let n = self.clusters.len(); + let i = self + .counter + .fetch_add(1, std::sync::atomic::Ordering::Relaxed); + + let mut err = None; + + for (_, cluster) in self.clusters.iter().cycle().skip(i % n).take(n) { + match run_with_cancellation!(cancellation_token, cluster.client()) { + Ok(client) => { + let response = run_with_cancellation!( + cancellation_token, + client.submitter().call(request.clone()) + ); + + match response { + Ok(response) => return Ok(response), + Err(error) => err = Some(error.into_status()), + } + } + Err(error) => err = Some(error.into_status()), + } + } + + match err { + Some(err) => Err(err), + None => Err(tonic::Status::internal("No cluster")), + } + } + + async fn cancel_session( + self: Arc, + request: submitter::cancel_session::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + log::warn!("SubmitterService::CancelSession is deprecated, please use SessionsService::CancelSession instead"); + + impl_unary!(self.submitter, request, cancellation_token, session) + } + + async fn list_tasks( + self: Arc, + request: submitter::list_tasks::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + log::warn!( + "SubmitterService::ListTasks is deprecated, please use TasksService::ListTasks instead" + ); + + run_with_cancellation! { + use cancellation_token; + + let mut task_ids = Vec::new(); + + for cluster in self.clusters.values() { + let response = cluster + .client() + .await + .map_err(IntoStatus::into_status)? + .submitter() + .call(request.clone()) + .await + .map_err(IntoStatus::into_status)?; + + task_ids.extend(response.task_ids); + } + + Ok(submitter::list_tasks::Response { task_ids }) + } + } + + async fn list_sessions( + self: Arc, + request: submitter::list_sessions::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + log::warn!("SubmitterService::ListSessions is deprecated, please use SessionsService::ListSessions instead"); + + run_with_cancellation! 
{ + use cancellation_token; + + let mut session_ids = Vec::new(); + + for cluster in self.clusters.values() { + let response = cluster + .client() + .await + .map_err(IntoStatus::into_status)? + .submitter() + .call(request.clone()) + .await + .map_err(IntoStatus::into_status)?; + + session_ids.extend(response.session_ids); + } + + Ok(submitter::list_sessions::Response { session_ids }) + } + } + + async fn count_tasks( + self: Arc, + request: submitter::count_tasks::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + log::warn!( + "SubmitterService::CountTasks is deprecated, please use TasksService::CountTasksByStatus instead" + ); + + run_with_cancellation! { + use cancellation_token; + + let mut status_count = HashMap::::new(); + + for cluster in self.clusters.values() { + let response = cluster + .client() + .await + .map_err(IntoStatus::into_status)? + .submitter() + .call(request.clone()) + .await + .map_err(IntoStatus::into_status)? + .values; + + for (status, count) in response { + *status_count.entry(status).or_default() += count; + } + } + + Ok(armonik::submitter::count_tasks::Response { + values: status_count, + }) + } + } + + async fn try_get_task_output( + self: Arc, + request: submitter::try_get_task_output::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + log::warn!( + "SubmitterService::TryGetTaskOutput is deprecated, please use TasksService::GetTask instead" + ); + crate::utils::impl_unary!(self.submitter, request, cancellation_token, session) + } + + async fn wait_for_availability( + self: Arc, + request: submitter::wait_for_availability::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + log::warn!("SubmitterService::WaitForAvailability is deprecated, please use EventsService::GetEvents instead"); + crate::utils::impl_unary!(self.submitter, request, cancellation_token, session) + } + + async fn wait_for_completion( + self: Arc, + request: submitter::wait_for_completion::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + log::warn!("SubmitterService::WaitForCompletion is deprecated, please use EventsService::GetEvents instead"); + run_with_cancellation! { + use cancellation_token.clone(); + + let mut status_count = HashMap::new(); + + let mut wait_all = self + .clusters + .values() + .map(|cluster| async { + cluster + .client() + .await + .map_err(IntoStatus::into_status)? 
+ .submitter() + .call(request.clone()) + .await + .map_err(IntoStatus::into_status) + }) + .collect::>(); + + while let Some(completion) = wait_all.next().await { + let mut is_error = false; + let mut is_cancelled = false; + for (status, count) in completion?.values { + match status { + armonik::TaskStatus::Error => is_error = true, + armonik::TaskStatus::Cancelling | armonik::TaskStatus::Cancelled => { + is_cancelled = true + } + _ => (), + } + *status_count.entry(status).or_default() += count; + } + + if (is_error && request.stop_on_first_task_error) + || (is_cancelled && request.stop_on_first_task_cancellation) + { + std::mem::drop(wait_all); + + return self + .count_tasks( + armonik::submitter::count_tasks::Request { + filter: request.filter, + }, + cancellation_token, + ) + .await; + } + } + + Ok(armonik::submitter::wait_for_completion::Response { + values: status_count, + }) + } + } + + async fn cancel_tasks( + self: Arc, + request: submitter::cancel_tasks::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + log::warn!( + "SubmitterService::CancelTasks is deprecated, please use TasksService::CancelTasks instead" + ); + run_with_cancellation! { + use cancellation_token; + + for cluster in self.clusters.values() { + cluster + .client() + .await + .map_err(IntoStatus::into_status)? + .submitter() + .call(request.clone()) + .await + .map_err(IntoStatus::into_status)?; + } + + Ok(submitter::cancel_tasks::Response { }) + } + } + + async fn task_status( + self: Arc, + request: submitter::task_status::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + log::warn!( + "SubmitterService::TaskStatus is deprecated, please use TasksService::ListTasks instead" + ); + run_with_cancellation! { + use cancellation_token; + + let mut task_status = HashMap::::new(); + + for cluster in self.clusters.values() { + let response = cluster + .client() + .await + .map_err(IntoStatus::into_status)? + .submitter() + .call(request.clone()) + .await + .map_err(IntoStatus::into_status)? + .statuses; + + for (task_id, status) in response { + task_status.insert(task_id, status); + } + } + + Ok(submitter::task_status::Response { + statuses: task_status + }) + } + } + + async fn result_status( + self: Arc, + request: submitter::result_status::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + log::warn!("SubmitterService::ResultStatus is deprecated, please use ResultsService::ListResults instead"); + crate::utils::impl_unary!(self.submitter, request, cancellation_token, session) + } + + async fn try_get_result( + self: Arc, + request: submitter::try_get_result::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> Result< + impl tonic::codegen::tokio_stream::Stream< + Item = Result, + > + Send, + tonic::Status, + > { + log::warn!( + "SubmitterService::TryGetResult is deprecated, please use ResultsService::DownloadResultData instead" + ); + crate::utils::run_with_cancellation! { + use cancellation_token.clone(); + + let Some(cluster) = self.get_cluster_from_session(&request.session_id).await? else { + return Err(tonic::Status::not_found(format!( + "Session {} was not found", + request.session_id + ))); + }; + + let mut stream = cluster + .client() + .await + .map_err(IntoStatus::into_status)? + .submitter() + .try_get_result(request.session_id, request.result_id) + .await + .map_err(IntoStatus::into_status)?; + + Ok(async_stream::try_stream! 
{ + while let Some(Some(item)) = cancellation_token.run_until_cancelled(stream.next()).await { + let item = item.map_err(IntoStatus::into_status)?; + yield item; + } + }) + } + } + + async fn create_small_tasks( + self: Arc, + request: submitter::create_tasks::SmallRequest, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> Result { + log::warn!( + "SubmitterService::CreateSmallTasks is deprecated, please use a combination of ResultsService::CreateResults and TasksService::SubmitTasks instead" + ); + crate::utils::impl_unary!(self.submitter, request, cancellation_token, session) + } + + async fn create_large_tasks( + self: Arc, + request: impl tonic::codegen::tokio_stream::Stream< + Item = Result, + > + Send + + 'static, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> Result { + log::warn!( + "SubmitterService::CreateLargeTasks is deprecated, please use a combination of ResultsService::CreateResults and TasksService::SubmitTasks instead" + ); + let mut request = Box::pin(request); + + match crate::utils::run_with_cancellation!(cancellation_token, request.next()) { + Some(Ok(submitter::create_tasks::LargeRequest::InitRequest( + submitter::create_tasks::InitRequest { + session_id, + task_options, + }, + ))) => { + let Some(cluster) = self.get_cluster_from_session(&session_id).await? else { + return Err(tonic::Status::not_found(format!( + "Session {} was not found", + session_id + ))); + }; + + let (tx, rx) = tokio::sync::oneshot::channel(); + let mut tx = Some(tx); + + let stream = async_stream::stream! { + yield submitter::create_tasks::LargeRequest::InitRequest( + submitter::create_tasks::InitRequest { + session_id: session_id.clone(), + task_options: task_options.clone(), + }, + ); + + while let Some(item) = request.next().await { + match item { + Ok(item) => yield item, + Err(err) => { + if let Some(tx) = tx.take() { + _ = tx.send(err); + } + break; + } + } + } + }; + + let mut submitter_client = cluster + .client() + .await + .map_err(IntoStatus::into_status)? + .submitter(); + + tokio::select! { + result = submitter_client.create_large_tasks(stream) => match result { + Ok(result) => Ok(armonik::submitter::create_tasks::Response::Status(result)), + Err(err) => Err(err.into_status()), + }, + Ok(invalid) = rx => { + Err(invalid) + } + _ = cancellation_token.cancelled() => Err(tonic::Status::aborted("Cancellation token has been triggered")) + } + } + Some(Ok(_)) => Err(tonic::Status::invalid_argument( + "Could not create tasks, data sent before identifier", + )), + Some(Err(err)) => Err(err), + None => Err(tonic::Status::invalid_argument( + "Could not create tasks, no identifier nor data sent", + )), + } + } +} diff --git a/load-balancer/src/service/tasks.rs b/load-balancer/src/service/tasks.rs new file mode 100644 index 0000000..b74e6f3 --- /dev/null +++ b/load-balancer/src/service/tasks.rs @@ -0,0 +1,260 @@ +use std::{collections::HashMap, sync::Arc}; + +use armonik::{ + reexports::{tokio_util, tonic}, + server::TasksService, + tasks, +}; + +use crate::utils::{run_with_cancellation, IntoStatus}; + +use super::Service; + +impl TasksService for Service { + async fn list( + self: Arc, + request: tasks::list::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + run_with_cancellation! 
{ + use cancellation_token; + + let mut requested_sessions = Vec::new(); + + for and in &request.filters.or { + let mut has_check = false; + + for field in and { + if let armonik::tasks::filter::Field { + field: armonik::tasks::Field::Summary(armonik::tasks::SummaryField::SessionId), + condition: + armonik::tasks::filter::Condition::String(armonik::FilterString { + value, + operator: armonik::FilterStringOperator::Equal, + }), + } = field + { + requested_sessions.push(value.as_str()); + has_check = true; + } + } + + if !has_check { + return Err(armonik::reexports::tonic::Status::invalid_argument(String::from("Cannot determine the cluster from the filter, missing condition on session_id"))); + } + } + + let mut sessions = self + .get_cluster_from_sessions(&requested_sessions) + .await? + .into_iter(); + + let Some((cluster, _)) = sessions.next() else { + return Ok(tasks::list::Response { + tasks: Vec::new(), + page: request.page, + page_size: request.page_size, + total: 0, + }); + }; + + if sessions.next().is_some() { + return Err(tonic::Status::invalid_argument( + "Cannot determine the cluster from the filter, multiple clusters targeted", + )); + } + + match cluster + .client() + .await + .map_err(IntoStatus::into_status)? + .tasks() + .call(request) + .await + { + Ok(response) => Ok(response), + Err(err) => match err { + armonik::client::RequestError::Grpc { source, .. } => Err(*source), + err => Err(tonic::Status::internal(err.to_string())), + }, + } + } + } + + async fn list_detailed( + self: Arc, + request: tasks::list_detailed::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + run_with_cancellation! { + use cancellation_token; + + let mut requested_sessions = Vec::new(); + + for and in &request.filters.or { + let mut has_check = false; + + for field in and { + if let armonik::tasks::filter::Field { + field: armonik::tasks::Field::Summary(armonik::tasks::SummaryField::SessionId), + condition: + armonik::tasks::filter::Condition::String(armonik::FilterString { + value, + operator: armonik::FilterStringOperator::Equal, + }), + } = field + { + requested_sessions.push(value.as_str()); + has_check = true; + } + } + + if !has_check { + return Err(armonik::reexports::tonic::Status::invalid_argument(String::from("Cannot determine the cluster from the filter, missing condition on session_id"))); + } + } + + let mut sessions = self + .get_cluster_from_sessions(&requested_sessions) + .await? + .into_iter(); + + let Some((cluster, _)) = sessions.next() else { + return Ok(tasks::list_detailed::Response { + tasks: Vec::new(), + page: request.page, + page_size: request.page_size, + total: 0, + }); + }; + + if sessions.next().is_some() { + return Err(tonic::Status::invalid_argument( + "Cannot determine the cluster from the filter, multiple clusters targeted", + )); + } + + match cluster + .client() + .await + .map_err(IntoStatus::into_status)? + .tasks() + .call(request) + .await + { + Ok(response) => Ok(response), + Err(err) => match err { + armonik::client::RequestError::Grpc { source, .. 
} => Err(*source), + err => Err(tonic::Status::internal(err.to_string())), + }, + } + } + } + + async fn get( + self: Arc, + request: tasks::get::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + crate::utils::impl_unary!(self.tasks, request, cancellation_token, task) + } + + async fn cancel( + self: Arc, + request: tasks::cancel::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + run_with_cancellation! { + use cancellation_token; + + let mut tasks = Vec::new(); + for cluster in self.clusters.values() { + tasks.extend( + cluster + .client() + .await + .map_err(IntoStatus::into_status)? + .tasks() + .call(request.clone()) + .await + .map_err(IntoStatus::into_status)? + .tasks + .into_iter(), + ); + } + + Ok(tasks::cancel::Response { tasks }) + } + } + + async fn get_result_ids( + self: Arc, + request: tasks::get_result_ids::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + run_with_cancellation! { + use cancellation_token; + + let mut task_results = HashMap::>::new(); + for cluster in self.clusters.values() { + let response = cluster + .client() + .await + .map_err(IntoStatus::into_status)? + .tasks() + .call(request.clone()) + .await + .map_err(IntoStatus::into_status)?; + + for (task_id, result_ids) in response.task_results { + task_results.entry(task_id).or_default().extend(result_ids); + } + } + + Ok(tasks::get_result_ids::Response { task_results }) + } + } + + async fn count_status( + self: Arc, + request: tasks::count_status::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + run_with_cancellation! { + use cancellation_token; + + let mut status = HashMap::::new(); + + for cluster in self.clusters.values() { + let response = cluster + .client() + .await + .map_err(IntoStatus::into_status)? + .tasks() + .call(request.clone()) + .await + .map_err(IntoStatus::into_status)? + .status; + + for count in response { + *status.entry(count.status).or_default() += count.count; + } + } + + Ok(armonik::tasks::count_status::Response { + status: status + .into_iter() + .map(|(status, count)| armonik::StatusCount { status, count }) + .collect(), + }) + } + } + + async fn submit( + self: Arc, + request: tasks::submit::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + crate::utils::impl_unary!(self.tasks, request, cancellation_token, session) + } +} diff --git a/load-balancer/src/service/versions.rs b/load-balancer/src/service/versions.rs new file mode 100644 index 0000000..8f8adfe --- /dev/null +++ b/load-balancer/src/service/versions.rs @@ -0,0 +1,52 @@ +use std::sync::Arc; + +use armonik::{ + reexports::{tokio_util, tonic}, + server::VersionsService, + versions, +}; + +use crate::utils::{run_with_cancellation, IntoStatus}; + +use super::Service; + +impl VersionsService for Service { + async fn list( + self: Arc, + _request: versions::list::Request, + cancellation_token: tokio_util::sync::CancellationToken, + ) -> std::result::Result { + run_with_cancellation! { + use cancellation_token; + + let mut cluster_versions = Vec::new(); + + for cluster in self.clusters.values() { + let versions = cluster + .client() + .await + .map_err(IntoStatus::into_status)? 
+ .versions() + .list() + .await + .map_err(IntoStatus::into_status)?; + + cluster_versions.push(versions); + } + + let mut cluster_versions = cluster_versions.into_iter(); + + let Some(versions) = cluster_versions.next() else { + return Err(tonic::Status::internal("No cluster")); + }; + + for other in cluster_versions { + if versions != other { + return Err(tonic::Status::internal("Mismatch between clusters")); + } + } + + Ok(versions) + } + } +} diff --git a/load-balancer/src/utils.rs b/load-balancer/src/utils.rs new file mode 100644 index 0000000..4a0104c --- /dev/null +++ b/load-balancer/src/utils.rs @@ -0,0 +1,412 @@ +use std::cmp::Ordering; + +use armonik::reexports::{prost_types, tonic::Status}; + +macro_rules! run_with_cancellation { + (use $ct:expr; $($body:tt)*) => { + crate::utils::run_with_cancellation!($ct, async move { $($body)* }) + }; + ($ct:expr, $fut:expr) => { + match $ct.run_until_cancelled($fut).await { + Some(res) => res, + None => { + Err(tonic::Status::aborted("Cancellation token has been triggered"))? + } + } + } +} +pub(crate) use run_with_cancellation; + +macro_rules! impl_unary { + ($self:ident.$service:ident, $request:ident, $ct:ident, session) => { + crate::utils::impl_unary!($self.$service, $request, $ct, {get_cluster_from_session, session_id, "Session {} was not found"}) + }; + ($self:ident.$service:ident, $request:ident, $ct:ident, result) => { + crate::utils::impl_unary!($self.$service, $request, $ct, {get_cluster_from_result, result_id, "Result {} was not found"}) + }; + ($self:ident.$service:ident, $request:ident, $ct:ident, task) => { + crate::utils::impl_unary!($self.$service, $request, $ct, {get_cluster_from_task, task_id, "Task {} was not found"}) + }; + + ($self:ident.$service:ident, $request:ident, $ct:ident, {$get_cluster:ident, $id:ident, $msg:literal}) => { + crate::utils::run_with_cancellation! { + use $ct; + + let Some(cluster) = $self.$get_cluster(&$request.$id).await? else { + return Err(tonic::Status::not_found(format!( + $msg, + $request.$id + ))); + }; + + cluster + .client() + .await + .map_err(crate::utils::IntoStatus::into_status)? + .$service() + .call($request) + .await + .map_err(crate::utils::IntoStatus::into_status) + } + }; +} +pub(crate) use impl_unary; + +pub trait IntoStatus { + fn into_status(self) -> Status; +} + +impl IntoStatus for armonik::client::RequestError { + fn into_status(self) -> Status { + match self { + armonik::client::RequestError::Grpc { source, .. 
} => *source, + err => Status::internal(err.to_string()), + } + } +} + +impl IntoStatus for armonik::client::ConnectionError { + fn into_status(self) -> Status { + Status::unavailable(self.to_string()) + } +} + +impl IntoStatus for armonik::client::ConfigError { + fn into_status(self) -> Status { + Status::internal(self.to_string()) + } +} + +impl IntoStatus for armonik::client::ReadEnvError { + fn into_status(self) -> Status { + Status::internal(self.to_string()) + } +} + +pub(crate) fn filter_match_string(value: &str, condition: &armonik::FilterString) -> bool { + match condition.operator { + armonik::FilterStringOperator::Equal => condition.value == value, + armonik::FilterStringOperator::NotEqual => condition.value != value, + armonik::FilterStringOperator::Contains => value.contains(&condition.value), + armonik::FilterStringOperator::NotContains => !value.contains(&condition.value), + armonik::FilterStringOperator::StartsWith => value.starts_with(&condition.value), + armonik::FilterStringOperator::EndsWith => value.ends_with(&condition.value), + } +} + +pub(crate) fn filter_match_number(value: i64, condition: &armonik::FilterNumber) -> bool { + match condition.operator { + armonik::FilterNumberOperator::Equal => value == condition.value, + armonik::FilterNumberOperator::NotEqual => value != condition.value, + armonik::FilterNumberOperator::LessThan => value < condition.value, + armonik::FilterNumberOperator::LessThanOrEqual => value <= condition.value, + armonik::FilterNumberOperator::GreaterThanOrEqual => value >= condition.value, + armonik::FilterNumberOperator::GreaterThan => value > condition.value, + } +} + +pub(crate) fn filter_match_bool(value: bool, condition: &armonik::FilterBoolean) -> bool { + match condition.operator { + armonik::FilterBooleanOperator::Is => value == condition.value, + } +} + +pub(crate) fn filter_match_array( + value: impl IntoIterator>, + condition: &armonik::FilterArray, +) -> bool { + let contains = value.into_iter().any(|s| s.as_ref() == condition.value); + match condition.operator { + armonik::FilterArrayOperator::Contains => contains, + armonik::FilterArrayOperator::NotContains => !contains, + } +} + +pub(crate) fn filter_match_status( + value: &T, + condition: &armonik::FilterStatus, +) -> bool { + match condition.operator { + armonik::FilterStatusOperator::Equal => *value == condition.value, + armonik::FilterStatusOperator::NotEqual => *value != condition.value, + } +} + +pub(crate) fn filter_match_duration( + value: Option<prost_types::Duration>, + condition: &armonik::FilterDuration, +) -> bool { + let Some(value) = value else { + return matches!( + condition.operator, + armonik::FilterDurationOperator::NotEqual + ); + }; + let prost_types::Duration { seconds, nanos } = value; + let lhs = (seconds, nanos); + + let prost_types::Duration { seconds, nanos } = condition.value; + let rhs = (seconds, nanos); + + match condition.operator { + armonik::FilterDurationOperator::Equal => lhs == rhs, + armonik::FilterDurationOperator::NotEqual => lhs != rhs, + armonik::FilterDurationOperator::ShorterThan => lhs < rhs, + armonik::FilterDurationOperator::ShorterThanOrEqual => lhs <= rhs, + armonik::FilterDurationOperator::LongerThanOrEqual => lhs >= rhs, + armonik::FilterDurationOperator::LongerThan => lhs > rhs, + } +} + +pub(crate) fn filter_match_date( + value: Option<prost_types::Timestamp>, + condition: &armonik::FilterDate, +) -> bool { + let Some(value) = value else { + return matches!(condition.operator, armonik::FilterDateOperator::NotEqual); + }; + + let prost_types::Timestamp { seconds, nanos } =
value; + let lhs = (seconds, nanos); + + let prost_types::Timestamp { seconds, nanos } = condition.value; + let rhs = (seconds, nanos); + + match condition.operator { + armonik::FilterDateOperator::Equal => lhs == rhs, + armonik::FilterDateOperator::NotEqual => lhs != rhs, + armonik::FilterDateOperator::Before => lhs < rhs, + armonik::FilterDateOperator::BeforeOrEqual => lhs <= rhs, + armonik::FilterDateOperator::AfterOrEqual => lhs >= rhs, + armonik::FilterDateOperator::After => lhs > rhs, + } +} + +pub(crate) fn filter_match_session( + value: &armonik::sessions::Raw, + condition: &armonik::sessions::filter::Field, +) -> Result { + match &condition.field { + armonik::sessions::Field::Raw(raw_field) => match raw_field { + armonik::sessions::RawField::Unspecified => { + Err(Status::invalid_argument("Filter field is not set")) + } + armonik::sessions::RawField::SessionId => match &condition.condition { + armonik::sessions::filter::Condition::String(filter_string) => { + Ok(filter_match_string(&value.session_id, filter_string)) + } + condition => Err(Status::invalid_argument(format!( + "Condition {condition:?} is not valid for the field SessionId" + ))), + }, + armonik::sessions::RawField::Status => match &condition.condition { + armonik::sessions::filter::Condition::Status(filter_status) => { + Ok(filter_match_status(&value.status, filter_status)) + } + condition => Err(Status::invalid_argument(format!( + "Condition {condition:?} is not valid for the field Status" + ))), + }, + armonik::sessions::RawField::ClientSubmission => match &condition.condition { + armonik::sessions::filter::Condition::Boolean(filter_bool) => { + Ok(filter_match_bool(value.client_submission, filter_bool)) + } + condition => Err(Status::invalid_argument(format!( + "Condition {condition:?} is not valid for the field ClientSubmission" + ))), + }, + armonik::sessions::RawField::WorkerSubmission => match &condition.condition { + armonik::sessions::filter::Condition::Boolean(filter_bool) => { + Ok(filter_match_bool(value.worker_submission, filter_bool)) + } + condition => Err(Status::invalid_argument(format!( + "Condition {condition:?} is not valid for the field WorkerSubmission" + ))), + }, + armonik::sessions::RawField::PartitionIds => match &condition.condition { + armonik::sessions::filter::Condition::Array(filter_array) => { + Ok(filter_match_array(&value.partition_ids, filter_array)) + } + condition => Err(Status::invalid_argument(format!( + "Condition {condition:?} is not valid for the field PartitionIds" + ))), + }, + armonik::sessions::RawField::Options => Err(Status::invalid_argument( + "Filter field Options is not valid for a RawField filter", + )), + armonik::sessions::RawField::CreatedAt => match &condition.condition { + armonik::sessions::filter::Condition::Date(filter_date) => { + Ok(filter_match_date(value.created_at, filter_date)) + } + condition => Err(Status::invalid_argument(format!( + "Condition {condition:?} is not valid for the field CreatedAt" + ))), + }, + armonik::sessions::RawField::CancelledAt => match &condition.condition { + armonik::sessions::filter::Condition::Date(filter_date) => { + Ok(filter_match_date(value.cancelled_at, filter_date)) + } + condition => Err(Status::invalid_argument(format!( + "Condition {condition:?} is not valid for the field CancelledAt" + ))), + }, + armonik::sessions::RawField::ClosedAt => match &condition.condition { + armonik::sessions::filter::Condition::Date(filter_date) => { + Ok(filter_match_date(value.closed_at, filter_date)) + } + condition => 
Err(Status::invalid_argument(format!( + "Condition {condition:?} is not valid for the field ClosedAt" + ))), + }, + armonik::sessions::RawField::PurgedAt => match &condition.condition { + armonik::sessions::filter::Condition::Date(filter_date) => { + Ok(filter_match_date(value.purged_at, filter_date)) + } + condition => Err(Status::invalid_argument(format!( + "Condition {condition:?} is not valid for the field PurgedAt" + ))), + }, + armonik::sessions::RawField::DeletedAt => match &condition.condition { + armonik::sessions::filter::Condition::Date(filter_date) => { + Ok(filter_match_date(value.deleted_at, filter_date)) + } + condition => Err(Status::invalid_argument(format!( + "Condition {condition:?} is not valid for the field DeletedAt" + ))), + }, + armonik::sessions::RawField::Duration => match &condition.condition { + armonik::sessions::filter::Condition::Duration(filter_duration) => { + Ok(filter_match_duration(value.duration, filter_duration)) + } + condition => Err(Status::invalid_argument(format!( + "Condition {condition:?} is not valid for the field Duration" + ))), + }, + }, + armonik::sessions::Field::TaskOption(task_option_field) => match task_option_field { + armonik::TaskOptionField::Unspecified => { + Err(Status::invalid_argument("Filter field is not set")) + } + armonik::TaskOptionField::MaxDuration => match &condition.condition { + armonik::sessions::filter::Condition::Duration(filter_duration) => { + Ok(filter_match_duration(Some(value.default_task_options.max_duration), filter_duration)) + } + condition => Err(Status::invalid_argument(format!( + "Condition {condition:?} is not valid for the field DefaultTaskOptions.MaxDuration" + ))), + }, + armonik::TaskOptionField::MaxRetries => match &condition.condition { + armonik::sessions::filter::Condition::Number(filter_number) => { + Ok(filter_match_number(value.default_task_options.max_retries as i64, filter_number)) + } + condition => Err(Status::invalid_argument(format!( + "Condition {condition:?} is not valid for the field DefaultTaskOptions.MaxRetries" + ))), + }, + armonik::TaskOptionField::Priority => match &condition.condition { + armonik::sessions::filter::Condition::Number(filter_number) => { + Ok(filter_match_number(value.default_task_options.priority as i64, filter_number)) + } + condition => Err(Status::invalid_argument(format!( + "Condition {condition:?} is not valid for the field DefaultTaskOptions.Priority" + ))), + }, + armonik::TaskOptionField::PartitionId => match &condition.condition { + armonik::sessions::filter::Condition::String(filter_string) => { + Ok(filter_match_string(&value.default_task_options.partition_id, filter_string)) + } + condition => Err(Status::invalid_argument(format!( + "Condition {condition:?} is not valid for the field DefaultTaskOptions.PartitionId" + ))), + }, + armonik::TaskOptionField::ApplicationName => match &condition.condition { + armonik::sessions::filter::Condition::String(filter_string) => { + Ok(filter_match_string(&value.default_task_options.application_name, filter_string)) + } + condition => Err(Status::invalid_argument(format!( + "Condition {condition:?} is not valid for the field DefaultTaskOptions.ApplicationName" + ))), + }, + armonik::TaskOptionField::ApplicationVersion => match &condition.condition { + armonik::sessions::filter::Condition::String(filter_string) => { + Ok(filter_match_string(&value.default_task_options.application_version, filter_string)) + } + condition => Err(Status::invalid_argument(format!( + "Condition {condition:?} is not valid for the field 
DefaultTaskOptions.ApplicationVersion" + ))), + }, + armonik::TaskOptionField::ApplicationNamespace => match &condition.condition { + armonik::sessions::filter::Condition::String(filter_string) => { + Ok(filter_match_string(&value.default_task_options.application_namespace, filter_string)) + } + condition => Err(Status::invalid_argument(format!( + "Condition {condition:?} is not valid for the field DefaultTaskOptions.ApplicationNamespace" + ))), + }, + armonik::TaskOptionField::ApplicationService => match &condition.condition { + armonik::sessions::filter::Condition::String(filter_string) => { + Ok(filter_match_string(&value.default_task_options.application_service, filter_string)) + } + condition => Err(Status::invalid_argument(format!( + "Condition {condition:?} is not valid for the field DefaultTaskOptions.ApplicationService" + ))), + }, + armonik::TaskOptionField::ApplicationEngine => match &condition.condition { + armonik::sessions::filter::Condition::String(filter_string) => { + Ok(filter_match_string(&value.default_task_options.engine_type, filter_string)) + } + condition => Err(Status::invalid_argument(format!( + "Condition {condition:?} is not valid for the field DefaultTaskOptions.ApplicationEngine" + ))), + }, + }, + armonik::sessions::Field::TaskOptionGeneric(field) => match &condition.condition { + armonik::sessions::filter::Condition::String(filter_string) => { + if let Some(value) = value.default_task_options.options.get(field) { + Ok(filter_match_string(value, filter_string)) + } else { + Ok(false) + } + } + condition => Err(Status::invalid_argument(format!( + "Condition {condition:?} is not valid for the field DefaultTaskOptions.Options" + ))), + }, + } +} + +pub(crate) fn cmp_duration( + lhs: Option<prost_types::Duration>, + rhs: Option<prost_types::Duration>, +) -> Ordering { + match (lhs, rhs) { + (None, None) => Ordering::Equal, + (None, Some(_)) => Ordering::Less, + (Some(_), None) => Ordering::Greater, + (Some(lhs), Some(rhs)) => { + let cmp = lhs.seconds.cmp(&rhs.seconds); + if !cmp.is_eq() { + return cmp; + } + lhs.nanos.cmp(&rhs.nanos) + } + } +} + +pub(crate) fn cmp_timestamp( + lhs: Option<prost_types::Timestamp>, + rhs: Option<prost_types::Timestamp>, +) -> Ordering { + match (lhs, rhs) { + (None, None) => Ordering::Equal, + (None, Some(_)) => Ordering::Less, + (Some(_), None) => Ordering::Greater, + (Some(lhs), Some(rhs)) => { + let cmp = lhs.seconds.cmp(&rhs.seconds); + if !cmp.is_eq() { + return cmp; + } + lhs.nanos.cmp(&rhs.nanos) + } + } +} From 6c85db0d4e78198b31c79b09dff51d5ca5484f61 Mon Sep 17 00:00:00 2001 From: Florian Lemaitre Date: Fri, 13 Dec 2024 18:09:26 +0100 Subject: [PATCH 02/12] CLI --- Cargo.lock | 19 ++++ load-balancer/Cargo.toml | 3 + load-balancer/src/main.rs | 112 ++++++++++++++++++++++++++++--- load-balancer/src/service/mod.rs | 3 +- 4 files changed, 125 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4a3868d..c4a6947 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -687,6 +687,16 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "eyre" +version = "0.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" +dependencies = [ + "indenter", + "once_cell", +] + [[package]] name = "fastrand" version = "2.2.0" @@ -1173,6 +1183,12 @@ dependencies = [ "icu_properties", ] +[[package]] +name = "indenter" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" + [[package]] name =
"indexmap" version = "1.9.3" @@ -1446,10 +1462,13 @@ version = "0.1.0" dependencies = [ "armonik", "async-stream", + "clap", "config", "env_logger", + "eyre", "futures", "log", + "serde", "tokio", "tower-http", ] diff --git a/load-balancer/Cargo.toml b/load-balancer/Cargo.toml index c76b398..e3f5c23 100644 --- a/load-balancer/Cargo.toml +++ b/load-balancer/Cargo.toml @@ -12,3 +12,6 @@ log = "0.4" env_logger = "0.11" tokio = { version = "1.0", features = ["full"] } tower-http = { version = "0.5", features = ["trace"] } +serde = { version = "1.0", features = ["derive"] } +clap = { version = "4.5", features = ["derive"] } +eyre = "0.6" diff --git a/load-balancer/src/main.rs b/load-balancer/src/main.rs index 97d8e5f..303a852 100644 --- a/load-balancer/src/main.rs +++ b/load-balancer/src/main.rs @@ -1,6 +1,8 @@ -use std::sync::Arc; +use std::{collections::HashMap, net::SocketAddr, sync::Arc}; use armonik::reexports::tonic; +use clap::Parser; +use serde::{Deserialize, Serialize}; use tower_http::trace::TraceLayer; pub mod cluster; @@ -8,6 +10,67 @@ pub mod ref_guard; pub mod service; pub mod utils; +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub struct ClusterConfig { + /// Endpoint for sending requests + pub endpoint: String, + /// Path to the certificate file in pem format + #[serde(default)] + pub cert_pem: String, + /// Path to the key file in pem format + #[serde(default)] + pub key_pem: String, + /// Path to the Certificate Authority file in pem format + #[serde(default)] + pub ca_cert: String, + /// Allow unsafe connections to the endpoint (without SSL), defaults to false + #[serde(default)] + pub allow_unsafe_connection: bool, + /// Override the endpoint name during SSL verification + #[serde(default)] + pub override_target_name: String, +} + +impl From for armonik::client::ClientConfigArgs { + fn from( + ClusterConfig { + endpoint, + cert_pem, + key_pem, + ca_cert, + allow_unsafe_connection, + override_target_name, + }: ClusterConfig, + ) -> Self { + armonik::client::ClientConfigArgs { + endpoint, + cert_pem, + key_pem, + ca_cert, + allow_unsafe_connection, + override_target_name, + } + } +} + +#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)] +pub struct LbConfig { + pub clusters: HashMap, + #[serde(default)] + pub listen_ip: String, + #[serde(default)] + pub listen_port: u16, + #[serde(default)] + pub refresh_delay: String, +} + +#[derive(Debug, Clone, Default, PartialEq, Eq, Parser)] +pub struct Cli { + /// Filename of the config file + #[arg(short, long, default_value = "")] + pub config: String, +} + /// Wait for termination signal (either SIGINT or SIGTERM) #[cfg(unix)] async fn wait_terminate() { @@ -75,16 +138,40 @@ async fn wait_terminate() { } #[tokio::main] -async fn main() { +async fn main() -> Result<(), eyre::Report> { env_logger::init(); - let service = Arc::new( - service::Service::new([( - String::from("A"), - cluster::Cluster::new(armonik::ClientConfig::from_env().unwrap()), - )]) - .await, - ); + let cli = Cli::parse(); + + let mut conf = config::Config::builder() + .add_source( + config::Environment::with_prefix("LoadBalancer") + .convert_case(config::Case::Snake) + .separator("__"), + ) + .set_default("listen_ip", "0.0.0.0")? + .set_default("listen_port", 8081)? 
+ .set_default("refresh_delay", "10")?; + + if !cli.config.is_empty() { + conf = conf.add_source(config::File::with_name(&cli.config)); + } + + let conf: LbConfig = conf.build()?.try_deserialize()?; + + let mut clusters = HashMap::with_capacity(conf.clusters.len()); + + for (name, cluster_config) in conf.clusters { + clusters.insert( + name, + cluster::Cluster::new(armonik::ClientConfig::from_config_args( + cluster_config.into(), + )?), + ); + } + + let service = Arc::new(service::Service::new(clusters).await); + let refresh_delay = std::time::Duration::from_secs_f64(conf.refresh_delay.parse()?); let router = tonic::transport::Server::builder() .layer(TraceLayer::new_for_grpc()) @@ -131,7 +218,7 @@ async fn main() { let service = service.clone(); async move { - let mut timer = tokio::time::interval(tokio::time::Duration::from_secs(10)); + let mut timer = tokio::time::interval(refresh_delay); timer.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay); loop { @@ -143,7 +230,8 @@ async fn main() { } }); - let mut service_future = tokio::spawn(router.serve("0.0.0.0:1337".parse().unwrap())); + let mut service_future = + tokio::spawn(router.serve(SocketAddr::new(conf.listen_ip.parse()?, conf.listen_port))); log::info!("Application running"); @@ -176,4 +264,6 @@ async fn main() { _ = service_future.await; log::info!("Application stopped"); + + Ok(()) } diff --git a/load-balancer/src/service/mod.rs b/load-balancer/src/service/mod.rs index 769872c..fcf5eb1 100644 --- a/load-balancer/src/service/mod.rs +++ b/load-balancer/src/service/mod.rs @@ -292,7 +292,8 @@ impl Service { } pub async fn update_sessions(&self) -> Result<(), Status> { - for cluster in self.clusters.values() { + for (name, cluster) in &self.clusters { + log::debug!("Refreshing sessions from {}\n {:?}", name, cluster); let mut stream = std::pin::pin!( cluster .client() From 20990cd5a47a1b92289b82cc6f76e070cf8d4505 Mon Sep 17 00:00:00 2001 From: Florian Lemaitre Date: Tue, 31 Dec 2024 00:10:32 +0100 Subject: [PATCH 03/12] Add in-memory sqlite --- Cargo.lock | 90 +++- load-balancer/Cargo.toml | 4 + load-balancer/src/async_pool.rs | 118 +++++ load-balancer/src/main.rs | 1 + load-balancer/src/ref_guard.rs | 80 +++- load-balancer/src/service/mod.rs | 188 ++++++-- load-balancer/src/service/sessions.rs | 646 +++++++++++++++++++++----- load-balancer/src/utils.rs | 336 +------------- 8 files changed, 944 insertions(+), 519 deletions(-) create mode 100644 load-balancer/src/async_pool.rs diff --git a/Cargo.lock b/Cargo.lock index c4a6947..ac1c8c9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -543,6 +543,15 @@ dependencies = [ "libc", ] +[[package]] +name = "crossbeam-channel" +version = "0.5.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "crossbeam-utils" version = "0.8.20" @@ -697,6 +706,18 @@ dependencies = [ "once_cell", ] +[[package]] +name = "fallible-iterator" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" + +[[package]] +name = "fallible-streaming-iterator" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" + [[package]] name = "fastrand" version = "2.2.0" @@ -904,6 +925,15 @@ dependencies = [ "hashbrown 0.14.5", ] 
+[[package]] +name = "hashlink" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" +dependencies = [ + "hashbrown 0.14.5", +] + [[package]] name = "heck" version = "0.5.0" @@ -1444,6 +1474,17 @@ dependencies = [ "windows-targets", ] +[[package]] +name = "libsqlite3-sys" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e99fb7a497b1e3339bc746195567ed8d3e24945ecd636e3619d20b9de9e9149" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + [[package]] name = "linux-raw-sys" version = "0.4.14" @@ -1467,9 +1508,13 @@ dependencies = [ "env_logger", "eyre", "futures", + "lockfree-object-pool", "log", + "rusqlite", "serde", + "serde_json", "tokio", + "tokio-rusqlite", "tower-http", ] @@ -1483,6 +1528,12 @@ dependencies = [ "scopeguard", ] +[[package]] +name = "lockfree-object-pool" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9374ef4228402d4b7e403e5838cb880d9ee663314b0a900d5a6aabf0c213552e" + [[package]] name = "log" version = "0.4.22" @@ -1760,6 +1811,12 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkg-config" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" + [[package]] name = "ppv-lite86" version = "0.2.20" @@ -2039,6 +2096,20 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "rusqlite" +version = "0.32.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7753b721174eb8ff87a9a0e799e2d7bc3749323e773db92e0984debb00019d6e" +dependencies = [ + "bitflags", + "fallible-iterator", + "fallible-streaming-iterator", + "hashlink 0.9.1", + "libsqlite3-sys", + "smallvec", +] + [[package]] name = "rust-ini" version = "0.20.0" @@ -2534,6 +2605,17 @@ dependencies = [ "syn 2.0.90", ] +[[package]] +name = "tokio-rusqlite" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b65501378eb676f400c57991f42cbd0986827ab5c5200c53f206d710fb32a945" +dependencies = [ + "crossbeam-channel", + "rusqlite", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.26.1" @@ -2815,6 +2897,12 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + [[package]] name = "version_check" version = "0.9.5" @@ -3086,7 +3174,7 @@ checksum = "8902160c4e6f2fb145dbe9d6760a75e3c9522d8bf796ed7047c85919ac7115f8" dependencies = [ "arraydeque", "encoding_rs", - "hashlink", + "hashlink 0.8.4", ] [[package]] diff --git a/load-balancer/Cargo.toml b/load-balancer/Cargo.toml index e3f5c23..8d611e0 100644 --- a/load-balancer/Cargo.toml +++ b/load-balancer/Cargo.toml @@ -15,3 +15,7 @@ tower-http = { version = "0.5", features = ["trace"] } serde = { version = "1.0", features = ["derive"] } clap = { version = "4.5", features = ["derive"] } eyre = "0.6" +rusqlite = { version = "0.32", features = ["bundled"] } +tokio-rusqlite = { 
version = "0.6", features = ["bundled"] } +lockfree-object-pool = "0.1.6" +serde_json = "1.0" diff --git a/load-balancer/src/async_pool.rs b/load-balancer/src/async_pool.rs new file mode 100644 index 0000000..8c0a627 --- /dev/null +++ b/load-balancer/src/async_pool.rs @@ -0,0 +1,118 @@ +use std::{ + future::{Future, IntoFuture}, + pin::Pin, +}; + +use lockfree_object_pool::{LinearObjectPool, LinearReusable}; +use tokio_rusqlite::{Connection, Params}; + +use crate::ref_guard::RefGuard; + +pub enum PoolAwaitable { + Pending(Pin + Send + Sync>>), + Ready(T), + Unreachable, +} + +pub enum PoolFuture<'a, T> { + Pending( + Pin + Send + Sync>>, + &'a mut PoolAwaitable, + ), + Ready(&'a mut T), + Unreachable, +} + +impl<'a, T> IntoFuture for &'a mut PoolAwaitable { + type Output = &'a mut T; + type IntoFuture = PoolFuture<'a, T>; + + fn into_future(self) -> Self::IntoFuture { + match self { + PoolAwaitable::Pending(_) => { + match std::mem::replace(self, PoolAwaitable::Unreachable) { + PoolAwaitable::Pending(fut) => PoolFuture::Pending(fut, self), + _ => unreachable!(), + } + } + PoolAwaitable::Ready(val) => PoolFuture::Ready(val), + PoolAwaitable::Unreachable => unreachable!(), + } + } +} + +impl<'a, T> Future for PoolFuture<'a, T> { + type Output = &'a mut T; + + fn poll( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll { + let this = self.get_mut(); + match std::mem::replace(this, PoolFuture::Unreachable) { + PoolFuture::Pending(mut future, awaitable) => match future.as_mut().poll(cx) { + std::task::Poll::Ready(val) => { + *awaitable = PoolAwaitable::Ready(val); + match awaitable { + PoolAwaitable::Ready(val) => std::task::Poll::Ready(val), + _ => unreachable!(), + } + } + std::task::Poll::Pending => { + *this = PoolFuture::Pending(future, awaitable); + std::task::Poll::Pending + } + }, + PoolFuture::Ready(val) => std::task::Poll::Ready(val), + PoolFuture::Unreachable => unreachable!(), + } + } +} + +pub struct AsyncPool(LinearObjectPool>); + +impl AsyncPool { + pub fn new(create: Func) -> Self + where + Func: Fn() -> Fut, + Func: Clone + Send + Sync + 'static, + Fut: Future + Send + Sync + 'static, + { + AsyncPool(LinearObjectPool::new( + move || PoolAwaitable::Pending(Box::pin(create())), + |_| (), + )) + } + + pub async fn pull(&self) -> RefGuard>, &mut T> { + RefGuard::new_deref_mut(self.0.pull()).map_await().await + } +} + +impl AsyncPool { + pub async fn execute_batch(&self, sql: &str) -> Result<(), rusqlite::Error> { + let sql = sql.to_owned(); + self.pull() + .await + .call_unwrap(move |conn| conn.execute_batch(&sql)) + .await + } + pub async fn execute( + &self, + sql: &str, + params: impl Params + Send + 'static, + ) -> Result { + let sql = sql.to_owned(); + self.pull() + .await + .call_unwrap(move |conn| conn.execute(&sql, params)) + .await + } + + pub async fn call( + &self, + f: impl FnOnce(&mut rusqlite::Connection) -> Out + Send + 'static, + ) -> Out { + self.pull().await.call_unwrap(f).await + } +} diff --git a/load-balancer/src/main.rs b/load-balancer/src/main.rs index 303a852..9a2c465 100644 --- a/load-balancer/src/main.rs +++ b/load-balancer/src/main.rs @@ -5,6 +5,7 @@ use clap::Parser; use serde::{Deserialize, Serialize}; use tower_http::trace::TraceLayer; +pub mod async_pool; pub mod cluster; pub mod ref_guard; pub mod service; diff --git a/load-balancer/src/ref_guard.rs b/load-balancer/src/ref_guard.rs index d5a1e89..f3fb517 100644 --- a/load-balancer/src/ref_guard.rs +++ b/load-balancer/src/ref_guard.rs @@ -1,4 +1,5 @@ use std::{ 
+ future::{Future, IntoFuture}, mem::ManuallyDrop, ops::{Deref, DerefMut}, }; @@ -51,6 +52,22 @@ impl RefGuard { } impl RefGuard { + unsafe fn into_parts(mut self) -> (G, T) { + unsafe { + let ref_guard = ( + ManuallyDrop::take(&mut self.guard), + ManuallyDrop::take(&mut self.reference), + ); + std::mem::forget(self); + ref_guard + } + } + unsafe fn from_parts(guard: G, reference: T) -> Self { + Self { + guard: ManuallyDrop::new(guard), + reference: ManuallyDrop::new(reference), + } + } pub fn get(&self) -> &T { let x = self.reference.deref(); x @@ -60,18 +77,33 @@ impl RefGuard { self.reference.deref_mut() } - pub fn map(mut self, f: impl FnOnce(T) -> U) -> RefGuard { - let (guard, reference) = unsafe { - let ref_guard = ( - ManuallyDrop::take(&mut self.guard), - ManuallyDrop::take(&mut self.reference), - ); - std::mem::forget(self); - ref_guard - }; - RefGuard { - guard: ManuallyDrop::new(guard), - reference: ManuallyDrop::new(f(reference)), + pub fn map(self, f: impl FnOnce(T) -> U) -> RefGuard { + unsafe { + let (guard, reference) = self.into_parts(); + RefGuard::from_parts(guard, f(reference)) + } + } + + pub async fn map_async(self, f: Func) -> RefGuard + where + Func: FnOnce(T) -> Fut, + Fut: Future, + Out: Unpin, + { + unsafe { + let (guard, reference) = self.into_parts(); + RefGuard::from_parts(guard, f(reference).await) + } + } + + pub async fn map_await(self) -> RefGuard::Output> + where + T: IntoFuture, + ::Output: Unpin, + { + unsafe { + let (guard, reference) = self.into_parts(); + RefGuard::from_parts(guard, reference.await) } } } @@ -90,14 +122,32 @@ impl<'a, G: Unpin, T: Unpin + DerefMut> RefGuard { } } -impl Deref for RefGuard { +impl RefGuard> { + pub fn into_option(self) -> Option> { + unsafe { + let (guard, reference) = self.into_parts(); + reference.map(|reference| RefGuard::from_parts(guard, reference)) + } + } +} + +impl RefGuard> { + pub fn into_result(self) -> Result, E> { + unsafe { + let (guard, reference) = self.into_parts(); + reference.map(|reference| RefGuard::from_parts(guard, reference)) + } + } +} + +impl Deref for RefGuard { type Target = T; fn deref(&self) -> &Self::Target { self.get() } } -impl Deref for RefGuard { +impl Deref for RefGuard { type Target = T; fn deref(&self) -> &Self::Target { @@ -105,7 +155,7 @@ impl Deref for RefGuard { } } -impl DerefMut for RefGuard { +impl DerefMut for RefGuard { fn deref_mut(&mut self) -> &mut Self::Target { self.get_mut() } diff --git a/load-balancer/src/service/mod.rs b/load-balancer/src/service/mod.rs index fcf5eb1..739e00c 100644 --- a/load-balancer/src/service/mod.rs +++ b/load-balancer/src/service/mod.rs @@ -1,13 +1,16 @@ #![allow(clippy::mutable_key_type)] use std::{ - collections::HashMap, + collections::{HashMap, HashSet}, sync::{atomic::AtomicUsize, Arc}, }; +use sessions::Session; +use tokio_rusqlite::Connection; + use armonik::reexports::{tokio::sync::RwLock, tokio_stream::StreamExt, tonic::Status}; -use crate::{cluster::Cluster, utils::IntoStatus}; +use crate::{async_pool::AsyncPool, cluster::Cluster, utils::IntoStatus}; mod applications; mod auth; @@ -22,7 +25,7 @@ mod versions; pub struct Service { clusters: HashMap>, - mapping_session: RwLock, armonik::sessions::Raw)>>, + db: AsyncPool, mapping_result: RwLock>>, mapping_task: RwLock>>, counter: AtomicUsize, @@ -30,55 +33,165 @@ pub struct Service { impl Service { pub async fn new(clusters: impl IntoIterator) -> Self { + let pool = AsyncPool::new(|| async { + Connection::open("file::memory:?cache=shared") + .await + .unwrap() + }); + 
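// Why "file::memory:?cache=shared" works for a pool: every connection opened with
// this URI in the same process attaches to one shared-cache in-memory database, so
// the schema created once below is visible from every pooled connection, and the
// database lives as long as at least one such connection stays open. A minimal
// sketch of that behaviour with plain rusqlite (table name and values are made up):
//
//     use rusqlite::Connection;
//
//     fn main() -> rusqlite::Result<()> {
//         let a = Connection::open("file::memory:?cache=shared")?;
//         let b = Connection::open("file::memory:?cache=shared")?;
//         a.execute_batch("CREATE TABLE t(x INTEGER); INSERT INTO t VALUES (42);")?;
//         // The second connection reads what the first one wrote.
//         let x: i64 = b.query_row("SELECT x FROM t", [], |row| row.get(0))?;
//         assert_eq!(x, 42);
//         Ok(())
//     }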
pool.execute_batch( + "BEGIN; + CREATE TABLE session( + session_id TEXT PRIMARY KEY NOT NULL, + cluster TEXT NOT NULL, + status TINYINT NOT NULL, + client_submission BOOL NOT NULL, + worker_submission BOOL NOT NULL, + partition_ids JSONB, + default_task_options JSONB, + created_at REAL, + cancelled_at REAL, + closed_at REAL, + purged_at REAL, + deleted_at REAL, + duration REAL + ); + CREATE INDEX session_status ON session(status); + CREATE INDEX session_client_submission ON session(client_submission); + CREATE INDEX session_worker_submission ON session(worker_submission); + CREATE INDEX session_created_at ON session(created_at); + CREATE INDEX session_cancelled_at ON session(cancelled_at); + CREATE INDEX session_closed_at ON session(closed_at); + CREATE INDEX session_purged_at ON session(purged_at); + CREATE INDEX session_deleted_at ON session(deleted_at); + CREATE INDEX session_duration ON session(duration); + COMMIT;", + ) + .await + .unwrap(); Self { clusters: clusters .into_iter() .map(|(name, cluster)| (name, Arc::new(cluster))) .collect(), - mapping_session: RwLock::new(Default::default()), + db: pool, mapping_result: RwLock::new(Default::default()), mapping_task: RwLock::new(Default::default()), counter: AtomicUsize::new(0), } } + pub async fn add_sessions( + &self, + sessions: Vec, + cluster_name: String, + ) -> Result<(), Status> { + self.db + .call(move |conn| { + let mut stmt = conn.prepare_cached( + "WITH data AS ( + SELECT + e.value ->> 'session_id' as session_id, + e.value ->> 'cluster' as cluster, + e.value ->> 'status' as status, + e.value ->> 'client_submission' as client_submission, + e.value ->> 'worker_submission' as worker_submission, + e.value ->> 'partition_ids' as partition_ids, + e.value ->> 'default_task_options' as default_task_options, + e.value ->> 'created_at' as created_at, + e.value ->> 'cancelled_at' as cancelled_at, + e.value ->> 'closed_at' as closed_at, + e.value ->> 'purged_at' as purged_at, + e.value ->> 'deleted_at' as deleted_at, + e.value ->> 'duration' as duration + FROM json_each(?) 
e + ) + INSERT OR REPLACE INTO session( + session_id, + cluster, + status, + client_submission, + worker_submission, + partition_ids, + default_task_options, + created_at, + cancelled_at, + closed_at, + purged_at, + deleted_at, + duration + ) SELECT + session_id, + cluster, + status, + client_submission, + worker_submission, + partition_ids, + default_task_options, + created_at, + cancelled_at, + closed_at, + purged_at, + deleted_at, + duration + FROM data", + )?; + + stmt.execute([serde_json::to_string( + &sessions + .into_iter() + .map(|session| Session::from_grpc(session, cluster_name.clone())) + .collect::>(), + ) + .unwrap()])?; + + Result::<(), rusqlite::Error>::Ok(()) + }) + .await + .map_err(IntoStatus::into_status) + } + pub async fn get_cluster_from_sessions<'a>( &'a self, session_ids: &[&str], ) -> Result, Vec>, Status> { - let mut missing_ids = Vec::new(); - let mut mapping = HashMap::, Vec>::new(); + let mut missing_ids: HashSet<_> = session_ids.iter().copied().map(String::from).collect(); - { - let guard = self.mapping_session.read().await; + let (mapping, missing_ids) = self.db.call(move |conn| { + let mut mapping = HashMap::>::new(); - for &session_id in session_ids { - if let Some(cluster) = guard.get(session_id) { - match mapping.entry(cluster.0.clone()) { - std::collections::hash_map::Entry::Occupied(mut occupied_entry) => { - occupied_entry.get_mut().push(String::from(session_id)); - } - std::collections::hash_map::Entry::Vacant(vacant_entry) => { - vacant_entry.insert(vec![String::from(session_id)]); - } - } - } else { - missing_ids.push(session_id); + let mut stmt = conn.prepare_cached("SELECT session_id, cluster FROM session WHERE session_id IN (SELECT e.value FROM json_each(?) e)")?; + let mut rows = stmt.query([serde_json::to_string(&missing_ids).unwrap()])?; + + while let Some(row) = rows.next()? 
{ + let session_id: String = row.get(0)?; + let cluster: String = row.get(1)?; + + missing_ids.remove(session_id.as_str()); + match mapping.entry(cluster) { + std::collections::hash_map::Entry::Occupied(mut occupied_entry) => occupied_entry.get_mut().push(session_id), + std::collections::hash_map::Entry::Vacant(vacant_entry) => {vacant_entry.insert(vec![session_id]);}, } } - } + + Result::<_, rusqlite::Error>::Ok((mapping, missing_ids)) + }).await.map_err(IntoStatus::into_status)?; + + let mut mapping = mapping + .into_iter() + .map(|(cluster_name, session_ids)| (self.clusters[&cluster_name].clone(), session_ids)) + .collect::>(); if !missing_ids.is_empty() { let filter = missing_ids - .iter() - .map(|&session_id| { + .into_iter() + .map(|session_id| { [armonik::sessions::filter::Field { field: armonik::sessions::Field::Raw( armonik::sessions::RawField::SessionId, ), condition: armonik::sessions::filter::Condition::String( armonik::FilterString { - value: String::from(session_id), + value: session_id, operator: armonik::FilterStringOperator::Equal, }, ), @@ -86,7 +199,8 @@ impl Service { }) .collect::>(); - for cluster in self.clusters.values() { + for (cluster_name, cluster) in &self.clusters { + let cluster_name = cluster_name.clone(); let sessions = cluster .client() .await @@ -105,14 +219,11 @@ impl Service { if !sessions.is_empty() { let cluster_mapping = mapping.entry(cluster.clone()).or_default(); - let mut guard = self.mapping_session.write().await; - for session in sessions { - let session_id = session.session_id.clone(); - guard - .entry(session_id.clone()) - .or_insert_with(|| (cluster.clone(), session)); - cluster_mapping.push(session_id); + for session in &sessions { + cluster_mapping.push(session.session_id.clone()); } + + self.add_sessions(sessions, cluster_name).await?; } } } @@ -304,18 +415,7 @@ impl Service { ); while let Some(chunk) = stream.try_next().await? 
{ - let mut guard = self.mapping_session.write().await; - - for session in chunk { - match guard.entry(session.session_id.clone()) { - std::collections::hash_map::Entry::Occupied(mut occupied_entry) => { - occupied_entry.get_mut().1 = session - } - std::collections::hash_map::Entry::Vacant(vacant_entry) => { - vacant_entry.insert((cluster.clone(), session)); - } - } - } + self.add_sessions(chunk, name.clone()).await?; } } diff --git a/load-balancer/src/service/sessions.rs b/load-balancer/src/service/sessions.rs index 902ec03..c71528f 100644 --- a/load-balancer/src/service/sessions.rs +++ b/load-balancer/src/service/sessions.rs @@ -1,16 +1,22 @@ -use std::sync::Arc; +use std::{collections::HashMap, sync::Arc}; use armonik::{ - reexports::{tokio_util, tonic}, + reexports::{ + tokio_util, + tonic::{self, Status}, + }, server::SessionsService, sessions, }; +use rusqlite::params_from_iter; +use serde::{Deserialize, Serialize}; use crate::utils::{impl_unary, run_with_cancellation, IntoStatus}; use super::Service; impl SessionsService for Service { + #[allow(clippy::blocks_in_conditions)] async fn list( self: Arc, request: sessions::list::Request, @@ -24,142 +30,239 @@ impl SessionsService for Service { "Page size should be positive", )); }; - let guard = self.mapping_session.read().await; - let mut sessions = Vec::new(); + let mut params = Vec::>::new(); + let mut query_suffix = String::new(); + let mut sep = " WHERE ("; + let mut term = ""; + for filter in &request.filters { + query_suffix.push_str(sep); + sep = ") OR ("; + term = ")"; + let mut filter = filter.iter(); + if let Some(cond) = filter.next() { + let mut cond = cond; + let mut sep = ""; + loop { + query_suffix.push_str(sep); + sep = " AND "; + let (column, grpc, value_type) = + field_to_column_name(cond.field.clone(), true)?; + if let sessions::Field::TaskOptionGeneric(key) = &cond.field { + params.push(Box::new(key.clone())); + } + match (value_type, &cond.condition) { + (ValueType::String, sessions::filter::Condition::String(_)) => (), + (ValueType::Number, sessions::filter::Condition::Number(_)) => (), + (ValueType::Boolean, sessions::filter::Condition::Boolean(_)) => (), + (ValueType::Status, sessions::filter::Condition::Status(_)) => (), + (ValueType::Date, sessions::filter::Condition::Date(_)) => (), + (ValueType::Duration, sessions::filter::Condition::Duration(_)) => (), + (ValueType::Array, sessions::filter::Condition::Array(_)) => (), + _ => { + return Err(Status::invalid_argument(format!( + "Condition {:?} is not valid for the field {}", + &cond.condition, grpc + ))); + } + } - for (_, (_, session)) in guard.iter() { - if cancellation_token.is_cancelled() { - return Err(tonic::Status::aborted("Request aborted")); - } - for filter in &request.filters { - let mut ok = true; + let thunk = match &cond.condition { + sessions::filter::Condition::String(cond) => { + params.push(Box::new(cond.value.clone())); + match cond.operator { + armonik::FilterStringOperator::Equal => format!("{column} = ?"), + armonik::FilterStringOperator::NotEqual => format!("{column} != ?"), + armonik::FilterStringOperator::Contains => { + format!("instr({column}, ?) > 0") + } + armonik::FilterStringOperator::NotContains => { + format!("instr({column}, ?) == 0") + } + armonik::FilterStringOperator::StartsWith => { + format!("instr({column}, ?) == 1") + } + armonik::FilterStringOperator::EndsWith => { + params.push(Box::new(cond.value.clone())); + format!( + "instr({column}, ?) + length(?) 
== length({column}) + 1" + ) + } + } + } + sessions::filter::Condition::Number(cond) => { + params.push(Box::new(cond.value)); + match &cond.operator { + armonik::FilterNumberOperator::Equal => format!("{column} = ?"), + armonik::FilterNumberOperator::NotEqual => format!("{column} != ?"), + armonik::FilterNumberOperator::LessThan => format!("{column} < ?"), + armonik::FilterNumberOperator::LessThanOrEqual => { + format!("{column} <= ?") + } + armonik::FilterNumberOperator::GreaterThanOrEqual => { + format!("{column} >= ?") + } + armonik::FilterNumberOperator::GreaterThan => { + format!("{column} > ?") + } + } + } + sessions::filter::Condition::Boolean(cond) => { + if cond.value { + column.to_string() + } else { + format!("NOT {column}") + } + } + sessions::filter::Condition::Status(cond) => { + params.push(Box::new(cond.value.clone() as i32)); + match &cond.operator { + armonik::FilterStatusOperator::Equal => format!("{column} = ?"), + armonik::FilterStatusOperator::NotEqual => format!("{column} != ?"), + } + } + sessions::filter::Condition::Date(cond) => { + params.push(Box::new( + cond.value.seconds as f64 + cond.value.nanos as f64 * 1e-9f64, + )); + match &cond.operator { + armonik::FilterDateOperator::Equal => format!("{column} = ?"), + armonik::FilterDateOperator::NotEqual => format!("{column} != ?"), + armonik::FilterDateOperator::Before => format!("{column} < ?"), + armonik::FilterDateOperator::BeforeOrEqual => { + format!("{column} <= ?") + } + armonik::FilterDateOperator::AfterOrEqual => { + format!("{column} >= ?") + } + armonik::FilterDateOperator::After => format!("{column} > ?"), + } + } + sessions::filter::Condition::Duration(cond) => { + params.push(Box::new( + cond.value.seconds as f64 + cond.value.nanos as f64 * 1e-9f64, + )); + match &cond.operator { + armonik::FilterDurationOperator::Equal => format!("{column} = ?"), + armonik::FilterDurationOperator::NotEqual => { + format!("{column} != ?") + } + armonik::FilterDurationOperator::ShorterThan => { + format!("{column} < ?") + } + armonik::FilterDurationOperator::ShorterThanOrEqual => { + format!("{column} <= ?") + } + armonik::FilterDurationOperator::LongerThanOrEqual => { + format!("{column} >= ?") + } + armonik::FilterDurationOperator::LongerThan => { + format!("{column} > ?") + } + } + } + sessions::filter::Condition::Array(cond) => { + params.push(Box::new(cond.value.clone())); + match &cond.operator { + armonik::FilterArrayOperator::Contains => format!("EXISTS (SELECT 1 FROM json_each({column}) WHERE value = ?)"), + armonik::FilterArrayOperator::NotContains => format!("NOT EXISTS (SELECT 1 FROM json_each({column}) WHERE value = ?)"), + } + } + }; - for filter in filter { - if !crate::utils::filter_match_session(session, filter)? 
{ - ok = false; + query_suffix.push_str(&thunk); + let Some(c) = filter.next() else { break; - } - } - - if ok { - sessions.push(session.clone()); - break; + }; + cond = c; } + } else { + query_suffix.push_str("TRUE"); } } - std::mem::drop(guard); + query_suffix.push_str(term); - match request.sort.field { - sessions::Field::Raw(raw_field) => { - match raw_field { - sessions::RawField::Unspecified => (), - sessions::RawField::SessionId => { - sessions.sort_by(|a, b| a.session_id.cmp(&b.session_id)) - } - sessions::RawField::Status => sessions.sort_by(|a, b| a.status.cmp(&b.status)), - sessions::RawField::ClientSubmission => { - sessions.sort_by(|a, b| a.client_submission.cmp(&b.client_submission)) - } - sessions::RawField::WorkerSubmission => { - sessions.sort_by(|a, b| a.worker_submission.cmp(&b.worker_submission)) - } - sessions::RawField::PartitionIds => { - sessions.sort_by(|a, b| a.partition_ids.cmp(&b.partition_ids)) - } - sessions::RawField::Options => { - return Err(tonic::Status::invalid_argument( - "Field Options is not sortable", - )); - } - sessions::RawField::CreatedAt => sessions - .sort_by(|a, b| crate::utils::cmp_timestamp(a.created_at, b.created_at)), - sessions::RawField::CancelledAt => sessions.sort_by(|a, b| { - crate::utils::cmp_timestamp(a.cancelled_at, b.cancelled_at) - }), - sessions::RawField::ClosedAt => sessions - .sort_by(|a, b| crate::utils::cmp_timestamp(a.closed_at, b.closed_at)), - sessions::RawField::PurgedAt => sessions - .sort_by(|a, b| crate::utils::cmp_timestamp(a.purged_at, b.purged_at)), - sessions::RawField::DeletedAt => sessions - .sort_by(|a, b| crate::utils::cmp_timestamp(a.deleted_at, b.deleted_at)), - sessions::RawField::Duration => { - sessions.sort_by(|a, b| crate::utils::cmp_duration(a.duration, b.duration)) - } - } - } - sessions::Field::TaskOption(task_option_field) => match task_option_field { - armonik::TaskOptionField::Unspecified => (), - armonik::TaskOptionField::MaxDuration => sessions.sort_by(|a, b| { - crate::utils::cmp_duration( - Some(a.default_task_options.max_duration), - Some(b.default_task_options.max_duration), - ) - }), - armonik::TaskOptionField::MaxRetries => sessions.sort_by(|a, b| { - a.default_task_options - .max_retries - .cmp(&b.default_task_options.max_retries) - }), - armonik::TaskOptionField::Priority => sessions.sort_by(|a, b| { - a.default_task_options - .priority - .cmp(&b.default_task_options.priority) - }), - armonik::TaskOptionField::PartitionId => sessions.sort_by(|a, b| { - a.default_task_options - .partition_id - .cmp(&b.default_task_options.partition_id) - }), - armonik::TaskOptionField::ApplicationName => sessions.sort_by(|a, b| { - a.default_task_options - .application_name - .cmp(&b.default_task_options.application_name) - }), - armonik::TaskOptionField::ApplicationVersion => sessions.sort_by(|a, b| { - a.default_task_options - .application_version - .cmp(&b.default_task_options.application_version) - }), - armonik::TaskOptionField::ApplicationNamespace => sessions.sort_by(|a, b| { - a.default_task_options - .application_namespace - .cmp(&b.default_task_options.application_namespace) - }), - armonik::TaskOptionField::ApplicationService => sessions.sort_by(|a, b| { - a.default_task_options - .application_service - .cmp(&b.default_task_options.application_service) - }), - armonik::TaskOptionField::ApplicationEngine => sessions.sort_by(|a, b| { - a.default_task_options - .engine_type - .cmp(&b.default_task_options.engine_type) - }), - }, - sessions::Field::TaskOptionGeneric(key) => { - 
sessions.sort_by(|a, b| { - a.default_task_options - .options - .get(&key) - .cmp(&b.default_task_options.options.get(&key)) - }); + match &request.sort { + sessions::Sort { + field: sessions::Field::Raw(sessions::RawField::Unspecified), + .. + } => (), + sessions::Sort { + field: sessions::Field::TaskOption(armonik::TaskOptionField::Unspecified), + .. + } => (), + sessions::Sort { + direction: armonik::SortDirection::Unspecified, + .. + } => (), + _ => { + let (column, _, _) = field_to_column_name(request.sort.field, false)?; + let direction = if matches!(request.sort.direction, armonik::SortDirection::Desc) { + "DESC" + } else { + "ASC" + }; + query_suffix.push_str(&format!(" ORDER BY {column} {direction}")); } } - if matches!(request.sort.direction, armonik::SortDirection::Desc) { - sessions.reverse(); - } + let query = format!( + "SELECT json_object( + 'session_id', session_id, + 'cluster', cluster, + 'status', status, + 'client_submission', json(iif(client_submission, 'true', 'false')), + 'worker_submission', json(iif(worker_submission, 'true', 'false')), + 'partition_ids', json(partition_ids), + 'default_task_options', json(default_task_options), + 'created_at', created_at, + 'cancelled_at', cancelled_at, + 'closed_at', closed_at, + 'purged_at', purged_at, + 'deleted_at', deleted_at, + 'duration', duration + ) FROM session{} LIMIT {} OFFSET {}", + query_suffix, + page_size, + page * page_size + ); + let query_count = format!("SELECT COUNT(*) FROM session{query_suffix}"); - let total = sessions.len() as i32; + let (sessions, total) = run_with_cancellation!( + cancellation_token, + self.db.call(move |conn| { + let mut sessions = Vec::::new(); + let transaction = conn.transaction()?; + let total = + transaction + .query_row(&query_count, params_from_iter(¶ms), |row| row.get(0))?; + let mut stmt = transaction.prepare(&query)?; + let mut rows = stmt.query(params_from_iter(¶ms))?; + + while let Some(row) = rows.next()? 
{ + let json: String = row.get(0)?; + match serde_json::from_str(&json) { + Ok(session) => sessions.push(Session::into(session)), + Err(err) => { + return Err(rusqlite::Error::FromSqlConversionFailure( + 0, + rusqlite::types::Type::Text, + Box::new(err), + )) + } + }; + } + std::mem::drop(rows); + std::mem::drop(stmt); + transaction.commit()?; + Result::<_, rusqlite::Error>::Ok((sessions, total)) + }) + ) + .map_err(IntoStatus::into_status)?; Ok(armonik::sessions::list::Response { - sessions: sessions - .into_iter() - .skip(page * page_size) - .take(page_size) - .collect(), + sessions, page: request.page, page_size: request.page_size, total, @@ -194,7 +297,7 @@ impl SessionsService for Service { let mut err = None; - for (_, cluster) in self.clusters.iter().cycle().skip(i % n).take(n) { + for (cluster_name, cluster) in self.clusters.iter().cycle().skip(i % n).take(n) { match run_with_cancellation!(cancellation_token, cluster.client()) { Ok(client) => { let response = run_with_cancellation!( @@ -203,7 +306,29 @@ impl SessionsService for Service { ); match response { - Ok(response) => return Ok(response), + Ok(response) => { + self.add_sessions( + vec![Session { + session_id: response.session_id.clone(), + cluster: cluster_name.clone(), + status: armonik::SessionStatus::Running as i32 as u8, + client_submission: true, + worker_submission: true, + partition_ids: request.partition_ids, + default_task_options: request.default_task_options.into(), + created_at: None, + cancelled_at: None, + closed_at: None, + purged_at: None, + deleted_at: None, + duration: None, + } + .into()], + cluster_name.clone(), + ) + .await?; + return Ok(response); + } Err(error) => err = Some(error.into_status()), } } @@ -255,13 +380,16 @@ impl SessionsService for Service { cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { let service = self.clone(); + let session_id = request.session_id.clone(); let response = impl_unary!(service.sessions, request, cancellation_token, session)?; // If delete is successful, remove the session from the list - let mut guard = - crate::utils::run_with_cancellation!(cancellation_token, self.mapping_session.write()); - - guard.remove(&response.session.session_id); + run_with_cancellation!( + cancellation_token, + self.db + .execute("DELETE FROM session WHERE session_id = ?", [session_id]) + ) + .map_err(IntoStatus::into_status)?; Ok(response) } @@ -274,3 +402,263 @@ impl SessionsService for Service { impl_unary!(self.sessions, request, cancellation_token, session) } } + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub(super) struct TaskOptions { + pub options: HashMap, + pub max_duration: f64, + pub max_retries: i32, + pub priority: i32, + pub partition_id: String, + pub application_name: String, + pub application_version: String, + pub application_namespace: String, + pub application_service: String, + pub engine_type: String, +} + +fn f64_to_timestamp(t: f64) -> armonik::reexports::prost_types::Timestamp { + armonik::reexports::prost_types::Timestamp { + seconds: t.trunc() as i64, + nanos: (t.fract() * 1e9) as i32, + } +} + +fn f64_to_duration(t: f64) -> armonik::reexports::prost_types::Duration { + armonik::reexports::prost_types::Duration { + seconds: t.trunc() as i64, + nanos: (t.fract() * 1e9) as i32, + } +} + +fn timestamp_to_f64(t: armonik::reexports::prost_types::Timestamp) -> f64 { + t.seconds as f64 + t.nanos as f64 * 1e-9f64 +} +fn duration_to_f64(t: armonik::reexports::prost_types::Duration) -> f64 { + t.seconds as f64 + t.nanos as f64 * 
1e-9f64
+}
+
+impl From<TaskOptions> for armonik::TaskOptions {
+    fn from(value: TaskOptions) -> Self {
+        Self {
+            options: value.options,
+            max_duration: f64_to_duration(value.max_duration),
+            max_retries: value.max_retries,
+            priority: value.priority,
+            partition_id: value.partition_id,
+            application_name: value.application_name,
+            application_version: value.application_version,
+            application_namespace: value.application_namespace,
+            application_service: value.application_service,
+            engine_type: value.engine_type,
+        }
+    }
+}
+
+impl From<armonik::TaskOptions> for TaskOptions {
+    fn from(value: armonik::TaskOptions) -> Self {
+        Self {
+            options: value.options,
+            max_duration: duration_to_f64(value.max_duration),
+            max_retries: value.max_retries,
+            priority: value.priority,
+            partition_id: value.partition_id,
+            application_name: value.application_name,
+            application_version: value.application_version,
+            application_namespace: value.application_namespace,
+            application_service: value.application_service,
+            engine_type: value.engine_type,
+        }
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub(super) struct Session {
+    /// The session ID.
+    pub session_id: String,
+    /// The cluster which hosts the session.
+    pub cluster: String,
+    /// The session status.
+    pub status: u8,
+    /// Whether clients can submit tasks in the session.
+    pub client_submission: bool,
+    /// Whether workers can submit tasks in the session.
+    pub worker_submission: bool,
+    /// The partition IDs.
+    pub partition_ids: Vec<String>,
+    /// The task options. In fact, these are used as default values in child tasks.
+    pub default_task_options: TaskOptions,
+    /// The creation date.
+    pub created_at: Option<f64>,
+    /// The cancellation date. Only set when status is 'cancelled'.
+    pub cancelled_at: Option<f64>,
+    /// The closure date. Only set when status is 'closed'.
+    pub closed_at: Option<f64>,
+    /// The purge date. Only set when status is 'purged'.
+    pub purged_at: Option<f64>,
+    /// The deletion date. Only set when status is 'deleted'.
+    pub deleted_at: Option<f64>,
+    /// The duration. Only set when status is 'cancelled'.
+ pub duration: Option, +} + +impl From for armonik::sessions::Raw { + fn from(value: Session) -> Self { + Self { + session_id: value.session_id, + status: armonik::SessionStatus::from(value.status as i32), + client_submission: value.client_submission, + worker_submission: value.worker_submission, + partition_ids: value.partition_ids, + default_task_options: value.default_task_options.into(), + created_at: value.created_at.map(f64_to_timestamp), + cancelled_at: value.cancelled_at.map(f64_to_timestamp), + closed_at: value.closed_at.map(f64_to_timestamp), + purged_at: value.purged_at.map(f64_to_timestamp), + deleted_at: value.deleted_at.map(f64_to_timestamp), + duration: value.duration.map(f64_to_duration), + } + } +} + +impl Session { + pub fn from_grpc(raw: armonik::sessions::Raw, cluster: String) -> Self { + Self { + session_id: raw.session_id, + cluster, + status: raw.status as i32 as u8, + client_submission: raw.client_submission, + worker_submission: raw.worker_submission, + partition_ids: raw.partition_ids, + default_task_options: raw.default_task_options.into(), + created_at: raw.created_at.map(timestamp_to_f64), + cancelled_at: raw.cancelled_at.map(timestamp_to_f64), + closed_at: raw.closed_at.map(timestamp_to_f64), + purged_at: raw.purged_at.map(timestamp_to_f64), + deleted_at: raw.deleted_at.map(timestamp_to_f64), + duration: raw.duration.map(duration_to_f64), + } + } +} + +enum ValueType { + String, + Number, + Boolean, + Status, + Date, + Duration, + Array, +} + +fn field_to_column_name( + field: armonik::sessions::Field, + filter: bool, +) -> Result<(&'static str, &'static str, ValueType), Status> { + match field { + sessions::Field::Raw(sessions::RawField::Unspecified) => { + Err(Status::invalid_argument(if filter { + "Filter field is not set" + } else { + "Sort field is not set" + })) + } + sessions::Field::Raw(sessions::RawField::SessionId) => { + Ok(("session_id", "SessionId", ValueType::String)) + } + sessions::Field::Raw(sessions::RawField::Status) => { + Ok(("status", "Status", ValueType::Status)) + } + sessions::Field::Raw(sessions::RawField::ClientSubmission) => { + Ok(("client_submission", "ClientSubmission", ValueType::Boolean)) + } + sessions::Field::Raw(sessions::RawField::WorkerSubmission) => { + Ok(("worker_submission", "WorkerSubmission", ValueType::Boolean)) + } + sessions::Field::Raw(sessions::RawField::PartitionIds) => { + Ok(("partition_ids", "PartitionIds", ValueType::Array)) + } + sessions::Field::Raw(sessions::RawField::Options) => { + Err(Status::invalid_argument(if filter { + "Filter field Options is not valid for a RawField filter" + } else { + "Sort field Options is not valid for a RawField sort" + })) + } + sessions::Field::Raw(sessions::RawField::CreatedAt) => { + Ok(("created_at", "CreatedAt", ValueType::Date)) + } + sessions::Field::Raw(sessions::RawField::CancelledAt) => { + Ok(("cancelled_at", "CancelledAt", ValueType::Date)) + } + sessions::Field::Raw(sessions::RawField::ClosedAt) => { + Ok(("closed_at", "ClosedAt", ValueType::Date)) + } + sessions::Field::Raw(sessions::RawField::PurgedAt) => { + Ok(("purged_at", "PurgedAt", ValueType::Date)) + } + sessions::Field::Raw(sessions::RawField::DeletedAt) => { + Ok(("deleted_at", "DeletedAt", ValueType::Date)) + } + sessions::Field::Raw(sessions::RawField::Duration) => { + Ok(("duration", "Duration", ValueType::Duration)) + } + sessions::Field::TaskOption(armonik::TaskOptionField::Unspecified) => { + Err(Status::invalid_argument(if filter { + "Filter field is not set" + } else { + "Sort field is not 
set" + })) + } + sessions::Field::TaskOption(armonik::TaskOptionField::MaxDuration) => Ok(( + "default_task_options ->> 'max_duration'", + "DefaultTaskOptions.MaxDuration", + ValueType::Duration, + )), + sessions::Field::TaskOption(armonik::TaskOptionField::MaxRetries) => Ok(( + "default_task_options ->> 'max_retries'", + "DefaultTaskOptions.MaxRetries", + ValueType::Number, + )), + sessions::Field::TaskOption(armonik::TaskOptionField::Priority) => Ok(( + "default_task_options ->> 'priority'", + "DefaultTaskOptions.Priority", + ValueType::Number, + )), + sessions::Field::TaskOption(armonik::TaskOptionField::PartitionId) => Ok(( + "default_task_options ->> 'partition_id'", + "DefaultTaskOptions.PartitionId", + ValueType::String, + )), + sessions::Field::TaskOption(armonik::TaskOptionField::ApplicationName) => Ok(( + "default_task_options ->> 'application_name'", + "DefaultTaskOptions.ApplicationName", + ValueType::String, + )), + sessions::Field::TaskOption(armonik::TaskOptionField::ApplicationVersion) => Ok(( + "default_task_options ->> 'application_version'", + "DefaultTaskOptions.ApplicationVersion", + ValueType::String, + )), + sessions::Field::TaskOption(armonik::TaskOptionField::ApplicationNamespace) => Ok(( + "default_task_options ->> 'application_namespace'", + "DefaultTaskOptions.ApplicationNamespace", + ValueType::String, + )), + sessions::Field::TaskOption(armonik::TaskOptionField::ApplicationService) => Ok(( + "default_task_options ->> 'application_service'", + "DefaultTaskOptions.ApplicationService", + ValueType::String, + )), + sessions::Field::TaskOption(armonik::TaskOptionField::ApplicationEngine) => Ok(( + "default_task_options ->> 'engine_type'", + "DefaultTaskOptions.ApplicationEngine", + ValueType::String, + )), + sessions::Field::TaskOptionGeneric(_) => Ok(( + "default_task_options -> 'options' ->> ?", + "DefaultTaskOptions.Options", + ValueType::String, + )), + } +} diff --git a/load-balancer/src/utils.rs b/load-balancer/src/utils.rs index 4a0104c..e4717fd 100644 --- a/load-balancer/src/utils.rs +++ b/load-balancer/src/utils.rs @@ -1,6 +1,4 @@ -use std::cmp::Ordering; - -use armonik::reexports::{prost_types, tonic::Status}; +use armonik::reexports::tonic::Status; macro_rules! 
run_with_cancellation { (use $ct:expr; $($body:tt)*) => { @@ -73,340 +71,18 @@ impl IntoStatus for armonik::client::ConnectionError { impl IntoStatus for armonik::client::ConfigError { fn into_status(self) -> Status { - Status::internal(self.to_string()) + Status::failed_precondition(self.to_string()) } } impl IntoStatus for armonik::client::ReadEnvError { fn into_status(self) -> Status { - Status::internal(self.to_string()) - } -} - -pub(crate) fn filter_match_string(value: &str, condition: &armonik::FilterString) -> bool { - match condition.operator { - armonik::FilterStringOperator::Equal => condition.value == value, - armonik::FilterStringOperator::NotEqual => condition.value != value, - armonik::FilterStringOperator::Contains => condition.value.contains(value), - armonik::FilterStringOperator::NotContains => !condition.value.contains(value), - armonik::FilterStringOperator::StartsWith => condition.value.starts_with(value), - armonik::FilterStringOperator::EndsWith => condition.value.ends_with(value), - } -} - -pub(crate) fn filter_match_number(value: i64, condition: &armonik::FilterNumber) -> bool { - match condition.operator { - armonik::FilterNumberOperator::Equal => value == condition.value, - armonik::FilterNumberOperator::NotEqual => value != condition.value, - armonik::FilterNumberOperator::LessThan => value < condition.value, - armonik::FilterNumberOperator::LessThanOrEqual => value <= condition.value, - armonik::FilterNumberOperator::GreaterThanOrEqual => value >= condition.value, - armonik::FilterNumberOperator::GreaterThan => value > condition.value, - } -} - -pub(crate) fn filter_match_bool(value: bool, condition: &armonik::FilterBoolean) -> bool { - match condition.operator { - armonik::FilterBooleanOperator::Is => value == condition.value, - } -} - -pub(crate) fn filter_match_array( - value: impl IntoIterator>, - condition: &armonik::FilterArray, -) -> bool { - let contains = value.into_iter().any(|s| s.as_ref() == condition.value); - match condition.operator { - armonik::FilterArrayOperator::Contains => contains, - armonik::FilterArrayOperator::NotContains => !contains, - } -} - -pub(crate) fn filter_match_status( - value: &T, - condition: &armonik::FilterStatus, -) -> bool { - match condition.operator { - armonik::FilterStatusOperator::Equal => *value == condition.value, - armonik::FilterStatusOperator::NotEqual => *value != condition.value, - } -} - -pub(crate) fn filter_match_duration( - value: Option, - condition: &armonik::FilterDuration, -) -> bool { - let Some(value) = value else { - return matches!( - condition.operator, - armonik::FilterDurationOperator::NotEqual - ); - }; - let prost_types::Duration { seconds, nanos } = value; - let lhs = (seconds, nanos); - - let prost_types::Duration { seconds, nanos } = condition.value; - let rhs = (seconds, nanos); - - match condition.operator { - armonik::FilterDurationOperator::Equal => lhs == rhs, - armonik::FilterDurationOperator::NotEqual => lhs != rhs, - armonik::FilterDurationOperator::ShorterThan => lhs < rhs, - armonik::FilterDurationOperator::ShorterThanOrEqual => lhs <= rhs, - armonik::FilterDurationOperator::LongerThanOrEqual => lhs >= rhs, - armonik::FilterDurationOperator::LongerThan => lhs > rhs, + Status::failed_precondition(self.to_string()) } } -pub(crate) fn filter_match_date( - value: Option, - condition: &armonik::FilterDate, -) -> bool { - let Some(value) = value else { - return matches!(condition.operator, armonik::FilterDateOperator::NotEqual); - }; - - let prost_types::Timestamp { seconds, nanos } = 
value; - let lhs = (seconds, nanos); - - let prost_types::Timestamp { seconds, nanos } = condition.value; - let rhs = (seconds, nanos); - - match condition.operator { - armonik::FilterDateOperator::Equal => lhs == rhs, - armonik::FilterDateOperator::NotEqual => lhs != rhs, - armonik::FilterDateOperator::Before => lhs < rhs, - armonik::FilterDateOperator::BeforeOrEqual => lhs <= rhs, - armonik::FilterDateOperator::AfterOrEqual => lhs >= rhs, - armonik::FilterDateOperator::After => lhs > rhs, - } -} - -pub(crate) fn filter_match_session( - value: &armonik::sessions::Raw, - condition: &armonik::sessions::filter::Field, -) -> Result { - match &condition.field { - armonik::sessions::Field::Raw(raw_field) => match raw_field { - armonik::sessions::RawField::Unspecified => { - Err(Status::invalid_argument("Filter field is not set")) - } - armonik::sessions::RawField::SessionId => match &condition.condition { - armonik::sessions::filter::Condition::String(filter_string) => { - Ok(filter_match_string(&value.session_id, filter_string)) - } - condition => Err(Status::invalid_argument(format!( - "Condition {condition:?} is not valid for the field SessionId" - ))), - }, - armonik::sessions::RawField::Status => match &condition.condition { - armonik::sessions::filter::Condition::Status(filter_status) => { - Ok(filter_match_status(&value.status, filter_status)) - } - condition => Err(Status::invalid_argument(format!( - "Condition {condition:?} is not valid for the field Status" - ))), - }, - armonik::sessions::RawField::ClientSubmission => match &condition.condition { - armonik::sessions::filter::Condition::Boolean(filter_bool) => { - Ok(filter_match_bool(value.client_submission, filter_bool)) - } - condition => Err(Status::invalid_argument(format!( - "Condition {condition:?} is not valid for the field ClientSubmission" - ))), - }, - armonik::sessions::RawField::WorkerSubmission => match &condition.condition { - armonik::sessions::filter::Condition::Boolean(filter_bool) => { - Ok(filter_match_bool(value.worker_submission, filter_bool)) - } - condition => Err(Status::invalid_argument(format!( - "Condition {condition:?} is not valid for the field WorkerSubmission" - ))), - }, - armonik::sessions::RawField::PartitionIds => match &condition.condition { - armonik::sessions::filter::Condition::Array(filter_array) => { - Ok(filter_match_array(&value.partition_ids, filter_array)) - } - condition => Err(Status::invalid_argument(format!( - "Condition {condition:?} is not valid for the field PartitionIds" - ))), - }, - armonik::sessions::RawField::Options => Err(Status::invalid_argument( - "Filter field Options is not valid for a RawField filter", - )), - armonik::sessions::RawField::CreatedAt => match &condition.condition { - armonik::sessions::filter::Condition::Date(filter_date) => { - Ok(filter_match_date(value.created_at, filter_date)) - } - condition => Err(Status::invalid_argument(format!( - "Condition {condition:?} is not valid for the field CreatedAt" - ))), - }, - armonik::sessions::RawField::CancelledAt => match &condition.condition { - armonik::sessions::filter::Condition::Date(filter_date) => { - Ok(filter_match_date(value.cancelled_at, filter_date)) - } - condition => Err(Status::invalid_argument(format!( - "Condition {condition:?} is not valid for the field CancelledAt" - ))), - }, - armonik::sessions::RawField::ClosedAt => match &condition.condition { - armonik::sessions::filter::Condition::Date(filter_date) => { - Ok(filter_match_date(value.closed_at, filter_date)) - } - condition => 
Err(Status::invalid_argument(format!( - "Condition {condition:?} is not valid for the field ClosedAt" - ))), - }, - armonik::sessions::RawField::PurgedAt => match &condition.condition { - armonik::sessions::filter::Condition::Date(filter_date) => { - Ok(filter_match_date(value.purged_at, filter_date)) - } - condition => Err(Status::invalid_argument(format!( - "Condition {condition:?} is not valid for the field PurgedAt" - ))), - }, - armonik::sessions::RawField::DeletedAt => match &condition.condition { - armonik::sessions::filter::Condition::Date(filter_date) => { - Ok(filter_match_date(value.deleted_at, filter_date)) - } - condition => Err(Status::invalid_argument(format!( - "Condition {condition:?} is not valid for the field DeletedAt" - ))), - }, - armonik::sessions::RawField::Duration => match &condition.condition { - armonik::sessions::filter::Condition::Duration(filter_duration) => { - Ok(filter_match_duration(value.duration, filter_duration)) - } - condition => Err(Status::invalid_argument(format!( - "Condition {condition:?} is not valid for the field Duration" - ))), - }, - }, - armonik::sessions::Field::TaskOption(task_option_field) => match task_option_field { - armonik::TaskOptionField::Unspecified => { - Err(Status::invalid_argument("Filter field is not set")) - } - armonik::TaskOptionField::MaxDuration => match &condition.condition { - armonik::sessions::filter::Condition::Duration(filter_duration) => { - Ok(filter_match_duration(Some(value.default_task_options.max_duration), filter_duration)) - } - condition => Err(Status::invalid_argument(format!( - "Condition {condition:?} is not valid for the field DefaultTaskOptions.MaxDuration" - ))), - }, - armonik::TaskOptionField::MaxRetries => match &condition.condition { - armonik::sessions::filter::Condition::Number(filter_number) => { - Ok(filter_match_number(value.default_task_options.max_retries as i64, filter_number)) - } - condition => Err(Status::invalid_argument(format!( - "Condition {condition:?} is not valid for the field DefaultTaskOptions.MaxRetries" - ))), - }, - armonik::TaskOptionField::Priority => match &condition.condition { - armonik::sessions::filter::Condition::Number(filter_number) => { - Ok(filter_match_number(value.default_task_options.priority as i64, filter_number)) - } - condition => Err(Status::invalid_argument(format!( - "Condition {condition:?} is not valid for the field DefaultTaskOptions.Priority" - ))), - }, - armonik::TaskOptionField::PartitionId => match &condition.condition { - armonik::sessions::filter::Condition::String(filter_string) => { - Ok(filter_match_string(&value.default_task_options.partition_id, filter_string)) - } - condition => Err(Status::invalid_argument(format!( - "Condition {condition:?} is not valid for the field DefaultTaskOptions.PartitionId" - ))), - }, - armonik::TaskOptionField::ApplicationName => match &condition.condition { - armonik::sessions::filter::Condition::String(filter_string) => { - Ok(filter_match_string(&value.default_task_options.application_name, filter_string)) - } - condition => Err(Status::invalid_argument(format!( - "Condition {condition:?} is not valid for the field DefaultTaskOptions.ApplicationName" - ))), - }, - armonik::TaskOptionField::ApplicationVersion => match &condition.condition { - armonik::sessions::filter::Condition::String(filter_string) => { - Ok(filter_match_string(&value.default_task_options.application_version, filter_string)) - } - condition => Err(Status::invalid_argument(format!( - "Condition {condition:?} is not valid for the field 
DefaultTaskOptions.ApplicationVersion" - ))), - }, - armonik::TaskOptionField::ApplicationNamespace => match &condition.condition { - armonik::sessions::filter::Condition::String(filter_string) => { - Ok(filter_match_string(&value.default_task_options.application_namespace, filter_string)) - } - condition => Err(Status::invalid_argument(format!( - "Condition {condition:?} is not valid for the field DefaultTaskOptions.ApplicationNamespace" - ))), - }, - armonik::TaskOptionField::ApplicationService => match &condition.condition { - armonik::sessions::filter::Condition::String(filter_string) => { - Ok(filter_match_string(&value.default_task_options.application_service, filter_string)) - } - condition => Err(Status::invalid_argument(format!( - "Condition {condition:?} is not valid for the field DefaultTaskOptions.ApplicationService" - ))), - }, - armonik::TaskOptionField::ApplicationEngine => match &condition.condition { - armonik::sessions::filter::Condition::String(filter_string) => { - Ok(filter_match_string(&value.default_task_options.engine_type, filter_string)) - } - condition => Err(Status::invalid_argument(format!( - "Condition {condition:?} is not valid for the field DefaultTaskOptions.ApplicationEngine" - ))), - }, - }, - armonik::sessions::Field::TaskOptionGeneric(field) => match &condition.condition { - armonik::sessions::filter::Condition::String(filter_string) => { - if let Some(value) = value.default_task_options.options.get(field) { - Ok(filter_match_string(value, filter_string)) - } else { - Ok(false) - } - } - condition => Err(Status::invalid_argument(format!( - "Condition {condition:?} is not valid for the field DefaultTaskOptions.ApplicationEngine" - ))), - }, - } -} - -pub(crate) fn cmp_duration( - lhs: Option, - rhs: Option, -) -> Ordering { - match (lhs, rhs) { - (None, None) => Ordering::Equal, - (None, Some(_)) => Ordering::Less, - (Some(_), None) => Ordering::Greater, - (Some(lhs), Some(rhs)) => { - let cmp = lhs.seconds.cmp(&rhs.seconds); - if cmp.is_eq() { - return cmp; - } - lhs.nanos.cmp(&rhs.nanos) - } - } -} - -pub(crate) fn cmp_timestamp( - lhs: Option, - rhs: Option, -) -> Ordering { - match (lhs, rhs) { - (None, None) => Ordering::Equal, - (None, Some(_)) => Ordering::Less, - (Some(_), None) => Ordering::Greater, - (Some(lhs), Some(rhs)) => { - let cmp = lhs.seconds.cmp(&rhs.seconds); - if cmp.is_eq() { - return cmp; - } - lhs.nanos.cmp(&rhs.nanos) - } +impl IntoStatus for rusqlite::Error { + fn into_status(self) -> Status { + Status::failed_precondition(self.to_string()) } } From 2a001916ca160bca612eaf71499834397a385673 Mon Sep 17 00:00:00 2001 From: Florian Lemaitre Date: Thu, 2 Jan 2025 00:06:34 +0100 Subject: [PATCH 04/12] Concurrent streams --- load-balancer/src/cluster.rs | 10 +- load-balancer/src/main.rs | 9 +- load-balancer/src/service/applications.rs | 30 ++- load-balancer/src/service/events.rs | 4 +- load-balancer/src/service/mod.rs | 309 ++++++++++++++++------ load-balancer/src/service/partitions.rs | 32 ++- load-balancer/src/utils.rs | 30 +++ 7 files changed, 308 insertions(+), 116 deletions(-) diff --git a/load-balancer/src/cluster.rs b/load-balancer/src/cluster.rs index daba5ad..ae3fd8a 100644 --- a/load-balancer/src/cluster.rs +++ b/load-balancer/src/cluster.rs @@ -4,7 +4,8 @@ use armonik::reexports::{tokio_stream, tonic}; #[derive(Debug, Default, Clone)] pub struct Cluster { - endpoint: armonik::ClientConfig, + pub name: String, + pub endpoint: armonik::ClientConfig, } impl PartialEq for Cluster { @@ -30,8 +31,11 @@ impl Hash for Cluster { } 
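The theme of this patch (one request per cluster, polled concurrently, results consumed as they complete) boils down to the fan-out pattern below. This is a minimal sketch assuming futures::stream::FuturesUnordered as the driver, with a placeholder `fetch` standing in for `cluster.client()` plus the actual gRPC call; the real code in service/mod.rs additionally tracks per-cluster errors and merges streamed pages:

use futures::stream::{FuturesUnordered, StreamExt};

/// Illustrative only: stands in for `cluster.client().await` followed by a list call.
async fn fetch(cluster: String) -> (String, Result<usize, String>) {
    (cluster.clone(), Ok(cluster.len()))
}

async fn fan_out(clusters: Vec<String>) {
    // One in-flight request per cluster, all polled concurrently.
    let mut pending: FuturesUnordered<_> = clusters.into_iter().map(fetch).collect();

    // Results arrive in completion order, not submission order, so a slow or
    // unreachable cluster no longer delays the answers from the healthy ones.
    while let Some((cluster, result)) = pending.next().await {
        match result {
            Ok(n) => println!("{cluster}: {n} items"),
            Err(err) => eprintln!("{cluster}: {err}"),
        }
    }
}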
impl Cluster { - pub fn new(config: armonik::ClientConfig) -> Self { - Self { endpoint: config } + pub fn new(name: String, config: armonik::ClientConfig) -> Self { + Self { + name, + endpoint: config, + } } pub async fn client(&self) -> Result { diff --git a/load-balancer/src/main.rs b/load-balancer/src/main.rs index 9a2c465..45c907c 100644 --- a/load-balancer/src/main.rs +++ b/load-balancer/src/main.rs @@ -164,10 +164,11 @@ async fn main() -> Result<(), eyre::Report> { for (name, cluster_config) in conf.clusters { clusters.insert( - name, - cluster::Cluster::new(armonik::ClientConfig::from_config_args( - cluster_config.into(), - )?), + name.clone(), + cluster::Cluster::new( + name, + armonik::ClientConfig::from_config_args(cluster_config.into())?, + ), ); } diff --git a/load-balancer/src/service/applications.rs b/load-balancer/src/service/applications.rs index 935444b..ef41400 100644 --- a/load-balancer/src/service/applications.rs +++ b/load-balancer/src/service/applications.rs @@ -6,7 +6,7 @@ use armonik::{ server::ApplicationsService, }; -use crate::utils::{run_with_cancellation, IntoStatus}; +use crate::utils::{merge_streams, run_with_cancellation, IntoStatus}; use super::Service; @@ -30,16 +30,26 @@ impl ApplicationsService for Service { run_with_cancellation! { use cancellation_token; - for cluster in self.clusters.values() { - let client = cluster.client().await.map_err(IntoStatus::into_status)?; - let stream = client - .get_all_applications(request.filters.clone(), request.sort.clone()) - .await?; + let streams = self.clusters.values().map(|cluster| { + let request = request.clone(); + Box::pin(async_stream::stream! { + let stream = cluster + .client() + .await + .map_err(IntoStatus::into_status)? + .get_all_applications(request.filters.clone(), request.sort.clone()) + .await?; + let mut stream = std::pin::pin!(stream); - let mut stream = std::pin::pin!(stream); - while let Some(chunk) = stream.try_next().await? { - applications.extend(chunk); - } + while let Some(item) = stream.next().await { + yield item; + } + }) + }); + let mut streams = std::pin::pin!(merge_streams(streams)); + + while let Some(chunk) = streams.try_next().await? { + applications.extend(chunk); } if !request.sort.fields.is_empty() { diff --git a/load-balancer/src/service/events.rs b/load-balancer/src/service/events.rs index 93d0280..afca033 100644 --- a/load-balancer/src/service/events.rs +++ b/load-balancer/src/service/events.rs @@ -45,11 +45,11 @@ impl EventsService for Service { .await .map_err(IntoStatus::into_status)?; - let stream = async_stream::try_stream! { + let stream = async_stream::stream! 
{ let mut stream = std::pin::pin!(stream); while let Some(Some(event)) = cancellation_token.run_until_cancelled(stream.next()).await { - yield event.map_err(IntoStatus::into_status)?; + yield event.map_err(IntoStatus::into_status); } }; diff --git a/load-balancer/src/service/mod.rs b/load-balancer/src/service/mod.rs index 739e00c..be46af4 100644 --- a/load-balancer/src/service/mod.rs +++ b/load-balancer/src/service/mod.rs @@ -10,7 +10,11 @@ use tokio_rusqlite::Connection; use armonik::reexports::{tokio::sync::RwLock, tokio_stream::StreamExt, tonic::Status}; -use crate::{async_pool::AsyncPool, cluster::Cluster, utils::IntoStatus}; +use crate::{ + async_pool::AsyncPool, + cluster::Cluster, + utils::{merge_streams, IntoStatus}, +}; mod applications; mod auth; @@ -156,7 +160,7 @@ impl Service { ) -> Result, Vec>, Status> { let mut missing_ids: HashSet<_> = session_ids.iter().copied().map(String::from).collect(); - let (mapping, missing_ids) = self.db.call(move |conn| { + let (mapping, mut missing_ids) = self.db.call(move |conn| { let mut mapping = HashMap::>::new(); let mut stmt = conn.prepare_cached("SELECT session_id, cluster FROM session WHERE session_id IN (SELECT e.value FROM json_each(?) e)")?; @@ -183,7 +187,7 @@ impl Service { if !missing_ids.is_empty() { let filter = missing_ids - .into_iter() + .iter() .map(|session_id| { [armonik::sessions::filter::Field { field: armonik::sessions::Field::Raw( @@ -191,7 +195,7 @@ impl Service { ), condition: armonik::sessions::filter::Condition::String( armonik::FilterString { - value: session_id, + value: session_id.clone(), operator: armonik::FilterStringOperator::Equal, }, ), @@ -199,32 +203,63 @@ impl Service { }) .collect::>(); - for (cluster_name, cluster) in &self.clusters { - let cluster_name = cluster_name.clone(); - let sessions = cluster - .client() - .await - .map_err(IntoStatus::into_status)? - .sessions() - .list( - filter.clone(), - Default::default(), - true, - 0, - filter.len() as i32, - ) - .await - .map_err(IntoStatus::into_status)? 
- .sessions; - - if !sessions.is_empty() { - let cluster_mapping = mapping.entry(cluster.clone()).or_default(); - for session in &sessions { - cluster_mapping.push(session.session_id.clone()); + let mut list_all = self + .clusters + .values() + .map(|cluster| async { + let client = match cluster.client().await { + Ok(client) => client, + Err(err) => return (cluster.clone(), Err(IntoStatus::into_status(err))), + }; + let response = match client + .sessions() + .list( + filter.clone(), + Default::default(), + true, + 0, + filter.len() as i32, + ) + .await + { + Ok(response) => response, + Err(err) => return (cluster.clone(), Err(IntoStatus::into_status(err))), + }; + (cluster.clone(), Ok(response.sessions)) + }) + .collect::>(); + + let mut errors = Vec::new(); + while let Some((cluster, list)) = list_all.next().await { + match list { + Ok(sessions) => { + if !sessions.is_empty() { + let cluster_mapping = mapping.entry(cluster.clone()).or_default(); + for session in &sessions { + missing_ids.remove(&session.session_id); + cluster_mapping.push(session.session_id.clone()); + } + + self.add_sessions(sessions, cluster.name.clone()).await?; + } } + Err(err) => { + errors.push((cluster, err)); + } + } + } - self.add_sessions(sessions, cluster_name).await?; + if !missing_ids.is_empty() { + let mut message = String::new(); + let mut sep = ""; + for (cluster, error) in errors { + let cluster_name = &cluster.name; + message.push_str(&format!( + "{sep}Error while fetching sessions from cluster {cluster_name}: {error}" + )); + sep = "\n"; } + return Err(Status::unavailable(message)); } } @@ -244,7 +279,7 @@ impl Service { &'a self, result_ids: &[&str], ) -> Result, Vec>, Status> { - let mut missing_ids = Vec::new(); + let mut missing_ids = HashSet::new(); let mut mapping = HashMap::, Vec>::new(); { @@ -261,7 +296,7 @@ impl Service { } } } else { - missing_ids.push(result_id); + missing_ids.insert(result_id); } } } @@ -282,28 +317,60 @@ impl Service { }) .collect::>(); - for cluster in self.clusters.values() { - let results = cluster - .client() - .await - .map_err(IntoStatus::into_status)? - .results() - .list(filter.clone(), Default::default(), 0, filter.len() as i32) - .await - .map_err(IntoStatus::into_status)? 
- .results; - - if !results.is_empty() { - let cluster_mapping = mapping.entry(cluster.clone()).or_default(); - let mut guard = self.mapping_result.write().await; - for result in results { - guard - .entry(result.result_id.clone()) - .or_insert_with(|| cluster.clone()); - cluster_mapping.push(result.result_id); + let mut list_all = self + .clusters + .values() + .map(|cluster| async { + let client = match cluster.client().await { + Ok(client) => client, + Err(err) => return (cluster.clone(), Err(IntoStatus::into_status(err))), + }; + let response = match client + .results() + .list(filter.clone(), Default::default(), 0, filter.len() as i32) + .await + { + Ok(response) => response, + Err(err) => return (cluster.clone(), Err(IntoStatus::into_status(err))), + }; + (cluster.clone(), Ok(response.results)) + }) + .collect::>(); + + let mut errors = Vec::new(); + while let Some((cluster, list)) = list_all.next().await { + match list { + Ok(results) => { + if !results.is_empty() { + let cluster_mapping = mapping.entry(cluster.clone()).or_default(); + let mut guard = self.mapping_result.write().await; + for result in &results { + missing_ids.remove(result.result_id.as_str()); + cluster_mapping.push(result.result_id.clone()); + guard + .entry(result.result_id.clone()) + .or_insert_with(|| cluster.clone()); + } + } + } + Err(err) => { + errors.push((cluster, err)); } } } + + if !missing_ids.is_empty() { + let mut message = String::new(); + let mut sep = ""; + for (cluster, error) in errors { + let cluster_name = &cluster.name; + message.push_str(&format!( + "{sep}Error while fetching results from cluster {cluster_name}: {error}" + )); + sep = "\n"; + } + return Err(Status::unavailable(message)); + } } Ok(mapping) @@ -322,7 +389,7 @@ impl Service { &'a self, task_ids: &[&str], ) -> Result, Vec>, Status> { - let mut missing_ids = Vec::new(); + let mut missing_ids = HashSet::new(); let mut mapping = HashMap::, Vec>::new(); { @@ -339,7 +406,7 @@ impl Service { } } } else { - missing_ids.push(task_id); + missing_ids.insert(task_id); } } } @@ -360,34 +427,66 @@ impl Service { }) .collect::>(); - for cluster in self.clusters.values() { - let tasks = cluster - .client() - .await - .map_err(IntoStatus::into_status)? - .tasks() - .list( - filter.clone(), - Default::default(), - false, - 0, - filter.len() as i32, - ) - .await - .map_err(IntoStatus::into_status)? 
- .tasks; - - if !tasks.is_empty() { - let cluster_mapping = mapping.entry(cluster.clone()).or_default(); - let mut guard = self.mapping_task.write().await; - for task in tasks { - guard - .entry(task.task_id.clone()) - .or_insert_with(|| cluster.clone()); - cluster_mapping.push(task.task_id); + let mut list_all = self + .clusters + .values() + .map(|cluster| async { + let client = match cluster.client().await { + Ok(client) => client, + Err(err) => return (cluster.clone(), Err(IntoStatus::into_status(err))), + }; + let response = match client + .tasks() + .list( + filter.clone(), + Default::default(), + false, + 0, + filter.len() as i32, + ) + .await + { + Ok(response) => response, + Err(err) => return (cluster.clone(), Err(IntoStatus::into_status(err))), + }; + (cluster.clone(), Ok(response.tasks)) + }) + .collect::>(); + + let mut errors = Vec::new(); + while let Some((cluster, list)) = list_all.next().await { + match list { + Ok(tasks) => { + if !tasks.is_empty() { + let cluster_mapping = mapping.entry(cluster.clone()).or_default(); + let mut guard = self.mapping_task.write().await; + for task in &tasks { + missing_ids.remove(task.task_id.as_str()); + cluster_mapping.push(task.task_id.clone()); + guard + .entry(task.task_id.clone()) + .or_insert_with(|| cluster.clone()); + } + } + } + Err(err) => { + errors.push((cluster, err)); } } } + + if !missing_ids.is_empty() { + let mut message = String::new(); + let mut sep = ""; + for (cluster, error) in errors { + let cluster_name = &cluster.name; + message.push_str(&format!( + "{sep}Error while fetching tasks from cluster {cluster_name}: {error}" + )); + sep = "\n"; + } + return Err(Status::unavailable(message)); + } } Ok(mapping) @@ -403,19 +502,57 @@ impl Service { } pub async fn update_sessions(&self) -> Result<(), Status> { - for (name, cluster) in &self.clusters { - log::debug!("Refreshing sessions from {}\n {:?}", name, cluster); - let mut stream = std::pin::pin!( - cluster - .client() - .await - .map_err(IntoStatus::into_status)? + let streams = self.clusters.values().map(|cluster| { + Box::pin(async_stream::stream! { + let client = match cluster.client().await.map_err(IntoStatus::into_status) { + Ok(client) => client, + Err(err) => { + yield (cluster.clone(), Err(err)); + return; + } + }; + let stream = match client .get_all_sessions(Default::default(), Default::default()) - .await? - ); + .await + { + Ok(stream) => stream, + Err(err) => { + yield (cluster.clone(), Err(err)); + return; + } + }; + let mut stream = std::pin::pin!(stream); + + while let Some(response) = stream.next().await { + match response { + Ok(response) => yield (cluster.clone(), Result::<_, Status>::Ok(response)), + Err(err) => { + yield (cluster.clone(), Err(err)); + return; + } + } + } + }) + }); - while let Some(chunk) = stream.try_next().await? 
{ - self.add_sessions(chunk, name.clone()).await?; + let mut streams = std::pin::pin!(merge_streams(streams)); + + while let Some((cluster, response)) = streams.next().await { + match response { + Ok(chunk) => { + if let Err(err) = self.add_sessions(chunk, cluster.name.clone()).await { + log::error!( + "Could not record sessions from cluster {}: {}", + cluster.name, + err + ) + } + } + Err(err) => log::error!( + "Could not fetch sessions from cluster {}: {}", + cluster.name, + err + ), } } diff --git a/load-balancer/src/service/partitions.rs b/load-balancer/src/service/partitions.rs index fbcc1a5..0d744e6 100644 --- a/load-balancer/src/service/partitions.rs +++ b/load-balancer/src/service/partitions.rs @@ -6,7 +6,7 @@ use armonik::{ server::PartitionsService, }; -use crate::utils::{run_with_cancellation, IntoStatus}; +use crate::utils::{merge_streams, run_with_cancellation, IntoStatus}; use super::Service; @@ -30,16 +30,26 @@ impl PartitionsService for Service { run_with_cancellation! { use cancellation_token; - for cluster in self.clusters.values() { - let client = cluster.client().await - .map_err(IntoStatus::into_status)?; - let stream = - client.get_all_partitions(request.filters.clone(), request.sort.clone()).await?; - - let mut stream = std::pin::pin!(stream); - while let Some(chunk) = stream.try_next().await? { - partitions.extend(chunk); - } + let streams = self.clusters.values().map(|cluster| { + let request = request.clone(); + Box::pin(async_stream::stream! { + let stream = cluster + .client() + .await + .map_err(IntoStatus::into_status)? + .get_all_partitions(request.filters.clone(), request.sort.clone()) + .await?; + let mut stream = std::pin::pin!(stream); + + while let Some(item) = stream.next().await { + yield item; + } + }) + }); + let mut streams = std::pin::pin!(merge_streams(streams)); + + while let Some(chunk) = streams.try_next().await? { + partitions.extend(chunk); } match &request.sort.field { diff --git a/load-balancer/src/utils.rs b/load-balancer/src/utils.rs index e4717fd..05aa5d8 100644 --- a/load-balancer/src/utils.rs +++ b/load-balancer/src/utils.rs @@ -13,6 +13,7 @@ macro_rules! run_with_cancellation { } } } +use futures::{stream::futures_unordered, Stream, StreamExt}; pub(crate) use run_with_cancellation; macro_rules! impl_unary { @@ -86,3 +87,32 @@ impl IntoStatus for rusqlite::Error { Status::failed_precondition(self.to_string()) } } + +async fn stream_next(mut stream: S) -> (S, Option<::Item>) +where + ::Item: 'static, +{ + let res = stream.next().await; + (stream, res) +} + +pub fn merge_streams<'a, S>( + streams: impl IntoIterator, +) -> impl Stream::Item> + 'a +where + S: Stream + Unpin + 'a, + ::Item: 'static, +{ + let mut futures = futures_unordered::FuturesUnordered::new(); + for stream in streams { + futures.push(stream_next(stream)); + } + async_stream::stream! 
{ + while let Some((stream, res)) = futures.next().await { + if let Some(item) = res { + futures.push(stream_next(stream)); + yield item; + } + } + } +} From d74ecbf26719a3ddc7d441e6ecc7c4972e2d6f14 Mon Sep 17 00:00:00 2001 From: Florian Lemaitre Date: Thu, 2 Jan 2025 00:24:30 +0100 Subject: [PATCH 05/12] Concurrent count and cancel --- load-balancer/src/service/tasks.rs | 98 +++++++++++++++++++----------- 1 file changed, 61 insertions(+), 37 deletions(-) diff --git a/load-balancer/src/service/tasks.rs b/load-balancer/src/service/tasks.rs index b74e6f3..489cefe 100644 --- a/load-balancer/src/service/tasks.rs +++ b/load-balancer/src/service/tasks.rs @@ -1,10 +1,11 @@ use std::{collections::HashMap, sync::Arc}; use armonik::{ - reexports::{tokio_util, tonic}, + reexports::{tokio_stream::StreamExt, tokio_util, tonic}, server::TasksService, tasks, }; +use futures::stream::FuturesUnordered; use crate::utils::{run_with_cancellation, IntoStatus}; @@ -167,20 +168,27 @@ impl TasksService for Service { run_with_cancellation! { use cancellation_token; + let mut futures = self + .clusters + .values() + .map(|cluster| async { + Result::<_, tonic::Status>::Ok( + cluster + .client() + .await + .map_err(IntoStatus::into_status)? + .tasks() + .call(request.clone()) + .await + .map_err(IntoStatus::into_status)? + .tasks, + ) + }) + .collect::>(); + let mut tasks = Vec::new(); - for cluster in self.clusters.values() { - tasks.extend( - cluster - .client() - .await - .map_err(IntoStatus::into_status)? - .tasks() - .call(request.clone()) - .await - .map_err(IntoStatus::into_status)? - .tasks - .into_iter(), - ); + while let Some(chunk) = futures.try_next().await? { + tasks.extend(chunk.into_iter()) } Ok(tasks::cancel::Response { tasks }) @@ -195,18 +203,27 @@ impl TasksService for Service { run_with_cancellation! { use cancellation_token; + let mut futures = self + .clusters + .values() + .map(|cluster| async { + Result::<_, tonic::Status>::Ok( + cluster + .client() + .await + .map_err(IntoStatus::into_status)? + .tasks() + .call(request.clone()) + .await + .map_err(IntoStatus::into_status)? + .task_results, + ) + }) + .collect::>(); + let mut task_results = HashMap::>::new(); - for cluster in self.clusters.values() { - let response = cluster - .client() - .await - .map_err(IntoStatus::into_status)? - .tasks() - .call(request.clone()) - .await - .map_err(IntoStatus::into_status)?; - - for (task_id, result_ids) in response.task_results { + while let Some(response) = futures.try_next().await? { + for (task_id, result_ids) in response { task_results.entry(task_id).or_default().extend(result_ids); } } @@ -223,19 +240,26 @@ impl TasksService for Service { run_with_cancellation! { use cancellation_token; - let mut status = HashMap::::new(); - - for cluster in self.clusters.values() { - let response = cluster - .client() - .await - .map_err(IntoStatus::into_status)? - .tasks() - .call(request.clone()) - .await - .map_err(IntoStatus::into_status)? - .status; + let mut futures = self + .clusters + .values() + .map(|cluster| async { + Result::<_, tonic::Status>::Ok( + cluster + .client() + .await + .map_err(IntoStatus::into_status)? + .tasks() + .call(request.clone()) + .await + .map_err(IntoStatus::into_status)? + .status, + ) + }) + .collect::>(); + let mut status = HashMap::::new(); + while let Some(response) = futures.try_next().await? 
{ for count in response { *status.entry(count.status).or_default() += count.count; } From 62fd5dfc261e45e750817e1c35d1d2d6dfcfa82e Mon Sep 17 00:00:00 2001 From: Florian Lemaitre Date: Thu, 2 Jan 2025 10:20:38 +0100 Subject: [PATCH 06/12] Connection pool --- Cargo.lock | 1 + load-balancer/Cargo.toml | 1 + load-balancer/src/cluster.rs | 67 +++++++++++++++++++++++----- load-balancer/src/main.rs | 5 ++- load-balancer/src/service/events.rs | 10 +++-- load-balancer/src/service/results.rs | 20 +++------ load-balancer/src/service/tasks.rs | 40 +++++++---------- load-balancer/src/utils.rs | 6 +-- 8 files changed, 94 insertions(+), 56 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ac1c8c9..bd2e2af 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1516,6 +1516,7 @@ dependencies = [ "tokio", "tokio-rusqlite", "tower-http", + "tracing", ] [[package]] diff --git a/load-balancer/Cargo.toml b/load-balancer/Cargo.toml index 8d611e0..ead837f 100644 --- a/load-balancer/Cargo.toml +++ b/load-balancer/Cargo.toml @@ -19,3 +19,4 @@ rusqlite = { version = "0.32", features = ["bundled"] } tokio-rusqlite = { version = "0.6", features = ["bundled"] } lockfree-object-pool = "0.1.6" serde_json = "1.0" +tracing = { version = "0.1", features = ["log"] } diff --git a/load-balancer/src/cluster.rs b/load-balancer/src/cluster.rs index ae3fd8a..3c2a234 100644 --- a/load-balancer/src/cluster.rs +++ b/load-balancer/src/cluster.rs @@ -1,11 +1,27 @@ -use std::{hash::Hash, ops::Deref}; +use std::{hash::Hash, ops::Deref, sync::Arc}; use armonik::reexports::{tokio_stream, tonic}; +use lockfree_object_pool::LinearReusable; -#[derive(Debug, Default, Clone)] +use crate::{ + async_pool::{AsyncPool, PoolAwaitable}, + ref_guard::RefGuard, +}; + +#[derive(Clone)] pub struct Cluster { pub name: String, pub endpoint: armonik::ClientConfig, + pub pool: Arc>>, +} + +impl std::fmt::Debug for Cluster { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Cluster") + .field("name", &self.name) + .field("endpoint", &self.endpoint) + .finish() + } } impl PartialEq for Cluster { @@ -35,19 +51,49 @@ impl Cluster { Self { name, endpoint: config, + pool: Arc::new(AsyncPool::new(|| async { None })), } } pub async fn client(&self) -> Result { - Ok(ClusterClient( - armonik::Client::with_config(self.endpoint.clone()).await?, - )) + let client = self + .pool + .pull() + .await + .map_async(|reference| async move { + match reference { + Some(x) => Ok(x), + None => { + log::debug!( + "Creating new client for cluster {}: {:?}", + self.name, + self.endpoint + ); + + let endpoint = self.endpoint.clone(); + + // Somehow, armonik::Client::with_config() is not Send + // So we starting a blocking executor that blocks on the future to ensure it stays on the same thread + tokio::task::spawn_blocking(move || { + futures::executor::block_on(armonik::Client::with_config(endpoint)) + }) + .await + .unwrap() + .map(|client| reference.insert(client)) + } + } + }) + .await + .into_result()?; + Ok(ClusterClient(client)) } } -pub struct ClusterClient(armonik::Client); +pub struct ClusterClient<'a>( + RefGuard>>, &'a mut armonik::Client>, +); -impl Deref for ClusterClient { +impl Deref for ClusterClient<'_> { type Target = armonik::Client; fn deref(&self) -> &Self::Target { @@ -55,13 +101,13 @@ impl Deref for ClusterClient { } } -impl AsRef for ClusterClient { +impl AsRef for ClusterClient<'_> { fn as_ref(&self) -> &armonik::Client { &self.0 } } -impl ClusterClient { +impl ClusterClient<'_> { pub async fn get_all_sessions( &self, 
filters: armonik::sessions::filter::Or, @@ -84,8 +130,7 @@ impl ClusterClient { page_index, page_size, ) - .await - .map_err(crate::utils::IntoStatus::into_status)?; + .await.unwrap(); if page.sessions.is_empty() { break; diff --git a/load-balancer/src/main.rs b/load-balancer/src/main.rs index 45c907c..3b96377 100644 --- a/load-balancer/src/main.rs +++ b/load-balancer/src/main.rs @@ -140,7 +140,10 @@ async fn wait_terminate() { #[tokio::main] async fn main() -> Result<(), eyre::Report> { - env_logger::init(); + env_logger::builder() + .filter_module("tracing", log::LevelFilter::Info) + .parse_default_env() + .init(); let cli = Cli::parse(); diff --git a/load-balancer/src/service/events.rs b/load-balancer/src/service/events.rs index afca033..c78462a 100644 --- a/load-balancer/src/service/events.rs +++ b/load-balancer/src/service/events.rs @@ -31,16 +31,18 @@ impl EventsService for Service { returned_events, } = request; - let client = self + let mut client = self .get_cluster_from_session(&session_id) .await? - .ok_or_else(|| tonic::Status::not_found(format!("Session {} was not found", session_id)))? + .ok_or_else(|| { + tonic::Status::not_found(format!("Session {} was not found", session_id)) + })? .client() .await - .map_err(IntoStatus::into_status)?; + .map_err(IntoStatus::into_status)? + .events(); let stream = client - .events() .subscribe(session_id, task_filters, result_filters, returned_events) .await .map_err(IntoStatus::into_status)?; diff --git a/load-balancer/src/service/results.rs b/load-balancer/src/service/results.rs index 0bc3156..b4cf748 100644 --- a/load-balancer/src/service/results.rs +++ b/load-balancer/src/service/results.rs @@ -63,8 +63,7 @@ impl ResultsService for Service { self.get_cluster_from_results(&requested_results) ); - let (mut sessions, mut results) = - (sessions?.into_iter(), results?.into_iter()); + let (mut sessions, mut results) = (sessions?.into_iter(), results?.into_iter()); let cluster = match (sessions.next(), results.next()) { (None, None) => { @@ -95,20 +94,15 @@ impl ResultsService for Service { } } - match cluster + let mut client = cluster .client() .await .map_err(IntoStatus::into_status)? - .results() - .call(request) - .await - { - Ok(response) => Ok(response), - Err(err) => match err { - armonik::client::RequestError::Grpc { source, .. } => Err(*source), - err => Err(tonic::Status::internal(err.to_string())), - }, - } + .results(); + client.call(request).await.map_err(|err| match err { + armonik::client::RequestError::Grpc { source, .. } => *source, + err => tonic::Status::internal(err.to_string()), + }) } } diff --git a/load-balancer/src/service/tasks.rs b/load-balancer/src/service/tasks.rs index 489cefe..df3daa9 100644 --- a/load-balancer/src/service/tasks.rs +++ b/load-balancer/src/service/tasks.rs @@ -27,7 +27,8 @@ impl TasksService for Service { for field in and { if let armonik::tasks::filter::Field { - field: armonik::tasks::Field::Summary(armonik::tasks::SummaryField::SessionId), + field: + armonik::tasks::Field::Summary(armonik::tasks::SummaryField::SessionId), condition: armonik::tasks::filter::Condition::String(armonik::FilterString { value, @@ -65,20 +66,15 @@ impl TasksService for Service { )); } - match cluster + let mut client = cluster .client() .await .map_err(IntoStatus::into_status)? - .tasks() - .call(request) - .await - { - Ok(response) => Ok(response), - Err(err) => match err { - armonik::client::RequestError::Grpc { source, .. 
} => Err(*source), - err => Err(tonic::Status::internal(err.to_string())), - }, - } + .tasks(); + client.call(request).await.map_err(|err| match err { + armonik::client::RequestError::Grpc { source, .. } => *source, + err => tonic::Status::internal(err.to_string()), + }) } } @@ -97,7 +93,8 @@ impl TasksService for Service { for field in and { if let armonik::tasks::filter::Field { - field: armonik::tasks::Field::Summary(armonik::tasks::SummaryField::SessionId), + field: + armonik::tasks::Field::Summary(armonik::tasks::SummaryField::SessionId), condition: armonik::tasks::filter::Condition::String(armonik::FilterString { value, @@ -135,20 +132,15 @@ impl TasksService for Service { )); } - match cluster + let mut client = cluster .client() .await .map_err(IntoStatus::into_status)? - .tasks() - .call(request) - .await - { - Ok(response) => Ok(response), - Err(err) => match err { - armonik::client::RequestError::Grpc { source, .. } => Err(*source), - err => Err(tonic::Status::internal(err.to_string())), - }, - } + .tasks(); + client.call(request).await.map_err(|err| match err { + armonik::client::RequestError::Grpc { source, .. } => *source, + err => tonic::Status::internal(err.to_string()), + }) } } diff --git a/load-balancer/src/utils.rs b/load-balancer/src/utils.rs index 05aa5d8..c68b5bf 100644 --- a/load-balancer/src/utils.rs +++ b/load-balancer/src/utils.rs @@ -38,12 +38,12 @@ macro_rules! impl_unary { ))); }; - cluster + let mut client = cluster .client() .await .map_err(crate::utils::IntoStatus::into_status)? - .$service() - .call($request) + .$service(); + client.call($request) .await .map_err(crate::utils::IntoStatus::into_status) } From 4ba6087bd8814b1ac9f4de37a5c8e0cb9f2f2037 Mon Sep 17 00:00:00 2001 From: Florian Lemaitre Date: Thu, 2 Jan 2025 14:10:29 +0100 Subject: [PATCH 07/12] Add Dockerfile for load balancer --- .github/workflows/build.yml | 22 +++++++++++++++------ .github/workflows/release.yml | 1 + load-balancer/Dockerfile | 33 ++++++++++++++++++++++++++++++++ load-balancer/src/main.rs | 1 + load-balancer/src/service/mod.rs | 12 ++++++------ pdc-update/Dockerfile | 3 +-- 6 files changed, 58 insertions(+), 14 deletions(-) create mode 100644 load-balancer/Dockerfile diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 49b89f0..f66ce60 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -42,6 +42,7 @@ jobs: fail-fast: false matrix: image: + - { path: load-balancer, name: armonik_load_balancer, platforms: "linux/amd64,linux/arm64" } - { path: pdc-update, name: armonik_pdc_update, platforms: "linux/amd64,linux/arm64" } steps: - name: Checkout @@ -58,6 +59,11 @@ jobs: username: ${{ secrets.DOCKER_HUB_LOGIN }} password: ${{ secrets.DOCKER_HUB_TOKEN }} + - name: Cargo Lock + working-directory: "${{ matrix.image.path }}" + run: | + [ -e Cargo.lock ] || cp ../Cargo.lock . 
+ - name: Build and push uses: docker/build-push-action@v4 with: @@ -74,6 +80,7 @@ jobs: matrix: project: - pdc-update + - load-balancer target: # Linux Musl - { platform: aarch64-unknown-linux-musl, os: linux, runner: ubuntu-latest, run: true } @@ -88,6 +95,9 @@ jobs: toolchain: - stable - nightly + exclude: + - project: load-balancer + target: { platform: i686-pc-windows-msvc, os: windows, runner: windows-latest, run: true } runs-on: ${{ matrix.target.runner }} steps: - name: Checkout @@ -121,26 +131,26 @@ jobs: - name: Build working-directory: "${{ matrix.project }}" run: | - cargo build --all --locked + cargo build --locked - name: Test if: matrix.target.run working-directory: "${{ matrix.project }}" run: | - cargo test --all --locked + cargo test --locked - name: Test Release if: matrix.target.run working-directory: "${{ matrix.project }}" run: | - cargo test --all --locked --release + cargo test --locked --release - name: Test Miri if: contains(matrix.toolchain, 'nightly') working-directory: "${{ matrix.project }}" run: | - cargo miri test --all --locked + cargo miri test --locked - name: Format working-directory: "${{ matrix.project }}" run: | - cargo fmt --all --check + cargo fmt --check - name: Doc if: matrix.target.os == 'linux' working-directory: "${{ matrix.project }}" @@ -162,4 +172,4 @@ jobs: - name: Clippy working-directory: "${{ matrix.project }}" run: | - cargo clippy --all --no-deps -- -Dwarnings -Dunused-crate-dependencies + cargo clippy --no-deps -- -Dwarnings -Dunused-crate-dependencies diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 34c36ad..e6b071a 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -88,6 +88,7 @@ jobs: fail-fast: false matrix: image: + - { path: load-balancer, name: armonik_load_balancer, platforms: "linux/amd64,linux/arm64" } - { path: pdc-update, name: armonik_pdc_update, platforms: "linux/amd64,linux/arm64" } steps: - name: Checkout diff --git a/load-balancer/Dockerfile b/load-balancer/Dockerfile new file mode 100644 index 0000000..9ab929a --- /dev/null +++ b/load-balancer/Dockerfile @@ -0,0 +1,33 @@ +FROM --platform=$BUILDPLATFORM rust as build + +RUN apt-get update && apt-get install -y musl-tools musl-dev protobuf-compiler ca-certificates && update-ca-certificates +RUN case "$TARGETARCH" in \ + amd64) RUST_PLATFORM=x86_64-unknown-linux-musl ;; \ + arm64) RUST_PLATFORM=aarch64-unknown-linux-musl ;; \ + "") case "$(uname -m)" in \ + x86_64) RUST_PLATFORM=x86_64-unknown-linux-musl ;; \ + aarch64) RUST_PLATFORM=aarch64-unknown-linux-musl ;; \ + *) exit 1 ;; \ + esac ;; \ + *) exit 1 ;; \ + esac ; \ + rustup target add "$RUST_PLATFORM" ; \ + mkdir .cargo ; \ + printf '[build]\ntarget="%s"\n' "$RUST_PLATFORM" > .cargo/config.toml + +WORKDIR /app + +RUN cargo init +COPY Cargo.* . + +RUN cargo build --release + +COPY src src +RUN touch src/main.rs && cargo build --release && mv target/*/release/load-balancer . 
+ +FROM scratch +COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt +COPY --from=build /app/load-balancer /load-balancer +USER 65534:65534 +ENTRYPOINT ["/load-balancer"] +CMD [] diff --git a/load-balancer/src/main.rs b/load-balancer/src/main.rs index 3b96377..3b25bbb 100644 --- a/load-balancer/src/main.rs +++ b/load-balancer/src/main.rs @@ -4,6 +4,7 @@ use armonik::reexports::tonic; use clap::Parser; use serde::{Deserialize, Serialize}; use tower_http::trace::TraceLayer; +use tracing as _; pub mod async_pool; pub mod cluster; diff --git a/load-balancer/src/service/mod.rs b/load-balancer/src/service/mod.rs index be46af4..f275b63 100644 --- a/load-balancer/src/service/mod.rs +++ b/load-balancer/src/service/mod.rs @@ -154,8 +154,8 @@ impl Service { .map_err(IntoStatus::into_status) } - pub async fn get_cluster_from_sessions<'a>( - &'a self, + pub async fn get_cluster_from_sessions( + &self, session_ids: &[&str], ) -> Result, Vec>, Status> { let mut missing_ids: HashSet<_> = session_ids.iter().copied().map(String::from).collect(); @@ -275,8 +275,8 @@ impl Service { Ok(sessions.into_keys().next()) } - pub async fn get_cluster_from_results<'a>( - &'a self, + pub async fn get_cluster_from_results( + &self, result_ids: &[&str], ) -> Result, Vec>, Status> { let mut missing_ids = HashSet::new(); @@ -385,8 +385,8 @@ impl Service { Ok(results.into_keys().next()) } - pub async fn get_cluster_from_tasks<'a>( - &'a self, + pub async fn get_cluster_from_tasks( + &self, task_ids: &[&str], ) -> Result, Vec>, Status> { let mut missing_ids = HashSet::new(); diff --git a/pdc-update/Dockerfile b/pdc-update/Dockerfile index c8786ac..a16aebf 100644 --- a/pdc-update/Dockerfile +++ b/pdc-update/Dockerfile @@ -19,8 +19,7 @@ WORKDIR /app RUN cargo init -COPY Cargo.toml . -COPY Cargo.lock . +COPY Cargo.* . 
RUN cargo build --release From 2a1e4aa7acc5c25a53286bd3a9d5e774d37257b5 Mon Sep 17 00:00:00 2001 From: Florian Lemaitre Date: Fri, 3 Jan 2025 18:39:08 +0100 Subject: [PATCH 08/12] Proper channel pool --- Cargo.lock | 143 +++++++- Cargo.toml | 4 + load-balancer/Cargo.toml | 11 +- load-balancer/src/async_pool.rs | 25 +- load-balancer/src/cluster.rs | 157 +++++---- load-balancer/src/main.rs | 49 +-- load-balancer/src/service/applications.rs | 130 ++++--- load-balancer/src/service/auth.rs | 55 ++- load-balancer/src/service/events.rs | 59 ++-- load-balancer/src/service/health_check.rs | 73 ++-- load-balancer/src/service/mod.rs | 43 ++- load-balancer/src/service/partitions.rs | 165 +++++---- load-balancer/src/service/results.rs | 252 ++++++------- load-balancer/src/service/sessions.rs | 80 +++-- load-balancer/src/service/submitter.rs | 412 ++++++++++------------ load-balancer/src/service/tasks.rs | 371 +++++++++---------- load-balancer/src/service/versions.rs | 55 ++- load-balancer/src/utils.rs | 40 +-- 18 files changed, 1105 insertions(+), 1019 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bd2e2af..ce5135b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -102,9 +102,8 @@ checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" [[package]] name = "armonik" -version = "3.21.0-beta-3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a932e18538d27280291cd13a265be9f6a234abf290dd1349ed6ccfe67f1cce1d" +version = "3.22.0-beta-0" +source = "git+https://github.com/aneoconsulting/ArmoniK.Api.git?rev=refs/pull/569/head#4ffb0b0b78b41192a2f9b05c3aa28bbd5e14859e" dependencies = [ "futures", "hyper", @@ -114,10 +113,10 @@ dependencies = [ "rustls", "snafu", "tokio", - "tokio-util", "tonic", "tonic-build", "tracing", + "tracing-futures", ] [[package]] @@ -1517,6 +1516,7 @@ dependencies = [ "tokio-rusqlite", "tower-http", "tracing", + "tracing-subscriber", ] [[package]] @@ -1541,6 +1541,15 @@ version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + [[package]] name = "matchit" version = "0.7.3" @@ -1601,6 +1610,16 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + [[package]] name = "num-traits" version = "0.2.19" @@ -1650,6 +1669,12 @@ dependencies = [ "hashbrown 0.14.5", ] +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + [[package]] name = "parking" version = "2.2.1" @@ -2006,8 +2031,17 @@ checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata", - "regex-syntax", + "regex-automata 0.4.9", + "regex-syntax 0.8.5", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", ] [[package]] @@ -2018,9 +2052,15 @@ checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", - "regex-syntax", + "regex-syntax 0.8.5", ] +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + [[package]] name = "regex-syntax" version = "0.8.5" @@ -2357,6 +2397,15 @@ dependencies = [ "digest", ] +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + [[package]] name = "shlex" version = "1.3.0" @@ -2543,6 +2592,16 @@ dependencies = [ "syn 2.0.90", ] +[[package]] +name = "thread_local" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +dependencies = [ + "cfg-if", + "once_cell", +] + [[package]] name = "tiny-keccak" version = "2.0.2" @@ -2825,6 +2884,48 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" dependencies = [ "once_cell", + "valuable", +] + +[[package]] +name = "tracing-futures" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" +dependencies = [ + "futures", + "futures-task", + "pin-project", + "tracing", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", ] [[package]] @@ -2898,6 +2999,12 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + [[package]] name = "vcpkg" version = "0.2.15" @@ -3034,6 +3141,28 @@ dependencies = [ "rustix", ] +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = 
"0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + [[package]] name = "windows-registry" version = "0.2.0" diff --git a/Cargo.toml b/Cargo.toml index e67a557..6d3962b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,3 +1,7 @@ [workspace] members = ["pdc-update", "load-balancer"] + +[profile.release-with-debug] +inherits = "release" +debug = true diff --git a/load-balancer/Cargo.toml b/load-balancer/Cargo.toml index ead837f..9489587 100644 --- a/load-balancer/Cargo.toml +++ b/load-balancer/Cargo.toml @@ -4,7 +4,11 @@ version = "0.1.0" edition = "2021" [dependencies] -armonik = { version = "3.21.0-beta-3", features = ["client", "server"] } +#armonik = { version = "3.21.0-beta-3", features = ["client", "server"] } +armonik = { git = "https://github.com/aneoconsulting/ArmoniK.Api.git", rev = "refs/pull/569/head", features = [ + "client", + "server", +] } async-stream = "0.3" futures = "0.3" config = "0.14" @@ -20,3 +24,8 @@ tokio-rusqlite = { version = "0.6", features = ["bundled"] } lockfree-object-pool = "0.1.6" serde_json = "1.0" tracing = { version = "0.1", features = ["log"] } +tracing-subscriber = { version = "0.3", features = ["env-filter", "fmt"] } + +[profile.release-with-debug] +inherits = "release" +debug = true diff --git a/load-balancer/src/async_pool.rs b/load-balancer/src/async_pool.rs index 8c0a627..5355d04 100644 --- a/load-balancer/src/async_pool.rs +++ b/load-balancer/src/async_pool.rs @@ -90,29 +90,36 @@ impl AsyncPool { } impl AsyncPool { - pub async fn execute_batch(&self, sql: &str) -> Result<(), rusqlite::Error> { + pub async fn execute_batch( + &self, + sql: &str, + span: tracing::Span, + ) -> Result<(), rusqlite::Error> { let sql = sql.to_owned(); - self.pull() - .await - .call_unwrap(move |conn| conn.execute_batch(&sql)) - .await + self.call(span, move |conn| conn.execute_batch(&sql)).await } pub async fn execute( &self, sql: &str, params: impl Params + Send + 'static, + span: tracing::Span, ) -> Result { let sql = sql.to_owned(); - self.pull() - .await - .call_unwrap(move |conn| conn.execute(&sql, params)) + self.call(span, move |conn| conn.execute(&sql, params)) .await } pub async fn call( &self, + span: tracing::Span, f: impl FnOnce(&mut rusqlite::Connection) -> Out + Send + 'static, ) -> Out { - self.pull().await.call_unwrap(f).await + self.pull() + .await + .call_unwrap(|conn| { + let _entered = span.entered(); + f(conn) + }) + .await } } diff --git a/load-balancer/src/cluster.rs b/load-balancer/src/cluster.rs index 3c2a234..f6e9091 100644 --- a/load-balancer/src/cluster.rs +++ b/load-balancer/src/cluster.rs @@ -1,4 +1,8 @@ -use std::{hash::Hash, ops::Deref, sync::Arc}; +use std::{ + hash::Hash, + ops::{Deref, DerefMut}, + sync::Arc, +}; use armonik::reexports::{tokio_stream, tonic}; use lockfree_object_pool::LinearReusable; @@ -56,6 +60,7 @@ impl Cluster { } pub async fn client(&self) -> Result { + let span = tracing::debug_span!("Cluster", name = self.name); let client = self .pool .pull() @@ -64,7 +69,7 @@ impl Cluster { match reference { Some(x) => Ok(x), None => { - log::debug!( + tracing::debug!( "Creating new client for cluster {}: {:?}", self.name, self.endpoint @@ -85,14 +90,17 @@ impl Cluster { }) .await .into_result()?; - Ok(ClusterClient(client)) + Ok(ClusterClient(client, span)) } } pub struct ClusterClient<'a>( RefGuard>>, &'a mut armonik::Client>, + tracing::Span, ); +unsafe impl Send for ClusterClient<'_> {} + impl Deref for 
ClusterClient<'_> { type Target = armonik::Client; @@ -100,6 +108,11 @@ impl Deref for ClusterClient<'_> { &self.0 } } +impl DerefMut for ClusterClient<'_> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} impl AsRef for ClusterClient<'_> { fn as_ref(&self) -> &armonik::Client { @@ -108,108 +121,120 @@ impl AsRef for ClusterClient<'_> { } impl ClusterClient<'_> { + pub fn span(&self) -> tracing::Span { + self.1.clone() + } pub async fn get_all_sessions( - &self, + &mut self, filters: armonik::sessions::filter::Or, sort: armonik::sessions::Sort, ) -> Result< - impl tokio_stream::Stream, tonic::Status>>, + impl tokio_stream::Stream, tonic::Status>> + '_, tonic::Status, > { let mut client = self.sessions(); let page_size = 1000; let mut page_index = 0; - Ok(async_stream::try_stream! { - loop { - let page = client - .list( - filters.clone(), - sort.clone(), - true, - page_index, - page_size, - ) - .await.unwrap(); - - if page.sessions.is_empty() { - break; - } + Ok(armonik::reexports::tracing_futures::Instrument::instrument( + async_stream::try_stream! { + loop { + let page = client + .list( + filters.clone(), + sort.clone(), + true, + page_index, + page_size, + ) + .await.unwrap(); + + if page.sessions.is_empty() { + break; + } - page_index += 1; + page_index += 1; - yield page.sessions; - } - }) + yield page.sessions; + } + }, + tracing::trace_span!("get_all_sessions"), + )) } pub async fn get_all_partitions( - &self, + &mut self, filters: armonik::partitions::filter::Or, sort: armonik::partitions::Sort, ) -> Result< - impl tokio_stream::Stream, tonic::Status>>, + impl tokio_stream::Stream, tonic::Status>> + '_, tonic::Status, > { let mut client = self.partitions(); let page_size = 1000; let mut page_index = 0; - Ok(async_stream::try_stream! { - loop { - let page = client - .list( - filters.clone(), - sort.clone(), - page_index, - page_size, - ) - .await - .map_err(crate::utils::IntoStatus::into_status)?; - - if page.partitions.is_empty() { - break; - } + Ok(armonik::reexports::tracing_futures::Instrument::instrument( + async_stream::try_stream! { + loop { + let page = client + .list( + filters.clone(), + sort.clone(), + page_index, + page_size, + ) + .await + .map_err(crate::utils::IntoStatus::into_status)?; + + if page.partitions.is_empty() { + break; + } - page_index += 1; + page_index += 1; - yield page.partitions; - } - }) + yield page.partitions; + } + }, + tracing::trace_span!("get_all_partitions"), + )) } pub async fn get_all_applications( - &self, + &mut self, filters: armonik::applications::filter::Or, sort: armonik::applications::Sort, ) -> Result< - impl tokio_stream::Stream, tonic::Status>>, + impl tokio_stream::Stream, tonic::Status>> + '_, tonic::Status, > { let mut client = self.applications(); let page_size = 1000; let mut page_index = 0; - Ok(async_stream::try_stream! { - loop { - let page = client - .list( - filters.clone(), - sort.clone(), - page_index, - page_size, - ) - .await - .map_err(crate::utils::IntoStatus::into_status)?; - - if page.applications.is_empty() { - break; - } + Ok(armonik::reexports::tracing_futures::Instrument::instrument( + async_stream::try_stream! 
{ + loop { + let page = client + .list( + filters.clone(), + sort.clone(), + page_index, + page_size, + ) + .await + .map_err(crate::utils::IntoStatus::into_status)?; + + if page.applications.is_empty() { + break; + } - page_index += 1; + page_index += 1; - yield page.applications; - } - }) + yield page.applications; + } + }, + tracing::trace_span!("get_all_applications"), + )) } } diff --git a/load-balancer/src/main.rs b/load-balancer/src/main.rs index 3b25bbb..5d1c10c 100644 --- a/load-balancer/src/main.rs +++ b/load-balancer/src/main.rs @@ -3,8 +3,8 @@ use std::{collections::HashMap, net::SocketAddr, sync::Arc}; use armonik::reexports::tonic; use clap::Parser; use serde::{Deserialize, Serialize}; -use tower_http::trace::TraceLayer; use tracing as _; +use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; pub mod async_pool; pub mod cluster; @@ -44,14 +44,14 @@ impl From for armonik::client::ClientConfigArgs { override_target_name, }: ClusterConfig, ) -> Self { - armonik::client::ClientConfigArgs { - endpoint, - cert_pem, - key_pem, - ca_cert, - allow_unsafe_connection, - override_target_name, - } + let mut args = armonik::client::ClientConfigArgs::default(); + args.endpoint = endpoint; + args.cert_pem = cert_pem; + args.key_pem = key_pem; + args.ca_cert = ca_cert; + args.allow_unsafe_connection = allow_unsafe_connection; + args.override_target_name = override_target_name; + args } } @@ -84,7 +84,7 @@ async fn wait_terminate() { for sig in [SignalKind::terminate(), SignalKind::interrupt()] { match signal(sig) { Ok(sig) => signals.push(sig), - Err(err) => log::error!("Could not register signal handler: {err}"), + Err(err) => tracing::error!("Could not register signal handler: {err}"), } } @@ -117,7 +117,7 @@ macro_rules! win_signal { return; } } - Err(err) => log::error!( + Err(err) => tracing::error!( "Could not register signal handler for {}: {err}", stringify!($sig), ), @@ -141,9 +141,12 @@ async fn wait_terminate() { #[tokio::main] async fn main() -> Result<(), eyre::Report> { - env_logger::builder() - .filter_module("tracing", log::LevelFilter::Info) - .parse_default_env() + tracing_subscriber::registry() + .with(tracing_subscriber::fmt::layer().with_span_events( + tracing_subscriber::fmt::format::FmtSpan::NEW + | tracing_subscriber::fmt::format::FmtSpan::CLOSE, + )) + .with(tracing_subscriber::EnvFilter::from_default_env()) .init(); let cli = Cli::parse(); @@ -180,7 +183,9 @@ async fn main() -> Result<(), eyre::Report> { let refresh_delay = std::time::Duration::from_secs_f64(conf.refresh_delay.parse()?); let router = tonic::transport::Server::builder() - .layer(TraceLayer::new_for_grpc()) + .trace_fn(|r| tracing::info_span!("gRPC", "path" = r.uri().path())) + .concurrency_limit_per_connection(1024) + .http2_max_pending_accept_reset_streams(Some(2048)) .add_service( armonik::api::v3::applications::applications_server::ApplicationsServer::from_arc( service.clone(), @@ -230,7 +235,7 @@ async fn main() -> Result<(), eyre::Report> { loop { timer.tick().await; if let Err(err) = service.update_sessions().await { - log::error!("Error while fetching sessions from clusters:\n{err:?}"); + tracing::error!("Error while fetching sessions from clusters:\n{err:?}"); } } } @@ -239,27 +244,27 @@ async fn main() -> Result<(), eyre::Report> { let mut service_future = tokio::spawn(router.serve(SocketAddr::new(conf.listen_ip.parse()?, conf.listen_port))); - log::info!("Application running"); + tracing::info!("Application running"); tokio::select! 
{ output = &mut background_future => { if let Err(err) = output { - log::error!("Background future had an error: {err:?}"); + tracing::error!("Background future had an error: {err:?}"); } } output = &mut service_future => { match output { Ok(Ok(())) => (), Ok(Err(err)) => { - log::error!("Service had an error: {err:?}"); + tracing::error!("Service had an error: {err:?}"); } Err(err) => { - log::error!("Service future had an error: {err:?}"); + tracing::error!("Service future had an error: {err:?}"); } } } _ = wait_terminate() => { - log::info!("Application stopping"); + tracing::info!("Application stopping"); } } @@ -269,7 +274,7 @@ async fn main() -> Result<(), eyre::Report> { _ = background_future.await; _ = service_future.await; - log::info!("Application stopped"); + tracing::info!("Application stopped"); Ok(()) } diff --git a/load-balancer/src/service/applications.rs b/load-balancer/src/service/applications.rs index ef41400..2d1dd81 100644 --- a/load-balancer/src/service/applications.rs +++ b/load-balancer/src/service/applications.rs @@ -2,11 +2,11 @@ use std::sync::Arc; use armonik::{ applications, - reexports::{tokio_stream::StreamExt, tokio_util, tonic}, + reexports::{tokio_stream::StreamExt, tonic, tracing_futures::Instrument}, server::ApplicationsService, }; -use crate::utils::{merge_streams, run_with_cancellation, IntoStatus}; +use crate::utils::{merge_streams, IntoStatus}; use super::Service; @@ -14,7 +14,6 @@ impl ApplicationsService for Service { async fn list( self: Arc, request: applications::list::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { let Ok(page) = usize::try_from(request.page) else { return Err(tonic::Status::invalid_argument("Page should be positive")); @@ -27,77 +26,76 @@ impl ApplicationsService for Service { let mut applications = Vec::new(); - run_with_cancellation! { - use cancellation_token; + let streams = self.clusters.values().map(|cluster| { + let request = request.clone(); + Box::pin(async_stream::stream! { + let mut client = cluster + .client() + .await + .map_err(IntoStatus::into_status)?; + let span = client.span(); + let stream = client + .get_all_applications(request.filters.clone(), request.sort.clone()) + .instrument(span) + .await?; + let mut stream = std::pin::pin!(stream); - let streams = self.clusters.values().map(|cluster| { - let request = request.clone(); - Box::pin(async_stream::stream! { - let stream = cluster - .client() - .await - .map_err(IntoStatus::into_status)? - .get_all_applications(request.filters.clone(), request.sort.clone()) - .await?; - let mut stream = std::pin::pin!(stream); - - while let Some(item) = stream.next().await { - yield item; - } - }) - }); - let mut streams = std::pin::pin!(merge_streams(streams)); + while let Some(item) = stream.next().await { + yield item; + } + }) + }); + let mut streams = std::pin::pin!(merge_streams(streams)); - while let Some(chunk) = streams.try_next().await? { - applications.extend(chunk); - } + while let Some(chunk) = streams.try_next().await? 
{ + applications.extend(chunk); + } - if !request.sort.fields.is_empty() { - applications.sort_by(|a, b| { - for field in &request.sort.fields { - let ordering = match field { - applications::Field::Unspecified => a.name.cmp(&b.name), - applications::Field::Name => a.name.cmp(&b.name), - applications::Field::Version => a.version.cmp(&b.version), - applications::Field::Namespace => a.namespace.cmp(&b.namespace), - applications::Field::Service => a.service.cmp(&b.service), - }; + if !request.sort.fields.is_empty() { + applications.sort_by(|a, b| { + for field in &request.sort.fields { + let ordering = match field { + applications::Field::Unspecified => a.name.cmp(&b.name), + applications::Field::Name => a.name.cmp(&b.name), + applications::Field::Version => a.version.cmp(&b.version), + applications::Field::Namespace => a.namespace.cmp(&b.namespace), + applications::Field::Service => a.service.cmp(&b.service), + }; - match (ordering, &request.sort.direction) { - ( - std::cmp::Ordering::Less, - armonik::SortDirection::Unspecified | armonik::SortDirection::Asc, - ) => return std::cmp::Ordering::Less, - (std::cmp::Ordering::Less, armonik::SortDirection::Desc) => { - return std::cmp::Ordering::Greater - } - (std::cmp::Ordering::Equal, _) => (), - ( - std::cmp::Ordering::Greater, - armonik::SortDirection::Unspecified | armonik::SortDirection::Asc, - ) => return std::cmp::Ordering::Greater, - (std::cmp::Ordering::Greater, armonik::SortDirection::Desc) => { - return std::cmp::Ordering::Less - } + match (ordering, &request.sort.direction) { + ( + std::cmp::Ordering::Less, + armonik::SortDirection::Unspecified | armonik::SortDirection::Asc, + ) => return std::cmp::Ordering::Less, + (std::cmp::Ordering::Less, armonik::SortDirection::Desc) => { + return std::cmp::Ordering::Greater + } + (std::cmp::Ordering::Equal, _) => (), + ( + std::cmp::Ordering::Greater, + armonik::SortDirection::Unspecified | armonik::SortDirection::Asc, + ) => return std::cmp::Ordering::Greater, + (std::cmp::Ordering::Greater, armonik::SortDirection::Desc) => { + return std::cmp::Ordering::Less } } + } - std::cmp::Ordering::Equal - }); - } + std::cmp::Ordering::Equal + }); + } - let total = applications.len() as i32; + let total = applications.len() as i32; - Ok(armonik::applications::list::Response { - applications: applications - .into_iter() - .skip(page * page_size) - .take(page_size) - .collect(), - page: request.page, - page_size: request.page_size, - total, - }) - } + Ok(armonik::applications::list::Response { + applications: applications + .into_iter() + .skip(page * page_size) + .take(page_size) + .collect(), + page: request.page, + page_size: request.page_size, + total, + }) } } diff --git a/load-balancer/src/service/auth.rs b/load-balancer/src/service/auth.rs index 448df2b..b206b1b 100644 --- a/load-balancer/src/service/auth.rs +++ b/load-balancer/src/service/auth.rs @@ -2,11 +2,11 @@ use std::sync::Arc; use armonik::{ auth, - reexports::{tokio_util, tonic}, + reexports::{tonic, tracing_futures::Instrument}, server::AuthService, }; -use crate::utils::{run_with_cancellation, IntoStatus}; +use crate::utils::IntoStatus; use super::Service; @@ -14,39 +14,34 @@ impl AuthService for Service { async fn current_user( self: Arc, _request: auth::current_user::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - run_with_cancellation! 
{ - use cancellation_token; - - let mut users = Vec::new(); - - for cluster in self.clusters.values() { - let user = cluster - .client() - .await - .map_err(IntoStatus::into_status)? - .auth() - .current_user() - .await - .map_err(IntoStatus::into_status)?; - - users.push(user); - } + let mut users = Vec::new(); + + for cluster in self.clusters.values() { + let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; + let span = client.span(); + let user = client + .auth() + .current_user() + .instrument(span) + .await + .map_err(IntoStatus::into_status)?; + + users.push(user); + } - let mut users = users.into_iter(); + let mut users = users.into_iter(); - let Some(user) = users.next() else { - return Err(tonic::Status::internal("No cluster")); - }; + let Some(user) = users.next() else { + return Err(tonic::Status::internal("No cluster")); + }; - for other in users { - if user != other { - return Err(tonic::Status::internal("Mismatch between clusters")); - } + for other in users { + if user != other { + return Err(tonic::Status::internal("Mismatch between clusters")); } - - Ok(auth::current_user::Response { user }) } + + Ok(auth::current_user::Response { user }) } } diff --git a/load-balancer/src/service/events.rs b/load-balancer/src/service/events.rs index c78462a..8ec90b7 100644 --- a/load-balancer/src/service/events.rs +++ b/load-balancer/src/service/events.rs @@ -2,11 +2,11 @@ use std::sync::Arc; use armonik::{ events, - reexports::{tokio_stream::StreamExt, tokio_util, tonic}, + reexports::{tokio_stream::StreamExt, tonic, tracing_futures::Instrument}, server::EventsService, }; -use crate::utils::{run_with_cancellation, IntoStatus}; +use crate::utils::IntoStatus; use super::Service; @@ -14,48 +14,49 @@ impl EventsService for Service { async fn subscribe( self: Arc, request: events::subscribe::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> Result< impl tonic::codegen::tokio_stream::Stream< Item = Result, > + Send, tonic::Status, > { - run_with_cancellation! { - use cancellation_token.clone(); - - let events::subscribe::Request { - session_id, - task_filters, - result_filters, - returned_events, - } = request; - - let mut client = self - .get_cluster_from_session(&session_id) - .await? - .ok_or_else(|| { - tonic::Status::not_found(format!("Session {} was not found", session_id)) - })? + let events::subscribe::Request { + session_id, + task_filters, + result_filters, + returned_events, + } = request; + + let cluster = self + .get_cluster_from_session(&session_id) + .await? + .ok_or_else(|| { + tonic::Status::not_found(format!("Session {} was not found", session_id)) + })?; + + let span = tracing::Span::current(); + let stream = async_stream::stream! { + let mut client = cluster .client() + .instrument(span) .await - .map_err(IntoStatus::into_status)? - .events(); + .map_err(IntoStatus::into_status)?; + let span = client.span(); let stream = client + .events() .subscribe(session_id, task_filters, result_filters, returned_events) + .instrument(span.clone()) .await .map_err(IntoStatus::into_status)?; - let stream = async_stream::stream! 
{ - let mut stream = std::pin::pin!(stream); + let mut stream = std::pin::pin!(stream); - while let Some(Some(event)) = cancellation_token.run_until_cancelled(stream.next()).await { - yield event.map_err(IntoStatus::into_status); - } - }; + while let Some(event) = stream.next().await { + yield event.map_err(IntoStatus::into_status); + } + }; - Ok(stream) - } + Ok(stream) } } diff --git a/load-balancer/src/service/health_check.rs b/load-balancer/src/service/health_check.rs index 5d84db1..181851a 100644 --- a/load-balancer/src/service/health_check.rs +++ b/load-balancer/src/service/health_check.rs @@ -2,11 +2,11 @@ use std::{collections::HashMap, sync::Arc}; use armonik::{ health_checks, - reexports::{tokio_util, tonic}, + reexports::{tonic, tracing_futures::Instrument}, server::HealthChecksService, }; -use crate::utils::{run_with_cancellation, IntoStatus}; +use crate::utils::IntoStatus; use super::Service; @@ -14,48 +14,43 @@ impl HealthChecksService for Service { async fn check( self: Arc, _request: health_checks::check::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - run_with_cancellation! { - use cancellation_token; - - let mut services = HashMap::::new(); - - for cluster in self.clusters.values() { - let health = cluster - .client() - .await - .map_err(IntoStatus::into_status)? - .health_checks() - .check() - .await - .map_err(IntoStatus::into_status)?; - - for service in health { - match services.entry(service.name) { - std::collections::hash_map::Entry::Occupied(mut occupied_entry) => { - let health = occupied_entry.get_mut(); - if health.0 < service.health { - *health = (service.health, service.message); - } - } - std::collections::hash_map::Entry::Vacant(vacant_entry) => { - vacant_entry.insert((service.health, service.message)); + let mut services = HashMap::::new(); + + for cluster in self.clusters.values() { + let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; + let span = client.span(); + let health = client + .health_checks() + .check() + .instrument(span) + .await + .map_err(IntoStatus::into_status)?; + + for service in health { + match services.entry(service.name) { + std::collections::hash_map::Entry::Occupied(mut occupied_entry) => { + let health = occupied_entry.get_mut(); + if health.0 < service.health { + *health = (service.health, service.message); } } + std::collections::hash_map::Entry::Vacant(vacant_entry) => { + vacant_entry.insert((service.health, service.message)); + } } } - - Ok(health_checks::check::Response { - services: services - .into_iter() - .map(|(name, (health, message))| health_checks::ServiceHealth { - name, - message, - health, - }) - .collect(), - }) } + + Ok(health_checks::check::Response { + services: services + .into_iter() + .map(|(name, (health, message))| health_checks::ServiceHealth { + name, + message, + health, + }) + .collect(), + }) } } diff --git a/load-balancer/src/service/mod.rs b/load-balancer/src/service/mod.rs index f275b63..ad98c21 100644 --- a/load-balancer/src/service/mod.rs +++ b/load-balancer/src/service/mod.rs @@ -8,7 +8,9 @@ use std::{ use sessions::Session; use tokio_rusqlite::Connection; -use armonik::reexports::{tokio::sync::RwLock, tokio_stream::StreamExt, tonic::Status}; +use armonik::reexports::{ + tokio::sync::RwLock, tokio_stream::StreamExt, tonic::Status, tracing_futures::Instrument, +}; use crate::{ async_pool::AsyncPool, @@ -38,7 +40,7 @@ pub struct Service { impl Service { pub async fn new(clusters: impl IntoIterator) -> Self { let pool = 
AsyncPool::new(|| async { - Connection::open("file::memory:?cache=shared") + Connection::open("file::memory:?cache=shared&psow=1") .await .unwrap() }); @@ -69,6 +71,7 @@ impl Service { CREATE INDEX session_deleted_at ON session(deleted_at); CREATE INDEX session_duration ON session(duration); COMMIT;", + tracing::trace_span!("create_table"), ) .await .unwrap(); @@ -89,8 +92,10 @@ impl Service { sessions: Vec, cluster_name: String, ) -> Result<(), Status> { + let span = tracing::trace_span!("add_sessions"); self.db - .call(move |conn| { + .call(span.clone(), move |conn| { + let prepare_span = tracing::trace_span!(parent: &span, "prepare").entered(); let mut stmt = conn.prepare_cached( "WITH data AS ( SELECT @@ -139,7 +144,9 @@ impl Service { duration FROM data", )?; + std::mem::drop(prepare_span); + let _execute_span = tracing::trace_span!(parent: &span, "execute").entered(); stmt.execute([serde_json::to_string( &sessions .into_iter() @@ -154,16 +161,21 @@ impl Service { .map_err(IntoStatus::into_status) } + #[armonik::reexports::tracing::instrument(level = armonik::reexports::tracing::Level::TRACE, skip_all)] pub async fn get_cluster_from_sessions( &self, session_ids: &[&str], ) -> Result, Vec>, Status> { let mut missing_ids: HashSet<_> = session_ids.iter().copied().map(String::from).collect(); - let (mapping, mut missing_ids) = self.db.call(move |conn| { + let (mapping, mut missing_ids) = self.db.call(tracing::Span::current(), move |conn| { let mut mapping = HashMap::>::new(); + let prepare_span = tracing::trace_span!("prepare"); let mut stmt = conn.prepare_cached("SELECT session_id, cluster FROM session WHERE session_id IN (SELECT e.value FROM json_each(?) e)")?; + std::mem::drop(prepare_span); + + let _execute_span = tracing::trace_span!("execute"); let mut rows = stmt.query([serde_json::to_string(&missing_ids).unwrap()])?; while let Some(row) = rows.next()? 
{ @@ -207,10 +219,11 @@ impl Service { .clusters .values() .map(|cluster| async { - let client = match cluster.client().await { + let mut client = match cluster.client().await { Ok(client) => client, Err(err) => return (cluster.clone(), Err(IntoStatus::into_status(err))), }; + let span = client.span(); let response = match client .sessions() .list( @@ -220,6 +233,7 @@ impl Service { 0, filter.len() as i32, ) + .instrument(span) .await { Ok(response) => response, @@ -275,6 +289,7 @@ impl Service { Ok(sessions.into_keys().next()) } + #[armonik::reexports::tracing::instrument(level = armonik::reexports::tracing::Level::TRACE, skip_all)] pub async fn get_cluster_from_results( &self, result_ids: &[&str], @@ -321,13 +336,15 @@ impl Service { .clusters .values() .map(|cluster| async { - let client = match cluster.client().await { + let mut client = match cluster.client().await { Ok(client) => client, Err(err) => return (cluster.clone(), Err(IntoStatus::into_status(err))), }; + let span = client.span(); let response = match client .results() .list(filter.clone(), Default::default(), 0, filter.len() as i32) + .instrument(span) .await { Ok(response) => response, @@ -385,6 +402,7 @@ impl Service { Ok(results.into_keys().next()) } + #[armonik::reexports::tracing::instrument(level = armonik::reexports::tracing::Level::TRACE, skip_all)] pub async fn get_cluster_from_tasks( &self, task_ids: &[&str], @@ -431,10 +449,11 @@ impl Service { .clusters .values() .map(|cluster| async { - let client = match cluster.client().await { + let mut client = match cluster.client().await { Ok(client) => client, Err(err) => return (cluster.clone(), Err(IntoStatus::into_status(err))), }; + let span = client.span(); let response = match client .tasks() .list( @@ -444,6 +463,7 @@ impl Service { 0, filter.len() as i32, ) + .instrument(span) .await { Ok(response) => response, @@ -501,18 +521,21 @@ impl Service { Ok(results.into_keys().next()) } + #[armonik::reexports::tracing::instrument(skip_all)] pub async fn update_sessions(&self) -> Result<(), Status> { let streams = self.clusters.values().map(|cluster| { Box::pin(async_stream::stream! 
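// Standalone sketch of the `#[tracing::instrument]` attribute added to the
// `get_cluster_from_*` helpers above: `skip_all` keeps the potentially large
// arguments out of the span while still covering the whole async function,
// including its await points. The lookup body below is a made-up stand-in for
// the real SQLite/cluster resolution logic.
use std::collections::HashMap;

#[tracing::instrument(level = "trace", skip_all, fields(count = ids.len()))]
async fn resolve_clusters(ids: &[&str]) -> HashMap<String, String> {
    // Pretend every id is already known locally; the real helper queries the
    // local cache first and asks the remote clusters for anything missing.
    ids.iter()
        .map(|id| (id.to_string(), format!("cluster-of-{id}")))
        .collect()
}

#[tokio::main]
async fn main() {
    tracing_subscriber::fmt()
        .with_max_level(tracing::Level::TRACE)
        .init();
    let mapping = resolve_clusters(&["session-1", "session-2"]).await;
    tracing::info!(?mapping, "resolved");
}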
{ - let client = match cluster.client().await.map_err(IntoStatus::into_status) { + let mut client = match cluster.client().await.map_err(IntoStatus::into_status) { Ok(client) => client, Err(err) => { yield (cluster.clone(), Err(err)); return; } }; + let span = client.span(); let stream = match client .get_all_sessions(Default::default(), Default::default()) + .instrument(span) .await { Ok(stream) => stream, @@ -541,14 +564,14 @@ impl Service { match response { Ok(chunk) => { if let Err(err) = self.add_sessions(chunk, cluster.name.clone()).await { - log::error!( + tracing::error!( "Could not record sessions from cluster {}: {}", cluster.name, err ) } } - Err(err) => log::error!( + Err(err) => tracing::error!( "Could not fetch sessions from cluster {}: {}", cluster.name, err diff --git a/load-balancer/src/service/partitions.rs b/load-balancer/src/service/partitions.rs index 0d744e6..2cf8154 100644 --- a/load-balancer/src/service/partitions.rs +++ b/load-balancer/src/service/partitions.rs @@ -2,11 +2,11 @@ use std::sync::Arc; use armonik::{ partitions, - reexports::{tokio_stream::StreamExt, tokio_util, tonic}, + reexports::{tokio_stream::StreamExt, tonic, tracing_futures::Instrument}, server::PartitionsService, }; -use crate::utils::{merge_streams, run_with_cancellation, IntoStatus}; +use crate::utils::{merge_streams, IntoStatus}; use super::Service; @@ -14,7 +14,6 @@ impl PartitionsService for Service { async fn list( self: Arc, request: partitions::list::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { let Ok(page) = usize::try_from(request.page) else { return Err(tonic::Status::invalid_argument("Page should be positive")); @@ -27,98 +26,98 @@ impl PartitionsService for Service { let mut partitions = Vec::new(); - run_with_cancellation! { - use cancellation_token; - - let streams = self.clusters.values().map(|cluster| { - let request = request.clone(); - Box::pin(async_stream::stream! { - let stream = cluster - .client() - .await - .map_err(IntoStatus::into_status)? - .get_all_partitions(request.filters.clone(), request.sort.clone()) - .await?; - let mut stream = std::pin::pin!(stream); - - while let Some(item) = stream.next().await { - yield item; - } - }) - }); - let mut streams = std::pin::pin!(merge_streams(streams)); - - while let Some(chunk) = streams.try_next().await? { - partitions.extend(chunk); - } - - match &request.sort.field { - partitions::Field::Unspecified => (), - partitions::Field::Id => partitions.sort_by(|a, b| a.partition_id.cmp(&b.partition_id)), - partitions::Field::ParentPartitionIds => { - partitions.sort_by(|a, b| a.parent_partition_ids.cmp(&b.parent_partition_ids)) - } - partitions::Field::PodReserved => { - partitions.sort_by(|a, b| a.pod_reserved.cmp(&b.pod_reserved)) + let streams = self.clusters.values().map(|cluster| { + let request = request.clone(); + Box::pin(async_stream::stream! 
{ + let mut client = cluster + .client() + .await + .map_err(IntoStatus::into_status)?; + let span = client.span(); + let stream = client + .get_all_partitions(request.filters.clone(), request.sort.clone()) + .instrument(span.clone()) + .await?; + let mut stream = std::pin::pin!(stream.instrument(span)); + + while let Some(item) = stream.next().await { + yield item; } - partitions::Field::PodMax => partitions.sort_by(|a, b| a.pod_max.cmp(&b.pod_max)), - partitions::Field::PreemptionPercentage => { - partitions.sort_by(|a, b| a.preemption_percentage.cmp(&b.preemption_percentage)) - } - partitions::Field::Priority => partitions.sort_by(|a, b| a.priority.cmp(&b.priority)), - } + }) + }); + let mut streams = std::pin::pin!(merge_streams(streams)); + + while let Some(chunk) = streams.try_next().await? { + partitions.extend(chunk); + } - if matches!(&request.sort.direction, armonik::SortDirection::Desc) { - partitions.reverse(); + match &request.sort.field { + partitions::Field::Unspecified => (), + partitions::Field::Id => partitions.sort_by(|a, b| a.partition_id.cmp(&b.partition_id)), + partitions::Field::ParentPartitionIds => { + partitions.sort_by(|a, b| a.parent_partition_ids.cmp(&b.parent_partition_ids)) + } + partitions::Field::PodReserved => { + partitions.sort_by(|a, b| a.pod_reserved.cmp(&b.pod_reserved)) } + partitions::Field::PodMax => partitions.sort_by(|a, b| a.pod_max.cmp(&b.pod_max)), + partitions::Field::PreemptionPercentage => { + partitions.sort_by(|a, b| a.preemption_percentage.cmp(&b.preemption_percentage)) + } + partitions::Field::Priority => partitions.sort_by(|a, b| a.priority.cmp(&b.priority)), + } - let total = partitions.len() as i32; - - Ok(armonik::partitions::list::Response { - partitions: partitions - .into_iter() - .skip(page * page_size) - .take(page_size) - .collect(), - page: request.page, - page_size: request.page_size, - total, - }) + if matches!(&request.sort.direction, armonik::SortDirection::Desc) { + partitions.reverse(); } + + let total = partitions.len() as i32; + + Ok(armonik::partitions::list::Response { + partitions: partitions + .into_iter() + .skip(page * page_size) + .take(page_size) + .collect(), + page: request.page, + page_size: request.page_size, + total, + }) } async fn get( self: Arc, request: partitions::get::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - run_with_cancellation! 
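// Simplified sketch of the fan-out in `PartitionsService::list` above: one
// stream per cluster, merged into a single stream, with every page collected
// locally before sorting and paging. The cluster names and page contents are
// invented for the example, and `futures::stream::select_all` stands in for the
// crate's own `merge_streams` helper.
use futures::{stream, StreamExt};

#[tokio::main]
async fn main() {
    let clusters = vec![("a", vec![3, 1]), ("b", vec![2, 4])];

    // One boxed stream of pages per cluster so they all share the same type.
    let streams = clusters.into_iter().map(|(name, pages)| {
        stream::iter(pages.into_iter().map(move |p| (name, p))).boxed()
    });

    // Items arrive in whatever order the clusters answer.
    let mut merged = stream::select_all(streams);

    let mut all = Vec::new();
    while let Some((cluster, page)) = merged.next().await {
        all.push((cluster, page));
    }

    // Sort only after everything has been gathered, as the handler does before paging.
    all.sort_by_key(|(_, page)| *page);
    println!("{all:?}");
}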
{ - use cancellation_token; - - let mut err = None; - - for cluster in self.clusters.values() { - let client = match cluster.client().await { - Ok(client) => client, - Err(error) => { - err = Some(error.into_status()); - continue; - } - }; - - match client.partitions().call(request.clone()).await { - Ok(response) => return Ok(response), - Err(error) => { - err = Some(error.into_status()); - continue; - } - }; - } + let mut err = None; + + for cluster in self.clusters.values() { + let mut client = match cluster.client().await { + Ok(client) => client, + Err(error) => { + err = Some(error.into_status()); + continue; + } + }; + let span = client.span(); + + match client + .partitions() + .call(request.clone()) + .instrument(span) + .await + { + Ok(response) => return Ok(response), + Err(error) => { + err = Some(error.into_status()); + continue; + } + }; + } - match err { - Some(err) => Err(err), - None => Err(tonic::Status::internal("No cluster")), - } + match err { + Some(err) => Err(err), + None => Err(tonic::Status::internal("No cluster")), } } } diff --git a/load-balancer/src/service/results.rs b/load-balancer/src/service/results.rs index b4cf748..5aa875c 100644 --- a/load-balancer/src/service/results.rs +++ b/load-balancer/src/service/results.rs @@ -1,12 +1,12 @@ use std::sync::Arc; use armonik::{ - reexports::{tokio, tokio_stream::StreamExt, tokio_util, tonic}, + reexports::{tokio, tokio_stream::StreamExt, tonic, tracing_futures::Instrument}, results, server::ResultsService, }; -use crate::utils::{run_with_cancellation, IntoStatus}; +use crate::utils::IntoStatus; use super::Service; @@ -14,203 +14,187 @@ impl ResultsService for Service { async fn list( self: Arc, request: results::list::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - run_with_cancellation! 
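// Minimal sketch of the fallback loop in `PartitionsService::get` above: ask each
// cluster in turn, return the first success, and only surface the last error (or
// "No cluster") once every cluster has failed. `lookup` and its error type are
// placeholders, not the real client API.
async fn lookup(cluster: &str, id: &str) -> Result<String, String> {
    if cluster == "b" {
        Ok(format!("{id}@{cluster}"))
    } else {
        Err(format!("{id} not found on {cluster}"))
    }
}

async fn get_from_any(clusters: &[&str], id: &str) -> Result<String, String> {
    let mut last_err = None;
    for cluster in clusters {
        match lookup(cluster, id).await {
            Ok(found) => return Ok(found),
            Err(err) => last_err = Some(err),
        }
    }
    Err(last_err.unwrap_or_else(|| "No cluster".to_string()))
}

#[tokio::main]
async fn main() {
    println!("{:?}", get_from_any(&["a", "b"], "partition-1").await);
}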
{ - use cancellation_token; - - let mut requested_results = Vec::new(); - let mut requested_sessions = Vec::new(); - - for and in &request.filters.or { - let mut has_check = false; - - for field in and { - match field { - armonik::results::filter::Field { - field: armonik::results::Field::SessionId, - condition: - armonik::results::filter::Condition::String(armonik::FilterString { - value, - operator: armonik::FilterStringOperator::Equal, - }), - } => { - requested_sessions.push(value.as_str()); - has_check = true; - } - armonik::results::filter::Field { - field: armonik::results::Field::ResultId, - condition: - armonik::results::filter::Condition::String(armonik::FilterString { - value, - operator: armonik::FilterStringOperator::Equal, - }), - } => { - requested_results.push(value.as_str()); - has_check = true; - } - _ => {} + let mut requested_results = Vec::new(); + let mut requested_sessions = Vec::new(); + + for and in &request.filters.or { + let mut has_check = false; + + for field in and { + match field { + armonik::results::filter::Field { + field: armonik::results::Field::SessionId, + condition: + armonik::results::filter::Condition::String(armonik::FilterString { + value, + operator: armonik::FilterStringOperator::Equal, + }), + } => { + requested_sessions.push(value.as_str()); + has_check = true; } + armonik::results::filter::Field { + field: armonik::results::Field::ResultId, + condition: + armonik::results::filter::Condition::String(armonik::FilterString { + value, + operator: armonik::FilterStringOperator::Equal, + }), + } => { + requested_results.push(value.as_str()); + has_check = true; + } + _ => {} } + } - if !has_check { - return Err(armonik::reexports::tonic::Status::invalid_argument(String::from("Cannot determine the cluster from the filter, missing condition on session_id"))); - } + if !has_check { + return Err(armonik::reexports::tonic::Status::invalid_argument(String::from("Cannot determine the cluster from the filter, missing condition on session_id"))); } + } - let (sessions, results) = tokio::join!( - self.get_cluster_from_sessions(&requested_sessions), - self.get_cluster_from_results(&requested_results) - ); + let (sessions, results) = tokio::join!( + self.get_cluster_from_sessions(&requested_sessions), + self.get_cluster_from_results(&requested_results) + ); - let (mut sessions, mut results) = (sessions?.into_iter(), results?.into_iter()); + let (mut sessions, mut results) = (sessions?.into_iter(), results?.into_iter()); - let cluster = match (sessions.next(), results.next()) { - (None, None) => { - return Ok(results::list::Response { - results: Vec::new(), - page: request.page, - page_size: request.page_size, - total: 0, - }); - } - (None, Some(res_cluster)) => res_cluster.0, - (Some(ses_cluster), None) => ses_cluster.0, - (Some(ses_cluster), Some(res_cluster)) => { - if res_cluster != ses_cluster { - return Err(tonic::Status::invalid_argument( - "Cannot determine the cluster from the filter, multiple clusters targeted", - )); - } - ses_cluster.0 - } - }; - match (sessions.next(), results.next()) { - (None, None) => {} - _ => { + let cluster = match (sessions.next(), results.next()) { + (None, None) => { + return Ok(results::list::Response { + results: Vec::new(), + page: request.page, + page_size: request.page_size, + total: 0, + }); + } + (None, Some(res_cluster)) => res_cluster.0, + (Some(ses_cluster), None) => ses_cluster.0, + (Some(ses_cluster), Some(res_cluster)) => { + if res_cluster != ses_cluster { return Err(tonic::Status::invalid_argument( "Cannot 
determine the cluster from the filter, multiple clusters targeted", )); } + ses_cluster.0 + } + }; + match (sessions.next(), results.next()) { + (None, None) => {} + _ => { + return Err(tonic::Status::invalid_argument( + "Cannot determine the cluster from the filter, multiple clusters targeted", + )); } + } - let mut client = cluster - .client() - .await - .map_err(IntoStatus::into_status)? - .results(); - client.call(request).await.map_err(|err| match err { + let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; + let span = client.span(); + client + .results() + .call(request) + .instrument(span) + .await + .map_err(|err| match err { armonik::client::RequestError::Grpc { source, .. } => *source, err => tonic::Status::internal(err.to_string()), }) - } } async fn get( self: Arc, request: results::get::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - crate::utils::impl_unary!(self.results, request, cancellation_token, {get_cluster_from_result, id, "Result {} was not found"}) + crate::utils::impl_unary!(self.results, request, {get_cluster_from_result, id, "Result {} was not found"}) } async fn get_owner_task_id( self: Arc, request: results::get_owner_task_id::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - crate::utils::impl_unary!(self.results, request, cancellation_token, session) + crate::utils::impl_unary!(self.results, request, session) } async fn create_metadata( self: Arc, request: results::create_metadata::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - crate::utils::impl_unary!(self.results, request, cancellation_token, session) + crate::utils::impl_unary!(self.results, request, session) } async fn create( self: Arc, request: results::create::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - crate::utils::impl_unary!(self.results, request, cancellation_token, session) + crate::utils::impl_unary!(self.results, request, session) } async fn delete_data( self: Arc, request: results::delete_data::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - crate::utils::impl_unary!(self.results, request, cancellation_token, session) + crate::utils::impl_unary!(self.results, request, session) } async fn get_service_configuration( self: Arc, _request: results::get_service_configuration::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - crate::utils::run_with_cancellation! { - use cancellation_token; - - let mut min = 1 << 24; + let mut min = 1 << 24; - for (_, cluster) in self.clusters.iter() { - let conf = cluster - .client() - .await - .map_err(IntoStatus::into_status)? 
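// Self-contained sketch of the filter inspection done in `ResultsService::list`
// above: the filter is a disjunction of conjunctions, and every conjunction must
// pin the session (or result) id with an equality condition, otherwise the
// request cannot be routed to a single cluster. The types below are simplified
// stand-ins for the ArmoniK filter types.
enum Condition {
    SessionIdEquals(String),
    Other,
}

fn requested_sessions(filter_or: &[Vec<Condition>]) -> Result<Vec<&str>, String> {
    let mut sessions = Vec::new();
    for and in filter_or {
        let mut has_check = false;
        for condition in and {
            if let Condition::SessionIdEquals(value) = condition {
                sessions.push(value.as_str());
                has_check = true;
            }
        }
        if !has_check {
            return Err("missing condition on session_id".to_string());
        }
    }
    Ok(sessions)
}

fn main() {
    let filter = vec![vec![
        Condition::SessionIdEquals("session-1".to_string()),
        Condition::Other,
    ]];
    println!("{:?}", requested_sessions(&filter));
}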
- .results() - .get_service_configuration() - .await - .map_err(IntoStatus::into_status)?; - - min = min.min(conf.data_chunk_max_size); - } + for (_, cluster) in self.clusters.iter() { + let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; + let span = client.span(); + let conf = client + .results() + .get_service_configuration() + .instrument(span) + .await + .map_err(IntoStatus::into_status)?; - Ok(results::get_service_configuration::Response { - data_chunk_max_size: min, - }) + min = min.min(conf.data_chunk_max_size); } + + Ok(results::get_service_configuration::Response { + data_chunk_max_size: min, + }) } async fn download( self: Arc, request: results::download::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> Result< impl tonic::codegen::tokio_stream::Stream< Item = Result, > + Send, tonic::Status, > { - crate::utils::run_with_cancellation! { - use cancellation_token.clone(); - - let Some(cluster) = self.get_cluster_from_session(&request.session_id).await? else { - return Err(tonic::Status::not_found(format!( - "Session {} was not found", - request.session_id - ))); - }; - - let mut stream = cluster - .client() - .await - .map_err(IntoStatus::into_status)? + let Some(cluster) = self.get_cluster_from_session(&request.session_id).await? else { + return Err(tonic::Status::not_found(format!( + "Session {} was not found", + request.session_id + ))); + }; + + let span = tracing::Span::current(); + Ok(async_stream::try_stream! { + let mut client = cluster.client().instrument(span).await.map_err(IntoStatus::into_status)?; + let span = client.span(); + + let mut stream = client .results() .download(request.session_id, request.result_id) + .instrument(span) .await .map_err(IntoStatus::into_status)?; - Ok(async_stream::try_stream! { - while let Some(Some(chunk)) = cancellation_token.run_until_cancelled(stream.next()).await { - let chunk = chunk.map_err(IntoStatus::into_status)?; - yield results::download::Response{ data_chunk: chunk }; - } - }) - } + while let Some(chunk) = stream.next().await { + let chunk = chunk.map_err(IntoStatus::into_status)?; + yield results::download::Response{ data_chunk: chunk }; + } + }) } async fn upload( @@ -219,11 +203,10 @@ impl ResultsService for Service { Item = Result, > + Send + 'static, - cancellation_token: tokio_util::sync::CancellationToken, ) -> Result { let mut request = Box::pin(request); - match crate::utils::run_with_cancellation!(cancellation_token, request.next()) { + match request.next().await { Some(Ok(results::upload::Request::Identifier { session_id, result_id, @@ -248,14 +231,12 @@ impl ResultsService for Service { } }); - let mut result_client = cluster - .client() - .await - .map_err(IntoStatus::into_status)? - .results(); + let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; + let span = client.span(); + let mut result_client = client.results(); tokio::select! 
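// Reduced sketch of the streaming relay in `download` above: the upstream stream
// is opened inside an `async_stream::try_stream!` block and every chunk is
// re-yielded after its error is mapped, so nothing runs until the caller polls
// the returned stream. A local channel stands in for the gRPC download stream.
use async_stream::try_stream;
use futures::{Stream, StreamExt};

fn relay(
    mut upstream: tokio::sync::mpsc::Receiver<Result<Vec<u8>, String>>,
) -> impl Stream<Item = Result<Vec<u8>, String>> {
    try_stream! {
        while let Some(chunk) = upstream.recv().await {
            // `?` forwards upstream errors to whoever consumes the relayed stream.
            let chunk = chunk?;
            yield chunk;
        }
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = tokio::sync::mpsc::channel(4);
    tx.send(Ok(b"hello".to_vec())).await.unwrap();
    drop(tx);

    let mut stream = std::pin::pin!(relay(rx));
    while let Some(chunk) = stream.next().await {
        println!("{:?}", chunk.map(|c| c.len()));
    }
}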
{ - result = result_client.upload(session_id, result_id, stream) => { + result = result_client.upload(session_id, result_id, stream).instrument(span) => { match result { Ok(result) => Ok(results::upload::Response { result }), Err(err) => Err(err.into_status()) @@ -270,7 +251,6 @@ impl ResultsService for Service { Err(err) => Err(err), } } - _ = cancellation_token.cancelled() => Err(tonic::Status::aborted("Cancellation token has been triggered")) } } Some(Ok(results::upload::Request::DataChunk(_))) => { diff --git a/load-balancer/src/service/sessions.rs b/load-balancer/src/service/sessions.rs index c71528f..9ab2883 100644 --- a/load-balancer/src/service/sessions.rs +++ b/load-balancer/src/service/sessions.rs @@ -2,8 +2,8 @@ use std::{collections::HashMap, sync::Arc}; use armonik::{ reexports::{ - tokio_util, tonic::{self, Status}, + tracing_futures::Instrument, }, server::SessionsService, sessions, @@ -11,7 +11,7 @@ use armonik::{ use rusqlite::params_from_iter; use serde::{Deserialize, Serialize}; -use crate::utils::{impl_unary, run_with_cancellation, IntoStatus}; +use crate::utils::{impl_unary, IntoStatus}; use super::Service; @@ -20,7 +20,6 @@ impl SessionsService for Service { async fn list( self: Arc, request: sessions::list::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { let Ok(page) = usize::try_from(request.page) else { return Err(tonic::Status::invalid_argument("Page should be positive")); @@ -31,6 +30,7 @@ impl SessionsService for Service { )); }; + let build_span = tracing::trace_span!("build"); let mut params = Vec::>::new(); let mut query_suffix = String::new(); let mut sep = " WHERE ("; @@ -228,16 +228,25 @@ impl SessionsService for Service { page * page_size ); let query_count = format!("SELECT COUNT(*) FROM session{query_suffix}"); + std::mem::drop(build_span); - let (sessions, total) = run_with_cancellation!( - cancellation_token, - self.db.call(move |conn| { + let (sessions, total) = self + .db + .call(tracing::trace_span!("transaction"), move |conn| { let mut sessions = Vec::::new(); let transaction = conn.transaction()?; + + let count_span = tracing::trace_span!("count"); let total = transaction .query_row(&query_count, params_from_iter(¶ms), |row| row.get(0))?; + std::mem::drop(count_span); + + let prepare_span = tracing::trace_span!("prepare"); let mut stmt = transaction.prepare(&query)?; + std::mem::drop(prepare_span); + + let execute_span = tracing::trace_span!("execute"); let mut rows = stmt.query(params_from_iter(¶ms))?; while let Some(row) = rows.next()? 
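// Reduced sketch of the `tokio::select!` used by `ResultsService::upload` above:
// the forwarded RPC races against a oneshot channel that fires if the incoming
// stream turns out to be invalid, so the handler can fail early instead of
// waiting for the upstream call. Both futures here are stand-ins.
use tokio::sync::oneshot;

async fn forward_upload(chunks: usize) -> Result<usize, String> {
    tokio::time::sleep(std::time::Duration::from_millis(10)).await;
    Ok(chunks)
}

#[tokio::main]
async fn main() {
    let (tx, rx) = oneshot::channel::<String>();

    // The real stream adapter sends on `tx` when it sees an unexpected message;
    // dropping it here lets the upload win the race.
    drop(tx);

    let outcome = tokio::select! {
        uploaded = forward_upload(3) => uploaded.map(|n| format!("{n} chunks uploaded")),
        Ok(invalid) = rx => Err(invalid),
    };
    println!("{outcome:?}");
}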
{ @@ -253,13 +262,15 @@ impl SessionsService for Service { } }; } + std::mem::drop(execute_span); std::mem::drop(rows); std::mem::drop(stmt); + transaction.commit()?; Result::<_, rusqlite::Error>::Ok((sessions, total)) }) - ) - .map_err(IntoStatus::into_status)?; + .await + .map_err(IntoStatus::into_status)?; Ok(armonik::sessions::list::Response { sessions, @@ -272,23 +283,20 @@ impl SessionsService for Service { async fn get( self: Arc, request: sessions::get::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - impl_unary!(self.sessions, request, cancellation_token, session) + impl_unary!(self.sessions, request, session) } async fn cancel( self: Arc, request: sessions::cancel::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - impl_unary!(self.sessions, request, cancellation_token, session) + impl_unary!(self.sessions, request, session) } async fn create( self: Arc, request: sessions::create::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { let n = self.clusters.len(); let i = self @@ -298,12 +306,14 @@ impl SessionsService for Service { let mut err = None; for (cluster_name, cluster) in self.clusters.iter().cycle().skip(i % n).take(n) { - match run_with_cancellation!(cancellation_token, cluster.client()) { - Ok(client) => { - let response = run_with_cancellation!( - cancellation_token, - client.sessions().call(request.clone()) - ); + match cluster.client().await { + Ok(mut client) => { + let span = client.span(); + let response = client + .sessions() + .call(request.clone()) + .instrument(span) + .await; match response { Ok(response) => { @@ -345,51 +355,48 @@ impl SessionsService for Service { async fn pause( self: Arc, request: sessions::pause::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - impl_unary!(self.sessions, request, cancellation_token, session) + impl_unary!(self.sessions, request, session) } async fn resume( self: Arc, request: sessions::resume::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - impl_unary!(self.sessions, request, cancellation_token, session) + impl_unary!(self.sessions, request, session) } async fn close( self: Arc, request: sessions::close::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - impl_unary!(self.sessions, request, cancellation_token, session) + impl_unary!(self.sessions, request, session) } async fn purge( self: Arc, request: sessions::purge::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - impl_unary!(self.sessions, request, cancellation_token, session) + impl_unary!(self.sessions, request, session) } async fn delete( self: Arc, request: sessions::delete::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { let service = self.clone(); let session_id = request.session_id.clone(); - let response = impl_unary!(service.sessions, request, cancellation_token, session)?; + let response = impl_unary!(service.sessions, request, session)?; // If delete is successful, remove the session from the list - run_with_cancellation!( - cancellation_token, - self.db - .execute("DELETE FROM session WHERE session_id = ?", [session_id]) - ) - .map_err(IntoStatus::into_status)?; + self.db + .execute( + "DELETE FROM session WHERE session_id = ?", + [session_id], + tracing::trace_span!("delete"), + ) + 
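// Minimal sketch of the round-robin used by `SessionsService::create` above: an
// atomic counter picks which cluster to try first, and `cycle().skip(i % n).take(n)`
// walks every cluster exactly once starting from that offset, spreading session
// creation across clusters while still falling back when one fails. The static
// counter and cluster names are assumptions of this example.
use std::sync::atomic::{AtomicUsize, Ordering};

static COUNTER: AtomicUsize = AtomicUsize::new(0);

fn next_order(clusters: &[&str]) -> Vec<String> {
    let n = clusters.len();
    let i = COUNTER.fetch_add(1, Ordering::Relaxed);
    clusters
        .iter()
        .cycle()
        .skip(i % n)
        .take(n)
        .map(|c| c.to_string())
        .collect()
}

fn main() {
    let clusters = ["a", "b", "c"];
    for _ in 0..4 {
        println!("{:?}", next_order(&clusters));
    }
}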
.await + .map_err(IntoStatus::into_status)?; Ok(response) } @@ -397,9 +404,8 @@ impl SessionsService for Service { async fn stop_submission( self: Arc, request: sessions::stop_submission::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - impl_unary!(self.sessions, request, cancellation_token, session) + impl_unary!(self.sessions, request, session) } } diff --git a/load-balancer/src/service/submitter.rs b/load-balancer/src/service/submitter.rs index 521bd20..4df9a0b 100644 --- a/load-balancer/src/service/submitter.rs +++ b/load-balancer/src/service/submitter.rs @@ -3,13 +3,13 @@ use std::{collections::HashMap, sync::Arc}; use armonik::{ - reexports::{tokio, tokio_util, tonic}, + reexports::{tokio, tonic, tracing_futures::Instrument}, server::SubmitterService, submitter, }; use futures::StreamExt as _; -use crate::utils::{impl_unary, run_with_cancellation, IntoStatus}; +use crate::utils::{impl_unary, IntoStatus}; use super::Service; @@ -17,40 +17,34 @@ impl SubmitterService for Service { async fn get_service_configuration( self: Arc, _request: submitter::get_service_configuration::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - log::warn!("SubmitterService::GetServiceConfiguration is deprecated, please use ResultsService::GetServiceConfiguration instead"); + tracing::warn!("SubmitterService::GetServiceConfiguration is deprecated, please use ResultsService::GetServiceConfiguration instead"); - crate::utils::run_with_cancellation! { - use cancellation_token; + let mut min = 1 << 24; - let mut min = 1 << 24; - - for (_, cluster) in self.clusters.iter() { - let conf = cluster - .client() - .await - .map_err(IntoStatus::into_status)? - .results() - .get_service_configuration() - .await - .map_err(IntoStatus::into_status)?; - - min = min.min(conf.data_chunk_max_size); - } + for (_, cluster) in self.clusters.iter() { + let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; + let span = client.span(); + let conf = client + .results() + .get_service_configuration() + .instrument(span) + .await + .map_err(IntoStatus::into_status)?; - Ok(submitter::get_service_configuration::Response { - data_chunk_max_size: min, - }) + min = min.min(conf.data_chunk_max_size); } + + Ok(submitter::get_service_configuration::Response { + data_chunk_max_size: min, + }) } async fn create_session( self: Arc, request: submitter::create_session::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - log::warn!("SubmitterService::CreateSession is deprecated, please use SessionsService::CreateSession instead"); + tracing::warn!("SubmitterService::CreateSession is deprecated, please use SessionsService::CreateSession instead"); let n = self.clusters.len(); let i = self @@ -60,12 +54,14 @@ impl SubmitterService for Service { let mut err = None; for (_, cluster) in self.clusters.iter().cycle().skip(i % n).take(n) { - match run_with_cancellation!(cancellation_token, cluster.client()) { - Ok(client) => { - let response = run_with_cancellation!( - cancellation_token, - client.submitter().call(request.clone()) - ); + match cluster.client().await { + Ok(mut client) => { + let span = client.span(); + let response = client + .submitter() + .call(request.clone()) + .instrument(span) + .await; match response { Ok(response) => return Ok(response), @@ -85,311 +81,273 @@ impl SubmitterService for Service { async fn cancel_session( self: Arc, request: submitter::cancel_session::Request, - 
cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - log::warn!("SubmitterService::CancelSession is deprecated, please use SessionsService::CancelSession instead"); + tracing::warn!("SubmitterService::CancelSession is deprecated, please use SessionsService::CancelSession instead"); - impl_unary!(self.submitter, request, cancellation_token, session) + impl_unary!(self.submitter, request, session) } async fn list_tasks( self: Arc, request: submitter::list_tasks::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - log::warn!( + tracing::warn!( "SubmitterService::ListTasks is deprecated, please use TasksService::ListTasks instead" ); - run_with_cancellation! { - use cancellation_token; - - let mut task_ids = Vec::new(); - - for cluster in self.clusters.values() { - let response = cluster - .client() - .await - .map_err(IntoStatus::into_status)? - .submitter() - .call(request.clone()) - .await - .map_err(IntoStatus::into_status)?; + let mut task_ids = Vec::new(); - task_ids.extend(response.task_ids); - } + for cluster in self.clusters.values() { + let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; + let span = client.span(); + let response = client + .submitter() + .call(request.clone()) + .instrument(span) + .await + .map_err(IntoStatus::into_status)?; - Ok(submitter::list_tasks::Response { task_ids }) + task_ids.extend(response.task_ids); } + + Ok(submitter::list_tasks::Response { task_ids }) } async fn list_sessions( self: Arc, request: submitter::list_sessions::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - log::warn!("SubmitterService::ListSessions is deprecated, please use SessionsService::ListSessions instead"); + tracing::warn!("SubmitterService::ListSessions is deprecated, please use SessionsService::ListSessions instead"); - run_with_cancellation! { - use cancellation_token; + let mut session_ids = Vec::new(); - let mut session_ids = Vec::new(); - - for cluster in self.clusters.values() { - let response = cluster - .client() - .await - .map_err(IntoStatus::into_status)? - .submitter() - .call(request.clone()) - .await - .map_err(IntoStatus::into_status)?; - - session_ids.extend(response.session_ids); - } + for cluster in self.clusters.values() { + let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; + let span = client.span(); + let response = client + .submitter() + .call(request.clone()) + .instrument(span) + .await + .map_err(IntoStatus::into_status)?; - Ok(submitter::list_sessions::Response { session_ids }) + session_ids.extend(response.session_ids); } + + Ok(submitter::list_sessions::Response { session_ids }) } async fn count_tasks( self: Arc, request: submitter::count_tasks::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - log::warn!( + tracing::warn!( "SubmitterService::CountTasks is deprecated, please use TasksService::CountTasksByStatus instead" ); - run_with_cancellation! { - use cancellation_token; - - let mut status_count = HashMap::::new(); + let mut status_count = HashMap::::new(); - for cluster in self.clusters.values() { - let response = cluster - .client() - .await - .map_err(IntoStatus::into_status)? - .submitter() - .call(request.clone()) - .await - .map_err(IntoStatus::into_status)? 
- .values; + for cluster in self.clusters.values() { + let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; + let span = client.span(); + let response = client + .submitter() + .call(request.clone()) + .instrument(span) + .await + .map_err(IntoStatus::into_status)? + .values; - for (status, count) in response { - *status_count.entry(status).or_default() += count; - } + for (status, count) in response { + *status_count.entry(status).or_default() += count; } - - Ok(armonik::submitter::count_tasks::Response { - values: status_count, - }) } + + Ok(armonik::submitter::count_tasks::Response { + values: status_count, + }) } async fn try_get_task_output( self: Arc, request: submitter::try_get_task_output::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - log::warn!( + tracing::warn!( "SubmitterService::TryGetTaskOutput is deprecated, please use TasksService::GetTask instead" ); - crate::utils::impl_unary!(self.submitter, request, cancellation_token, session) + crate::utils::impl_unary!(self.submitter, request, session) } async fn wait_for_availability( self: Arc, request: submitter::wait_for_availability::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - log::warn!("SubmitterService::WaitForAvailability is deprecated, please use EventsService::GetEvents instead"); - crate::utils::impl_unary!(self.submitter, request, cancellation_token, session) + tracing::warn!("SubmitterService::WaitForAvailability is deprecated, please use EventsService::GetEvents instead"); + crate::utils::impl_unary!(self.submitter, request, session) } async fn wait_for_completion( self: Arc, request: submitter::wait_for_completion::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - log::warn!("SubmitterService::WaitForCompletion is deprecated, please use EventsService::GetEvents instead"); - run_with_cancellation! { - use cancellation_token.clone(); - - let mut status_count = HashMap::new(); - - let mut wait_all = self - .clusters - .values() - .map(|cluster| async { - cluster - .client() - .await - .map_err(IntoStatus::into_status)? 
- .submitter() - .call(request.clone()) - .await - .map_err(IntoStatus::into_status) - }) - .collect::>(); - - while let Some(completion) = wait_all.next().await { - let mut is_error = false; - let mut is_cancelled = false; - for (status, count) in completion?.values { - match status { - armonik::TaskStatus::Error => is_error = true, - armonik::TaskStatus::Cancelling | armonik::TaskStatus::Cancelled => { - is_cancelled = true - } - _ => (), + tracing::warn!("SubmitterService::WaitForCompletion is deprecated, please use EventsService::GetEvents instead"); + let mut status_count = HashMap::new(); + + let mut wait_all = self + .clusters + .values() + .map(|cluster| async { + let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; + let span = client.span(); + client + .submitter() + .call(request.clone()) + .instrument(span) + .await + .map_err(IntoStatus::into_status) + }) + .collect::>(); + + while let Some(completion) = wait_all.next().await { + let mut is_error = false; + let mut is_cancelled = false; + for (status, count) in completion?.values { + match status { + armonik::TaskStatus::Error => is_error = true, + armonik::TaskStatus::Cancelling | armonik::TaskStatus::Cancelled => { + is_cancelled = true } - *status_count.entry(status).or_default() += count; - } - - if (is_error && request.stop_on_first_task_error) - || (is_cancelled && request.stop_on_first_task_cancellation) - { - std::mem::drop(wait_all); - - return self - .count_tasks( - armonik::submitter::count_tasks::Request { - filter: request.filter, - }, - cancellation_token, - ) - .await; + _ => (), } + *status_count.entry(status).or_default() += count; } - Ok(armonik::submitter::wait_for_completion::Response { - values: status_count, - }) + if (is_error && request.stop_on_first_task_error) + || (is_cancelled && request.stop_on_first_task_cancellation) + { + std::mem::drop(wait_all); + + return self + .count_tasks(armonik::submitter::count_tasks::Request { + filter: request.filter, + }) + .await; + } } + + Ok(armonik::submitter::wait_for_completion::Response { + values: status_count, + }) } async fn cancel_tasks( self: Arc, request: submitter::cancel_tasks::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - log::warn!( + tracing::warn!( "SubmitterService::CancelTasks is deprecated, please use TasksService::CancelTasks instead" ); - run_with_cancellation! { - use cancellation_token; - - for cluster in self.clusters.values() { - cluster - .client() - .await - .map_err(IntoStatus::into_status)? - .submitter() - .call(request.clone()) - .await - .map_err(IntoStatus::into_status)?; - } - - Ok(submitter::cancel_tasks::Response { }) + for cluster in self.clusters.values() { + let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; + let span = client.span(); + client + .submitter() + .call(request.clone()) + .instrument(span) + .await + .map_err(IntoStatus::into_status)?; } + + Ok(submitter::cancel_tasks::Response {}) } async fn task_status( self: Arc, request: submitter::task_status::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - log::warn!( + tracing::warn!( "SubmitterService::TaskStatus is deprecated, please use TasksService::ListTasks instead" ); - run_with_cancellation! 
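// Small sketch of the aggregation performed in `count_tasks` and
// `wait_for_completion` above: each cluster reports a map of task status to
// count, and the load balancer folds them together by summing per status. The
// status enum and the per-cluster data are stand-ins for the ArmoniK types.
use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum TaskStatus {
    Completed,
    Error,
}

fn merge_counts(per_cluster: &[HashMap<TaskStatus, i32>]) -> HashMap<TaskStatus, i32> {
    let mut total = HashMap::new();
    for counts in per_cluster {
        for (&status, &count) in counts {
            *total.entry(status).or_default() += count;
        }
    }
    total
}

fn main() {
    let a = HashMap::from([(TaskStatus::Completed, 3), (TaskStatus::Error, 1)]);
    let b = HashMap::from([(TaskStatus::Completed, 2)]);
    println!("{:?}", merge_counts(&[a, b]));
}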
{ - use cancellation_token; - - let mut task_status = HashMap::::new(); + let mut task_status = HashMap::::new(); - for cluster in self.clusters.values() { - let response = cluster - .client() - .await - .map_err(IntoStatus::into_status)? - .submitter() - .call(request.clone()) - .await - .map_err(IntoStatus::into_status)? - .statuses; + for cluster in self.clusters.values() { + let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; + let span = client.span(); + let response = client + .submitter() + .call(request.clone()) + .instrument(span) + .await + .map_err(IntoStatus::into_status)? + .statuses; - for (task_id, status) in response { - task_status.insert(task_id, status); - } + for (task_id, status) in response { + task_status.insert(task_id, status); } - - Ok(submitter::task_status::Response { - statuses: task_status - }) } + + Ok(submitter::task_status::Response { + statuses: task_status, + }) } async fn result_status( self: Arc, request: submitter::result_status::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - log::warn!("SubmitterService::ResultStatus is deprecated, please use ResultsService::ListResults instead"); - crate::utils::impl_unary!(self.submitter, request, cancellation_token, session) + tracing::warn!("SubmitterService::ResultStatus is deprecated, please use ResultsService::ListResults instead"); + crate::utils::impl_unary!(self.submitter, request, session) } async fn try_get_result( self: Arc, request: submitter::try_get_result::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> Result< impl tonic::codegen::tokio_stream::Stream< Item = Result, > + Send, tonic::Status, > { - log::warn!( + tracing::warn!( "SubmitterService::TryGetResult is deprecated, please use ResultsService::DownloadResultData instead" ); - crate::utils::run_with_cancellation! { - use cancellation_token.clone(); - - let Some(cluster) = self.get_cluster_from_session(&request.session_id).await? else { - return Err(tonic::Status::not_found(format!( - "Session {} was not found", - request.session_id - ))); - }; - - let mut stream = cluster + let Some(cluster) = self.get_cluster_from_session(&request.session_id).await? else { + return Err(tonic::Status::not_found(format!( + "Session {} was not found", + request.session_id + ))); + }; + + let span = tracing::Span::current(); + Ok(async_stream::try_stream! { + let mut client = cluster .client() + .instrument(span) .await - .map_err(IntoStatus::into_status)? + .map_err(IntoStatus::into_status)?; + let span = client.span(); + let mut stream = client .submitter() .try_get_result(request.session_id, request.result_id) + .instrument(span) .await .map_err(IntoStatus::into_status)?; - - Ok(async_stream::try_stream! 
{ - while let Some(Some(item)) = cancellation_token.run_until_cancelled(stream.next()).await { - let item = item.map_err(IntoStatus::into_status)?; - yield item; - } - }) + while let Some(item) = stream.next().await { + let item = item.map_err(IntoStatus::into_status)?; + yield item; + } } + .in_current_span()) } async fn create_small_tasks( self: Arc, request: submitter::create_tasks::SmallRequest, - cancellation_token: tokio_util::sync::CancellationToken, ) -> Result { - log::warn!( + tracing::warn!( "SubmitterService::CreateSmallTasks is deprecated, please use a combination of ResultsService::CreateResults and TasksService::SubmitTasks instead" ); - crate::utils::impl_unary!(self.submitter, request, cancellation_token, session) + crate::utils::impl_unary!(self.submitter, request, session) } async fn create_large_tasks( @@ -398,14 +356,13 @@ impl SubmitterService for Service { Item = Result, > + Send + 'static, - cancellation_token: tokio_util::sync::CancellationToken, ) -> Result { - log::warn!( + tracing::warn!( "SubmitterService::CreateLargeTasks is deprecated, please use a combination of ResultsService::CreateResults and TasksService::SubmitTasks instead" ); let mut request = Box::pin(request); - match crate::utils::run_with_cancellation!(cancellation_token, request.next()) { + match request.next().await { Some(Ok(submitter::create_tasks::LargeRequest::InitRequest( submitter::create_tasks::InitRequest { session_id, @@ -443,21 +400,18 @@ impl SubmitterService for Service { } }; - let mut submitter_client = cluster - .client() - .await - .map_err(IntoStatus::into_status)? - .submitter(); + let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; + let span = client.span(); + let mut submitter_client = client.submitter(); tokio::select! { - result = submitter_client.create_large_tasks(stream) => match result { + result = submitter_client.create_large_tasks(stream).instrument(span) => match result { Ok(result) => Ok(armonik::submitter::create_tasks::Response::Status(result)), Err(err) => Err(err.into_status()), }, Ok(invalid) = rx => { Err(invalid) } - _ = cancellation_token.cancelled() => Err(tonic::Status::aborted("Cancellation token has been triggered")) } } Some(Ok(_)) => Err(tonic::Status::invalid_argument( diff --git a/load-balancer/src/service/tasks.rs b/load-balancer/src/service/tasks.rs index df3daa9..fab9d4b 100644 --- a/load-balancer/src/service/tasks.rs +++ b/load-balancer/src/service/tasks.rs @@ -1,13 +1,13 @@ use std::{collections::HashMap, sync::Arc}; use armonik::{ - reexports::{tokio_stream::StreamExt, tokio_util, tonic}, + reexports::{tokio_stream::StreamExt, tonic, tracing_futures::Instrument}, server::TasksService, tasks, }; use futures::stream::FuturesUnordered; -use crate::utils::{run_with_cancellation, IntoStatus}; +use crate::utils::IntoStatus; use super::Service; @@ -15,262 +15,237 @@ impl TasksService for Service { async fn list( self: Arc, request: tasks::list::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - run_with_cancellation! 
{ - use cancellation_token; - - let mut requested_sessions = Vec::new(); - - for and in &request.filters.or { - let mut has_check = false; - - for field in and { - if let armonik::tasks::filter::Field { - field: - armonik::tasks::Field::Summary(armonik::tasks::SummaryField::SessionId), - condition: - armonik::tasks::filter::Condition::String(armonik::FilterString { - value, - operator: armonik::FilterStringOperator::Equal, - }), - } = field - { - requested_sessions.push(value.as_str()); - has_check = true; - } - } - - if !has_check { - return Err(armonik::reexports::tonic::Status::invalid_argument(String::from("Cannot determine the cluster from the filter, missing condition on session_id"))); + let mut requested_sessions = Vec::new(); + + for and in &request.filters.or { + let mut has_check = false; + + for field in and { + if let armonik::tasks::filter::Field { + field: armonik::tasks::Field::Summary(armonik::tasks::SummaryField::SessionId), + condition: + armonik::tasks::filter::Condition::String(armonik::FilterString { + value, + operator: armonik::FilterStringOperator::Equal, + }), + } = field + { + requested_sessions.push(value.as_str()); + has_check = true; } } - let mut sessions = self - .get_cluster_from_sessions(&requested_sessions) - .await? - .into_iter(); - - let Some((cluster, _)) = sessions.next() else { - return Ok(tasks::list::Response { - tasks: Vec::new(), - page: request.page, - page_size: request.page_size, - total: 0, - }); - }; - - if sessions.next().is_some() { - return Err(tonic::Status::invalid_argument( - "Cannot determine the cluster from the filter, multiple clusters targeted", - )); + if !has_check { + return Err(armonik::reexports::tonic::Status::invalid_argument(String::from("Cannot determine the cluster from the filter, missing condition on session_id"))); } + } + + let mut sessions = self + .get_cluster_from_sessions(&requested_sessions) + .await? + .into_iter(); + + let Some((cluster, _)) = sessions.next() else { + return Ok(tasks::list::Response { + tasks: Vec::new(), + page: request.page, + page_size: request.page_size, + total: 0, + }); + }; + + if sessions.next().is_some() { + return Err(tonic::Status::invalid_argument( + "Cannot determine the cluster from the filter, multiple clusters targeted", + )); + } - let mut client = cluster - .client() - .await - .map_err(IntoStatus::into_status)? - .tasks(); - client.call(request).await.map_err(|err| match err { + let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; + let span = client.span(); + client + .tasks() + .call(request) + .instrument(span) + .await + .map_err(|err| match err { armonik::client::RequestError::Grpc { source, .. } => *source, err => tonic::Status::internal(err.to_string()), }) - } } async fn list_detailed( self: Arc, request: tasks::list_detailed::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - run_with_cancellation! 
{ - use cancellation_token; - - let mut requested_sessions = Vec::new(); - - for and in &request.filters.or { - let mut has_check = false; - - for field in and { - if let armonik::tasks::filter::Field { - field: - armonik::tasks::Field::Summary(armonik::tasks::SummaryField::SessionId), - condition: - armonik::tasks::filter::Condition::String(armonik::FilterString { - value, - operator: armonik::FilterStringOperator::Equal, - }), - } = field - { - requested_sessions.push(value.as_str()); - has_check = true; - } - } - - if !has_check { - return Err(armonik::reexports::tonic::Status::invalid_argument(String::from("Cannot determine the cluster from the filter, missing condition on session_id"))); + let mut requested_sessions = Vec::new(); + + for and in &request.filters.or { + let mut has_check = false; + + for field in and { + if let armonik::tasks::filter::Field { + field: armonik::tasks::Field::Summary(armonik::tasks::SummaryField::SessionId), + condition: + armonik::tasks::filter::Condition::String(armonik::FilterString { + value, + operator: armonik::FilterStringOperator::Equal, + }), + } = field + { + requested_sessions.push(value.as_str()); + has_check = true; } } - let mut sessions = self - .get_cluster_from_sessions(&requested_sessions) - .await? - .into_iter(); - - let Some((cluster, _)) = sessions.next() else { - return Ok(tasks::list_detailed::Response { - tasks: Vec::new(), - page: request.page, - page_size: request.page_size, - total: 0, - }); - }; - - if sessions.next().is_some() { - return Err(tonic::Status::invalid_argument( - "Cannot determine the cluster from the filter, multiple clusters targeted", - )); + if !has_check { + return Err(armonik::reexports::tonic::Status::invalid_argument(String::from("Cannot determine the cluster from the filter, missing condition on session_id"))); } + } - let mut client = cluster - .client() - .await - .map_err(IntoStatus::into_status)? - .tasks(); - client.call(request).await.map_err(|err| match err { + let mut sessions = self + .get_cluster_from_sessions(&requested_sessions) + .await? + .into_iter(); + + let Some((cluster, _)) = sessions.next() else { + return Ok(tasks::list_detailed::Response { + tasks: Vec::new(), + page: request.page, + page_size: request.page_size, + total: 0, + }); + }; + + if sessions.next().is_some() { + return Err(tonic::Status::invalid_argument( + "Cannot determine the cluster from the filter, multiple clusters targeted", + )); + } + + let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; + let span = client.span(); + client + .tasks() + .call(request) + .instrument(span) + .await + .map_err(|err| match err { armonik::client::RequestError::Grpc { source, .. } => *source, err => tonic::Status::internal(err.to_string()), }) - } } async fn get( self: Arc, request: tasks::get::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - crate::utils::impl_unary!(self.tasks, request, cancellation_token, task) + crate::utils::impl_unary!(self.tasks, request, task) } async fn cancel( self: Arc, request: tasks::cancel::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - run_with_cancellation! { - use cancellation_token; - - let mut futures = self - .clusters - .values() - .map(|cluster| async { - Result::<_, tonic::Status>::Ok( - cluster - .client() - .await - .map_err(IntoStatus::into_status)? - .tasks() - .call(request.clone()) - .await - .map_err(IntoStatus::into_status)? 
- .tasks, - ) - }) - .collect::>(); - - let mut tasks = Vec::new(); - while let Some(chunk) = futures.try_next().await? { - tasks.extend(chunk.into_iter()) - } + let mut futures = self + .clusters + .values() + .map(|cluster| async { + let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; + let span = client.span(); + Result::<_, tonic::Status>::Ok( + client + .tasks() + .call(request.clone()) + .instrument(span) + .await + .map_err(IntoStatus::into_status)? + .tasks, + ) + }) + .collect::>(); - Ok(tasks::cancel::Response { tasks }) + let mut tasks = Vec::new(); + while let Some(chunk) = futures.try_next().await? { + tasks.extend(chunk.into_iter()) } + + Ok(tasks::cancel::Response { tasks }) } async fn get_result_ids( self: Arc, request: tasks::get_result_ids::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - run_with_cancellation! { - use cancellation_token; - - let mut futures = self - .clusters - .values() - .map(|cluster| async { - Result::<_, tonic::Status>::Ok( - cluster - .client() - .await - .map_err(IntoStatus::into_status)? - .tasks() - .call(request.clone()) - .await - .map_err(IntoStatus::into_status)? - .task_results, - ) - }) - .collect::>(); + let mut futures = self + .clusters + .values() + .map(|cluster| async { + let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; + let span = client.span(); + Result::<_, tonic::Status>::Ok( + client + .tasks() + .call(request.clone()) + .instrument(span) + .await + .map_err(IntoStatus::into_status)? + .task_results, + ) + }) + .collect::>(); - let mut task_results = HashMap::>::new(); - while let Some(response) = futures.try_next().await? { - for (task_id, result_ids) in response { - task_results.entry(task_id).or_default().extend(result_ids); - } + let mut task_results = HashMap::>::new(); + while let Some(response) = futures.try_next().await? { + for (task_id, result_ids) in response { + task_results.entry(task_id).or_default().extend(result_ids); } - - Ok(tasks::get_result_ids::Response { task_results }) } + + Ok(tasks::get_result_ids::Response { task_results }) } async fn count_status( self: Arc, request: tasks::count_status::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - run_with_cancellation! { - use cancellation_token; - - let mut futures = self - .clusters - .values() - .map(|cluster| async { - Result::<_, tonic::Status>::Ok( - cluster - .client() - .await - .map_err(IntoStatus::into_status)? - .tasks() - .call(request.clone()) - .await - .map_err(IntoStatus::into_status)? - .status, - ) - }) - .collect::>(); + let mut futures = self + .clusters + .values() + .map(|cluster| async { + let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; + let span = client.span(); + Result::<_, tonic::Status>::Ok( + client + .tasks() + .call(request.clone()) + .instrument(span) + .await + .map_err(IntoStatus::into_status)? + .status, + ) + }) + .collect::>(); - let mut status = HashMap::::new(); - while let Some(response) = futures.try_next().await? { - for count in response { - *status.entry(count.status).or_default() += count.count; - } + let mut status = HashMap::::new(); + while let Some(response) = futures.try_next().await? 
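// Compact sketch of the fan-out used by `TasksService::cancel`, `get_result_ids`
// and `count_status` above: one future per cluster is pushed into a
// `FuturesUnordered`, and `try_next` drains the results as they complete,
// returning early on the first error. The per-cluster call is a placeholder.
use futures::{stream::FuturesUnordered, TryStreamExt};

async fn cancel_on(cluster: &str, task_ids: &[&str]) -> Result<Vec<String>, String> {
    Ok(task_ids.iter().map(|t| format!("{t}@{cluster}")).collect())
}

#[tokio::main]
async fn main() -> Result<(), String> {
    let clusters = ["a", "b"];
    let task_ids = ["t1", "t2"];

    let mut futures = clusters
        .iter()
        .map(|cluster| cancel_on(cluster, &task_ids))
        .collect::<FuturesUnordered<_>>();

    let mut cancelled = Vec::new();
    while let Some(chunk) = futures.try_next().await? {
        cancelled.extend(chunk);
    }

    println!("{cancelled:?}");
    Ok(())
}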
{ + for count in response { + *status.entry(count.status).or_default() += count.count; } - - Ok(armonik::tasks::count_status::Response { - status: status - .into_iter() - .map(|(status, count)| armonik::StatusCount { status, count }) - .collect(), - }) } + + Ok(armonik::tasks::count_status::Response { + status: status + .into_iter() + .map(|(status, count)| armonik::StatusCount { status, count }) + .collect(), + }) } async fn submit( self: Arc, request: tasks::submit::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - crate::utils::impl_unary!(self.tasks, request, cancellation_token, session) + crate::utils::impl_unary!(self.tasks, request, session) } } diff --git a/load-balancer/src/service/versions.rs b/load-balancer/src/service/versions.rs index 8f8adfe..4fee05b 100644 --- a/load-balancer/src/service/versions.rs +++ b/load-balancer/src/service/versions.rs @@ -1,12 +1,12 @@ use std::sync::Arc; use armonik::{ - reexports::{tokio_util, tonic}, + reexports::{tonic, tracing_futures::Instrument}, server::VersionsService, versions, }; -use crate::utils::{run_with_cancellation, IntoStatus}; +use crate::utils::IntoStatus; use super::Service; @@ -14,39 +14,34 @@ impl VersionsService for Service { async fn list( self: Arc, _request: versions::list::Request, - cancellation_token: tokio_util::sync::CancellationToken, ) -> std::result::Result { - run_with_cancellation! { - use cancellation_token; - - let mut cluster_versions = Vec::new(); - - for cluster in self.clusters.values() { - let versions = cluster - .client() - .await - .map_err(IntoStatus::into_status)? - .versions() - .list() - .await - .map_err(IntoStatus::into_status)?; - - cluster_versions.push(versions); - } + let mut cluster_versions = Vec::new(); + + for cluster in self.clusters.values() { + let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; + let span = client.span(); + let versions = client + .versions() + .list() + .instrument(span) + .await + .map_err(IntoStatus::into_status)?; + + cluster_versions.push(versions); + } - let mut cluster_versions = cluster_versions.into_iter(); + let mut cluster_versions = cluster_versions.into_iter(); - let Some(versions) = cluster_versions.next() else { - return Err(tonic::Status::internal("No cluster")); - }; + let Some(versions) = cluster_versions.next() else { + return Err(tonic::Status::internal("No cluster")); + }; - for other in cluster_versions { - if versions != other { - return Err(tonic::Status::internal("Mismatch between clusters")); - } + for other in cluster_versions { + if versions != other { + return Err(tonic::Status::internal("Mismatch between clusters")); } - - Ok(versions) } + + Ok(versions) } } diff --git a/load-balancer/src/utils.rs b/load-balancer/src/utils.rs index c68b5bf..8c9b89e 100644 --- a/load-balancer/src/utils.rs +++ b/load-balancer/src/utils.rs @@ -1,36 +1,20 @@ use armonik::reexports::tonic::Status; -macro_rules! run_with_cancellation { - (use $ct:expr; $($body:tt)*) => { - crate::utils::run_with_cancellation!($ct, async move { $($body)* }) - }; - ($ct:expr, $fut:expr) => { - match $ct.run_until_cancelled($fut).await { - Some(res) => res, - None => { - Err(tonic::Status::aborted("Cancellation token has been triggered"))? - } - } - } -} use futures::{stream::futures_unordered, Stream, StreamExt}; -pub(crate) use run_with_cancellation; macro_rules! 
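// Sketch of the consistency check in `VersionsService::list` above: every cluster
// reports its component versions, the first answer becomes the reference, and any
// disagreement is turned into an error. The version type is simplified to a pair
// of strings for the example.
fn consolidate(versions: Vec<(String, String)>) -> Result<(String, String), String> {
    let mut versions = versions.into_iter();
    let Some(reference) = versions.next() else {
        return Err("No cluster".to_string());
    };
    for other in versions {
        if other != reference {
            return Err("Mismatch between clusters".to_string());
        }
    }
    Ok(reference)
}

fn main() {
    let versions = vec![
        ("core".to_string(), "1.0.0".to_string()),
        ("core".to_string(), "1.0.0".to_string()),
    ];
    println!("{:?}", consolidate(versions));
}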
impl_unary { - ($self:ident.$service:ident, $request:ident, $ct:ident, session) => { - crate::utils::impl_unary!($self.$service, $request, $ct, {get_cluster_from_session, session_id, "Session {} was not found"}) + ($self:ident.$service:ident, $request:ident, session) => { + crate::utils::impl_unary!($self.$service, $request, {get_cluster_from_session, session_id, "Session {} was not found"}) }; - ($self:ident.$service:ident, $request:ident, $ct:ident, result) => { - crate::utils::impl_unary!($self.$service, $request, $ct, {get_cluster_from_result, result_id, "Result {} was not found"}) + ($self:ident.$service:ident, $request:ident, result) => { + crate::utils::impl_unary!($self.$service, $request, {get_cluster_from_result, result_id, "Result {} was not found"}) }; - ($self:ident.$service:ident, $request:ident, $ct:ident, task) => { - crate::utils::impl_unary!($self.$service, $request, $ct, {get_cluster_from_task, task_id, "Task {} was not found"}) + ($self:ident.$service:ident, $request:ident, task) => { + crate::utils::impl_unary!($self.$service, $request, {get_cluster_from_task, task_id, "Task {} was not found"}) }; - ($self:ident.$service:ident, $request:ident, $ct:ident, {$get_cluster:ident, $id:ident, $msg:literal}) => { - crate::utils::run_with_cancellation! { - use $ct; - + ($self:ident.$service:ident, $request:ident, {$get_cluster:ident, $id:ident, $msg:literal}) => { + { let Some(cluster) = $self.$get_cluster(&$request.$id).await? else { return Err(tonic::Status::not_found(format!( $msg, @@ -41,9 +25,11 @@ macro_rules! impl_unary { let mut client = cluster .client() .await - .map_err(crate::utils::IntoStatus::into_status)? - .$service(); - client.call($request) + .map_err(crate::utils::IntoStatus::into_status)?; + let span = client.span(); + client.$service() + .call($request) + .instrument(span) .await .map_err(crate::utils::IntoStatus::into_status) } From 62c2594c2bf2fc3178614f9b343b0e0a0dec101a Mon Sep 17 00:00:00 2001 From: Florian Lemaitre Date: Sun, 5 Jan 2025 23:33:30 +0100 Subject: [PATCH 09/12] All RPCs are now concurrent --- load-balancer/src/service/auth.rs | 39 +++--- load-balancer/src/service/health_check.rs | 27 ++-- load-balancer/src/service/mod.rs | 9 +- load-balancer/src/service/partitions.rs | 42 +++---- load-balancer/src/service/results.rs | 38 ++++-- load-balancer/src/service/submitter.rs | 144 ++++++++++++++-------- load-balancer/src/service/versions.rs | 39 +++--- 7 files changed, 207 insertions(+), 131 deletions(-) diff --git a/load-balancer/src/service/auth.rs b/load-balancer/src/service/auth.rs index b206b1b..74063c0 100644 --- a/load-balancer/src/service/auth.rs +++ b/load-balancer/src/service/auth.rs @@ -2,9 +2,10 @@ use std::sync::Arc; use armonik::{ auth, - reexports::{tonic, tracing_futures::Instrument}, + reexports::{tokio_stream::StreamExt, tonic, tracing_futures::Instrument}, server::AuthService, }; +use futures::stream::FuturesUnordered; use crate::utils::IntoStatus; @@ -15,28 +16,26 @@ impl AuthService for Service { self: Arc, _request: auth::current_user::Request, ) -> std::result::Result { - let mut users = Vec::new(); - - for cluster in self.clusters.values() { - let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; - let span = client.span(); - let user = client - .auth() - .current_user() - .instrument(span) - .await - .map_err(IntoStatus::into_status)?; - - users.push(user); - } - - let mut users = users.into_iter(); - - let Some(user) = users.next() else { + let mut users = self + .clusters + .values() + .map(|cluster| 
async { + let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; + let span = client.span(); + client + .auth() + .current_user() + .instrument(span) + .await + .map_err(IntoStatus::into_status) + }) + .collect::>(); + + let Some(user) = users.try_next().await? else { return Err(tonic::Status::internal("No cluster")); }; - for other in users { + while let Some(other) = users.try_next().await? { if user != other { return Err(tonic::Status::internal("Mismatch between clusters")); } diff --git a/load-balancer/src/service/health_check.rs b/load-balancer/src/service/health_check.rs index 181851a..bfa2bad 100644 --- a/load-balancer/src/service/health_check.rs +++ b/load-balancer/src/service/health_check.rs @@ -2,9 +2,10 @@ use std::{collections::HashMap, sync::Arc}; use armonik::{ health_checks, - reexports::{tonic, tracing_futures::Instrument}, + reexports::{tokio_stream::StreamExt, tonic, tracing_futures::Instrument}, server::HealthChecksService, }; +use futures::stream::FuturesUnordered; use crate::utils::IntoStatus; @@ -17,16 +18,22 @@ impl HealthChecksService for Service { ) -> std::result::Result { let mut services = HashMap::::new(); - for cluster in self.clusters.values() { - let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; - let span = client.span(); - let health = client - .health_checks() - .check() - .instrument(span) - .await - .map_err(IntoStatus::into_status)?; + let mut healths = self + .clusters + .values() + .map(|cluster| async { + let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; + let span = client.span(); + client + .health_checks() + .check() + .instrument(span) + .await + .map_err(IntoStatus::into_status) + }) + .collect::>(); + while let Some(health) = healths.try_next().await? 
{ for service in health { match services.entry(service.name) { std::collections::hash_map::Entry::Occupied(mut occupied_entry) => { diff --git a/load-balancer/src/service/mod.rs b/load-balancer/src/service/mod.rs index ad98c21..b2b7367 100644 --- a/load-balancer/src/service/mod.rs +++ b/load-balancer/src/service/mod.rs @@ -2,7 +2,10 @@ use std::{ collections::{HashMap, HashSet}, - sync::{atomic::AtomicUsize, Arc}, + sync::{ + atomic::{AtomicI32, AtomicUsize}, + Arc, + }, }; use sessions::Session; @@ -35,6 +38,8 @@ pub struct Service { mapping_result: RwLock>>, mapping_task: RwLock>>, counter: AtomicUsize, + result_preferred_size: AtomicI32, + submitter_preferred_size: AtomicI32, } impl Service { @@ -84,6 +89,8 @@ impl Service { mapping_result: RwLock::new(Default::default()), mapping_task: RwLock::new(Default::default()), counter: AtomicUsize::new(0), + result_preferred_size: AtomicI32::new(0), + submitter_preferred_size: AtomicI32::new(0), } } diff --git a/load-balancer/src/service/partitions.rs b/load-balancer/src/service/partitions.rs index 2cf8154..b31ed62 100644 --- a/load-balancer/src/service/partitions.rs +++ b/load-balancer/src/service/partitions.rs @@ -5,6 +5,7 @@ use armonik::{ reexports::{tokio_stream::StreamExt, tonic, tracing_futures::Instrument}, server::PartitionsService, }; +use futures::stream::FuturesUnordered; use crate::utils::{merge_streams, IntoStatus}; @@ -91,28 +92,27 @@ impl PartitionsService for Service { ) -> std::result::Result { let mut err = None; - for cluster in self.clusters.values() { - let mut client = match cluster.client().await { - Ok(client) => client, - Err(error) => { - err = Some(error.into_status()); - continue; - } - }; - let span = client.span(); - - match client - .partitions() - .call(request.clone()) - .instrument(span) - .await - { + let mut partitions = self + .clusters + .values() + .map(|cluster| async { + let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; + let span = client.span(); + + client + .partitions() + .call(request.clone()) + .instrument(span) + .await + .map_err(IntoStatus::into_status) + }) + .collect::>(); + + while let Some(response) = partitions.next().await { + match response { Ok(response) => return Ok(response), - Err(error) => { - err = Some(error.into_status()); - continue; - } - }; + Err(error) => err = Some(error), + } } match err { diff --git a/load-balancer/src/service/results.rs b/load-balancer/src/service/results.rs index 5aa875c..f8b8f21 100644 --- a/load-balancer/src/service/results.rs +++ b/load-balancer/src/service/results.rs @@ -5,6 +5,7 @@ use armonik::{ results, server::ResultsService, }; +use futures::stream::FuturesUnordered; use crate::utils::IntoStatus; @@ -142,21 +143,40 @@ impl ResultsService for Service { self: Arc, _request: results::get_service_configuration::Request, ) -> std::result::Result { + // Try to get the cached value + let size = self + .result_preferred_size + .load(std::sync::atomic::Ordering::Relaxed); + if size > 0 { + return Ok(results::get_service_configuration::Response { + data_chunk_max_size: size, + }); + } let mut min = 1 << 24; - for (_, cluster) in self.clusters.iter() { - let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; - let span = client.span(); - let conf = client - .results() - .get_service_configuration() - .instrument(span) - .await - .map_err(IntoStatus::into_status)?; + let mut configurations = self + .clusters + .values() + .map(|cluster| async { + let mut client = 
cluster.client().await.map_err(IntoStatus::into_status)?; + let span = client.span(); + client + .results() + .get_service_configuration() + .instrument(span) + .await + .map_err(IntoStatus::into_status) + }) + .collect::>(); + while let Some(conf) = configurations.try_next().await? { min = min.min(conf.data_chunk_max_size); } + // As all clients should get the same result, it is safe to store it unconditionally + self.result_preferred_size + .store(min, std::sync::atomic::Ordering::Relaxed); + Ok(results::get_service_configuration::Response { data_chunk_max_size: min, }) diff --git a/load-balancer/src/service/submitter.rs b/load-balancer/src/service/submitter.rs index 4df9a0b..9a881b9 100644 --- a/load-balancer/src/service/submitter.rs +++ b/load-balancer/src/service/submitter.rs @@ -3,11 +3,11 @@ use std::{collections::HashMap, sync::Arc}; use armonik::{ - reexports::{tokio, tonic, tracing_futures::Instrument}, + reexports::{tokio, tokio_stream::StreamExt, tonic, tracing_futures::Instrument}, server::SubmitterService, submitter, }; -use futures::StreamExt as _; +use futures::stream::FuturesUnordered; use crate::utils::{impl_unary, IntoStatus}; @@ -20,21 +20,41 @@ impl SubmitterService for Service { ) -> std::result::Result { tracing::warn!("SubmitterService::GetServiceConfiguration is deprecated, please use ResultsService::GetServiceConfiguration instead"); + // Try to get the cached value + let size = self + .submitter_preferred_size + .load(std::sync::atomic::Ordering::Relaxed); + if size > 0 { + return Ok(submitter::get_service_configuration::Response { + data_chunk_max_size: size, + }); + } + let mut min = 1 << 24; - for (_, cluster) in self.clusters.iter() { - let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; - let span = client.span(); - let conf = client - .results() - .get_service_configuration() - .instrument(span) - .await - .map_err(IntoStatus::into_status)?; + let mut configurations = self + .clusters + .values() + .map(|cluster| async { + let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; + let span = client.span(); + client + .submitter() + .get_service_configuration() + .instrument(span) + .await + .map_err(IntoStatus::into_status) + }) + .collect::>(); + while let Some(conf) = configurations.try_next().await? { min = min.min(conf.data_chunk_max_size); } + // As all clients should get the same result, it is safe to store it unconditionally + self.submitter_preferred_size + .store(min, std::sync::atomic::Ordering::Relaxed); + Ok(submitter::get_service_configuration::Response { data_chunk_max_size: min, }) @@ -97,16 +117,22 @@ impl SubmitterService for Service { let mut task_ids = Vec::new(); - for cluster in self.clusters.values() { - let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; - let span = client.span(); - let response = client - .submitter() - .call(request.clone()) - .instrument(span) - .await - .map_err(IntoStatus::into_status)?; + let mut responses = self + .clusters + .values() + .map(|cluster| async { + let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; + let span = client.span(); + client + .submitter() + .call(request.clone()) + .instrument(span) + .await + .map_err(IntoStatus::into_status) + }) + .collect::>(); + while let Some(response) = responses.try_next().await? 
{ task_ids.extend(response.task_ids); } @@ -121,16 +147,22 @@ impl SubmitterService for Service { let mut session_ids = Vec::new(); - for cluster in self.clusters.values() { - let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; - let span = client.span(); - let response = client - .submitter() - .call(request.clone()) - .instrument(span) - .await - .map_err(IntoStatus::into_status)?; + let mut responses = self + .clusters + .values() + .map(|cluster| async { + let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; + let span = client.span(); + client + .submitter() + .call(request.clone()) + .instrument(span) + .await + .map_err(IntoStatus::into_status) + }) + .collect::>(); + while let Some(response) = responses.try_next().await? { session_ids.extend(response.session_ids); } @@ -147,18 +179,23 @@ impl SubmitterService for Service { let mut status_count = HashMap::::new(); - for cluster in self.clusters.values() { - let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; - let span = client.span(); - let response = client - .submitter() - .call(request.clone()) - .instrument(span) - .await - .map_err(IntoStatus::into_status)? - .values; + let mut responses = self + .clusters + .values() + .map(|cluster| async { + let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; + let span = client.span(); + client + .submitter() + .call(request.clone()) + .instrument(span) + .await + .map_err(IntoStatus::into_status) + }) + .collect::>(); - for (status, count) in response { + while let Some(response) = responses.try_next().await? { + for (status, count) in response.values { *status_count.entry(status).or_default() += count; } } @@ -247,16 +284,23 @@ impl SubmitterService for Service { tracing::warn!( "SubmitterService::CancelTasks is deprecated, please use TasksService::CancelTasks instead" ); - for cluster in self.clusters.values() { - let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; - let span = client.span(); - client - .submitter() - .call(request.clone()) - .instrument(span) - .await - .map_err(IntoStatus::into_status)?; - } + + let mut responses = self + .clusters + .values() + .map(|cluster| async { + let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; + let span = client.span(); + client + .submitter() + .call(request.clone()) + .instrument(span) + .await + .map_err(IntoStatus::into_status) + }) + .collect::>(); + + while (responses.try_next().await?).is_some() {} Ok(submitter::cancel_tasks::Response {}) } diff --git a/load-balancer/src/service/versions.rs b/load-balancer/src/service/versions.rs index 4fee05b..781209f 100644 --- a/load-balancer/src/service/versions.rs +++ b/load-balancer/src/service/versions.rs @@ -1,10 +1,11 @@ use std::sync::Arc; use armonik::{ - reexports::{tonic, tracing_futures::Instrument}, + reexports::{tokio_stream::StreamExt, tonic, tracing_futures::Instrument}, server::VersionsService, versions, }; +use futures::stream::FuturesUnordered; use crate::utils::IntoStatus; @@ -15,28 +16,26 @@ impl VersionsService for Service { self: Arc, _request: versions::list::Request, ) -> std::result::Result { - let mut cluster_versions = Vec::new(); - - for cluster in self.clusters.values() { - let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; - let span = client.span(); - let versions = client - .versions() - .list() - .instrument(span) - .await - .map_err(IntoStatus::into_status)?; - - cluster_versions.push(versions); - } - - 
let mut cluster_versions = cluster_versions.into_iter(); - - let Some(versions) = cluster_versions.next() else { + let mut cluster_versions = self + .clusters + .values() + .map(|cluster| async { + let mut client = cluster.client().await.map_err(IntoStatus::into_status)?; + let span = client.span(); + client + .versions() + .list() + .instrument(span) + .await + .map_err(IntoStatus::into_status) + }) + .collect::>(); + + let Some(versions) = cluster_versions.try_next().await? else { return Err(tonic::Status::internal("No cluster")); }; - for other in cluster_versions { + while let Some(other) = cluster_versions.try_next().await? { if versions != other { return Err(tonic::Status::internal("Mismatch between clusters")); } From 942449bdc6db842f763a0ae9087dd74ef0fe8120 Mon Sep 17 00:00:00 2001 From: Florian Lemaitre Date: Mon, 6 Jan 2025 10:21:13 +0100 Subject: [PATCH 10/12] Documentation --- load-balancer/README.md | 84 +++++++++++++++++++++++++++++++++++ load-balancer/lb.example.yaml | 28 ++++++++++++ 2 files changed, 112 insertions(+) create mode 100644 load-balancer/README.md create mode 100644 load-balancer/lb.example.yaml diff --git a/load-balancer/README.md b/load-balancer/README.md new file mode 100644 index 0000000..11673c4 --- /dev/null +++ b/load-balancer/README.md @@ -0,0 +1,84 @@ +# ArmoniK Load Balancer + +The ArmoniK Load Balancer enables to target multiple ArmoniK clusters from a single endpoint. +It is implemented according to the [AEP 4](https://github.com/aneoconsulting/ArmoniK.Community/blob/main/AEP/aep-00004.md). + +When a session is created, a cluster is selected among the configured ones (using a round-robin scheme), and all tasks of this session will be executed on the selected cluster. +If a cluster becomes unreachable, its sessions are unreachable as well, and their tasks are not executed on another cluster. +New sessions will go to the remaining available clusters, though. + +# Usage + +Once the load balancer is running, you can redirect your client to the load balancer endpoint, and it does not require any further client modification. + +The load balancer does not listen using TLS, so if you need this capability, you would need to add an nginx ingress like [the one in front of the ArmoniK control plane](https://github.com/aneoconsulting/ArmoniK.Infra/blob/main/armonik/ingress-configmap.tf). + +The Admin GUI is not part of the load balancer and should be added in front of it, using the same nginx ingress as previously. + +# Configuration + +The load balancer can be configured using either a configuration file, or environment variables. + +## Confiuration file + +You can find a complete example of a configuration file in the [repository](lb.example.yaml). 
+ +Here is a simplified example: + +> `lb.yaml` +> ```yaml +> clusters: +> remote1: +> endpoint: https://remote1.example.com:5001/ +> remote2: +> endpoint: https://remote2.example.com:5001/ +> refresh_delay: 60 +> ``` + +The load balancer can then be run with the following command: +```sh +./load-balancer -c lb.yaml +``` + +Or using the docker image: +```sh +docker run --rm -v "$PWD/lb.yaml:/lb.yaml" dockerhubaneo/armonik_load_balancer -c lb.yaml +``` + +## Environment variables + +You can also pass the configuration using environment variables using PascalCase separated with `__` and prefixed with `LoadBalancer`: + +```sh +export LoadBalancer__Clusters__Remote1__Endpoint="https://remote1.example.com:5001/" +export LoadBalancer__Clusters__Remote2__Endpoint="https://remote2.example.com:5001/" +export LoadBalancer__RefreshDelay="60" +./load-balancer +``` + +This also works with docker: +```sh +docker run --rm \ + -e LoadBalancer__Clusters__Remote1__Endpoint="https://remote1.example.com:5001/" \ + -e LoadBalancer__Clusters__Remote2__Endpoint="https://remote2.example.com:5001/" \ + -e LoadBalancer__RefreshDelay="60" \ + dockerhubaneo/armonik_load_balancer +``` + +# Logs + +By default, the load balancer logs only errors and warnings to stdout. +If you need more verbose logs, you can configure them using the [`RUST_LOG`](https://docs.rs/env_logger/latest/env_logger/#enabling-logging) environment variable. + +Here are some examples: + +```sh +# Info for everything +RUST_LOG=info + +# Info for everything, debug for armonik and load_balancer +RUST_LOG=info,armonik=debug,load-balancer=debug + +# Trace for everything, except gRPC internals +RUST_LOG=trace,h2=off,tower=off +``` diff --git a/load-balancer/lb.example.yaml b/load-balancer/lb.example.yaml new file mode 100644 index 0000000..3c6e791 --- /dev/null +++ b/load-balancer/lb.example.yaml @@ -0,0 +1,28 @@ +# List of clusters +clusters: + # configuration for the cluster called "remote" + remote: + # Endpoint of the cluster + endpoint: https://93.184.215.14:5001/ + # Path to the client certificate + cert_pem: + # Path to the client certificate's private key + key_pem: + # Path to the Certificate Authority used to validate the server + ca_cert: + # Whether the server authenticity should be verified or not + allow_unsafe_connection: false + # Override the hostname for the TLS verification + override_target_name: example.com + + # configuration for the cluster called "local" + local: + # Endpoint of the cluster + endpoint: http://localhost:5001/ + +# IP on which the load balancer will listen (default: 0.0.0.0) +listen_ip: 127.0.0.1 +# Port on which the load balancer will listen (default: 8081) +listen_port: 1337 +# Time interval in seconds between session refreshes (default: 10) +refresh_delay: 60 From f15587b5dc28a3eff7dd4ce2f2f3f297919df2a6 Mon Sep 17 00:00:00 2001 From: Nicolas Gruel Date: Mon, 6 Jan 2025 12:10:50 +0100 Subject: [PATCH 11/12] chore: small corrections for the load balancer README.md --- load-balancer/README.md | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/load-balancer/README.md b/load-balancer/README.md index 11673c4..a27930a 100644 --- a/load-balancer/README.md +++ b/load-balancer/README.md @@ -1,25 +1,25 @@ # ArmoniK Load Balancer -The ArmoniK Load Balancer enables to target multiple ArmoniK clusters from a single endpoint. +The ArmoniK Load Balancer enables the capability to use multiple ArmoniK clusters from a single endpoint. 
It is implemented according to the [AEP 4](https://github.com/aneoconsulting/ArmoniK.Community/blob/main/AEP/aep-00004.md). -When a session is created, a cluster is selected among the configured ones (using a round-robin scheme), and all tasks of this session will be executed on the selected cluster. +When a session is created, a cluster is selected among the configured ones (using a [round-robin scheme](https://en.wikipedia.org/wiki/Round-robin_scheduling)), all tasks of this session will be executed on the selected cluster. If a cluster becomes unreachable, its sessions are unreachable as well, and their tasks are not executed on another cluster. -New sessions will go to the remaining available clusters, though. +Though new sessions will go to the remaining available clusters. # Usage -Once the load balancer is running, you can redirect your client to the load balancer endpoint, and it does not require any further client modification. +Once the load balancer is running, you can redirect your client to the load balancer endpoint. It does not require any further client modification. -The load balancer does not listen using TLS, so if you need this capability, you would need to add an nginx ingress like [the one in front of the ArmoniK control plane](https://github.com/aneoconsulting/ArmoniK.Infra/blob/main/armonik/ingress-configmap.tf). +The load balancer does not listen to TLS connection, if you need this capability, you would need to add a nginx ingress similar to [the one in front of the ArmoniK control plane](https://github.com/aneoconsulting/ArmoniK.Infra/blob/main/armonik/ingress-configmap.tf). The Admin GUI is not part of the load balancer and should be added in front of it, using the same nginx ingress as previously. # Configuration -The load balancer can be configured using either a configuration file, or environment variables. +The load balancer can be configured using either a configuration file or some environment variables. -## Confiuration file +## Configuration file You can find a complete example of a configuration file in the [repository](lb.example.yaml). @@ -47,7 +47,7 @@ docker run --rm -v "$PWD/lb.yaml:/lb.yaml" dockerhubaneo/armonik_load_balancer - ## Environment variables -You can also pass the configuration using environment variables using PascalCase separated with `__` and prefixed with `LoadBalancer`: +You can also pass the configuration using environment variables using (PascalCase)[https://en.wikipedia.org/wiki/Camel_case] separated with `__` and prefixed with `LoadBalancer`: ```sh export LoadBalancer__Clusters__Remote1__Endpoint="https://remote1.example.com:5001/" @@ -57,6 +57,7 @@ export LoadBalancer__RefreshDelay="60" ``` This also works with docker: + ```sh docker run --rm \ -e LoadBalancer__Clusters__Remote1__Endpoint="https://remote1.example.com:5001/" \ From 97993c5b6d0a43d5e91a065a5a077f183e145eb4 Mon Sep 17 00:00:00 2001 From: Florian Lemaitre Date: Mon, 6 Jan 2025 14:28:01 +0100 Subject: [PATCH 12/12] Doc --- load-balancer/README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/load-balancer/README.md b/load-balancer/README.md index a27930a..5214e8f 100644 --- a/load-balancer/README.md +++ b/load-balancer/README.md @@ -9,9 +9,10 @@ Though new sessions will go to the remaining available clusters. # Usage -Once the load balancer is running, you can redirect your client to the load balancer endpoint. It does not require any further client modification. 
+Once the load balancer is running, you can redirect your client to the load balancer endpoint.
+It does not require any further client modification.
 
-The load balancer does not listen to TLS connection, if you need this capability, you would need to add a nginx ingress similar to [the one in front of the ArmoniK control plane](https://github.com/aneoconsulting/ArmoniK.Infra/blob/main/armonik/ingress-configmap.tf).
+The load balancer does not listen on TLS; if you need this capability, you will need to add an nginx ingress similar to [the one in front of the ArmoniK control plane](https://github.com/aneoconsulting/ArmoniK.Infra/blob/main/armonik/ingress-configmap.tf).
 
 The Admin GUI is not part of the load balancer and should be added in front of it, using the same nginx ingress as previously.
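
For reference, the concurrent fan-out that PATCH 09 applies to every multi-cluster RPC boils down to the minimal sketch below. It is illustrative only: it uses plain `futures` types rather than the `armonik` client, and `query_cluster` is a hypothetical stand-in for a real per-cluster RPC such as `get_service_configuration`.

```rust
// Minimal sketch of the fan-out pattern from PATCH 09 (illustrative, not part of the patches).
// Each per-cluster call becomes one future; `FuturesUnordered` polls them all concurrently,
// and `try_next` yields results in completion order, stopping at the first error.
use futures::stream::{FuturesUnordered, TryStreamExt};

// Hypothetical stand-in for a real per-cluster RPC.
async fn query_cluster(endpoint: &str) -> Result<i32, String> {
    // Pretend each cluster reports its maximum data chunk size.
    Ok(endpoint.len() as i32)
}

async fn min_chunk_size(endpoints: &[&str]) -> Result<i32, String> {
    // Build one future per cluster and poll them concurrently.
    let mut calls = endpoints
        .iter()
        .map(|&endpoint| query_cluster(endpoint))
        .collect::<FuturesUnordered<_>>();

    // Aggregate responses as they complete instead of awaiting clusters one at a time.
    let mut min = 1 << 24;
    while let Some(size) = calls.try_next().await? {
        min = min.min(size);
    }
    Ok(min)
}
```

Draining the `FuturesUnordered` with `try_next` keeps the first error as the overall result while still letting all clusters make progress concurrently, which is the behaviour the patched services rely on.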