From 28c9a14a2e4da99842d41240b301022017b2a04a Mon Sep 17 00:00:00 2001 From: Motoyuki Kimura Date: Sat, 19 Oct 2024 04:34:06 +0900 Subject: [PATCH 1/3] metrics: rename `injection_queue_depth` to `global_queue_depth` (#6918) --- tokio/src/runtime/metrics/runtime.rs | 19 +++++++++++++------ tokio/tests/rt_metrics.rs | 8 ++++---- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/tokio/src/runtime/metrics/runtime.rs b/tokio/src/runtime/metrics/runtime.rs index 008245ecc36..c5614bcbef3 100644 --- a/tokio/src/runtime/metrics/runtime.rs +++ b/tokio/src/runtime/metrics/runtime.rs @@ -71,11 +71,11 @@ impl RuntimeMetrics { } /// Returns the number of tasks currently scheduled in the runtime's - /// injection queue. + /// global queue. /// /// Tasks that are spawned or notified from a non-runtime thread are - /// scheduled using the runtime's injection queue. This metric returns the - /// **current** number of tasks pending in the injection queue. As such, the + /// scheduled using the runtime's global queue. This metric returns the + /// **current** number of tasks pending in the global queue. As such, the /// returned value may increase or decrease as new tasks are scheduled and /// processed. /// @@ -88,11 +88,11 @@ impl RuntimeMetrics { /// async fn main() { /// let metrics = Handle::current().metrics(); /// - /// let n = metrics.injection_queue_depth(); - /// println!("{} tasks currently pending in the runtime's injection queue", n); + /// let n = metrics.global_queue_depth(); + /// println!("{} tasks currently pending in the runtime's global queue", n); /// } /// ``` - pub fn injection_queue_depth(&self) -> usize { + pub fn global_queue_depth(&self) -> usize { self.handle.inner.injection_queue_depth() } @@ -681,6 +681,13 @@ impl RuntimeMetrics { } } + /// Renamed to [`RuntimeMetrics::global_queue_depth`] + #[deprecated = "Renamed to global_queue_depth"] + #[doc(hidden)] + pub fn injection_queue_depth(&self) -> usize { + self.handle.inner.injection_queue_depth() + } + /// Returns the number of tasks currently scheduled in the given worker's /// local queue. 
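The old name is kept as a hidden, deprecated alias, so existing callers keep compiling. A rough migration sketch (not part of the patch; assumes a Tokio release that contains this rename):

    use tokio::runtime::Runtime;

    fn main() {
        let rt = Runtime::new().unwrap();
        let metrics = rt.handle().metrics();

        // Spawning from this (non-runtime) thread pushes the task onto the global queue.
        rt.spawn(async {});

        // New name introduced by this patch.
        println!("global queue depth: {}", metrics.global_queue_depth());

        // Old name still compiles, but now emits a deprecation warning:
        // let _ = metrics.injection_queue_depth();
    }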
/// diff --git a/tokio/tests/rt_metrics.rs b/tokio/tests/rt_metrics.rs index e2494cf5e64..dac8860b345 100644 --- a/tokio/tests/rt_metrics.rs +++ b/tokio/tests/rt_metrics.rs @@ -47,7 +47,7 @@ fn num_alive_tasks() { } #[test] -fn injection_queue_depth_current_thread() { +fn global_queue_depth_current_thread() { use std::thread; let rt = current_thread(); @@ -60,11 +60,11 @@ fn injection_queue_depth_current_thread() { .join() .unwrap(); - assert_eq!(1, metrics.injection_queue_depth()); + assert_eq!(1, metrics.global_queue_depth()); } #[test] -fn injection_queue_depth_multi_thread() { +fn global_queue_depth_multi_thread() { let rt = threaded(); let metrics = rt.metrics(); @@ -85,7 +85,7 @@ fn injection_queue_depth_multi_thread() { let mut fail: Option = None; for i in 0..10 { - let depth = metrics.injection_queue_depth(); + let depth = metrics.global_queue_depth(); if i != depth { fail = Some(format!("{i} is not equal to {depth}")); break; From ce1c74f1cc1e31083744b6eb24b0e60ceaca4b4e Mon Sep 17 00:00:00 2001 From: Jonas Fassbender Date: Mon, 21 Oct 2024 10:04:39 +0200 Subject: [PATCH 2/3] metrics: fix deadlock in injection_queue_depth_multi_thread test (#6916) --- tokio/tests/rt_metrics.rs | 66 ++++++++++++++++++++++++--------------- 1 file changed, 41 insertions(+), 25 deletions(-) diff --git a/tokio/tests/rt_metrics.rs b/tokio/tests/rt_metrics.rs index dac8860b345..7adee28f917 100644 --- a/tokio/tests/rt_metrics.rs +++ b/tokio/tests/rt_metrics.rs @@ -2,7 +2,8 @@ #![warn(rust_2018_idioms)] #![cfg(all(feature = "full", not(target_os = "wasi"), target_has_atomic = "64"))] -use std::sync::{Arc, Barrier}; +use std::sync::mpsc; +use std::time::Duration; use tokio::runtime::Runtime; #[test] @@ -68,36 +69,51 @@ fn global_queue_depth_multi_thread() { let rt = threaded(); let metrics = rt.metrics(); - let barrier1 = Arc::new(Barrier::new(3)); - let barrier2 = Arc::new(Barrier::new(3)); - - // Spawn a task per runtime worker to block it. - for _ in 0..2 { - let barrier1 = barrier1.clone(); - let barrier2 = barrier2.clone(); - rt.spawn(async move { - barrier1.wait(); - barrier2.wait(); - }); - } - - barrier1.wait(); + for _ in 0..10 { + if let Ok(_blocking_tasks) = try_block_threaded(&rt) { + for i in 0..10 { + assert_eq!(i, metrics.global_queue_depth()); + rt.spawn(async {}); + } - let mut fail: Option = None; - for i in 0..10 { - let depth = metrics.global_queue_depth(); - if i != depth { - fail = Some(format!("{i} is not equal to {depth}")); - break; + return; } - rt.spawn(async {}); } - barrier2.wait(); + panic!("exhausted every try to block the runtime"); +} - if let Some(fail) = fail { - panic!("{fail}"); +fn try_block_threaded(rt: &Runtime) -> Result>, mpsc::RecvTimeoutError> { + let (tx, rx) = mpsc::channel(); + + let blocking_tasks = (0..rt.metrics().num_workers()) + .map(|_| { + let tx = tx.clone(); + let (task, barrier) = mpsc::channel(); + + // Spawn a task per runtime worker to block it. + rt.spawn(async move { + tx.send(()).unwrap(); + barrier.recv().ok(); + }); + + task + }) + .collect(); + + // Make sure the previously spawned tasks are blocking the runtime by + // receiving a message from each blocking task. + // + // If this times out we were unsuccessful in blocking the runtime and hit + // a deadlock instead (which might happen and is expected behaviour). + for _ in 0..rt.metrics().num_workers() { + rx.recv_timeout(Duration::from_secs(1))?; } + + // Return senders of the mpsc channels used for blocking the runtime as a + // surrogate handle for the tasks. 
Sending a message or dropping the senders + // will unblock the runtime. + Ok(blocking_tasks) } fn current_thread() -> Runtime { From da745ff335dea94378f5ba2b79e2c9f97cb217aa Mon Sep 17 00:00:00 2001 From: Russell Cohen Date: Mon, 21 Oct 2024 08:05:45 -0400 Subject: [PATCH 3/3] metrics: add H2 Histogram option to improve histogram granularity (#6897) --- spellcheck.dic | 9 +- tokio/Cargo.toml | 14 +- tokio/src/runtime/builder.rs | 89 +++- tokio/src/runtime/metrics/batch.rs | 2 +- tokio/src/runtime/metrics/histogram.rs | 294 +++++++---- .../runtime/metrics/histogram/h2_histogram.rs | 480 ++++++++++++++++++ tokio/src/runtime/metrics/mod.rs | 2 +- tokio/src/runtime/metrics/runtime.rs | 12 +- tokio/src/runtime/mod.rs | 2 +- tokio/tests/rt_unstable_metrics.rs | 92 +++- 10 files changed, 869 insertions(+), 127 deletions(-) create mode 100644 tokio/src/runtime/metrics/histogram/h2_histogram.rs diff --git a/spellcheck.dic b/spellcheck.dic index 23c54b54ad0..f368d2d7214 100644 --- a/spellcheck.dic +++ b/spellcheck.dic @@ -1,4 +1,4 @@ -287 +292 & + < @@ -12,12 +12,17 @@ 0xA 0xD 100ms +100ns 10ms +10μs ~12 +120s +12.5% ±1m ±1ms 1ms 1s +25% 250ms 2x ~4 @@ -116,9 +121,11 @@ GID goroutines Growable gzip +H2 hashmaps HashMaps hashsets +HdrHistogram ie Illumos impl diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index b07f50150b7..4edbd25ea8d 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -96,7 +96,7 @@ mio = { version = "1.0.1", optional = true, default-features = false } parking_lot = { version = "0.12.0", optional = true } [target.'cfg(not(target_family = "wasm"))'.dependencies] -socket2 = { version = "0.5.5", optional = true, features = [ "all" ] } +socket2 = { version = "0.5.5", optional = true, features = ["all"] } # Currently unstable. The API exposed by these features may be broken at any time. # Requires `--cfg tokio_unstable` to enable. @@ -123,8 +123,8 @@ optional = true [target.'cfg(windows)'.dev-dependencies.windows-sys] version = "0.52" features = [ - "Win32_Foundation", - "Win32_Security_Authorization", + "Win32_Foundation", + "Win32_Security_Authorization", ] [dev-dependencies] @@ -137,6 +137,7 @@ async-stream = "0.3" [target.'cfg(not(target_family = "wasm"))'.dev-dependencies] socket2 = "0.5.5" tempfile = "3.1.0" +proptest = "1" [target.'cfg(not(all(target_family = "wasm", target_os = "unknown")))'.dev-dependencies] rand = "0.8.0" @@ -165,8 +166,7 @@ features = ["full", "test-util"] # The following are types that are allowed to be exposed in Tokio's public API. # The standard library is allowed by default. allowed_external_types = [ - "bytes::buf::buf_impl::Buf", - "bytes::buf::buf_mut::BufMut", - - "tokio_macros::*", + "bytes::buf::buf_impl::Buf", + "bytes::buf::buf_mut::BufMut", + "tokio_macros::*", ] diff --git a/tokio/src/runtime/builder.rs b/tokio/src/runtime/builder.rs index 4d35120b1f9..883713e8230 100644 --- a/tokio/src/runtime/builder.rs +++ b/tokio/src/runtime/builder.rs @@ -3,7 +3,7 @@ use crate::runtime::handle::Handle; use crate::runtime::{blocking, driver, Callback, HistogramBuilder, Runtime, TaskCallback}; #[cfg(tokio_unstable)] -use crate::runtime::{LocalOptions, LocalRuntime, TaskMeta}; +use crate::runtime::{metrics::HistogramConfiguration, LocalOptions, LocalRuntime, TaskMeta}; use crate::util::rand::{RngSeed, RngSeedGenerator}; use crate::runtime::blocking::BlockingPool; @@ -1102,6 +1102,11 @@ impl Builder { /// `metrics_poll_count_histogram_` builder methods to configure the /// histogram details. 
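For orientation, a sketch of how the collected poll-time histogram is read back out at runtime (illustration only; the accessor methods are the ones touched later in this patch and require building with `--cfg tokio_unstable`):

    // Enable the histogram with its default configuration, run some work,
    // then dump one line per (worker, bucket) pair.
    let rt = tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .enable_metrics_poll_count_histogram()
        .build()
        .unwrap();

    let metrics = rt.metrics();
    rt.block_on(async { tokio::spawn(async {}).await.unwrap() });

    for worker in 0..metrics.num_workers() {
        for bucket in 0..metrics.poll_count_histogram_num_buckets() {
            let range = metrics.poll_count_histogram_bucket_range(bucket);
            let count = metrics.poll_count_histogram_bucket_count(worker, bucket);
            println!("worker {worker}, {range:?}: {count} polls");
        }
    }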
/// + /// By default, a linear histogram with 10 buckets each 100 microseconds wide will be used. + /// This has an extremely low memory footprint, but may not provide enough granularity. For + /// better granularity with low memory usage, use [`metrics_poll_count_histogram_configuration()`] + /// to select [`LogHistogram`] instead. + /// /// # Examples /// /// ``` @@ -1121,6 +1126,8 @@ impl Builder { /// /// [`Handle::metrics()`]: crate::runtime::Handle::metrics /// [`Instant::now()`]: std::time::Instant::now + /// [`LogHistogram`]: crate::runtime::LogHistogram + /// [`metrics_poll_count_histogram_configuration()`]: Builder::metrics_poll_count_histogram_configuration pub fn enable_metrics_poll_count_histogram(&mut self) -> &mut Self { self.metrics_poll_count_histogram_enable = true; self @@ -1142,14 +1149,83 @@ impl Builder { /// ``` /// use tokio::runtime::{self, HistogramScale}; /// + /// # #[allow(deprecated)] /// let rt = runtime::Builder::new_multi_thread() /// .enable_metrics_poll_count_histogram() /// .metrics_poll_count_histogram_scale(HistogramScale::Log) /// .build() /// .unwrap(); /// ``` + #[deprecated(note = "use metrics_poll_count_histogram_configuration")] pub fn metrics_poll_count_histogram_scale(&mut self, histogram_scale: crate::runtime::HistogramScale) -> &mut Self { - self.metrics_poll_count_histogram.scale = histogram_scale; + self.metrics_poll_count_histogram.legacy_mut(|b|b.scale = histogram_scale); + self + } + + /// Configure the histogram for tracking poll times + /// + /// By default, a linear histogram with 10 buckets each 100 microseconds wide will be used. + /// This has an extremely low memory footprint, but may not provide enough granularity. For + /// better granularity with low memory usage, use [`LogHistogram`] instead. 
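Since this method supersedes the three deprecated setters, a rough before/after sketch of the mapping (the same mapping the updated tests later in this patch use):

    use std::time::Duration;
    use tokio::runtime::HistogramConfiguration;

    // Before (still works, now deprecated):
    #[allow(deprecated)]
    let _old = tokio::runtime::Builder::new_multi_thread()
        .enable_metrics_poll_count_histogram()
        .metrics_poll_count_histogram_scale(tokio::runtime::HistogramScale::Linear)
        .metrics_poll_count_histogram_resolution(Duration::from_micros(100))
        .metrics_poll_count_histogram_buckets(10)
        .build()
        .unwrap();

    // After: one call carrying the same linear configuration.
    let _new = tokio::runtime::Builder::new_multi_thread()
        .enable_metrics_poll_count_histogram()
        .metrics_poll_count_histogram_configuration(
            HistogramConfiguration::linear(Duration::from_micros(100), 10),
        )
        .build()
        .unwrap();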
+ /// + /// # Examples + /// Configure a [`LogHistogram`] with [default configuration]: + /// ``` + /// use tokio::runtime; + /// use tokio::runtime::{HistogramConfiguration, LogHistogram}; + /// + /// let rt = runtime::Builder::new_multi_thread() + /// .enable_metrics_poll_count_histogram() + /// .metrics_poll_count_histogram_configuration( + /// HistogramConfiguration::log(LogHistogram::default()) + /// ) + /// .build() + /// .unwrap(); + /// ``` + /// + /// Configure a linear histogram with 100 buckets, each 10μs wide + /// ``` + /// use tokio::runtime; + /// use std::time::Duration; + /// use tokio::runtime::HistogramConfiguration; + /// + /// let rt = runtime::Builder::new_multi_thread() + /// .enable_metrics_poll_count_histogram() + /// .metrics_poll_count_histogram_configuration( + /// HistogramConfiguration::linear(Duration::from_micros(10), 100) + /// ) + /// .build() + /// .unwrap(); + /// ``` + /// + /// Configure a [`LogHistogram`] with the following settings: + /// - Measure times from 100ns to 120s + /// - Max error of 0.1 + /// - No more than 1024 buckets + /// ``` + /// use std::time::Duration; + /// use tokio::runtime; + /// use tokio::runtime::{HistogramConfiguration, LogHistogram}; + /// + /// let rt = runtime::Builder::new_multi_thread() + /// .enable_metrics_poll_count_histogram() + /// .metrics_poll_count_histogram_configuration( + /// HistogramConfiguration::log(LogHistogram::builder() + /// .max_value(Duration::from_secs(120)) + /// .min_value(Duration::from_nanos(100)) + /// .max_error(0.1) + /// .max_buckets(1024) + /// .expect("configuration uses 488 buckets") + /// ) + /// ) + /// .build() + /// .unwrap(); + /// ``` + /// + /// [`LogHistogram`]: crate::runtime::LogHistogram + /// [default configuration]: crate::runtime::LogHistogramBuilder + pub fn metrics_poll_count_histogram_configuration(&mut self, configuration: HistogramConfiguration) -> &mut Self { + self.metrics_poll_count_histogram.histogram_type = configuration.inner; self } @@ -1173,19 +1249,22 @@ impl Builder { /// use tokio::runtime; /// use std::time::Duration; /// + /// # #[allow(deprecated)] /// let rt = runtime::Builder::new_multi_thread() /// .enable_metrics_poll_count_histogram() /// .metrics_poll_count_histogram_resolution(Duration::from_micros(100)) /// .build() /// .unwrap(); /// ``` + #[deprecated(note = "use metrics_poll_count_histogram_configuration")] pub fn metrics_poll_count_histogram_resolution(&mut self, resolution: Duration) -> &mut Self { assert!(resolution > Duration::from_secs(0)); // Sanity check the argument and also make the cast below safe. 
assert!(resolution <= Duration::from_secs(1)); let resolution = resolution.as_nanos() as u64; - self.metrics_poll_count_histogram.resolution = resolution; + + self.metrics_poll_count_histogram.legacy_mut(|b|b.resolution = resolution); self } @@ -1204,14 +1283,16 @@ impl Builder { /// ``` /// use tokio::runtime; /// + /// # #[allow(deprecated)] /// let rt = runtime::Builder::new_multi_thread() /// .enable_metrics_poll_count_histogram() /// .metrics_poll_count_histogram_buckets(15) /// .build() /// .unwrap(); /// ``` + #[deprecated(note = "use `metrics_poll_count_histogram_configuration")] pub fn metrics_poll_count_histogram_buckets(&mut self, buckets: usize) -> &mut Self { - self.metrics_poll_count_histogram.num_buckets = buckets; + self.metrics_poll_count_histogram.legacy_mut(|b|b.num_buckets = buckets); self } } diff --git a/tokio/src/runtime/metrics/batch.rs b/tokio/src/runtime/metrics/batch.rs index 6118bcd04ca..aa6b78db779 100644 --- a/tokio/src/runtime/metrics/batch.rs +++ b/tokio/src/runtime/metrics/batch.rs @@ -171,6 +171,6 @@ cfg_rt_multi_thread! { } } -fn duration_as_u64(dur: Duration) -> u64 { +pub(crate) fn duration_as_u64(dur: Duration) -> u64 { u64::try_from(dur.as_nanos()).unwrap_or(u64::MAX) } diff --git a/tokio/src/runtime/metrics/histogram.rs b/tokio/src/runtime/metrics/histogram.rs index 4cfd769a94e..5e612492418 100644 --- a/tokio/src/runtime/metrics/histogram.rs +++ b/tokio/src/runtime/metrics/histogram.rs @@ -1,38 +1,53 @@ +mod h2_histogram; + +pub use h2_histogram::{InvalidHistogramConfiguration, LogHistogram, LogHistogramBuilder}; + use crate::util::metric_atomics::MetricAtomicU64; use std::sync::atomic::Ordering::Relaxed; +use crate::runtime::metrics::batch::duration_as_u64; use std::cmp; use std::ops::Range; +use std::time::Duration; #[derive(Debug)] pub(crate) struct Histogram { /// The histogram buckets buckets: Box<[MetricAtomicU64]>, - /// Bucket scale, linear or log - scale: HistogramScale, - - /// Minimum resolution - resolution: u64, + /// The type of the histogram + /// + /// This handles `fn(bucket) -> Range` and `fn(value) -> bucket` + histogram_type: HistogramType, } #[derive(Debug, Clone)] pub(crate) struct HistogramBuilder { - /// Histogram scale - pub(crate) scale: HistogramScale, + pub(crate) histogram_type: HistogramType, + pub(crate) legacy: Option, +} - /// Must be a power of 2 +#[derive(Debug, Clone)] +pub(crate) struct LegacyBuilder { pub(crate) resolution: u64, - - /// Number of buckets + pub(crate) scale: HistogramScale, pub(crate) num_buckets: usize, } +impl Default for LegacyBuilder { + fn default() -> Self { + Self { + resolution: 100_000, + num_buckets: 10, + scale: HistogramScale::Linear, + } + } +} + #[derive(Debug)] pub(crate) struct HistogramBatch { buckets: Box<[u64]>, - scale: HistogramScale, - resolution: u64, + configuration: HistogramType, } cfg_unstable! { @@ -47,53 +62,154 @@ cfg_unstable! { /// Logarithmic bucket scale Log, } -} -impl Histogram { - pub(crate) fn num_buckets(&self) -> usize { - self.buckets.len() + /// Configuration for the poll count histogram + #[derive(Debug, Clone)] + pub struct HistogramConfiguration { + pub(crate) inner: HistogramType } - cfg_64bit_metrics! 
{ - pub(crate) fn get(&self, bucket: usize) -> u64 { - self.buckets[bucket].load(Relaxed) + impl HistogramConfiguration { + /// Create a linear bucketed histogram + /// + /// # Arguments + /// + /// * `bucket_width`: The width of each bucket + /// * `num_buckets`: The number of buckets + pub fn linear(bucket_width: Duration, num_buckets: usize) -> Self { + Self { + inner: HistogramType::Linear(LinearHistogram { + num_buckets, + bucket_width: duration_as_u64(bucket_width), + }), + } + } + + /// Creates a log-scaled bucketed histogram + /// + /// See [`LogHistogramBuilder`] for information about configuration & defaults + pub fn log(configuration: impl Into) -> Self { + Self { + inner: HistogramType::H2(configuration.into()), + } } } +} - pub(crate) fn bucket_range(&self, bucket: usize) -> Range { - match self.scale { - HistogramScale::Log => Range { - start: if bucket == 0 { +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub(crate) enum HistogramType { + /// Linear histogram with fixed width buckets + Linear(LinearHistogram), + + /// Old log histogram where each bucket doubles in size + LogLegacy(LegacyLogHistogram), + + /// Log histogram implementation based on H2 Histograms + H2(LogHistogram), +} + +impl HistogramType { + pub(crate) fn num_buckets(&self) -> usize { + match self { + HistogramType::Linear(linear) => linear.num_buckets, + HistogramType::LogLegacy(log) => log.num_buckets, + HistogramType::H2(h2) => h2.num_buckets, + } + } + fn value_to_bucket(&self, value: u64) -> usize { + match self { + HistogramType::Linear(LinearHistogram { + num_buckets, + bucket_width, + }) => { + let max = num_buckets - 1; + cmp::min(value / *bucket_width, max as u64) as usize + } + HistogramType::LogLegacy(LegacyLogHistogram { + num_buckets, + first_bucket_width, + }) => { + let max = num_buckets - 1; + if value < *first_bucket_width { 0 } else { - self.resolution << (bucket - 1) - }, - end: if bucket == self.buckets.len() - 1 { + let significant_digits = 64 - value.leading_zeros(); + let bucket_digits = 64 - (first_bucket_width - 1).leading_zeros(); + cmp::min(significant_digits as usize - bucket_digits as usize, max) + } + } + HistogramType::H2(log_histogram) => log_histogram.value_to_bucket(value), + } + } + + fn bucket_range(&self, bucket: usize) -> Range { + match self { + HistogramType::Linear(LinearHistogram { + num_buckets, + bucket_width, + }) => Range { + start: bucket_width * bucket as u64, + end: if bucket == num_buckets - 1 { u64::MAX } else { - self.resolution << bucket + bucket_width * (bucket as u64 + 1) }, }, - HistogramScale::Linear => Range { - start: self.resolution * bucket as u64, - end: if bucket == self.buckets.len() - 1 { + HistogramType::LogLegacy(LegacyLogHistogram { + num_buckets, + first_bucket_width, + }) => Range { + start: if bucket == 0 { + 0 + } else { + first_bucket_width << (bucket - 1) + }, + end: if bucket == num_buckets - 1 { u64::MAX } else { - self.resolution * (bucket as u64 + 1) + first_bucket_width << bucket }, }, + HistogramType::H2(log) => log.bucket_range(bucket), } } } +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +pub(crate) struct LinearHistogram { + num_buckets: usize, + bucket_width: u64, +} + +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +pub(crate) struct LegacyLogHistogram { + num_buckets: usize, + first_bucket_width: u64, +} + +impl Histogram { + pub(crate) fn num_buckets(&self) -> usize { + self.buckets.len() + } + + cfg_64bit_metrics! 
{ + pub(crate) fn get(&self, bucket: usize) -> u64 { + self.buckets[bucket].load(Relaxed) + } + } + + pub(crate) fn bucket_range(&self, bucket: usize) -> Range { + self.histogram_type.bucket_range(bucket) + } +} + impl HistogramBatch { pub(crate) fn from_histogram(histogram: &Histogram) -> HistogramBatch { let buckets = vec![0; histogram.buckets.len()].into_boxed_slice(); HistogramBatch { buckets, - scale: histogram.scale, - resolution: histogram.resolution, + configuration: histogram.histogram_type, } } @@ -102,8 +218,7 @@ impl HistogramBatch { } pub(crate) fn submit(&self, histogram: &Histogram) { - debug_assert_eq!(self.scale, histogram.scale); - debug_assert_eq!(self.resolution, histogram.resolution); + debug_assert_eq!(self.configuration, histogram.histogram_type); debug_assert_eq!(self.buckets.len(), histogram.buckets.len()); for i in 0..self.buckets.len() { @@ -112,52 +227,51 @@ impl HistogramBatch { } fn value_to_bucket(&self, value: u64) -> usize { - match self.scale { - HistogramScale::Linear => { - let max = self.buckets.len() - 1; - cmp::min(value / self.resolution, max as u64) as usize - } - HistogramScale::Log => { - let max = self.buckets.len() - 1; - - if value < self.resolution { - 0 - } else { - let significant_digits = 64 - value.leading_zeros(); - let bucket_digits = 64 - (self.resolution - 1).leading_zeros(); - cmp::min(significant_digits as usize - bucket_digits as usize, max) - } - } - } + self.configuration.value_to_bucket(value) } } impl HistogramBuilder { pub(crate) fn new() -> HistogramBuilder { HistogramBuilder { - scale: HistogramScale::Linear, - // Resolution is in nanoseconds. - resolution: 100_000, - num_buckets: 10, + histogram_type: HistogramType::Linear(LinearHistogram { + num_buckets: 10, + bucket_width: 100_000, + }), + legacy: None, } } - pub(crate) fn build(&self) -> Histogram { - let mut resolution = self.resolution; - - assert!(resolution > 0); + pub(crate) fn legacy_mut(&mut self, f: impl Fn(&mut LegacyBuilder)) { + let legacy = self.legacy.get_or_insert_with(|| LegacyBuilder::default()); + f(legacy); + } - if matches!(self.scale, HistogramScale::Log) { - resolution = resolution.next_power_of_two(); - } + pub(crate) fn build(&self) -> Histogram { + let histogram_type = match &self.legacy { + Some(legacy) => { + assert!(legacy.resolution > 0); + match legacy.scale { + HistogramScale::Linear => HistogramType::Linear(LinearHistogram { + num_buckets: legacy.num_buckets, + bucket_width: legacy.resolution, + }), + HistogramScale::Log => HistogramType::LogLegacy(LegacyLogHistogram { + num_buckets: legacy.num_buckets, + first_bucket_width: legacy.resolution.next_power_of_two(), + }), + } + } + None => self.histogram_type, + }; + let num_buckets = self.histogram_type.num_buckets(); Histogram { - buckets: (0..self.num_buckets) + buckets: (0..num_buckets) .map(|_| MetricAtomicU64::new(0)) .collect::>() .into_boxed_slice(), - resolution, - scale: self.scale, + histogram_type: histogram_type, } } } @@ -178,12 +292,25 @@ mod test { }}; } + fn linear(resolution: u64, num_buckets: usize) -> Histogram { + HistogramBuilder { + histogram_type: HistogramType::Linear(LinearHistogram { + bucket_width: resolution, + num_buckets, + }), + legacy: None, + } + .build() + } + #[test] fn log_scale_resolution_1() { let h = HistogramBuilder { - scale: HistogramScale::Log, - resolution: 1, - num_buckets: 10, + histogram_type: HistogramType::LogLegacy(LegacyLogHistogram { + first_bucket_width: 1, + num_buckets: 10, + }), + legacy: None, } .build(); @@ -233,9 +360,11 @@ mod test { 
#[test] fn log_scale_resolution_2() { let h = HistogramBuilder { - scale: HistogramScale::Log, - resolution: 2, - num_buckets: 10, + histogram_type: HistogramType::LogLegacy(LegacyLogHistogram { + num_buckets: 10, + first_bucket_width: 2, + }), + legacy: None, } .build(); @@ -319,12 +448,7 @@ mod test { #[test] fn linear_scale_resolution_1() { - let h = HistogramBuilder { - scale: HistogramScale::Linear, - resolution: 1, - num_buckets: 10, - } - .build(); + let h = linear(1, 10); assert_eq!(h.bucket_range(0), 0..1); assert_eq!(h.bucket_range(1), 1..2); @@ -380,12 +504,7 @@ mod test { #[test] fn linear_scale_resolution_100() { - let h = HistogramBuilder { - scale: HistogramScale::Linear, - resolution: 100, - num_buckets: 10, - } - .build(); + let h = linear(100, 10); assert_eq!(h.bucket_range(0), 0..100); assert_eq!(h.bucket_range(1), 100..200); @@ -459,12 +578,7 @@ mod test { #[test] fn inc_by_more_than_one() { - let h = HistogramBuilder { - scale: HistogramScale::Linear, - resolution: 100, - num_buckets: 10, - } - .build(); + let h = linear(100, 10); let mut b = HistogramBatch::from_histogram(&h); diff --git a/tokio/src/runtime/metrics/histogram/h2_histogram.rs b/tokio/src/runtime/metrics/histogram/h2_histogram.rs new file mode 100644 index 00000000000..09e554440f4 --- /dev/null +++ b/tokio/src/runtime/metrics/histogram/h2_histogram.rs @@ -0,0 +1,480 @@ +use crate::runtime::metrics::batch::duration_as_u64; +use std::cmp; +use std::error::Error; +use std::fmt::{Display, Formatter}; +use std::time::Duration; + +const DEFAULT_MIN_VALUE: Duration = Duration::from_nanos(100); +const DEFAULT_MAX_VALUE: Duration = Duration::from_secs(60); + +/// Default precision is 2^-2 = 25% max error +const DEFAULT_PRECISION: u32 = 2; + +/// Log Histogram +/// +/// This implements an [H2 Histogram](https://iop.systems/blog/h2-histogram/), a histogram similar +/// to HdrHistogram, but with better performance. It guarantees an error bound of `2^-p`. +/// +/// Unlike a traditional H2 histogram this has two small changes: +/// 1. The 0th bucket runs for `0..min_value`. This allows truncating a large number of buckets that +/// would cover extremely short timescales that customers usually don't care about. +/// 2. The final bucket runs all the way to `u64::MAX` — traditional H2 histograms would truncate +/// or reject these values. +/// +/// For information about the default configuration, see [`LogHistogramBuilder`]. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub struct LogHistogram { + /// Number of buckets in the histogram + pub(crate) num_buckets: usize, + + /// Precision of histogram. Error is bounded to 2^-p. + pub(crate) p: u32, + + /// All buckets `idx < bucket_offset` are grouped into bucket 0. + /// + /// This increases the smallest measurable value of the histogram. + pub(crate) bucket_offset: usize, +} + +impl Default for LogHistogram { + fn default() -> Self { + LogHistogramBuilder::default().build() + } +} + +impl LogHistogram { + /// Create a Histogram configuration directly from values for `n` and `p`. 
+ /// + /// # Panics + /// - If `bucket_offset` is greater than the specified number of buckets, `(n - p + 1) * 2^p` + fn from_n_p(n: u32, p: u32, bucket_offset: usize) -> Self { + assert!(n >= p, "{n} (n) must be at least as large as {p} (p)"); + let num_buckets = ((n - p + 1) * 1 << p) as usize - bucket_offset; + Self { + num_buckets, + p, + bucket_offset, + } + } + + /// Creates a builder for [`LogHistogram`] + pub fn builder() -> LogHistogramBuilder { + LogHistogramBuilder::default() + } + + /// The maximum value that can be stored before truncation in this histogram + pub fn max_value(&self) -> u64 { + let n = (self.num_buckets / (1 << self.p)) - 1 + self.p as usize; + (1_u64 << n) - 1 + } + + pub(crate) fn value_to_bucket(&self, value: u64) -> usize { + let index = bucket_index(value, self.p); + let offset_bucket = if index < self.bucket_offset as u64 { + 0 + } else { + index - self.bucket_offset as u64 + }; + let max = self.num_buckets - 1; + offset_bucket.min(max as u64) as usize + } + + pub(crate) fn bucket_range(&self, bucket: usize) -> std::ops::Range { + let LogHistogram { + p, + bucket_offset, + num_buckets, + } = self; + let input_bucket = bucket; + let bucket = bucket + bucket_offset; + let range_start_0th_bucket = match input_bucket { + 0 => Some(0_u64), + _ => None, + }; + let range_end_last_bucket = match input_bucket { + n if n == num_buckets - 1 => Some(u64::MAX), + _ => None, + }; + if bucket < 1 << p { + // The first set of buckets are all size 1 + let bucket = bucket as u64; + range_start_0th_bucket.unwrap_or(bucket)..range_end_last_bucket.unwrap_or(bucket + 1) + } else { + // Determine which range of buckets we're in, then determine which bucket in the range it is + let bucket = bucket as u64; + let p = *p as u64; + let w = (bucket >> p) - 1; + let base_bucket = (w + 1) * (1_u64 << p); + let offset = bucket - base_bucket; + let s = 1_u64 << (w + p); + let start = s + (offset << w); + let end = s + ((offset + 1) << w); + + range_start_0th_bucket.unwrap_or(start)..range_end_last_bucket.unwrap_or(end) + } + } +} + +/// Configuration for a [`LogHistogram`] +/// +/// The log-scaled histogram implements an H2 histogram where the first bucket covers +/// the range from 0 to [`LogHistogramBuilder::min_value`] and the final bucket covers +/// [`LogHistogramBuilder::max_value`] to infinity. The precision is bounded to the specified +/// [`LogHistogramBuilder::max_error`]. Specifically, the precision is the next smallest value +/// of `2^-p` such that it is smaller than the requested max error. You can also select `p` directly +/// with [`LogHistogramBuilder::precision_exact`]. +/// +/// Depending on the selected parameters, the number of buckets required is variable. To ensure +/// that the histogram size is acceptable, callers may call [`LogHistogramBuilder::max_buckets`]. +/// If the resulting histogram would require more buckets, then the method will return an error. +/// +/// ## Default values +/// The default configuration provides the following settings: +/// 1. `min_value`: 100ns +/// 2. `max_value`: 68 seconds. The final bucket covers all values >68 seconds +/// 3. `precision`: max error of 25% +/// +/// This uses 237 64-bit buckets. 
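As a worked illustration of the bucketing rule this builder configures (the `bucket_index` helper defined near the bottom of this file): values below `2^(p+1)` each land in a bucket of their own, and past that point every run of `2^p` buckets doubles in width. A standalone sketch, using the same values as the spot-check test below (`p = 9`):

    // Re-implementation of `bucket_index` for illustration only.
    fn bucket_index(value: u64, p: u32) -> u64 {
        if value == 0 {
            return 0;
        }
        let h = 63 - value.leading_zeros();
        if h <= p {
            value
        } else {
            let w = h - p;
            ((w + 1) * (1_u32 << p)) as u64 + ((value - (1_u64 << h)) >> w)
        }
    }

    fn main() {
        assert_eq!(bucket_index(1023, 9), 1023); // last of the 1-wide buckets
        assert_eq!(bucket_index(1024, 9), 1024); // first 2-wide bucket
        assert_eq!(bucket_index(2048, 9), 1536); // first 4-wide bucket
        assert_eq!(bucket_index(2052, 9), 1537); // 2052 sits 4 past 2048, one bucket over
    }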
+#[derive(Default, Debug, Copy, Clone)] +pub struct LogHistogramBuilder { + max_value: Option, + min_value: Option, + precision: Option, +} + +impl From for LogHistogram { + fn from(value: LogHistogramBuilder) -> Self { + value.build() + } +} + +impl LogHistogramBuilder { + /// Set the precision for this histogram + /// + /// This function determines the smallest value of `p` that would satisfy the requested precision + /// such that `2^-p` is less than `precision`. To set `p` directly, use + /// [`LogHistogramBuilder::precision_exact`]. + /// + /// The default value is 25% (2^-2) + /// + /// The highest supported precision is `0.0977%` `(2^-10)`. Provided values + /// less than this will be truncated. + /// + /// # Panics + /// - `precision` < 0 + /// - `precision` > 1 + pub fn max_error(mut self, max_error: f64) -> Self { + if max_error < 0.0 { + panic!("precision must be >= 0"); + }; + if max_error > 1.0 { + panic!("precision must be > 1"); + }; + let mut p = 2; + while 2_f64.powf(-1.0 * p as f64) > max_error && p <= 10 { + p += 1; + } + self.precision = Some(p); + self + } + + /// Sets the precision of this histogram directly. + /// + /// # Panics + /// - `p` < 2 + /// - `p` > 10 + pub fn precision_exact(mut self, p: u32) -> Self { + if p < 2 { + panic!("precision must be >= 2"); + }; + if p > 10 { + panic!("precision must be <= 10"); + }; + self.precision = Some(p); + self + } + + /// Sets the minimum duration that can be accurately stored by this histogram. + /// + /// This sets the resolution. The first bucket will be no larger than + /// the provided duration. Setting this value will reduce the number of required buckets, + /// sometimes quite significantly. + pub fn min_value(mut self, duration: Duration) -> Self { + self.min_value = Some(duration); + self + } + + /// Sets the maximum value that can by this histogram without truncation + /// + /// Values greater than this fall in the final bucket that stretches to `u64::MAX`. + /// + /// # Panics + /// The provided value is 0 + pub fn max_value(mut self, duration: Duration) -> Self { + if duration.is_zero() { + panic!("max value must be greater than 0"); + } + self.max_value = Some(duration); + self + } + + /// Builds the log histogram, enforcing the max buckets requirement + pub fn max_buckets( + &mut self, + max_buckets: usize, + ) -> Result { + let histogram = self.build(); + if histogram.num_buckets > max_buckets { + return Err(InvalidHistogramConfiguration::TooManyBuckets { + required_bucket_count: histogram.num_buckets, + }); + } + Ok(histogram) + } + + /// Builds the log histogram + pub fn build(&self) -> LogHistogram { + let max_value = duration_as_u64(self.max_value.unwrap_or(DEFAULT_MAX_VALUE)); + let max_value = max_value.next_power_of_two(); + let min_value = duration_as_u64(self.min_value.unwrap_or(DEFAULT_MIN_VALUE)); + let p = self.precision.unwrap_or(DEFAULT_PRECISION); + // determine the bucket offset by finding the bucket for the minimum value. We need to lower + // this by one to ensure we are at least as granular as requested. 
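+        // For example, with the defaults (`min_value` = 100ns, `p` = 2), `bucket_index(100, 2)`
+        // is 22, giving a `bucket_offset` of 21; bucket 0 then covers everything below 96ns.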
+ let bucket_offset = cmp::max(bucket_index(min_value, p), 1) - 1; + // n must be at least as large as p + let n = max_value.ilog2().max(p); + LogHistogram::from_n_p(n, p, bucket_offset as usize) + } +} + +/// Error constructing a histogram +#[derive(Debug)] +pub enum InvalidHistogramConfiguration { + /// This histogram required more than the specified number of buckets + TooManyBuckets { + /// The number of buckets that would have been required + required_bucket_count: usize, + }, +} + +impl Display for InvalidHistogramConfiguration { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + InvalidHistogramConfiguration::TooManyBuckets { required_bucket_count } => + write!(f, "The configuration for this histogram would have required {required_bucket_count} buckets") + } + } +} + +impl Error for InvalidHistogramConfiguration {} + +/// Compute the index for a given value + p combination +/// +/// This function does NOT enforce that the value is within the number of expected buckets. +fn bucket_index(value: u64, p: u32) -> u64 { + // Algorithm described here: https://iop.systems/blog/h2-histogram/ + // find the highest non-zero digit + if value == 0 { + return 0; + } + let h = 63 - value.leading_zeros(); + if h <= p { + value + } else { + let w = h - p; + ((w + 1) * (1_u32 << p)) as u64 + ((value - (1_u64 << h)) >> w) + } +} + +#[cfg(test)] +mod test { + use super::InvalidHistogramConfiguration; + use crate::runtime::metrics::histogram::h2_histogram::LogHistogram; + use crate::runtime::metrics::histogram::HistogramType; + + #[cfg(not(target_family = "wasm"))] + mod proptests { + use super::*; + use proptest::prelude::*; + fn valid_log_histogram_strategy() -> impl Strategy { + (2..=50u32, 2..=16u32, 0..100usize).prop_map(|(n, p, bucket_offset)| { + let p = p.min(n); + let base = LogHistogram::from_n_p(n, p, 0); + LogHistogram::from_n_p(n, p, bucket_offset.min(base.num_buckets - 1)) + }) + } + + // test against a wide assortment of different histogram configurations to ensure invariants hold + proptest! { + #[test] + fn proptest_log_histogram_invariants(histogram in valid_log_histogram_strategy()) { + // 1. Assert that the first bucket always starts at 0 + let first_range = histogram.bucket_range(0); + prop_assert_eq!(first_range.start, 0, "First bucket doesn't start at 0"); + + // Check that bucket ranges are disjoint and contiguous + let mut prev_end = 0; + let mut prev_size = 0; + for bucket in 0..histogram.num_buckets { + let range = histogram.bucket_range(bucket); + prop_assert_eq!(range.start, prev_end, "Bucket ranges are not contiguous"); + prop_assert!(range.start < range.end, "Bucket range is empty or reversed"); + + let size = range.end - range.start; + + // 2. Assert that the sizes of the buckets are always powers of 2 + if bucket > 0 && bucket < histogram.num_buckets - 1 { + prop_assert!(size.is_power_of_two(), "Bucket size is not a power of 2"); + } + + if bucket > 1 { + // Assert that the sizes of the buckets are monotonically increasing + // (after the first bucket, which may be smaller than the 0 bucket) + prop_assert!(size >= prev_size, "Bucket sizes are not monotonically increasing: This size {size} (previous: {prev_size}). Bucket: {bucket}"); + } + + + // 4. 
Assert that the size of the buckets is always within the error bound of 2^-p + if bucket > 0 && bucket < histogram.num_buckets - 1 { + let p = histogram.p as f64; + let error_bound = 2.0_f64.powf(-p); + // the most it could be wrong is by the length of the range / 2 + let relative_error = ((size as f64 - 1.0) / 2.0) / range.start as f64; + prop_assert!( + relative_error <= error_bound, + "Bucket size error exceeds bound: {:?} > {:?} ({range:?})", + relative_error, + error_bound + ); + } + + prev_end = range.end; + prev_size = size; + } + prop_assert_eq!(prev_end, u64::MAX, "Last bucket should end at u64::MAX"); + + // Check bijection between value_to_bucket and bucket_range + for bucket in 0..histogram.num_buckets { + let range = histogram.bucket_range(bucket); + for value in [range.start, range.end - 1] { + prop_assert_eq!( + histogram.value_to_bucket(value), + bucket, + "value_to_bucket is not consistent with bucket_range" + ); + } + } + } + } + } + + #[test] + fn bucket_ranges_are_correct() { + let p = 2; + let config = HistogramType::H2(LogHistogram { + num_buckets: 1024, + p, + bucket_offset: 0, + }); + + // check precise buckets. There are 2^(p+1) precise buckets + for i in 0..2_usize.pow(p + 1) { + assert_eq!( + config.value_to_bucket(i as u64), + i, + "{} should be in bucket {}", + i, + i + ); + } + + let mut value = 2_usize.pow(p + 1); + let current_bucket = value; + while value < current_bucket * 2 { + assert_eq!( + config.value_to_bucket(value as u64), + current_bucket + ((value - current_bucket) / 2), + "bucket for {value}" + ); + value += 1; + } + } + + // test buckets against known values + #[test] + fn bucket_computation_spot_check() { + let p = 9; + let config = HistogramType::H2(LogHistogram { + num_buckets: 4096, + p, + bucket_offset: 0, + }); + struct T { + v: u64, + bucket: usize, + } + let tests = [ + T { v: 1, bucket: 1 }, + T { + v: 1023, + bucket: 1023, + }, + T { + v: 1024, + bucket: 1024, + }, + T { + v: 2048, + bucket: 1536, + }, + T { + v: 2052, + bucket: 1537, + }, + ]; + for test in tests { + assert_eq!(config.value_to_bucket(test.v), test.bucket); + } + } + + #[test] + fn last_bucket_goes_to_infinity() { + let conf = HistogramType::H2(LogHistogram::from_n_p(16, 3, 10)); + assert_eq!(conf.bucket_range(conf.num_buckets() - 1).end, u64::MAX); + } + + #[test] + fn bucket_offset() { + // skip the first 10 buckets + let conf = HistogramType::H2(LogHistogram::from_n_p(16, 3, 10)); + for i in 0..10 { + assert_eq!(conf.value_to_bucket(i), 0); + } + // There are 16 1-element buckets. We skipped 10 of them. The first 2 element bucket starts + // at 16 + assert_eq!(conf.value_to_bucket(10), 0); + assert_eq!(conf.value_to_bucket(16), 6); + assert_eq!(conf.value_to_bucket(17), 6); + assert_eq!(conf.bucket_range(6), 16..18); + } + + #[test] + fn max_buckets_enforcement() { + let error = LogHistogram::builder() + .max_error(0.001) + .max_buckets(5) + .expect_err("this produces way more than 5 buckets"); + let num_buckets = match error { + InvalidHistogramConfiguration::TooManyBuckets { + required_bucket_count, + } => required_bucket_count, + }; + assert_eq!(num_buckets, 27549); + } + + #[test] + fn default_configuration_size() { + let conf = LogHistogram::builder().build(); + assert_eq!(conf.num_buckets, 119); + } +} diff --git a/tokio/src/runtime/metrics/mod.rs b/tokio/src/runtime/metrics/mod.rs index 295c97cce88..c4634a4b1ea 100644 --- a/tokio/src/runtime/metrics/mod.rs +++ b/tokio/src/runtime/metrics/mod.rs @@ -18,7 +18,7 @@ cfg_unstable_metrics! 
{ mod histogram; pub(crate) use histogram::{Histogram, HistogramBatch, HistogramBuilder}; #[allow(unreachable_pub)] // rust-lang/rust#57411 - pub use histogram::HistogramScale; + pub use histogram::{HistogramScale, HistogramConfiguration, LogHistogram, LogHistogramBuilder, InvalidHistogramConfiguration}; mod scheduler; diff --git a/tokio/src/runtime/metrics/runtime.rs b/tokio/src/runtime/metrics/runtime.rs index c5614bcbef3..53f82648481 100644 --- a/tokio/src/runtime/metrics/runtime.rs +++ b/tokio/src/runtime/metrics/runtime.rs @@ -767,7 +767,7 @@ impl RuntimeMetrics { /// task poll times. /// /// This value is configured by calling - /// [`metrics_poll_count_histogram_buckets()`] when building the runtime. + /// [`metrics_poll_count_histogram_configuration()`] when building the runtime. /// /// # Examples /// @@ -788,8 +788,8 @@ impl RuntimeMetrics { /// } /// ``` /// - /// [`metrics_poll_count_histogram_buckets()`]: - /// crate::runtime::Builder::metrics_poll_count_histogram_buckets + /// [`metrics_poll_count_histogram_configuration()`]: + /// crate::runtime::Builder::metrics_poll_count_histogram_configuration pub fn poll_count_histogram_num_buckets(&self) -> usize { self.handle .inner @@ -803,7 +803,7 @@ impl RuntimeMetrics { /// Returns the range of task poll times tracked by the given bucket. /// /// This value is configured by calling - /// [`metrics_poll_count_histogram_resolution()`] when building the runtime. + /// [`metrics_poll_count_histogram_configuration()`] when building the runtime. /// /// # Panics /// @@ -832,8 +832,8 @@ impl RuntimeMetrics { /// } /// ``` /// - /// [`metrics_poll_count_histogram_resolution()`]: - /// crate::runtime::Builder::metrics_poll_count_histogram_resolution + /// [`metrics_poll_count_histogram_configuration()`]: + /// crate::runtime::Builder::metrics_poll_count_histogram_configuration #[track_caller] pub fn poll_count_histogram_bucket_range(&self, bucket: usize) -> Range { self.handle diff --git a/tokio/src/runtime/mod.rs b/tokio/src/runtime/mod.rs index c8efbe2f1cd..08117973afa 100644 --- a/tokio/src/runtime/mod.rs +++ b/tokio/src/runtime/mod.rs @@ -410,7 +410,7 @@ cfg_rt! { pub use metrics::RuntimeMetrics; cfg_unstable_metrics! { - pub use metrics::HistogramScale; + pub use metrics::{HistogramScale, HistogramConfiguration, LogHistogram, LogHistogramBuilder, InvalidHistogramConfiguration} ; cfg_net! 
{ pub(crate) use metrics::IoDriverMetrics; diff --git a/tokio/tests/rt_unstable_metrics.rs b/tokio/tests/rt_unstable_metrics.rs index 5503a9a5b03..b7dcceef85c 100644 --- a/tokio/tests/rt_unstable_metrics.rs +++ b/tokio/tests/rt_unstable_metrics.rs @@ -13,7 +13,7 @@ use std::task::Poll; use std::thread; use tokio::macros::support::poll_fn; -use tokio::runtime::Runtime; +use tokio::runtime::{HistogramConfiguration, LogHistogram, Runtime}; use tokio::task::consume_budget; use tokio::time::{self, Duration}; @@ -390,6 +390,62 @@ fn worker_poll_count_and_time() { } } +#[test] +fn log_histogram() { + const N: u64 = 50; + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .enable_metrics_poll_count_histogram() + .metrics_poll_count_histogram_configuration(HistogramConfiguration::log( + LogHistogram::builder() + .max_value(Duration::from_secs(60)) + .min_value(Duration::from_nanos(100)) + .max_error(0.25), + )) + .build() + .unwrap(); + let metrics = rt.metrics(); + let num_buckets = rt.metrics().poll_count_histogram_num_buckets(); + assert_eq!(num_buckets, 119); + rt.block_on(async { + for _ in 0..N { + tokio::spawn(async {}).await.unwrap(); + } + }); + drop(rt); + assert_eq!( + metrics.poll_count_histogram_bucket_range(0), + Duration::from_nanos(0)..Duration::from_nanos(96) + ); + assert_eq!( + metrics.poll_count_histogram_bucket_range(1), + Duration::from_nanos(96)..Duration::from_nanos(96 + 2_u64.pow(4)) + ); + assert_eq!( + metrics.poll_count_histogram_bucket_range(118).end, + Duration::from_nanos(u64::MAX) + ); + let n = (0..metrics.num_workers()) + .flat_map(|i| (0..num_buckets).map(move |j| (i, j))) + .map(|(worker, bucket)| metrics.poll_count_histogram_bucket_count(worker, bucket)) + .sum(); + assert_eq!(N, n); +} + +#[test] +fn log_histogram_default_configuration() { + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .enable_metrics_poll_count_histogram() + .metrics_poll_count_histogram_configuration(HistogramConfiguration::log( + LogHistogram::default(), + )) + .build() + .unwrap(); + let num_buckets = rt.metrics().poll_count_histogram_num_buckets(); + assert_eq!(num_buckets, 119); +} + #[test] fn worker_poll_count_histogram() { const N: u64 = 5; @@ -398,18 +454,20 @@ fn worker_poll_count_histogram() { tokio::runtime::Builder::new_current_thread() .enable_all() .enable_metrics_poll_count_histogram() - .metrics_poll_count_histogram_scale(tokio::runtime::HistogramScale::Linear) - .metrics_poll_count_histogram_buckets(3) - .metrics_poll_count_histogram_resolution(Duration::from_millis(50)) + .metrics_poll_count_histogram_configuration(HistogramConfiguration::linear( + Duration::from_millis(50), + 3, + )) .build() .unwrap(), tokio::runtime::Builder::new_multi_thread() .worker_threads(2) .enable_all() .enable_metrics_poll_count_histogram() - .metrics_poll_count_histogram_scale(tokio::runtime::HistogramScale::Linear) - .metrics_poll_count_histogram_buckets(3) - .metrics_poll_count_histogram_resolution(Duration::from_millis(50)) + .metrics_poll_count_histogram_configuration(HistogramConfiguration::linear( + Duration::from_millis(50), + 3, + )) .build() .unwrap(), ]; @@ -444,9 +502,7 @@ fn worker_poll_count_histogram_range() { let rt = tokio::runtime::Builder::new_current_thread() .enable_all() .enable_metrics_poll_count_histogram() - .metrics_poll_count_histogram_scale(tokio::runtime::HistogramScale::Linear) - .metrics_poll_count_histogram_buckets(3) - .metrics_poll_count_histogram_resolution(us(50)) + 
.metrics_poll_count_histogram_configuration(HistogramConfiguration::linear(us(50), 3)) .build() .unwrap(); let metrics = rt.metrics(); @@ -458,6 +514,8 @@ fn worker_poll_count_histogram_range() { ); assert_eq!(metrics.poll_count_histogram_bucket_range(2), us(100)..max); + // ensure the old methods work too + #[allow(deprecated)] let rt = tokio::runtime::Builder::new_current_thread() .enable_all() .enable_metrics_poll_count_histogram() @@ -481,17 +539,19 @@ fn worker_poll_count_histogram_disabled_without_explicit_enable() { let rts = [ tokio::runtime::Builder::new_current_thread() .enable_all() - .metrics_poll_count_histogram_scale(tokio::runtime::HistogramScale::Linear) - .metrics_poll_count_histogram_buckets(3) - .metrics_poll_count_histogram_resolution(Duration::from_millis(50)) + .metrics_poll_count_histogram_configuration(HistogramConfiguration::linear( + Duration::from_millis(50), + 3, + )) .build() .unwrap(), tokio::runtime::Builder::new_multi_thread() .worker_threads(2) .enable_all() - .metrics_poll_count_histogram_scale(tokio::runtime::HistogramScale::Linear) - .metrics_poll_count_histogram_buckets(3) - .metrics_poll_count_histogram_resolution(Duration::from_millis(50)) + .metrics_poll_count_histogram_configuration(HistogramConfiguration::linear( + Duration::from_millis(50), + 3, + )) .build() .unwrap(), ];
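A note on the magic numbers asserted in the `log_histogram` test above, derived from the `from_n_p` and `bucket_index` definitions in `h2_histogram.rs` (a sketch of the arithmetic, not additional test code):

    // max_value = 60s -> 60e9 ns, next power of two = 2^36, so n = 36
    // max_error = 0.25 -> p = 2 (2^-2 = 0.25)
    // raw bucket count: (n - p + 1) * 2^p = 35 * 4 = 140
    // min_value = 100ns -> bucket_index(100, 2) = 22 -> bucket_offset = 21
    // buckets kept: 140 - 21 = 119, matching poll_count_histogram_num_buckets()
    // bucket 0 therefore spans 0..96ns and bucket 1 spans 96ns..112ns (96 + 2^4),
    // matching the two bucket_range assertions in the test.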