diff --git a/.cargo/audit.toml b/.cargo/audit.toml index 4fd083d9481..25e764be2b1 100644 --- a/.cargo/audit.toml +++ b/.cargo/audit.toml @@ -2,6 +2,7 @@ [advisories] ignore = [ - # https://github.com/tokio-rs/tokio/issues/4177 - "RUSTSEC-2020-0159", + # We depend on nix 0.22 only via mio-aio, a dev-dependency. + # https://github.com/tokio-rs/tokio/pull/4255#issuecomment-974786349 + "RUSTSEC-2021-0119", ] diff --git a/.cirrus.yml b/.cirrus.yml index 4bef869c24f..1f431b2d201 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -29,7 +29,7 @@ task: setup_script: - pkg install -y bash curl - curl https://sh.rustup.rs -sSf --output rustup.sh - - sh rustup.sh -y --profile minimal --default-toolchain nightly-2021-10-25 + - sh rustup.sh -y --profile minimal --default-toolchain nightly-2022-01-12 - . $HOME/.cargo/env - | echo "~~~~ rustc --version ~~~~" diff --git a/.clippy.toml b/.clippy.toml index 1cf14c6d01e..eb66960ac8d 100644 --- a/.clippy.toml +++ b/.clippy.toml @@ -1 +1 @@ -msrv = "1.45" +msrv = "1.46" diff --git a/.github/labeler.yml b/.github/labeler.yml index 604d3631a27..6e53c92aaf7 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -1,9 +1,8 @@ R-loom: -- ./tokio/src/sync/* -- ./tokio/src/sync/**/* -- ./tokio-util/src/sync/* -- ./tokio-util/src/sync/**/* -- ./tokio/src/runtime/* -- ./tokio/src/runtime/**/* - +- tokio/src/sync/* +- tokio/src/sync/**/* +- tokio-util/src/sync/* +- tokio-util/src/sync/**/* +- tokio/src/runtime/* +- tokio/src/runtime/**/* diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9fec5837e3c..f9eded120da 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -9,8 +9,12 @@ name: CI env: RUSTFLAGS: -Dwarnings RUST_BACKTRACE: 1 - nightly: nightly-2021-10-25 - minrust: 1.45.2 + nightly: nightly-2022-01-12 + minrust: 1.46 + +defaults: + run: + shell: bash jobs: # Depends on all action sthat are required for a "successful" CI run. @@ -20,6 +24,7 @@ jobs: needs: - test - test-unstable + - test-parking_lot - miri - cross - features @@ -30,6 +35,7 @@ jobs: - valgrind - loom-compile - check-readme + - test-hyper steps: - run: exit 0 @@ -77,6 +83,27 @@ jobs: # bench.yml workflow runs benchmarks only on linux. if: startsWith(matrix.os, 'ubuntu') + test-parking_lot: + # The parking_lot crate has a feature called send_guard which changes when + # some of its types are Send. Tokio has some measures in place to prevent + # this from affecting when Tokio types are Send, and this test exists to + # ensure that those measures are working. + # + # This relies on the potentially affected Tokio type being listed in + # `tokio/tokio/tests/async_send_sync.rs`. 
+ name: compile tests with parking lot send guards + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Install Rust + run: rustup update stable + - uses: Swatinem/rust-cache@v1 + - name: Enable parking_lot send_guard feature + # Inserts the line "plsend = ["parking_lot/send_guard"]" right after [features] + run: sed -i '/\[features\]/a plsend = ["parking_lot/send_guard"]' tokio/Cargo.toml + - name: Compile tests with all features enabled + run: cargo build --workspace --all-features --tests + valgrind: name: valgrind runs-on: ubuntu-latest @@ -267,8 +294,6 @@ jobs: - name: Install Rust run: rustup update stable - uses: Swatinem/rust-cache@v1 - - name: Install rustfmt - run: rustup component add rustfmt # Check fmt - name: "rustfmt --check" @@ -285,7 +310,7 @@ jobs: steps: - uses: actions/checkout@v2 - name: Install Rust - run: rustup update 1.52.1 && rustup default 1.52.1 + run: rustup update 1.57 && rustup default 1.57 - uses: Swatinem/rust-cache@v1 - name: Install clippy run: rustup component add clippy @@ -335,3 +360,34 @@ jobs: - name: Verify that Tokio version is up to date in README working-directory: tokio run: grep -q "$(sed '/^version = /!d' Cargo.toml | head -n1)" README.md + + test-hyper: + name: Test hyper + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: + - windows-latest + - ubuntu-latest + - macos-latest + steps: + - uses: actions/checkout@v2 + - name: Install Rust + run: rustup update stable + - uses: Swatinem/rust-cache@v1 + - name: Test hyper + run: | + set -x + git clone https://github.com/hyperium/hyper.git + cd hyper + # checkout the latest release because HEAD maybe contains breakage. + tag=$(git describe --abbrev=0 --tags) + git checkout "${tag}" + echo '[workspace]' >>Cargo.toml + echo '[patch.crates-io]' >>Cargo.toml + echo 'tokio = { path = "../tokio" }' >>Cargo.toml + echo 'tokio-util = { path = "../tokio-util" }' >>Cargo.toml + echo 'tokio-stream = { path = "../tokio-stream" }' >>Cargo.toml + echo 'tokio-test = { path = "../tokio-test" }' >>Cargo.toml + git diff + cargo test --features full diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml index 4386c381510..6d5dd6fbe1b 100644 --- a/.github/workflows/labeler.yml +++ b/.github/workflows/labeler.yml @@ -11,3 +11,4 @@ jobs: - uses: actions/labeler@v3 with: repo-token: "${{ secrets.GITHUB_TOKEN }}" + sync-labels: true diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 66ec614e585..289e069a3cf 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -139,6 +139,14 @@ correctly, use this command: RUSTDOCFLAGS="--cfg docsrs" cargo +nightly doc --all-features ``` +To build documentation including Tokio's unstable features, it is necessary to +pass `--cfg tokio_unstable` to both RustDoc *and* rustc. To build the +documentation for unstable features, use this command: + +``` +RUSTDOCFLAGS="--cfg docsrs --cfg tokio_unstable" RUSTFLAGS="--cfg tokio_unstable" cargo +nightly doc --all-features +``` + There is currently a [bug in cargo] that means documentation cannot be built from the root of the workspace. If you `cd` into the `tokio` subdirectory the command shown above will work. 
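Editor's note on the CONTRIBUTING.md change above: a minimal usage sketch of the documented workflow, assuming a local checkout of the workspace and a nightly toolchain already installed. The `cd tokio` step is the workaround for the cargo bug mentioned in the surrounding text (docs cannot currently be built from the workspace root); the command itself is the one given in the CONTRIBUTING.md addition.

```
# Build the documentation with Tokio's unstable features enabled.
# Run from inside the `tokio` subdirectory, not the workspace root.
cd tokio
RUSTDOCFLAGS="--cfg docsrs --cfg tokio_unstable" RUSTFLAGS="--cfg tokio_unstable" cargo +nightly doc --all-features
```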
diff --git a/README.md b/README.md index 19f049cba78..ad192fec706 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.14.0", features = ["full"] } +tokio = { version = "1.15.0", features = ["full"] } ``` Then, on your main.rs: @@ -164,7 +164,7 @@ several other libraries, including: ## Supported Rust Versions Tokio is built against the latest stable release. The minimum supported version -is 1.45. The current Tokio version is not guaranteed to build on Rust versions +is 1.46. The current Tokio version is not guaranteed to build on Rust versions earlier than the minimum supported version. ## Release schedule @@ -181,6 +181,7 @@ released as a new patch release for each LTS minor version. Our current LTS releases are: * `1.8.x` - LTS release until February 2022. + * `1.14.x` - LTS release until June 2022. Each LTS release will continue to receive backported fixes for at least half a year. If you wish to use a fixed minor release in your project, we recommend diff --git a/benches/Cargo.toml b/benches/Cargo.toml index eb2784dc50f..2b98cfd3934 100644 --- a/benches/Cargo.toml +++ b/benches/Cargo.toml @@ -9,7 +9,7 @@ tokio = { version = "1.5.0", path = "../tokio", features = ["full"] } bencher = "0.1.5" [dev-dependencies] -tokio-util = { version = "0.6.6", path = "../tokio-util", features = ["full"] } +tokio-util = { version = "0.7.0", path = "../tokio-util", features = ["full"] } tokio-stream = { path = "../tokio-stream" } [target.'cfg(unix)'.dependencies] diff --git a/benches/fs.rs b/benches/fs.rs index 305668f9a54..026814ff468 100644 --- a/benches/fs.rs +++ b/benches/fs.rs @@ -21,7 +21,7 @@ fn rt() -> tokio::runtime::Runtime { const BLOCK_COUNT: usize = 1_000; const BUFFER_SIZE: usize = 4096; -const DEV_ZERO: &'static str = "/dev/zero"; +const DEV_ZERO: &str = "/dev/zero"; fn async_read_codec(b: &mut Bencher) { let rt = rt(); diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 1d155a2b17b..d2aca69d84a 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -5,14 +5,14 @@ publish = false edition = "2018" # If you copy one of the examples into a new project, you should be using -# [dependencies] instead. +# [dependencies] instead, and delete the **path**. 
[dev-dependencies] -tokio = { version = "1.0.0", path = "../tokio",features = ["full", "tracing"] } -tokio-util = { version = "0.6.3", path = "../tokio-util",features = ["full"] } +tokio = { version = "1.0.0", path = "../tokio", features = ["full", "tracing"] } +tokio-util = { version = "0.7.0", path = "../tokio-util", features = ["full"] } tokio-stream = { version = "0.1", path = "../tokio-stream" } tracing = "0.1" -tracing-subscriber = { version = "0.2.7", default-features = false, features = ["fmt", "ansi", "env-filter", "chrono", "tracing-log"] } +tracing-subscriber = { version = "0.3.1", default-features = false, features = ["fmt", "ansi", "env-filter", "tracing-log"] } bytes = "1.0.0" futures = { version = "0.3.0", features = ["thread-pool"]} http = "0.2" diff --git a/examples/tinydb.rs b/examples/tinydb.rs index 9da429ace69..5a1983df6b4 100644 --- a/examples/tinydb.rs +++ b/examples/tinydb.rs @@ -149,7 +149,7 @@ async fn main() -> Result<(), Box> { } fn handle_request(line: &str, db: &Arc) -> Response { - let request = match Request::parse(&line) { + let request = match Request::parse(line) { Ok(req) => req, Err(e) => return Response::Error { msg: e }, }; diff --git a/tests-build/tests/fail/macros_type_mismatch.stderr b/tests-build/tests/fail/macros_type_mismatch.stderr index a8fa99bc63b..f98031514ff 100644 --- a/tests-build/tests/fail/macros_type_mismatch.stderr +++ b/tests-build/tests/fail/macros_type_mismatch.stderr @@ -1,5 +1,5 @@ error[E0308]: mismatched types - --> $DIR/macros_type_mismatch.rs:5:5 + --> tests/fail/macros_type_mismatch.rs:5:5 | 5 | Ok(()) | ^^^^^^ expected `()`, found enum `Result` @@ -16,7 +16,7 @@ help: try adding a return type | ++++++++++++++++ error[E0308]: mismatched types - --> $DIR/macros_type_mismatch.rs:10:5 + --> tests/fail/macros_type_mismatch.rs:10:5 | 9 | async fn missing_return_type() { | - help: try adding a return type: `-> Result<(), _>` @@ -27,7 +27,7 @@ error[E0308]: mismatched types found enum `Result<(), _>` error[E0308]: mismatched types - --> $DIR/macros_type_mismatch.rs:23:5 + --> tests/fail/macros_type_mismatch.rs:23:5 | 14 | async fn extra_semicolon() -> Result<(), ()> { | -------------- expected `Result<(), ()>` because of return type @@ -37,9 +37,8 @@ error[E0308]: mismatched types | = note: expected enum `Result<(), ()>` found unit type `()` -help: try using a variant of the expected enum +help: try adding an expression at the end of the block | -23 | Ok(Ok(());) - | -23 | Err(Ok(());) +23 ~ Ok(());; +24 + Ok(()) | diff --git a/tokio-macros/CHANGELOG.md b/tokio-macros/CHANGELOG.md index eb5504cc5da..633b43fbbdb 100644 --- a/tokio-macros/CHANGELOG.md +++ b/tokio-macros/CHANGELOG.md @@ -1,3 +1,9 @@ +# 1.7.0 (December 15th, 2021) + +- macros: address remainging clippy::semicolon_if_nothing_returned warning ([#4252]) + +[#4252]: https://github.com/tokio-rs/tokio/pull/4252 + # 1.6.0 (November 16th, 2021) - macros: fix mut patterns in `select!` macro ([#4211]) diff --git a/tokio-macros/Cargo.toml b/tokio-macros/Cargo.toml index 2114cd2942c..d9b05795bf9 100644 --- a/tokio-macros/Cargo.toml +++ b/tokio-macros/Cargo.toml @@ -2,17 +2,15 @@ name = "tokio-macros" # When releasing to crates.io: # - Remove path dependencies -# - Update doc url -# - Cargo.toml # - Update CHANGELOG.md. # - Create "tokio-macros-1.0.x" git tag. 
-version = "1.6.0" +version = "1.7.0" edition = "2018" +rust-version = "1.46" authors = ["Tokio Contributors "] license = "MIT" repository = "https://github.com/tokio-rs/tokio" homepage = "https://tokio.rs" -documentation = "https://docs.rs/tokio-macros/1.6.0/tokio_macros" description = """ Tokio's proc macros. """ diff --git a/tokio-macros/src/entry.rs b/tokio-macros/src/entry.rs index 01f8ee4c1eb..5cb4a49b430 100644 --- a/tokio-macros/src/entry.rs +++ b/tokio-macros/src/entry.rs @@ -339,17 +339,17 @@ fn parse_knobs(mut input: syn::ItemFn, is_test: bool, config: FinalConfig) -> To let body = &input.block; let brace_token = input.block.brace_token; let (tail_return, tail_semicolon) = match body.stmts.last() { - Some(syn::Stmt::Semi(expr, _)) => match expr { - syn::Expr::Return(_) => (quote! { return }, quote! { ; }), - _ => match &input.sig.output { + Some(syn::Stmt::Semi(syn::Expr::Return(_), _)) => (quote! { return }, quote! { ; }), + Some(syn::Stmt::Semi(..)) | Some(syn::Stmt::Local(..)) | None => { + match &input.sig.output { syn::ReturnType::Type(_, ty) if matches!(&**ty, syn::Type::Tuple(ty) if ty.elems.is_empty()) => { (quote! {}, quote! { ; }) // unit } syn::ReturnType::Default => (quote! {}, quote! { ; }), // unit syn::ReturnType::Type(..) => (quote! {}, quote! {}), // ! or another - }, - }, + } + } _ => (quote! {}, quote! {}), }; input.block = syn::parse2(quote_spanned! {last_stmt_end_span=> diff --git a/tokio-macros/src/lib.rs b/tokio-macros/src/lib.rs index f8ba8eab18a..38638a1df8a 100644 --- a/tokio-macros/src/lib.rs +++ b/tokio-macros/src/lib.rs @@ -5,7 +5,6 @@ rust_2018_idioms, unreachable_pub )] -#![cfg_attr(docsrs, deny(rustdoc::broken_intra_doc_links))] #![doc(test( no_crate_inject, attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables)) diff --git a/tokio-stream/Cargo.toml b/tokio-stream/Cargo.toml index 83f8551826c..91a86bbbba5 100644 --- a/tokio-stream/Cargo.toml +++ b/tokio-stream/Cargo.toml @@ -2,17 +2,15 @@ name = "tokio-stream" # When releasing to crates.io: # - Remove path dependencies -# - Update doc url -# - Cargo.toml # - Update CHANGELOG.md. # - Create "tokio-stream-0.1.x" git tag. version = "0.1.8" edition = "2018" +rust-version = "1.46" authors = ["Tokio Contributors "] license = "MIT" repository = "https://github.com/tokio-rs/tokio" homepage = "https://tokio.rs" -documentation = "https://docs.rs/tokio-stream/0.1.8/tokio_stream" description = """ Utilities to work with `Stream` and `tokio`. 
""" @@ -31,7 +29,7 @@ signal = ["tokio/signal"] futures-core = { version = "0.3.0" } pin-project-lite = "0.2.0" tokio = { version = "1.8.0", path = "../tokio", features = ["sync"] } -tokio-util = { version = "0.6.3", path = "../tokio-util", optional = true } +tokio-util = { version = "0.7.0", path = "../tokio-util", optional = true } [dev-dependencies] tokio = { version = "1.2.0", path = "../tokio", features = ["full", "test-util"] } diff --git a/tokio-stream/src/lib.rs b/tokio-stream/src/lib.rs index b7f232fdadc..f600ccb8d36 100644 --- a/tokio-stream/src/lib.rs +++ b/tokio-stream/src/lib.rs @@ -10,7 +10,6 @@ unreachable_pub )] #![cfg_attr(docsrs, feature(doc_cfg))] -#![cfg_attr(docsrs, deny(rustdoc::broken_intra_doc_links))] #![doc(test( no_crate_inject, attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables)) diff --git a/tokio-stream/src/stream_ext.rs b/tokio-stream/src/stream_ext.rs index 1157c9ee353..b79883bd6e8 100644 --- a/tokio-stream/src/stream_ext.rs +++ b/tokio-stream/src/stream_ext.rs @@ -1,3 +1,4 @@ +use core::future::Future; use futures_core::Stream; mod all; @@ -27,6 +28,9 @@ use fuse::Fuse; mod map; use map::Map; +mod map_while; +use map_while::MapWhile; + mod merge; use merge::Merge; @@ -39,15 +43,18 @@ use skip::Skip; mod skip_while; use skip_while::SkipWhile; -mod try_next; -use try_next::TryNext; - mod take; use take::Take; mod take_while; use take_while::TakeWhile; +mod then; +use then::Then; + +mod try_next; +use try_next::TryNext; + cfg_time! { mod timeout; use timeout::Timeout; @@ -197,6 +204,93 @@ pub trait StreamExt: Stream { Map::new(self, f) } + /// Map this stream's items to a different type for as long as determined by + /// the provided closure. A stream of the target type will be returned, + /// which will yield elements until the closure returns `None`. + /// + /// The provided closure is executed over all elements of this stream as + /// they are made available, until it returns `None`. It is executed inline + /// with calls to [`poll_next`](Stream::poll_next). Once `None` is returned, + /// the underlying stream will not be polled again. + /// + /// Note that this function consumes the stream passed into it and returns a + /// wrapped version of it, similar to the [`Iterator::map_while`] method in the + /// standard library. + /// + /// # Examples + /// + /// ``` + /// # #[tokio::main] + /// # async fn main() { + /// use tokio_stream::{self as stream, StreamExt}; + /// + /// let stream = stream::iter(1..=10); + /// let mut stream = stream.map_while(|x| { + /// if x < 4 { + /// Some(x + 3) + /// } else { + /// None + /// } + /// }); + /// assert_eq!(stream.next().await, Some(4)); + /// assert_eq!(stream.next().await, Some(5)); + /// assert_eq!(stream.next().await, Some(6)); + /// assert_eq!(stream.next().await, None); + /// # } + /// ``` + fn map_while(self, f: F) -> MapWhile + where + F: FnMut(Self::Item) -> Option, + Self: Sized, + { + MapWhile::new(self, f) + } + + /// Maps this stream's items asynchronously to a different type, returning a + /// new stream of the resulting type. + /// + /// The provided closure is executed over all elements of this stream as + /// they are made available, and the returned future is executed. Only one + /// future is executed at the time. + /// + /// Note that this function consumes the stream passed into it and returns a + /// wrapped version of it, similar to the existing `then` methods in the + /// standard library. 
+ /// + /// Be aware that if the future is not `Unpin`, then neither is the `Stream` + /// returned by this method. To handle this, you can use `tokio::pin!` as in + /// the example below or put the stream in a `Box` with `Box::pin(stream)`. + /// + /// # Examples + /// + /// ``` + /// # #[tokio::main] + /// # async fn main() { + /// use tokio_stream::{self as stream, StreamExt}; + /// + /// async fn do_async_work(value: i32) -> i32 { + /// value + 3 + /// } + /// + /// let stream = stream::iter(1..=3); + /// let stream = stream.then(do_async_work); + /// + /// tokio::pin!(stream); + /// + /// assert_eq!(stream.next().await, Some(4)); + /// assert_eq!(stream.next().await, Some(5)); + /// assert_eq!(stream.next().await, Some(6)); + /// # } + /// ``` + fn then(self, f: F) -> Then + where + F: FnMut(Self::Item) -> Fut, + Fut: Future, + Self: Sized, + { + Then::new(self, f) + } + /// Combine two streams into one by interleaving the output of both as it /// is produced. /// diff --git a/tokio-stream/src/stream_ext/collect.rs b/tokio-stream/src/stream_ext/collect.rs index a33a6d6692a..4b157a9aacc 100644 --- a/tokio-stream/src/stream_ext/collect.rs +++ b/tokio-stream/src/stream_ext/collect.rs @@ -66,17 +66,17 @@ where use Poll::Ready; loop { - let mut me = self.as_mut().project(); + let me = self.as_mut().project(); let item = match ready!(me.stream.poll_next(cx)) { Some(item) => item, None => { - return Ready(U::finalize(sealed::Internal, &mut me.collection)); + return Ready(U::finalize(sealed::Internal, me.collection)); } }; - if !U::extend(sealed::Internal, &mut me.collection, item) { - return Ready(U::finalize(sealed::Internal, &mut me.collection)); + if !U::extend(sealed::Internal, me.collection, item) { + return Ready(U::finalize(sealed::Internal, me.collection)); } } } diff --git a/tokio-stream/src/stream_ext/map_while.rs b/tokio-stream/src/stream_ext/map_while.rs new file mode 100644 index 00000000000..d4fd8256560 --- /dev/null +++ b/tokio-stream/src/stream_ext/map_while.rs @@ -0,0 +1,52 @@ +use crate::Stream; + +use core::fmt; +use core::pin::Pin; +use core::task::{Context, Poll}; +use pin_project_lite::pin_project; + +pin_project! { + /// Stream for the [`map_while`](super::StreamExt::map_while) method. + #[must_use = "streams do nothing unless polled"] + pub struct MapWhile { + #[pin] + stream: St, + f: F, + } +} + +impl fmt::Debug for MapWhile +where + St: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("MapWhile") + .field("stream", &self.stream) + .finish() + } +} + +impl MapWhile { + pub(super) fn new(stream: St, f: F) -> Self { + MapWhile { stream, f } + } +} + +impl Stream for MapWhile +where + St: Stream, + F: FnMut(St::Item) -> Option, +{ + type Item = T; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let me = self.project(); + let f = me.f; + me.stream.poll_next(cx).map(|opt| opt.and_then(f)) + } + + fn size_hint(&self) -> (usize, Option) { + let (_, upper) = self.stream.size_hint(); + (0, upper) + } +} diff --git a/tokio-stream/src/stream_ext/then.rs b/tokio-stream/src/stream_ext/then.rs new file mode 100644 index 00000000000..7f6b5a2394f --- /dev/null +++ b/tokio-stream/src/stream_ext/then.rs @@ -0,0 +1,83 @@ +use crate::Stream; + +use core::fmt; +use core::future::Future; +use core::pin::Pin; +use core::task::{Context, Poll}; +use pin_project_lite::pin_project; + +pin_project! { + /// Stream for the [`then`](super::StreamExt::then) method. 
+ #[must_use = "streams do nothing unless polled"] + pub struct Then { + #[pin] + stream: St, + #[pin] + future: Option, + f: F, + } +} + +impl fmt::Debug for Then +where + St: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Then") + .field("stream", &self.stream) + .finish() + } +} + +impl Then { + pub(super) fn new(stream: St, f: F) -> Self { + Then { + stream, + future: None, + f, + } + } +} + +impl Stream for Then +where + St: Stream, + Fut: Future, + F: FnMut(St::Item) -> Fut, +{ + type Item = Fut::Output; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut me = self.project(); + + loop { + if let Some(future) = me.future.as_mut().as_pin_mut() { + match future.poll(cx) { + Poll::Ready(item) => { + me.future.set(None); + return Poll::Ready(Some(item)); + } + Poll::Pending => return Poll::Pending, + } + } + + match me.stream.as_mut().poll_next(cx) { + Poll::Ready(Some(item)) => { + me.future.set(Some((me.f)(item))); + } + Poll::Ready(None) => return Poll::Ready(None), + Poll::Pending => return Poll::Pending, + } + } + } + + fn size_hint(&self) -> (usize, Option) { + let future_len = if self.future.is_some() { 1 } else { 0 }; + let (lower, upper) = self.stream.size_hint(); + + let lower = lower.saturating_add(future_len); + let upper = upper.and_then(|upper| upper.checked_add(future_len)); + + (lower, upper) + } +} diff --git a/tokio-stream/src/stream_map.rs b/tokio-stream/src/stream_map.rs index 80a521ee17a..215980474b1 100644 --- a/tokio-stream/src/stream_map.rs +++ b/tokio-stream/src/stream_map.rs @@ -585,6 +585,15 @@ where } } +impl Extend<(K, V)> for StreamMap { + fn extend(&mut self, iter: T) + where + T: IntoIterator, + { + self.entries.extend(iter); + } +} + mod rand { use std::cell::Cell; diff --git a/tokio-test/Cargo.toml b/tokio-test/Cargo.toml index 55d5aafc557..16d688b6ed5 100644 --- a/tokio-test/Cargo.toml +++ b/tokio-test/Cargo.toml @@ -2,17 +2,15 @@ name = "tokio-test" # When releasing to crates.io: # - Remove path dependencies -# - Update doc url -# - Cargo.toml # - Update CHANGELOG.md. # - Create "tokio-test-0.4.x" git tag. version = "0.4.2" edition = "2018" +rust-version = "1.46" authors = ["Tokio Contributors "] license = "MIT" repository = "https://github.com/tokio-rs/tokio" homepage = "https://tokio.rs" -documentation = "https://docs.rs/tokio-test/0.4.2/tokio_test" description = """ Testing utilities for Tokio- and futures-based code """ @@ -20,7 +18,7 @@ categories = ["asynchronous", "testing"] [dependencies] tokio = { version = "1.2.0", path = "../tokio", features = ["rt", "sync", "time", "test-util"] } -tokio-stream = { version = "0.1", path = "../tokio-stream" } +tokio-stream = { version = "0.1.1", path = "../tokio-stream" } async-stream = "0.3" bytes = "1.0.0" diff --git a/tokio-test/src/lib.rs b/tokio-test/src/lib.rs index 7bba3eedac8..de3f0864a94 100644 --- a/tokio-test/src/lib.rs +++ b/tokio-test/src/lib.rs @@ -4,7 +4,6 @@ rust_2018_idioms, unreachable_pub )] -#![cfg_attr(docsrs, deny(rustdoc::broken_intra_doc_links))] #![doc(test( no_crate_inject, attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables)) diff --git a/tokio-util/Cargo.toml b/tokio-util/Cargo.toml index 676b0e2ec94..f3c19c1e3f3 100644 --- a/tokio-util/Cargo.toml +++ b/tokio-util/Cargo.toml @@ -2,21 +2,20 @@ name = "tokio-util" # When releasing to crates.io: # - Remove path dependencies -# - Update doc url -# - Cargo.toml # - Update CHANGELOG.md. -# - Create "tokio-util-0.6.x" git tag. 
-version = "0.6.9" +# - Create "tokio-util-0.7.x" git tag. +version = "0.7.0" edition = "2018" +rust-version = "1.46" authors = ["Tokio Contributors "] license = "MIT" repository = "https://github.com/tokio-rs/tokio" homepage = "https://tokio.rs" -documentation = "https://docs.rs/tokio-util/0.6.9/tokio_util" description = """ Additional utilities for working with Tokio. """ categories = ["asynchronous"] +publish = false [features] # No features on by default @@ -45,7 +44,7 @@ futures-io = { version = "0.3.0", optional = true } futures-util = { version = "0.3.0", optional = true } log = "0.4" pin-project-lite = "0.2.0" -slab = { version = "0.4.1", optional = true } # Backs `DelayQueue` +slab = { version = "0.4.4", optional = true } # Backs `DelayQueue` [dev-dependencies] tokio = { version = "1.0.0", path = "../tokio", features = ["full"] } diff --git a/tokio-util/src/codec/framed.rs b/tokio-util/src/codec/framed.rs index aff577f22cd..3f7e4207d31 100644 --- a/tokio-util/src/codec/framed.rs +++ b/tokio-util/src/codec/framed.rs @@ -204,6 +204,15 @@ impl Framed { &mut self.inner.codec } + /// Returns a mutable reference to the underlying codec wrapped by + /// `Framed`. + /// + /// Note that care should be taken to not tamper with the underlying codec + /// as it may corrupt the stream of frames otherwise being worked with. + pub fn codec_pin_mut(self: Pin<&mut Self>) -> &mut U { + self.project().inner.project().codec + } + /// Returns a reference to the read buffer. pub fn read_buffer(&self) -> &BytesMut { &self.inner.state.read.buffer diff --git a/tokio-util/src/codec/framed_read.rs b/tokio-util/src/codec/framed_read.rs index 502a073d0f7..d6e34dbafc4 100644 --- a/tokio-util/src/codec/framed_read.rs +++ b/tokio-util/src/codec/framed_read.rs @@ -108,6 +108,11 @@ impl FramedRead { &mut self.inner.codec } + /// Returns a mutable reference to the underlying decoder. + pub fn decoder_pin_mut(self: Pin<&mut Self>) -> &mut D { + self.project().inner.project().codec + } + /// Returns a reference to the read buffer. pub fn read_buffer(&self) -> &BytesMut { &self.inner.state.buffer diff --git a/tokio-util/src/codec/framed_write.rs b/tokio-util/src/codec/framed_write.rs index d2f6cb2d564..b827d9736ac 100644 --- a/tokio-util/src/codec/framed_write.rs +++ b/tokio-util/src/codec/framed_write.rs @@ -88,6 +88,11 @@ impl FramedWrite { &mut self.inner.codec } + /// Returns a mutable reference to the underlying encoder. + pub fn encoder_pin_mut(self: Pin<&mut Self>) -> &mut E { + self.project().inner.project().codec + } + /// Returns a reference to the write buffer. pub fn write_buffer(&self) -> &BytesMut { &self.inner.state.buffer diff --git a/tokio-util/src/codec/length_delimited.rs b/tokio-util/src/codec/length_delimited.rs index 7135d7ce47c..de0eb4e9201 100644 --- a/tokio-util/src/codec/length_delimited.rs +++ b/tokio-util/src/codec/length_delimited.rs @@ -746,7 +746,7 @@ impl Builder { } } - /// Sets the max frame length + /// Sets the max frame length in bytes /// /// This configuration option applies to both encoding and decoding. The /// default value is 8MB. 
@@ -767,7 +767,7 @@ impl Builder { /// /// # fn bind_read(io: T) { /// LengthDelimitedCodec::builder() - /// .max_frame_length(8 * 1024) + /// .max_frame_length(8 * 1024 * 1024) /// .new_read(io); /// # } /// # pub fn main() {} diff --git a/tokio-util/src/lib.rs b/tokio-util/src/lib.rs index c1970cfd108..1099fe4993c 100644 --- a/tokio-util/src/lib.rs +++ b/tokio-util/src/lib.rs @@ -5,7 +5,6 @@ rust_2018_idioms, unreachable_pub )] -#![cfg_attr(docsrs, deny(rustdoc::broken_intra_doc_links))] #![doc(test( no_crate_inject, attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables)) ))] diff --git a/tokio-util/src/time/delay_queue.rs b/tokio-util/src/time/delay_queue.rs index 6da83a574d0..697670d7581 100644 --- a/tokio-util/src/time/delay_queue.rs +++ b/tokio-util/src/time/delay_queue.rs @@ -9,8 +9,13 @@ use crate::time::wheel::{self, Wheel}; use futures_core::ready; use tokio::time::{error::Error, sleep_until, Duration, Instant, Sleep}; +use core::ops::{Index, IndexMut}; use slab::Slab; use std::cmp; +use std::collections::HashMap; +use std::convert::From; +use std::fmt; +use std::fmt::Debug; use std::future::Future; use std::marker::PhantomData; use std::pin::Pin; @@ -128,7 +133,7 @@ use std::task::{self, Poll, Waker}; #[derive(Debug)] pub struct DelayQueue { /// Stores data associated with entries - slab: Slab>, + slab: SlabStorage, /// Lookup structure tracking all delays in the queue wheel: Wheel>, @@ -152,6 +157,216 @@ pub struct DelayQueue { waker: Option, } +#[derive(Default)] +struct SlabStorage { + inner: Slab>, + + // A `compact` call requires a re-mapping of the `Key`s that were changed + // during the `compact` call of the `slab`. Since the keys that were given out + // cannot be changed retroactively we need to keep track of these re-mappings. + // The keys of `key_map` correspond to the old keys that were given out and + // the values to the `Key`s that were re-mapped by the `compact` call. + key_map: HashMap, + + // Index used to create new keys to hand out. + next_key_index: usize, + + // Whether `compact` has been called, necessary in order to decide whether + // to include keys in `key_map`. + compact_called: bool, +} + +impl SlabStorage { + pub(crate) fn with_capacity(capacity: usize) -> SlabStorage { + SlabStorage { + inner: Slab::with_capacity(capacity), + key_map: HashMap::new(), + next_key_index: 0, + compact_called: false, + } + } + + // Inserts data into the inner slab and re-maps keys if necessary + pub(crate) fn insert(&mut self, val: Data) -> Key { + let mut key = KeyInternal::new(self.inner.insert(val)); + let key_contained = self.key_map.contains_key(&key.into()); + + if key_contained { + // It's possible that a `compact` call creates capacity in `self.inner` in + // such a way that a `self.inner.insert` call creates a `key` which was + // previously given out during an `insert` call prior to the `compact` call. + // If `key` is contained in `self.key_map`, we have encountered this exact situation. + // We need to create a new key `key_to_give_out` and include the relation + // `key_to_give_out` -> `key` in `self.key_map`. + let key_to_give_out = self.create_new_key(); + assert!(!self.key_map.contains_key(&key_to_give_out.into())); + self.key_map.insert(key_to_give_out.into(), key); + key = key_to_give_out; + } else if self.compact_called { + // Include an identity mapping in `self.key_map` in order to allow us to + // panic if a key that was handed out is removed more than once.
+ self.key_map.insert(key.into(), key); + } + + key.into() + } + + // Re-map the key in case compact was previously called. + // Note: Since we include identity mappings in key_map after compact was called, + // we have information about all keys that were handed out. In the case in which + // compact was called and we try to remove a Key that was previously removed + // we can detect invalid keys if no key is found in `key_map`. This is necessary + // in order to prevent situations in which a previously removed key + // corresponds to a re-mapped key internally and which would then be incorrectly + // removed from the slab. + // + // Example to illuminate this problem: + // + // Let's assume our `key_map` is {1 -> 2, 2 -> 1} and we call remove(1). If we + // were to remove 1 again, we would not find it inside `key_map` anymore. + // If we were to imply from this that no re-mapping was necessary, we would + // incorrectly remove 1 from `self.slab.inner`, which corresponds to the + // handed-out key 2. + pub(crate) fn remove(&mut self, key: &Key) -> Data { + let remapped_key = if self.compact_called { + match self.key_map.remove(key) { + Some(key_internal) => key_internal, + None => panic!("invalid key"), + } + } else { + (*key).into() + }; + + self.inner.remove(remapped_key.index) + } + + pub(crate) fn shrink_to_fit(&mut self) { + self.inner.shrink_to_fit(); + self.key_map.shrink_to_fit(); + } + + pub(crate) fn compact(&mut self) { + if !self.compact_called { + for (key, _) in self.inner.iter() { + self.key_map.insert(Key::new(key), KeyInternal::new(key)); + } + } + + let mut remapping = HashMap::new(); + self.inner.compact(|_, from, to| { + remapping.insert(from, to); + true + }); + + // At this point `key_map` contains a mapping for every element. + for internal_key in self.key_map.values_mut() { + if let Some(new_internal_key) = remapping.get(&internal_key.index) { + *internal_key = KeyInternal::new(*new_internal_key); + } + } + + if self.key_map.capacity() > 2 * self.key_map.len() { + self.key_map.shrink_to_fit(); + } + + self.compact_called = true; + } + + // Tries to re-map a `Key` that was given out to the user to its + // corresponding internal key. 
+ fn remap_key(&self, key: &Key) -> Option { + let key_map = &self.key_map; + if self.compact_called { + key_map.get(&*key).copied() + } else { + Some((*key).into()) + } + } + + fn create_new_key(&mut self) -> KeyInternal { + while self.key_map.contains_key(&Key::new(self.next_key_index)) { + self.next_key_index = self.next_key_index.wrapping_add(1); + } + + KeyInternal::new(self.next_key_index) + } + + pub(crate) fn len(&self) -> usize { + self.inner.len() + } + + pub(crate) fn capacity(&self) -> usize { + self.inner.capacity() + } + + pub(crate) fn clear(&mut self) { + self.inner.clear(); + self.key_map.clear(); + self.compact_called = false; + } + + pub(crate) fn reserve(&mut self, additional: usize) { + self.inner.reserve(additional); + + if self.compact_called { + self.key_map.reserve(additional); + } + } + + pub(crate) fn is_empty(&self) -> bool { + self.inner.is_empty() + } + + pub(crate) fn contains(&self, key: &Key) -> bool { + let remapped_key = self.remap_key(key); + + match remapped_key { + Some(internal_key) => self.inner.contains(internal_key.index), + None => false, + } + } +} + +impl fmt::Debug for SlabStorage +where + T: fmt::Debug, +{ + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + if fmt.alternate() { + fmt.debug_map().entries(self.inner.iter()).finish() + } else { + fmt.debug_struct("Slab") + .field("len", &self.len()) + .field("cap", &self.capacity()) + .finish() + } + } +} + +impl Index for SlabStorage { + type Output = Data; + + fn index(&self, key: Key) -> &Self::Output { + let remapped_key = self.remap_key(&key); + + match remapped_key { + Some(internal_key) => &self.inner[internal_key.index], + None => panic!("Invalid index {}", key.index), + } + } +} + +impl IndexMut for SlabStorage { + fn index_mut(&mut self, key: Key) -> &mut Data { + let remapped_key = self.remap_key(&key); + + match remapped_key { + Some(internal_key) => &mut self.inner[internal_key.index], + None => panic!("Invalid index {}", key.index), + } + } +} + /// An entry in `DelayQueue` that has expired and been removed. /// /// Values are returned by [`DelayQueue::poll_expired`]. @@ -176,15 +391,23 @@ pub struct Expired { /// /// [`DelayQueue`]: struct@DelayQueue /// [`DelayQueue::insert`]: method@DelayQueue::insert -#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct Key { index: usize, } +// Whereas `Key` is given out to users that use `DelayQueue`, internally we use +// `KeyInternal` as the key type in order to make the logic of mapping between keys +// as a result of `compact` calls clearer. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +struct KeyInternal { + index: usize, +} + #[derive(Debug)] struct Stack { /// Head of the stack - head: Option, + head: Option, _p: PhantomData T>, } @@ -201,10 +424,10 @@ struct Data { expired: bool, /// Next entry in the stack - next: Option, + next: Option, /// Previous entry in the stack - prev: Option, + prev: Option, } /// Maximum number of entries the queue can handle @@ -253,7 +476,7 @@ impl DelayQueue { pub fn with_capacity(capacity: usize) -> DelayQueue { DelayQueue { wheel: Wheel::new(), - slab: Slab::with_capacity(capacity), + slab: SlabStorage::with_capacity(capacity), expired: Stack::default(), delay: None, wheel_now: 0, @@ -348,7 +571,7 @@ impl DelayQueue { } } - Key::new(key) + key } /// Attempts to pull out the next value of the delay queue, registering the @@ -369,13 +592,13 @@ impl DelayQueue { let item = ready!(self.poll_idx(cx)); Poll::Ready(item.map(|result| { - result.map(|idx| { - let data = self.slab.remove(idx); + result.map(|key| { + let data = self.slab.remove(&key); debug_assert!(data.next.is_none()); debug_assert!(data.prev.is_none()); Expired { - key: Key::new(idx), + key, data: data.inner, deadline: self.start + Duration::from_millis(data.when), } @@ -437,7 +660,7 @@ impl DelayQueue { self.insert_at(value, Instant::now() + timeout) } - fn insert_idx(&mut self, when: u64, key: usize) { + fn insert_idx(&mut self, when: u64, key: Key) { use self::wheel::{InsertError, Stack}; // Register the deadline with the timer wheel @@ -462,10 +685,10 @@ impl DelayQueue { use crate::time::wheel::Stack; // Special case the `expired` queue - if self.slab[key.index].expired { - self.expired.remove(&key.index, &mut self.slab); + if self.slab[*key].expired { + self.expired.remove(key, &mut self.slab); } else { - self.wheel.remove(&key.index, &mut self.slab); + self.wheel.remove(key, &mut self.slab); } } @@ -501,7 +724,7 @@ impl DelayQueue { let prev_deadline = self.next_deadline(); self.remove_key(key); - let data = self.slab.remove(key.index); + let data = self.slab.remove(key); let next_deadline = self.next_deadline(); if prev_deadline != next_deadline { @@ -559,10 +782,10 @@ impl DelayQueue { // Normalize the deadline. Values cannot be set to expire in the past. let when = self.normalize_deadline(when); - self.slab[key.index].when = when; - self.slab[key.index].expired = false; + self.slab[*key].when = when; + self.slab[*key].expired = false; - self.insert_idx(when, key.index); + self.insert_idx(when, *key); let next_deadline = self.next_deadline(); if let (Some(ref mut delay), Some(deadline)) = (&mut self.delay, next_deadline) { @@ -571,6 +794,50 @@ impl DelayQueue { } } + /// Shrink the capacity of the slab, which `DelayQueue` uses internally for storage allocation. + /// This function is not guaranteed to, and in most cases, won't decrease the capacity of the slab + /// to the number of elements still contained in it, because elements cannot be moved to a different + /// index. To decrease the capacity to the size of the slab use [`compact`]. + /// + /// This function can take O(n) time even when the capacity cannot be reduced or the allocation is + /// shrunk in place. Repeated calls run in O(1) though. + /// + /// [`compact`]: method@Self::compact + pub fn shrink_to_fit(&mut self) { + self.slab.shrink_to_fit(); + } + + /// Shrink the capacity of the slab, which `DelayQueue` uses internally for storage allocation, + /// to the number of elements that are contained in it. + /// + /// This method runs in O(n).
+ /// + /// # Examples + /// + /// Basic usage + /// + /// ```rust + /// use tokio_util::time::DelayQueue; + /// use std::time::Duration; + /// + /// # #[tokio::main] + /// # async fn main() { + /// let mut delay_queue = DelayQueue::with_capacity(10); + /// + /// let key1 = delay_queue.insert(5, Duration::from_secs(5)); + /// let key2 = delay_queue.insert(10, Duration::from_secs(10)); + /// let key3 = delay_queue.insert(15, Duration::from_secs(15)); + /// + /// delay_queue.remove(&key2); + /// + /// delay_queue.compact(); + /// assert_eq!(delay_queue.capacity(), 2); + /// # } + /// ``` + pub fn compact(&mut self) { + self.slab.compact(); + } + /// Returns the next time to poll as determined by the wheel fn next_deadline(&mut self) -> Option { self.wheel @@ -750,7 +1017,7 @@ impl DelayQueue { /// should be returned. /// /// A slot should be returned when the associated deadline has been reached. - fn poll_idx(&mut self, cx: &mut task::Context<'_>) -> Poll>> { + fn poll_idx(&mut self, cx: &mut task::Context<'_>) -> Poll>> { use self::wheel::Stack; let expired = self.expired.pop(&mut self.slab); @@ -816,9 +1083,9 @@ impl futures_core::Stream for DelayQueue { } impl wheel::Stack for Stack { - type Owned = usize; - type Borrowed = usize; - type Store = Slab>; + type Owned = Key; + type Borrowed = Key; + type Store = SlabStorage; fn is_empty(&self) -> bool { self.head.is_none() @@ -837,28 +1104,29 @@ impl wheel::Stack for Stack { } store[item].next = old; - self.head = Some(item) + self.head = Some(item); } fn pop(&mut self, store: &mut Self::Store) -> Option { - if let Some(idx) = self.head { - self.head = store[idx].next; + if let Some(key) = self.head { + self.head = store[key].next; if let Some(idx) = self.head { store[idx].prev = None; } - store[idx].next = None; - debug_assert!(store[idx].prev.is_none()); + store[key].next = None; + debug_assert!(store[key].prev.is_none()); - Some(idx) + Some(key) } else { None } } fn remove(&mut self, item: &Self::Borrowed, store: &mut Self::Store) { - assert!(store.contains(*item)); + let key = *item; + assert!(store.contains(item)); // Ensure that the entry is in fact contained by the stack debug_assert!({ @@ -867,29 +1135,31 @@ impl wheel::Stack for Stack { let mut contains = false; while let Some(idx) = next { + let data = &store[idx]; + if idx == *item { debug_assert!(!contains); contains = true; } - next = store[idx].next; + next = data.next; } contains }); - if let Some(next) = store[*item].next { - store[next].prev = store[*item].prev; + if let Some(next) = store[key].next { + store[next].prev = store[key].prev; } - if let Some(prev) = store[*item].prev { - store[prev].next = store[*item].next; + if let Some(prev) = store[key].prev { + store[prev].next = store[key].next; } else { - self.head = store[*item].next; + self.head = store[key].next; } - store[*item].next = None; - store[*item].prev = None; + store[key].next = None; + store[key].prev = None; } fn when(item: &Self::Borrowed, store: &Self::Store) -> u64 { @@ -912,6 +1182,24 @@ impl Key { } } +impl KeyInternal { + pub(crate) fn new(index: usize) -> KeyInternal { + KeyInternal { index } + } +} + +impl From for KeyInternal { + fn from(item: Key) -> Self { + KeyInternal::new(item.index) + } +} + +impl From for Key { + fn from(item: KeyInternal) -> Self { + Key::new(item.index) + } +} + impl Expired { /// Returns a reference to the inner value. 
pub fn get_ref(&self) -> &T { diff --git a/tokio-util/src/time/wheel/mod.rs b/tokio-util/src/time/wheel/mod.rs index 8fed0bf431d..4191e401df4 100644 --- a/tokio-util/src/time/wheel/mod.rs +++ b/tokio-util/src/time/wheel/mod.rs @@ -6,6 +6,7 @@ mod stack; pub(crate) use self::stack::Stack; use std::borrow::Borrow; +use std::fmt::Debug; use std::usize; /// Timing wheel implementation. diff --git a/tokio-util/src/time/wheel/stack.rs b/tokio-util/src/time/wheel/stack.rs index 6e55c38ccda..c87adcafda8 100644 --- a/tokio-util/src/time/wheel/stack.rs +++ b/tokio-util/src/time/wheel/stack.rs @@ -1,4 +1,6 @@ use std::borrow::Borrow; +use std::cmp::Eq; +use std::hash::Hash; /// Abstracts the stack operations needed to track timeouts. pub(crate) trait Stack: Default { @@ -6,7 +8,7 @@ pub(crate) trait Stack: Default { type Owned: Borrow; /// Borrowed item - type Borrowed; + type Borrowed: Eq + Hash; /// Item storage, this allows a slab to be used instead of just the heap type Store; diff --git a/tokio-util/tests/time_delay_queue.rs b/tokio-util/tests/time_delay_queue.rs index 901afcaaa94..1c30446af59 100644 --- a/tokio-util/tests/time_delay_queue.rs +++ b/tokio-util/tests/time_delay_queue.rs @@ -109,6 +109,7 @@ async fn multi_delay_at_start() { let start = Instant::now(); for elapsed in 0..1200 { + println!("elapsed: {:?}", elapsed); let elapsed = elapsed + 1; tokio::time::sleep_until(start + ms(elapsed)).await; @@ -128,10 +129,12 @@ async fn multi_delay_at_start() { assert_pending!(poll!(queue)); } } + println!("finished multi_delay_start"); } #[tokio::test] async fn insert_in_past_fires_immediately() { + println!("running insert_in_past_fires_immediately"); time::pause(); let mut queue = task::spawn(DelayQueue::new()); @@ -142,6 +145,7 @@ async fn insert_in_past_fires_immediately() { queue.insert_at("foo", now); assert_ready!(poll!(queue)); + println!("finished insert_in_past_fires_immediately"); } #[tokio::test] @@ -640,6 +644,175 @@ async fn delay_queue_poll_expired_when_empty() { assert!(assert_ready!(poll!(delay_queue)).is_none()); } +#[tokio::test(start_paused = true)] +async fn compact_expire_empty() { + let mut queue = task::spawn(DelayQueue::new()); + + let now = Instant::now(); + + queue.insert_at("foo1", now + ms(10)); + queue.insert_at("foo2", now + ms(10)); + + sleep(ms(10)).await; + + let mut res = vec![]; + while res.len() < 2 { + let entry = assert_ready_ok!(poll!(queue)); + res.push(entry.into_inner()); + } + + queue.compact(); + + assert_eq!(queue.len(), 0); + assert_eq!(queue.capacity(), 0); +} + +#[tokio::test(start_paused = true)] +async fn compact_remove_empty() { + let mut queue = task::spawn(DelayQueue::new()); + + let now = Instant::now(); + + let key1 = queue.insert_at("foo1", now + ms(10)); + let key2 = queue.insert_at("foo2", now + ms(10)); + + queue.remove(&key1); + queue.remove(&key2); + + queue.compact(); + + assert_eq!(queue.len(), 0); + assert_eq!(queue.capacity(), 0); +} + +#[tokio::test(start_paused = true)] +// Trigger a re-mapping of keys in the slab due to a `compact` call and +// test removal of re-mapped keys +async fn compact_remove_remapped_keys() { + let mut queue = task::spawn(DelayQueue::new()); + + let now = Instant::now(); + + queue.insert_at("foo1", now + ms(10)); + queue.insert_at("foo2", now + ms(10)); + + // should be assigned indices 3 and 4 + let key3 = queue.insert_at("foo3", now + ms(20)); + let key4 = queue.insert_at("foo4", now + ms(20)); + + sleep(ms(10)).await; + + let mut res = vec![]; + while res.len() < 2 { + let entry = 
assert_ready_ok!(poll!(queue)); + res.push(entry.into_inner()); + } + + // items corresponding to `foo3` and `foo4` will be assigned + // new indices here + queue.compact(); + + queue.insert_at("foo5", now + ms(10)); + + // test removal of re-mapped keys + let expired3 = queue.remove(&key3); + let expired4 = queue.remove(&key4); + + assert_eq!(expired3.into_inner(), "foo3"); + assert_eq!(expired4.into_inner(), "foo4"); + + queue.compact(); + assert_eq!(queue.len(), 1); + assert_eq!(queue.capacity(), 1); +} + +#[tokio::test(start_paused = true)] +async fn compact_change_deadline() { + let mut queue = task::spawn(DelayQueue::new()); + + let mut now = Instant::now(); + + queue.insert_at("foo1", now + ms(10)); + queue.insert_at("foo2", now + ms(10)); + + // should be assigned indices 3 and 4 + queue.insert_at("foo3", now + ms(20)); + let key4 = queue.insert_at("foo4", now + ms(20)); + + sleep(ms(10)).await; + + let mut res = vec![]; + while res.len() < 2 { + let entry = assert_ready_ok!(poll!(queue)); + res.push(entry.into_inner()); + } + + // items corresponding to `foo3` and `foo4` should be assigned + // new indices + queue.compact(); + + now = Instant::now(); + + queue.insert_at("foo5", now + ms(10)); + let key6 = queue.insert_at("foo6", now + ms(10)); + + queue.reset_at(&key4, now + ms(20)); + queue.reset_at(&key6, now + ms(20)); + + // foo3 and foo5 will expire + sleep(ms(10)).await; + + while res.len() < 4 { + let entry = assert_ready_ok!(poll!(queue)); + res.push(entry.into_inner()); + } + + sleep(ms(10)).await; + + while res.len() < 6 { + let entry = assert_ready_ok!(poll!(queue)); + res.push(entry.into_inner()); + } + + let entry = assert_ready!(poll!(queue)); + assert!(entry.is_none()); +} + +#[tokio::test(start_paused = true)] +async fn remove_after_compact() { + let now = Instant::now(); + let mut queue = DelayQueue::new(); + + let foo_key = queue.insert_at("foo", now + ms(10)); + queue.insert_at("bar", now + ms(20)); + queue.remove(&foo_key); + queue.compact(); + + let panic = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { + queue.remove(&foo_key); + })); + assert!(panic.is_err()); +} + +#[tokio::test(start_paused = true)] +async fn remove_after_compact_poll() { + let now = Instant::now(); + let mut queue = task::spawn(DelayQueue::new()); + + let foo_key = queue.insert_at("foo", now + ms(10)); + queue.insert_at("bar", now + ms(20)); + + sleep(ms(10)).await; + assert_eq!(assert_ready_ok!(poll!(queue)).key(), foo_key); + + queue.compact(); + + let panic = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { + queue.remove(&foo_key); + })); + assert!(panic.is_err()); +} + fn ms(n: u64) -> Duration { Duration::from_millis(n) } diff --git a/tokio/CHANGELOG.md b/tokio/CHANGELOG.md index afa8bf0ce18..a17ffa9b8cd 100644 --- a/tokio/CHANGELOG.md +++ b/tokio/CHANGELOG.md @@ -1,3 +1,31 @@ +# 1.15.0 (December 15, 2021) + +### Fixed + +- io: add cooperative yielding support to `io::empty()` ([#4300]) +- time: make timeout robust against budget-depleting tasks ([#4314]) + +### Changed + +- update minimum supported Rust version to 1.46. 
+ +### Added + +- time: add `Interval::reset()` ([#4248]) +- io: add explicit lifetimes to `AsyncFdReadyGuard` ([#4267]) +- process: add `Command::as_std()` ([#4295]) + +### Added (unstable) + +- tracing: instrument `tokio::sync` types ([#4302]) + +[#4302]: https://github.com/tokio-rs/tokio/pull/4302 +[#4300]: https://github.com/tokio-rs/tokio/pull/4300 +[#4295]: https://github.com/tokio-rs/tokio/pull/4295 +[#4267]: https://github.com/tokio-rs/tokio/pull/4267 +[#4248]: https://github.com/tokio-rs/tokio/pull/4248 +[#4314]: https://github.com/tokio-rs/tokio/pull/4314 + # 1.14.0 (November 15, 2021) ### Fixed diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index 348ec46576a..2a88b766c0a 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -3,16 +3,15 @@ name = "tokio" # When releasing to crates.io: # - Remove path dependencies # - Update doc url -# - Cargo.toml # - README.md # - Update CHANGELOG.md. # - Create "v1.0.x" git tag. -version = "1.14.0" +version = "1.15.0" edition = "2018" +rust-version = "1.46" authors = ["Tokio Contributors "] license = "MIT" readme = "README.md" -documentation = "https://docs.rs/tokio/1.14.0/tokio/" repository = "https://github.com/tokio-rs/tokio" homepage = "https://tokio.rs" description = """ @@ -87,7 +86,7 @@ test-util = ["rt", "sync", "time"] time = [] [dependencies] -tokio-macros = { version = "1.6.0", path = "../tokio-macros", optional = true } +tokio-macros = { version = "1.7.0", path = "../tokio-macros", optional = true } pin-project-lite = "0.2.0" @@ -110,7 +109,7 @@ signal-hook-registry = { version = "1.1.1", optional = true } [target.'cfg(unix)'.dev-dependencies] libc = { version = "0.2.42" } -nix = { version = "0.22.0" } +nix = { version = "0.23" } [target.'cfg(windows)'.dependencies.winapi] version = "0.3.8" @@ -137,12 +136,13 @@ mio-aio = { version = "0.6.0", features = ["tokio"] } [target.'cfg(loom)'.dev-dependencies] loom = { version = "0.5", features = ["futures", "checkpoint"] } -[build-dependencies] -autocfg = "1" # Needed for conditionally enabling `track-caller` - [package.metadata.docs.rs] all-features = true -rustdoc-args = ["--cfg", "docsrs"] +# enable unstable features in the documentation +rustdoc-args = ["--cfg", "docsrs", "--cfg", "tokio_unstable"] +# it's necessary to _also_ pass `--cfg tokio_unstable` to rustc, or else +# dependencies will not be enabled, and the docs build will fail. +rustc-args = ["--cfg", "tokio_unstable"] [package.metadata.playground] features = ["full", "test-util"] diff --git a/tokio/README.md b/tokio/README.md index 19f049cba78..ad192fec706 100644 --- a/tokio/README.md +++ b/tokio/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.14.0", features = ["full"] } +tokio = { version = "1.15.0", features = ["full"] } ``` Then, on your main.rs: @@ -164,7 +164,7 @@ several other libraries, including: ## Supported Rust Versions Tokio is built against the latest stable release. The minimum supported version -is 1.45. The current Tokio version is not guaranteed to build on Rust versions +is 1.46. The current Tokio version is not guaranteed to build on Rust versions earlier than the minimum supported version. ## Release schedule @@ -181,6 +181,7 @@ released as a new patch release for each LTS minor version. Our current LTS releases are: * `1.8.x` - LTS release until February 2022. + * `1.14.x` - LTS release until June 2022. 
Each LTS release will continue to receive backported fixes for at least half a year. If you wish to use a fixed minor release in your project, we recommend diff --git a/tokio/build.rs b/tokio/build.rs deleted file mode 100644 index fe5c8300560..00000000000 --- a/tokio/build.rs +++ /dev/null @@ -1,22 +0,0 @@ -use autocfg::AutoCfg; - -fn main() { - match AutoCfg::new() { - Ok(ac) => { - // The #[track_caller] attribute was stabilized in rustc 1.46.0. - if ac.probe_rustc_version(1, 46) { - autocfg::emit("tokio_track_caller") - } - } - - Err(e) => { - // If we couldn't detect the compiler version and features, just - // print a warning. This isn't a fatal error: we can still build - // Tokio, we just can't enable cfgs automatically. - println!( - "cargo:warning=tokio: failed to detect compiler features: {}", - e - ); - } - } -} diff --git a/tokio/src/coop.rs b/tokio/src/coop.rs index 256e9620e75..145e703971b 100644 --- a/tokio/src/coop.rs +++ b/tokio/src/coop.rs @@ -59,13 +59,9 @@ impl Budget { const fn unconstrained() -> Budget { Budget(None) } -} -cfg_rt_multi_thread! { - impl Budget { - fn has_remaining(self) -> bool { - self.0.map(|budget| budget > 0).unwrap_or(true) - } + fn has_remaining(self) -> bool { + self.0.map(|budget| budget > 0).unwrap_or(true) } } @@ -107,16 +103,16 @@ fn with_budget(budget: Budget, f: impl FnOnce() -> R) -> R { }) } +#[inline(always)] +pub(crate) fn has_budget_remaining() -> bool { + CURRENT.with(|cell| cell.get().has_remaining()) +} + cfg_rt_multi_thread! { /// Sets the current task's budget. pub(crate) fn set(budget: Budget) { CURRENT.with(|cell| cell.set(budget)) } - - #[inline(always)] - pub(crate) fn has_budget_remaining() -> bool { - CURRENT.with(|cell| cell.get().has_remaining()) - } } cfg_rt! { diff --git a/tokio/src/io/async_fd.rs b/tokio/src/io/async_fd.rs index 9ec5b7f2387..93f9cb458a7 100644 --- a/tokio/src/io/async_fd.rs +++ b/tokio/src/io/async_fd.rs @@ -81,6 +81,7 @@ use std::{task::Context, task::Poll}; /// /// impl AsyncTcpStream { /// pub fn new(tcp: TcpStream) -> io::Result { +/// tcp.set_nonblocking(true)?; /// Ok(Self { /// inner: AsyncFd::new(tcp)?, /// }) @@ -525,7 +526,7 @@ impl<'a, Inner: AsRawFd> AsyncFdReadyGuard<'a, Inner> { #[cfg_attr(docsrs, doc(alias = "with_io"))] pub fn try_io( &mut self, - f: impl FnOnce(&AsyncFd) -> io::Result, + f: impl FnOnce(&'a AsyncFd) -> io::Result, ) -> Result, TryIoError> { let result = f(self.async_fd); @@ -542,12 +543,12 @@ impl<'a, Inner: AsRawFd> AsyncFdReadyGuard<'a, Inner> { } /// Returns a shared reference to the inner [`AsyncFd`]. - pub fn get_ref(&self) -> &AsyncFd { + pub fn get_ref(&self) -> &'a AsyncFd { self.async_fd } /// Returns a shared reference to the backing object of the inner [`AsyncFd`]. - pub fn get_inner(&self) -> &Inner { + pub fn get_inner(&self) -> &'a Inner { self.get_ref().get_ref() } } @@ -598,7 +599,7 @@ impl<'a, Inner: AsRawFd> AsyncFdReadyMutGuard<'a, Inner> { &mut self, f: impl FnOnce(&mut AsyncFd) -> io::Result, ) -> Result, TryIoError> { - let result = f(&mut self.async_fd); + let result = f(self.async_fd); if let Err(e) = result.as_ref() { if e.kind() == io::ErrorKind::WouldBlock { diff --git a/tokio/src/io/poll_evented.rs b/tokio/src/io/poll_evented.rs index 44e68a2a9a0..ce4c1426acc 100644 --- a/tokio/src/io/poll_evented.rs +++ b/tokio/src/io/poll_evented.rs @@ -4,6 +4,7 @@ use mio::event::Source; use std::fmt; use std::io; use std::ops::Deref; +use std::panic::{RefUnwindSafe, UnwindSafe}; cfg_io_driver! 
{ /// Associates an I/O resource that implements the [`std::io::Read`] and/or @@ -185,6 +186,10 @@ feature! { } } +impl UnwindSafe for PollEvented {} + +impl RefUnwindSafe for PollEvented {} + impl Deref for PollEvented { type Target = E; diff --git a/tokio/src/io/read_buf.rs b/tokio/src/io/read_buf.rs index ad58cbe757b..8c34ae6c817 100644 --- a/tokio/src/io/read_buf.rs +++ b/tokio/src/io/read_buf.rs @@ -1,9 +1,5 @@ -// This lint claims ugly casting is somehow safer than transmute, but there's -// no evidence that is the case. Shush. -#![allow(clippy::transmute_ptr_to_ptr)] - use std::fmt; -use std::mem::{self, MaybeUninit}; +use std::mem::MaybeUninit; /// A wrapper around a byte buffer that is incrementally filled and initialized. /// @@ -35,7 +31,7 @@ impl<'a> ReadBuf<'a> { #[inline] pub fn new(buf: &'a mut [u8]) -> ReadBuf<'a> { let initialized = buf.len(); - let buf = unsafe { mem::transmute::<&mut [u8], &mut [MaybeUninit]>(buf) }; + let buf = unsafe { slice_to_uninit_mut(buf) }; ReadBuf { buf, filled: 0, @@ -67,8 +63,7 @@ impl<'a> ReadBuf<'a> { let slice = &self.buf[..self.filled]; // safety: filled describes how far into the buffer that the // user has filled with bytes, so it's been initialized. - // TODO: This could use `MaybeUninit::slice_get_ref` when it is stable. - unsafe { mem::transmute::<&[MaybeUninit], &[u8]>(slice) } + unsafe { slice_assume_init(slice) } } /// Returns a mutable reference to the filled portion of the buffer. @@ -77,8 +72,7 @@ impl<'a> ReadBuf<'a> { let slice = &mut self.buf[..self.filled]; // safety: filled describes how far into the buffer that the // user has filled with bytes, so it's been initialized. - // TODO: This could use `MaybeUninit::slice_get_mut` when it is stable. - unsafe { mem::transmute::<&mut [MaybeUninit], &mut [u8]>(slice) } + unsafe { slice_assume_init_mut(slice) } } /// Returns a new `ReadBuf` comprised of the unfilled section up to `n`. @@ -97,8 +91,7 @@ impl<'a> ReadBuf<'a> { let slice = &self.buf[..self.initialized]; // safety: initialized describes how far into the buffer that the // user has at some point initialized with bytes. - // TODO: This could use `MaybeUninit::slice_get_ref` when it is stable. - unsafe { mem::transmute::<&[MaybeUninit], &[u8]>(slice) } + unsafe { slice_assume_init(slice) } } /// Returns a mutable reference to the initialized portion of the buffer. @@ -109,15 +102,14 @@ impl<'a> ReadBuf<'a> { let slice = &mut self.buf[..self.initialized]; // safety: initialized describes how far into the buffer that the // user has at some point initialized with bytes. - // TODO: This could use `MaybeUninit::slice_get_mut` when it is stable. - unsafe { mem::transmute::<&mut [MaybeUninit], &mut [u8]>(slice) } + unsafe { slice_assume_init_mut(slice) } } /// Returns a mutable reference to the entire buffer, without ensuring that it has been fully /// initialized. /// /// The elements between 0 and `self.filled().len()` are filled, and those between 0 and - /// `self.initialized().len()` are initialized (and so can be transmuted to a `&mut [u8]`). + /// `self.initialized().len()` are initialized (and so can be converted to a `&mut [u8]`). /// /// The caller of this method must ensure that these invariants are upheld. For example, if the /// caller initializes some of the uninitialized section of the buffer, it must call @@ -178,7 +170,7 @@ impl<'a> ReadBuf<'a> { let slice = &mut self.buf[self.filled..end]; // safety: just above, we checked that the end of the buf has // been initialized to some value. 
- unsafe { mem::transmute::<&mut [MaybeUninit], &mut [u8]>(slice) } + unsafe { slice_assume_init_mut(slice) } } /// Returns the number of bytes at the end of the slice that have not yet been filled. @@ -283,3 +275,17 @@ impl fmt::Debug for ReadBuf<'_> { .finish() } } + +unsafe fn slice_to_uninit_mut(slice: &mut [u8]) -> &mut [MaybeUninit] { + &mut *(slice as *mut [u8] as *mut [MaybeUninit]) +} + +// TODO: This could use `MaybeUninit::slice_assume_init` when it is stable. +unsafe fn slice_assume_init(slice: &[MaybeUninit]) -> &[u8] { + &*(slice as *const [MaybeUninit] as *const [u8]) +} + +// TODO: This could use `MaybeUninit::slice_assume_init_mut` when it is stable. +unsafe fn slice_assume_init_mut(slice: &mut [MaybeUninit]) -> &mut [u8] { + &mut *(slice as *mut [MaybeUninit] as *mut [u8]) +} diff --git a/tokio/src/io/util/buf_reader.rs b/tokio/src/io/util/buf_reader.rs index 7df610b143a..60879c0fdc2 100644 --- a/tokio/src/io/util/buf_reader.rs +++ b/tokio/src/io/util/buf_reader.rs @@ -204,7 +204,6 @@ impl AsyncSeek for BufReader { self.as_mut() .get_pin_mut() .start_seek(SeekFrom::Current(offset))?; - self.as_mut().get_pin_mut().poll_complete(cx)? } else { // seek backwards by our remainder, and then by the offset self.as_mut() @@ -221,8 +220,8 @@ impl AsyncSeek for BufReader { self.as_mut() .get_pin_mut() .start_seek(SeekFrom::Current(n))?; - self.as_mut().get_pin_mut().poll_complete(cx)? } + self.as_mut().get_pin_mut().poll_complete(cx)? } SeekState::PendingOverflowed(n) => { if self.as_mut().get_pin_mut().poll_complete(cx)?.is_pending() { diff --git a/tokio/src/io/util/empty.rs b/tokio/src/io/util/empty.rs index f964d18e6ef..77db60e40b4 100644 --- a/tokio/src/io/util/empty.rs +++ b/tokio/src/io/util/empty.rs @@ -50,16 +50,18 @@ impl AsyncRead for Empty { #[inline] fn poll_read( self: Pin<&mut Self>, - _: &mut Context<'_>, + cx: &mut Context<'_>, _: &mut ReadBuf<'_>, ) -> Poll> { + ready!(poll_proceed_and_make_progress(cx)); Poll::Ready(Ok(())) } } impl AsyncBufRead for Empty { #[inline] - fn poll_fill_buf(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + ready!(poll_proceed_and_make_progress(cx)); Poll::Ready(Ok(&[])) } @@ -73,6 +75,20 @@ impl fmt::Debug for Empty { } } +cfg_coop! { + fn poll_proceed_and_make_progress(cx: &mut Context<'_>) -> Poll<()> { + let coop = ready!(crate::coop::poll_proceed(cx)); + coop.made_progress(); + Poll::Ready(()) + } +} + +cfg_not_coop! { + fn poll_proceed_and_make_progress(_: &mut Context<'_>) -> Poll<()> { + Poll::Ready(()) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/tokio/src/io/util/read_exact.rs b/tokio/src/io/util/read_exact.rs index 1e8150eb20f..dbdd58bae99 100644 --- a/tokio/src/io/util/read_exact.rs +++ b/tokio/src/io/util/read_exact.rs @@ -51,13 +51,13 @@ where type Output = io::Result; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut me = self.project(); + let me = self.project(); loop { // if our buffer is empty, then we need to read some data to continue. 
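For reference, the three pointer-cast helpers that replace the slice `transmute`s look like this when written out standalone with their `u8`/`MaybeUninit<u8>` element types:

```rust
use std::mem::MaybeUninit;

// Safety: MaybeUninit<u8> has the same layout as u8; the caller must not
// write uninitialized bytes through the returned slice.
unsafe fn slice_to_uninit_mut(slice: &mut [u8]) -> &mut [MaybeUninit<u8>] {
    &mut *(slice as *mut [u8] as *mut [MaybeUninit<u8>])
}

// Safety: the caller guarantees every element of `slice` is initialized.
unsafe fn slice_assume_init(slice: &[MaybeUninit<u8>]) -> &[u8] {
    &*(slice as *const [MaybeUninit<u8>] as *const [u8])
}

// Safety: the caller guarantees every element of `slice` is initialized.
unsafe fn slice_assume_init_mut(slice: &mut [MaybeUninit<u8>]) -> &mut [u8] {
    &mut *(slice as *mut [MaybeUninit<u8>] as *mut [u8])
}
```

Raw pointer casts keep the element types visible at the cast site, which is why the file-level `clippy::transmute_ptr_to_ptr` allowance can be dropped.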
let rem = me.buf.remaining(); if rem != 0 { - ready!(Pin::new(&mut *me.reader).poll_read(cx, &mut me.buf))?; + ready!(Pin::new(&mut *me.reader).poll_read(cx, me.buf))?; if me.buf.remaining() == rem { return Err(eof()).into(); } diff --git a/tokio/src/io/util/write_all.rs b/tokio/src/io/util/write_all.rs index e59d41e4d7b..abd3e39d310 100644 --- a/tokio/src/io/util/write_all.rs +++ b/tokio/src/io/util/write_all.rs @@ -42,7 +42,7 @@ where while !me.buf.is_empty() { let n = ready!(Pin::new(&mut *me.writer).poll_write(cx, me.buf))?; { - let (_, rest) = mem::replace(&mut *me.buf, &[]).split_at(n); + let (_, rest) = mem::take(&mut *me.buf).split_at(n); *me.buf = rest; } if n == 0 { diff --git a/tokio/src/lib.rs b/tokio/src/lib.rs index 9821c1a62f5..01878524e13 100644 --- a/tokio/src/lib.rs +++ b/tokio/src/lib.rs @@ -10,16 +10,11 @@ unreachable_pub )] #![deny(unused_must_use)] -#![cfg_attr(docsrs, deny(rustdoc::broken_intra_doc_links))] #![doc(test( no_crate_inject, attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables)) ))] #![cfg_attr(docsrs, feature(doc_cfg))] -#![cfg_attr(docsrs, feature(doc_cfg_hide))] -#![cfg_attr(docsrs, doc(cfg_hide(docsrs)))] -#![cfg_attr(docsrs, doc(cfg_hide(loom)))] -#![cfg_attr(docsrs, doc(cfg_hide(not(loom))))] #![cfg_attr(docsrs, allow(unused_attributes))] //! A runtime for writing reliable network applications without compromising speed. @@ -312,7 +307,7 @@ //! Beware though that this will pull in many extra dependencies that you may not //! need. //! -//! - `full`: Enables all Tokio public API features listed below except `test-util`. +//! - `full`: Enables all features listed below except `test-util` and `tracing`. //! - `rt`: Enables `tokio::spawn`, the basic (current thread) scheduler, //! and non-scheduler utilities. //! - `rt-multi-thread`: Enables the heavier, multi-threaded, work-stealing scheduler. @@ -351,6 +346,7 @@ //! `RUSTFLAGS="--cfg tokio_unstable"`. //! //! - `tracing`: Enables tracing events. +//! - `stats`: Enables runtime stats collection. ([RFC](https://github.com/tokio-rs/tokio/pull/3845)) //! //! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section diff --git a/tokio/src/loom/std/parking_lot.rs b/tokio/src/loom/std/parking_lot.rs index 8448bed53d7..034a0ce69a5 100644 --- a/tokio/src/loom/std/parking_lot.rs +++ b/tokio/src/loom/std/parking_lot.rs @@ -3,83 +3,143 @@ //! //! This can be extended to additional types/methods as required. +use std::fmt; +use std::marker::PhantomData; +use std::ops::{Deref, DerefMut}; use std::sync::LockResult; use std::time::Duration; +// All types in this file are marked with PhantomData to ensure that +// parking_lot's send_guard feature does not leak through and affect when Tokio +// types are Send. +// +// See for more info. + // Types that do not need wrapping -pub(crate) use parking_lot::{MutexGuard, RwLockReadGuard, RwLockWriteGuard, WaitTimeoutResult}; +pub(crate) use parking_lot::WaitTimeoutResult; + +#[derive(Debug)] +pub(crate) struct Mutex(PhantomData>, parking_lot::Mutex); + +#[derive(Debug)] +pub(crate) struct RwLock(PhantomData>, parking_lot::RwLock); + +#[derive(Debug)] +pub(crate) struct Condvar(PhantomData, parking_lot::Condvar); -/// Adapter for `parking_lot::Mutex` to the `std::sync::Mutex` interface. 
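The `PhantomData<std::sync::MutexGuard<'a, T>>` fields added to these wrappers work because auto traits are inferred from a type's fields: the wrapper guard is `Send`/`Sync` exactly when the corresponding std guard is, regardless of what parking_lot's `send_guard` feature does to its own guard types. A small illustrative sketch of the mechanism (the type names here are hypothetical, not Tokio's):

```rust
use std::marker::PhantomData;

// A guard type that would be Send on its own...
struct RawGuard;

// ...wrapped together with a marker whose auto traits mirror
// std::sync::MutexGuard, which is !Send.
struct WrappedGuard<'a, T: ?Sized> {
    _marker: PhantomData<std::sync::MutexGuard<'a, T>>,
    _raw: RawGuard,
}

fn assert_send<T: Send>() {}

fn main() {
    assert_send::<RawGuard>(); // compiles: RawGuard is Send
    // assert_send::<WrappedGuard<'_, u8>>(); // would not compile: marker is !Send
}
```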
#[derive(Debug)] -pub(crate) struct Mutex(parking_lot::Mutex); +pub(crate) struct MutexGuard<'a, T: ?Sized>( + PhantomData>, + parking_lot::MutexGuard<'a, T>, +); #[derive(Debug)] -pub(crate) struct RwLock(parking_lot::RwLock); +pub(crate) struct RwLockReadGuard<'a, T: ?Sized>( + PhantomData>, + parking_lot::RwLockReadGuard<'a, T>, +); -/// Adapter for `parking_lot::Condvar` to the `std::sync::Condvar` interface. #[derive(Debug)] -pub(crate) struct Condvar(parking_lot::Condvar); +pub(crate) struct RwLockWriteGuard<'a, T: ?Sized>( + PhantomData>, + parking_lot::RwLockWriteGuard<'a, T>, +); impl Mutex { #[inline] pub(crate) fn new(t: T) -> Mutex { - Mutex(parking_lot::Mutex::new(t)) + Mutex(PhantomData, parking_lot::Mutex::new(t)) } #[inline] #[cfg(all(feature = "parking_lot", not(all(loom, test)),))] #[cfg_attr(docsrs, doc(cfg(all(feature = "parking_lot",))))] pub(crate) const fn const_new(t: T) -> Mutex { - Mutex(parking_lot::const_mutex(t)) + Mutex(PhantomData, parking_lot::const_mutex(t)) } #[inline] pub(crate) fn lock(&self) -> MutexGuard<'_, T> { - self.0.lock() + MutexGuard(PhantomData, self.1.lock()) } #[inline] pub(crate) fn try_lock(&self) -> Option> { - self.0.try_lock() + self.1 + .try_lock() + .map(|guard| MutexGuard(PhantomData, guard)) } #[inline] pub(crate) fn get_mut(&mut self) -> &mut T { - self.0.get_mut() + self.1.get_mut() } // Note: Additional methods `is_poisoned` and `into_inner`, can be // provided here as needed. } +impl<'a, T: ?Sized> Deref for MutexGuard<'a, T> { + type Target = T; + fn deref(&self) -> &T { + self.1.deref() + } +} + +impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T> { + fn deref_mut(&mut self) -> &mut T { + self.1.deref_mut() + } +} + impl RwLock { pub(crate) fn new(t: T) -> RwLock { - RwLock(parking_lot::RwLock::new(t)) + RwLock(PhantomData, parking_lot::RwLock::new(t)) } pub(crate) fn read(&self) -> LockResult> { - Ok(self.0.read()) + Ok(RwLockReadGuard(PhantomData, self.1.read())) } pub(crate) fn write(&self) -> LockResult> { - Ok(self.0.write()) + Ok(RwLockWriteGuard(PhantomData, self.1.write())) + } +} + +impl<'a, T: ?Sized> Deref for RwLockReadGuard<'a, T> { + type Target = T; + fn deref(&self) -> &T { + self.1.deref() + } +} + +impl<'a, T: ?Sized> Deref for RwLockWriteGuard<'a, T> { + type Target = T; + fn deref(&self) -> &T { + self.1.deref() + } +} + +impl<'a, T: ?Sized> DerefMut for RwLockWriteGuard<'a, T> { + fn deref_mut(&mut self) -> &mut T { + self.1.deref_mut() } } impl Condvar { #[inline] pub(crate) fn new() -> Condvar { - Condvar(parking_lot::Condvar::new()) + Condvar(PhantomData, parking_lot::Condvar::new()) } #[inline] pub(crate) fn notify_one(&self) { - self.0.notify_one(); + self.1.notify_one(); } #[inline] pub(crate) fn notify_all(&self) { - self.0.notify_all(); + self.1.notify_all(); } #[inline] @@ -87,7 +147,7 @@ impl Condvar { &self, mut guard: MutexGuard<'a, T>, ) -> LockResult> { - self.0.wait(&mut guard); + self.1.wait(&mut guard.1); Ok(guard) } @@ -97,10 +157,28 @@ impl Condvar { mut guard: MutexGuard<'a, T>, timeout: Duration, ) -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)> { - let wtr = self.0.wait_for(&mut guard, timeout); + let wtr = self.1.wait_for(&mut guard.1, timeout); Ok((guard, wtr)) } // Note: Additional methods `wait_timeout_ms`, `wait_timeout_until`, // `wait_until` can be provided here as needed. 
} + +impl<'a, T: ?Sized + fmt::Display> fmt::Display for MutexGuard<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(&self.1, f) + } +} + +impl<'a, T: ?Sized + fmt::Display> fmt::Display for RwLockReadGuard<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(&self.1, f) + } +} + +impl<'a, T: ?Sized + fmt::Display> fmt::Display for RwLockWriteGuard<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(&self.1, f) + } +} diff --git a/tokio/src/macros/cfg.rs b/tokio/src/macros/cfg.rs index 606bce7689d..9fa30ca27d6 100644 --- a/tokio/src/macros/cfg.rs +++ b/tokio/src/macros/cfg.rs @@ -99,7 +99,6 @@ macro_rules! cfg_io_driver_impl { feature = "process", all(unix, feature = "signal"), ))] - #[cfg_attr(docsrs, doc(cfg(all())))] $item )* } @@ -179,7 +178,7 @@ macro_rules! cfg_stats { ($($item:item)*) => { $( #[cfg(all(tokio_unstable, feature = "stats"))] - #[cfg_attr(docsrs, doc(cfg(feature = "stats")))] + #[cfg_attr(docsrs, doc(cfg(all(tokio_unstable, feature = "stats"))))] $item )* } @@ -366,10 +365,10 @@ macro_rules! cfg_trace { ($($item:item)*) => { $( #[cfg(all(tokio_unstable, feature = "tracing"))] - #[cfg_attr(docsrs, doc(cfg(feature = "tracing")))] + #[cfg_attr(docsrs, doc(cfg(all(tokio_unstable, feature = "tracing"))))] $item )* - } + }; } macro_rules! cfg_not_trace { diff --git a/tokio/src/macros/trace.rs b/tokio/src/macros/trace.rs index 31dde2f255a..80a257e1899 100644 --- a/tokio/src/macros/trace.rs +++ b/tokio/src/macros/trace.rs @@ -1,9 +1,8 @@ cfg_trace! { macro_rules! trace_op { - ($name:literal, $readiness:literal, $parent:expr) => { + ($name:expr, $readiness:literal) => { tracing::trace!( target: "runtime::resource::poll_op", - parent: $parent, op_name = $name, is_ready = $readiness ); @@ -11,14 +10,14 @@ cfg_trace! { } macro_rules! trace_poll_op { - ($name:literal, $poll:expr, $parent:expr $(,)*) => { + ($name:expr, $poll:expr $(,)*) => { match $poll { std::task::Poll::Ready(t) => { - trace_op!($name, true, $parent); + trace_op!($name, true); std::task::Poll::Ready(t) } std::task::Poll::Pending => { - trace_op!($name, false, $parent); + trace_op!($name, false); return std::task::Poll::Pending; } } diff --git a/tokio/src/net/tcp/socket.rs b/tokio/src/net/tcp/socket.rs index fb75f75a5b8..3c6870221c2 100644 --- a/tokio/src/net/tcp/socket.rs +++ b/tokio/src/net/tcp/socket.rs @@ -8,6 +8,7 @@ use std::net::SocketAddr; use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; #[cfg(windows)] use std::os::windows::io::{AsRawSocket, FromRawSocket, IntoRawSocket, RawSocket}; +use std::time::Duration; cfg_net! { /// A TCP socket that has not yet been converted to a `TcpStream` or @@ -349,6 +350,28 @@ impl TcpSocket { self.inner.get_recv_buffer_size() } + /// Sets the linger duration of this socket by setting the SO_LINGER option. + /// + /// This option controls the action taken when a stream has unsent messages and the stream is + /// closed. If SO_LINGER is set, the system shall block the process until it can transmit the + /// data or until the time expires. + /// + /// If SO_LINGER is not specified, and the socket is closed, the system handles the call in a + /// way that allows the process to continue as quickly as possible. + pub fn set_linger(&self, dur: Option) -> io::Result<()> { + self.inner.set_linger(dur) + } + + /// Reads the linger duration for this socket by getting the `SO_LINGER` + /// option. 
+ /// + /// For more information about this option, see [`set_linger`]. + /// + /// [`set_linger`]: TcpSocket::set_linger + pub fn linger(&self) -> io::Result> { + self.inner.get_linger() + } + /// Gets the local address of this socket. /// /// Will fail on windows if called before `bind`. diff --git a/tokio/src/net/udp.rs b/tokio/src/net/udp.rs index 504d74eb491..12af5152c28 100644 --- a/tokio/src/net/udp.rs +++ b/tokio/src/net/udp.rs @@ -128,6 +128,10 @@ impl UdpSocket { /// This function will create a new UDP socket and attempt to bind it to /// the `addr` provided. /// + /// Binding with a port number of 0 will request that the OS assigns a port + /// to this listener. The port allocated can be queried via the `local_addr` + /// method. + /// /// # Example /// /// ```no_run diff --git a/tokio/src/process/mod.rs b/tokio/src/process/mod.rs index 6eeefdbef71..67d43b4d80e 100644 --- a/tokio/src/process/mod.rs +++ b/tokio/src/process/mod.rs @@ -264,6 +264,12 @@ impl Command { Self::from(StdCommand::new(program)) } + /// Cheaply convert to a `&std::process::Command` for places where the type from the standard + /// library is expected. + pub fn as_std(&self) -> &StdCommand { + &self.std + } + /// Adds an argument to pass to the program. /// /// Only one argument can be passed per use. So instead of: @@ -1124,19 +1130,26 @@ impl Child { pub async fn wait_with_output(mut self) -> io::Result { use crate::future::try_join3; - async fn read_to_end(io: Option) -> io::Result> { + async fn read_to_end(io: &mut Option) -> io::Result> { let mut vec = Vec::new(); - if let Some(mut io) = io { - crate::io::util::read_to_end(&mut io, &mut vec).await?; + if let Some(io) = io.as_mut() { + crate::io::util::read_to_end(io, &mut vec).await?; } Ok(vec) } - let stdout_fut = read_to_end(self.stdout.take()); - let stderr_fut = read_to_end(self.stderr.take()); + let mut stdout_pipe = self.stdout.take(); + let mut stderr_pipe = self.stderr.take(); + + let stdout_fut = read_to_end(&mut stdout_pipe); + let stderr_fut = read_to_end(&mut stderr_pipe); let (status, stdout, stderr) = try_join3(self.wait(), stdout_fut, stderr_fut).await?; + // Drop happens after `try_join` due to + drop(stdout_pipe); + drop(stderr_pipe); + Ok(Output { status, stdout, diff --git a/tokio/src/runtime/basic_scheduler.rs b/tokio/src/runtime/basic_scheduler.rs index 872d0d5b897..f70fa656925 100644 --- a/tokio/src/runtime/basic_scheduler.rs +++ b/tokio/src/runtime/basic_scheduler.rs @@ -3,10 +3,12 @@ use crate::loom::sync::atomic::AtomicBool; use crate::loom::sync::Mutex; use crate::park::{Park, Unpark}; use crate::runtime::context::EnterGuard; +use crate::runtime::driver::Driver; use crate::runtime::stats::{RuntimeStats, WorkerStatsBatcher}; use crate::runtime::task::{self, JoinHandle, OwnedTasks, Schedule, Task}; use crate::runtime::Callback; use crate::sync::notify::Notify; +use crate::util::atomic_cell::AtomicCell; use crate::util::{waker_ref, Wake, WakerRef}; use std::cell::RefCell; @@ -19,13 +21,12 @@ use std::task::Poll::{Pending, Ready}; use std::time::Duration; /// Executes tasks on the current thread -pub(crate) struct BasicScheduler { - /// Inner state guarded by a mutex that is shared - /// between all `block_on` calls. - inner: Mutex>>, +pub(crate) struct BasicScheduler { + /// Core scheduler data is acquired by a thread entering `block_on`. + core: AtomicCell, /// Notifier for waking up other threads to steal the - /// parker. + /// driver. 
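A short usage sketch for the `TcpSocket::set_linger`/`linger` accessors added above (assumes the `net` feature; creating the socket itself does not need a running runtime):

```rust
use std::time::Duration;
use tokio::net::TcpSocket;

fn main() -> std::io::Result<()> {
    let socket = TcpSocket::new_v4()?;

    // Ask the OS to block close() for up to 5 seconds while unsent
    // data is transmitted (SO_LINGER).
    socket.set_linger(Some(Duration::from_secs(5)))?;
    println!("linger = {:?}", socket.linger()?);

    Ok(())
}
```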
notify: Notify, /// Sendable task spawner @@ -38,15 +39,11 @@ pub(crate) struct BasicScheduler { context_guard: Option, } -/// The inner scheduler that owns the task queue and the main parker P. -struct Inner { +/// Data required for executing the scheduler. The struct is passed around to +/// a function that will perform the scheduling work and acts as a capability token. +struct Core { /// Scheduler run queue - /// - /// When the scheduler is executed, the queue is removed from `self` and - /// moved into `Context`. - /// - /// This indirection is to allow `BasicScheduler` to be `Send`. - tasks: Option, + tasks: VecDeque>>, /// Sendable task spawner spawner: Spawner, @@ -54,13 +51,10 @@ struct Inner { /// Current tick tick: u8, - /// Thread park handle - park: P, - - /// Callback for a worker parking itself - before_park: Option, - /// Callback for a worker unparking itself - after_unpark: Option, + /// Runtime driver + /// + /// The driver is removed before starting to park the thread + driver: Option, /// Stats batcher stats: WorkerStatsBatcher, @@ -71,13 +65,6 @@ pub(crate) struct Spawner { shared: Arc, } -struct Tasks { - /// Local run queue. - /// - /// Tasks notified from the current thread are pushed into this queue. - queue: VecDeque>>, -} - /// A remote scheduler entry. /// /// These are filled in by remote threads sending instructions to the scheduler. @@ -100,22 +87,29 @@ struct Shared { owned: OwnedTasks>, /// Unpark the blocked thread. - unpark: Box, + unpark: ::Unpark, /// Indicates whether the blocked on thread was woken. woken: AtomicBool, + /// Callback for a worker parking itself + before_park: Option, + + /// Callback for a worker unparking itself + after_unpark: Option, + /// Keeps track of various runtime stats. stats: RuntimeStats, } /// Thread-local context. struct Context { - /// Shared scheduler state - shared: Arc, + /// Handle to the spawner + spawner: Spawner, - /// Local queue - tasks: RefCell, + /// Scheduler core, enabling the holder of `Context` to execute the + /// scheduler. + core: RefCell>>, } /// Initial queue capacity. @@ -133,38 +127,36 @@ const REMOTE_FIRST_INTERVAL: u8 = 31; // Tracks the current BasicScheduler. scoped_thread_local!(static CURRENT: Context); -impl BasicScheduler
<P>
{ +impl BasicScheduler { pub(crate) fn new( - park: P, + driver: Driver, before_park: Option, after_unpark: Option, - ) -> BasicScheduler
<P>
{ - let unpark = Box::new(park.unpark()); + ) -> BasicScheduler { + let unpark = driver.unpark(); let spawner = Spawner { shared: Arc::new(Shared { queue: Mutex::new(Some(VecDeque::with_capacity(INITIAL_CAPACITY))), owned: OwnedTasks::new(), - unpark: unpark as Box, + unpark, woken: AtomicBool::new(false), + before_park, + after_unpark, stats: RuntimeStats::new(1), }), }; - let inner = Mutex::new(Some(Inner { - tasks: Some(Tasks { - queue: VecDeque::with_capacity(INITIAL_CAPACITY), - }), + let core = AtomicCell::new(Some(Box::new(Core { + tasks: VecDeque::with_capacity(INITIAL_CAPACITY), spawner: spawner.clone(), tick: 0, - park, - before_park, - after_unpark, + driver: Some(driver), stats: WorkerStatsBatcher::new(0), - })); + }))); BasicScheduler { - inner, + core, notify: Notify::new(), spawner, context_guard: None, @@ -178,12 +170,12 @@ impl BasicScheduler
<P>
{ pub(crate) fn block_on(&self, future: F) -> F::Output { pin!(future); - // Attempt to steal the dedicated parker and block_on the future if we can there, - // otherwise, lets select on a notification that the parker is available - // or the future is complete. + // Attempt to steal the scheduler core and block_on the future if we can + // there, otherwise, lets select on a notification that the core is + // available or the future is complete. loop { - if let Some(inner) = &mut self.take_inner() { - return inner.block_on(future); + if let Some(core) = self.take_core() { + return core.block_on(future); } else { let mut enter = crate::runtime::enter(false); @@ -210,11 +202,14 @@ impl BasicScheduler
<P>
{ } } - fn take_inner(&self) -> Option> { - let inner = self.inner.lock().take()?; + fn take_core(&self) -> Option> { + let core = self.core.take()?; - Some(InnerGuard { - inner: Some(inner), + Some(CoreGuard { + context: Context { + spawner: self.spawner.clone(), + core: RefCell::new(Some(core)), + }, basic_scheduler: self, }) } @@ -224,156 +219,109 @@ impl BasicScheduler
<P>
{ } } -impl Inner
<P>
{ - /// Blocks on the provided future and drives the runtime's driver. - fn block_on(&mut self, future: F) -> F::Output { - enter(self, |scheduler, context| { - let _enter = crate::runtime::enter(false); - let waker = scheduler.spawner.waker_ref(); - let mut cx = std::task::Context::from_waker(&waker); - - pin!(future); - - 'outer: loop { - if scheduler.spawner.reset_woken() { - scheduler.stats.incr_poll_count(); - if let Ready(v) = crate::coop::budget(|| future.as_mut().poll(&mut cx)) { - return v; - } - } +impl Context { + /// Execute the closure with the given scheduler core stored in the + /// thread-local context. + fn run_task(&self, mut core: Box, f: impl FnOnce() -> R) -> (Box, R) { + core.stats.incr_poll_count(); + self.enter(core, || crate::coop::budget(f)) + } - for _ in 0..MAX_TASKS_PER_TICK { - // Get and increment the current tick - let tick = scheduler.tick; - scheduler.tick = scheduler.tick.wrapping_add(1); + /// Blocks the current thread until an event is received by the driver, + /// including I/O events, timer events, ... + fn park(&self, mut core: Box) -> Box { + let mut driver = core.driver.take().expect("driver missing"); + + if let Some(f) = &self.spawner.shared.before_park { + // Incorrect lint, the closures are actually different types so `f` + // cannot be passed as an argument to `enter`. + #[allow(clippy::redundant_closure)] + let (c, _) = self.enter(core, || f()); + core = c; + } - let entry = if tick % REMOTE_FIRST_INTERVAL == 0 { - scheduler.spawner.pop().or_else(|| { - context - .tasks - .borrow_mut() - .queue - .pop_front() - .map(RemoteMsg::Schedule) - }) - } else { - context - .tasks - .borrow_mut() - .queue - .pop_front() - .map(RemoteMsg::Schedule) - .or_else(|| scheduler.spawner.pop()) - }; + // This check will fail if `before_park` spawns a task for us to run + // instead of parking the thread + if core.tasks.is_empty() { + // Park until the thread is signaled + core.stats.about_to_park(); + core.stats.submit(&core.spawner.shared.stats); - let entry = match entry { - Some(entry) => entry, - None => { - if let Some(f) = &scheduler.before_park { - f(); - } - // This check will fail if `before_park` spawns a task for us to run - // instead of parking the thread - if context.tasks.borrow_mut().queue.is_empty() { - // Park until the thread is signaled - scheduler.stats.about_to_park(); - scheduler.stats.submit(&scheduler.spawner.shared.stats); - scheduler.park.park().expect("failed to park"); - scheduler.stats.returned_from_park(); - } - if let Some(f) = &scheduler.after_unpark { - f(); - } + let (c, _) = self.enter(core, || { + driver.park().expect("failed to park"); + }); - // Try polling the `block_on` future next - continue 'outer; - } - }; + core = c; + core.stats.returned_from_park(); + } - match entry { - RemoteMsg::Schedule(task) => { - scheduler.stats.incr_poll_count(); - let task = context.shared.owned.assert_owner(task); - crate::coop::budget(|| task.run()) - } - } - } + if let Some(f) = &self.spawner.shared.after_unpark { + // Incorrect lint, the closures are actually different types so `f` + // cannot be passed as an argument to `enter`. + #[allow(clippy::redundant_closure)] + let (c, _) = self.enter(core, || f()); + core = c; + } - // Yield to the park, this drives the timer and pulls any pending - // I/O events. 
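The new `Context` helpers all follow the same hand-off shape: the caller owns `Box<Core>`, lends it to the thread-local context while a closure runs, and takes it back afterwards, so exactly one place can drive the scheduler at a time. A simplified standalone sketch of that pattern (the types below are illustrative stand-ins, not the real scheduler types):

```rust
use std::cell::RefCell;

struct Core {
    tick: u8,
}

struct Context {
    core: RefCell<Option<Box<Core>>>,
}

impl Context {
    // Lend `core` to the context for the duration of `f`, then reclaim it.
    // While `f` runs, other code on this thread can reach the core through
    // the context (for example, to push a newly scheduled task).
    fn enter<R>(&self, core: Box<Core>, f: impl FnOnce() -> R) -> (Box<Core>, R) {
        *self.core.borrow_mut() = Some(core);
        let ret = f();
        let core = self.core.borrow_mut().take().expect("core missing");
        (core, ret)
    }
}

fn main() {
    let cx = Context {
        core: RefCell::new(None),
    };
    let (core, sum) = cx.enter(Box::new(Core { tick: 0 }), || 1 + 1);
    assert_eq!((core.tick, sum), (0, 2));
}
```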
- scheduler.stats.submit(&scheduler.spawner.shared.stats); - scheduler - .park - .park_timeout(Duration::from_millis(0)) - .expect("failed to park"); - } - }) + core.driver = Some(driver); + core } -} -/// Enters the scheduler context. This sets the queue and other necessary -/// scheduler state in the thread-local. -fn enter(scheduler: &mut Inner
<P>
, f: F) -> R -where - F: FnOnce(&mut Inner
<P>
, &Context) -> R, - P: Park, -{ - // Ensures the run queue is placed back in the `BasicScheduler` instance - // once `block_on` returns.` - struct Guard<'a, P: Park> { - context: Option, - scheduler: &'a mut Inner
<P>
, - } + /// Checks the driver for new events without blocking the thread. + fn park_yield(&self, mut core: Box) -> Box { + let mut driver = core.driver.take().expect("driver missing"); - impl Drop for Guard<'_, P> { - fn drop(&mut self) { - let Context { tasks, .. } = self.context.take().expect("context missing"); - self.scheduler.tasks = Some(tasks.into_inner()); - } - } + core.stats.submit(&core.spawner.shared.stats); + let (mut core, _) = self.enter(core, || { + driver + .park_timeout(Duration::from_millis(0)) + .expect("failed to park"); + }); - // Remove `tasks` from `self` and place it in a `Context`. - let tasks = scheduler.tasks.take().expect("invalid state"); + core.driver = Some(driver); + core + } - let guard = Guard { - context: Some(Context { - shared: scheduler.spawner.shared.clone(), - tasks: RefCell::new(tasks), - }), - scheduler, - }; + fn enter(&self, core: Box, f: impl FnOnce() -> R) -> (Box, R) { + // Store the scheduler core in the thread-local context + // + // A drop-guard is employed at a higher level. + *self.core.borrow_mut() = Some(core); - let context = guard.context.as_ref().unwrap(); - let scheduler = &mut *guard.scheduler; + // Execute the closure while tracking the execution budget + let ret = f(); - CURRENT.set(context, || f(scheduler, context)) + // Take the scheduler core back + let core = self.core.borrow_mut().take().expect("core missing"); + (core, ret) + } } -impl Drop for BasicScheduler
<P>
{ +impl Drop for BasicScheduler { fn drop(&mut self) { // Avoid a double panic if we are currently panicking and // the lock may be poisoned. - let mut inner = match self.inner.lock().take() { - Some(inner) => inner, + let core = match self.take_core() { + Some(core) => core, None if std::thread::panicking() => return, - None => panic!("Oh no! We never placed the Inner state back, this is a bug!"), + None => panic!("Oh no! We never placed the Core back, this is a bug!"), }; - enter(&mut inner, |scheduler, context| { + core.enter(|mut core, context| { // Drain the OwnedTasks collection. This call also closes the // collection, ensuring that no tasks are ever pushed after this // call returns. - context.shared.owned.close_and_shutdown_all(); + context.spawner.shared.owned.close_and_shutdown_all(); // Drain local queue // We already shut down every task, so we just need to drop the task. - for task in context.tasks.borrow_mut().queue.drain(..) { + while let Some(task) = core.tasks.pop_front() { drop(task); } // Drain remote queue and set it to None - let remote_queue = scheduler.spawner.shared.queue.lock().take(); + let remote_queue = core.spawner.shared.queue.lock().take(); // Using `Option::take` to replace the shared queue with `None`. // We already shut down every task, so we just need to drop the task. @@ -387,12 +335,14 @@ impl Drop for BasicScheduler
<P>
{ } } - assert!(context.shared.owned.is_empty()); + assert!(context.spawner.shared.owned.is_empty()); + + (core, ()) }); } } -impl fmt::Debug for BasicScheduler
<P>
{ +impl fmt::Debug for BasicScheduler { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("BasicScheduler").finish() } @@ -455,8 +405,14 @@ impl Schedule for Arc { fn schedule(&self, task: task::Notified) { CURRENT.with(|maybe_cx| match maybe_cx { - Some(cx) if Arc::ptr_eq(self, &cx.shared) => { - cx.tasks.borrow_mut().queue.push_back(task); + Some(cx) if Arc::ptr_eq(self, &cx.spawner.shared) => { + let mut core = cx.core.borrow_mut(); + + // If `None`, the runtime is shutting down, so there is no need + // to schedule the task. + if let Some(core) = core.as_mut() { + core.tasks.push_back(task); + } } _ => { // If the queue is None, then the runtime has shut down. We @@ -484,35 +440,107 @@ impl Wake for Shared { } } -// ===== InnerGuard ===== +// ===== CoreGuard ===== -/// Used to ensure we always place the Inner value -/// back into its slot in `BasicScheduler`, even if the -/// future panics. -struct InnerGuard<'a, P: Park> { - inner: Option>, - basic_scheduler: &'a BasicScheduler
<P>
, +/// Used to ensure we always place the `Core` value back into its slot in +/// `BasicScheduler`, even if the future panics. +struct CoreGuard<'a> { + context: Context, + basic_scheduler: &'a BasicScheduler, } -impl InnerGuard<'_, P> { - fn block_on(&mut self, future: F) -> F::Output { - // The only time inner gets set to `None` is if we have dropped - // already so this unwrap is safe. - self.inner.as_mut().unwrap().block_on(future) +impl CoreGuard<'_> { + fn block_on(self, future: F) -> F::Output { + self.enter(|mut core, context| { + let _enter = crate::runtime::enter(false); + let waker = context.spawner.waker_ref(); + let mut cx = std::task::Context::from_waker(&waker); + + pin!(future); + + 'outer: loop { + if core.spawner.reset_woken() { + let (c, res) = context.run_task(core, || future.as_mut().poll(&mut cx)); + + core = c; + + if let Ready(v) = res { + return (core, v); + } + } + + for _ in 0..MAX_TASKS_PER_TICK { + // Get and increment the current tick + let tick = core.tick; + core.tick = core.tick.wrapping_add(1); + + let entry = if tick % REMOTE_FIRST_INTERVAL == 0 { + core.spawner + .pop() + .or_else(|| core.tasks.pop_front().map(RemoteMsg::Schedule)) + } else { + core.tasks + .pop_front() + .map(RemoteMsg::Schedule) + .or_else(|| core.spawner.pop()) + }; + + let entry = match entry { + Some(entry) => entry, + None => { + core = context.park(core); + + // Try polling the `block_on` future next + continue 'outer; + } + }; + + match entry { + RemoteMsg::Schedule(task) => { + let task = context.spawner.shared.owned.assert_owner(task); + + let (c, _) = context.run_task(core, || { + task.run(); + }); + + core = c; + } + } + } + + // Yield to the driver, this drives the timer and pulls any + // pending I/O events. + core = context.park_yield(core); + } + }) + } + + /// Enters the scheduler context. This sets the queue and other necessary + /// scheduler state in the thread-local. + fn enter(self, f: F) -> R + where + F: FnOnce(Box, &Context) -> (Box, R), + { + // Remove `core` from `context` to pass into the closure. + let core = self.context.core.borrow_mut().take().expect("core missing"); + + // Call the closure and place `core` back + let (core, ret) = CURRENT.set(&self.context, || f(core, &self.context)); + + *self.context.core.borrow_mut() = Some(core); + + ret } } -impl Drop for InnerGuard<'_, P> { +impl Drop for CoreGuard<'_> { fn drop(&mut self) { - if let Some(scheduler) = self.inner.take() { - let mut lock = self.basic_scheduler.inner.lock(); - + if let Some(core) = self.context.core.borrow_mut().take() { // Replace old scheduler back into the state to allow // other threads to pick it up and drive it. - lock.replace(scheduler); + self.basic_scheduler.core.set(core); - // Wake up other possible threads that could steal - // the dedicated parker P. + // Wake up other possible threads that could steal the driver. 
self.basic_scheduler.notify.notify_one() } } diff --git a/tokio/src/runtime/blocking/pool.rs b/tokio/src/runtime/blocking/pool.rs index 77ab4958683..bb6c1ee6606 100644 --- a/tokio/src/runtime/blocking/pool.rs +++ b/tokio/src/runtime/blocking/pool.rs @@ -244,7 +244,7 @@ impl Spawner { rt.blocking_spawner.inner.run(id); drop(shutdown_tx); }) - .unwrap() + .expect("OS can't spawn a new worker thread") } } diff --git a/tokio/src/runtime/handle.rs b/tokio/src/runtime/handle.rs index cd1cb760a3b..612205cccfa 100644 --- a/tokio/src/runtime/handle.rs +++ b/tokio/src/runtime/handle.rs @@ -26,7 +26,11 @@ pub struct Handle { /// Handles to the signal drivers #[cfg_attr( - not(any(feature = "signal", all(unix, feature = "process"))), + any( + loom, + not(all(unix, feature = "signal")), + not(all(unix, feature = "process")), + ), allow(dead_code) )] pub(super) signal_handle: driver::SignalHandle, @@ -157,7 +161,7 @@ impl Handle { /// }); /// # } /// ``` - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub fn spawn(&self, future: F) -> JoinHandle where F: Future + Send + 'static, @@ -187,7 +191,7 @@ impl Handle { /// println!("now running on a worker thread"); /// }); /// # } - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub fn spawn_blocking(&self, func: F) -> JoinHandle where F: FnOnce() -> R + Send + 'static, @@ -200,7 +204,7 @@ impl Handle { } } - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub(crate) fn spawn_blocking_inner(&self, func: F, name: Option<&str>) -> JoinHandle where F: FnOnce() -> R + Send + 'static, @@ -211,9 +215,7 @@ impl Handle { #[cfg(all(tokio_unstable, feature = "tracing"))] let fut = { use tracing::Instrument; - #[cfg(tokio_track_caller)] let location = std::panic::Location::caller(); - #[cfg(tokio_track_caller)] let span = tracing::trace_span!( target: "tokio::task::blocking", "runtime.spawn", @@ -222,14 +224,6 @@ impl Handle { "fn" = %std::any::type_name::(), spawn.location = %format_args!("{}:{}:{}", location.file(), location.line(), location.column()), ); - #[cfg(not(tokio_track_caller))] - let span = tracing::trace_span!( - target: "tokio::task::blocking", - "runtime.spawn", - kind = %"blocking", - task.name = %name.unwrap_or_default(), - "fn" = %std::any::type_name::(), - ); fut.instrument(span) }; @@ -311,7 +305,7 @@ impl Handle { /// [`tokio::fs`]: crate::fs /// [`tokio::net`]: crate::net /// [`tokio::time`]: crate::time - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub fn block_on(&self, future: F) -> F::Output { #[cfg(all(tokio_unstable, feature = "tracing"))] let future = crate::util::trace::task(future, "block_on", None); diff --git a/tokio/src/runtime/mod.rs b/tokio/src/runtime/mod.rs index 96bb47c1ded..e77c5e3a0f8 100644 --- a/tokio/src/runtime/mod.rs +++ b/tokio/src/runtime/mod.rs @@ -283,7 +283,7 @@ cfg_rt! { #[derive(Debug)] enum Kind { /// Execute all tasks on the current-thread. - CurrentThread(BasicScheduler), + CurrentThread(BasicScheduler), /// Execute tasks across multiple threads. #[cfg(feature = "rt-multi-thread")] @@ -375,7 +375,7 @@ cfg_rt! { /// }); /// # } /// ``` - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub fn spawn(&self, future: F) -> JoinHandle where F: Future + Send + 'static, @@ -400,7 +400,7 @@ cfg_rt! 
{ /// println!("now running on a worker thread"); /// }); /// # } - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub fn spawn_blocking(&self, func: F) -> JoinHandle where F: FnOnce() -> R + Send + 'static, @@ -450,7 +450,7 @@ cfg_rt! { /// ``` /// /// [handle]: fn@Handle::block_on - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub fn block_on(&self, future: F) -> F::Output { #[cfg(all(tokio_unstable, feature = "tracing"))] let future = crate::util::trace::task(future, "block_on", None); @@ -582,7 +582,7 @@ cfg_rt! { match self::context::try_enter(self.handle.clone()) { Some(guard) => basic.set_context_guard(guard), None => { - // The context thread-local has alread been destroyed. + // The context thread-local has already been destroyed. // // We don't set the guard in this case. Calls to tokio::spawn in task // destructors would fail regardless if this happens. diff --git a/tokio/src/runtime/stats/mod.rs b/tokio/src/runtime/stats/mod.rs index 5e08e8ec4d9..355e400602d 100644 --- a/tokio/src/runtime/stats/mod.rs +++ b/tokio/src/runtime/stats/mod.rs @@ -1,5 +1,11 @@ //! This module contains information need to view information about how the //! runtime is performing. +//! +//! **Note**: This is an [unstable API][unstable]. The public API of types in +//! this module may break in 1.x releases. See [the documentation on unstable +//! features][unstable] for details. +//! +//! [unstable]: crate#unstable-features #![allow(clippy::module_inception)] cfg_stats! { diff --git a/tokio/src/runtime/stats/stats.rs b/tokio/src/runtime/stats/stats.rs index b2bcaccaa73..375786300e7 100644 --- a/tokio/src/runtime/stats/stats.rs +++ b/tokio/src/runtime/stats/stats.rs @@ -5,12 +5,24 @@ use std::convert::TryFrom; use std::time::{Duration, Instant}; /// This type contains methods to retrieve stats from a Tokio runtime. +/// +/// **Note**: This is an [unstable API][unstable]. The public API of this type +/// may break in 1.x releases. See [the documentation on unstable +/// features][unstable] for details. +/// +/// [unstable]: crate#unstable-features #[derive(Debug)] pub struct RuntimeStats { workers: Box<[WorkerStats]>, } /// This type contains methods to retrieve stats from a worker thread on a Tokio runtime. +/// +/// **Note**: This is an [unstable API][unstable]. The public API of this type +/// may break in 1.x releases. See [the documentation on unstable +/// features][unstable] for details. +/// +/// [unstable]: crate#unstable-features #[derive(Debug)] #[repr(align(128))] pub struct WorkerStats { diff --git a/tokio/src/runtime/task/mod.rs b/tokio/src/runtime/task/mod.rs index 1f18209a6fb..0592cca1a09 100644 --- a/tokio/src/runtime/task/mod.rs +++ b/tokio/src/runtime/task/mod.rs @@ -25,7 +25,7 @@ //! //! The task uses a reference count to keep track of how many active references //! exist. The Unowned reference type takes up two ref-counts. All other -//! reference types take pu a single ref-count. +//! reference types take up a single ref-count. //! //! Besides the waker type, each task has at most one of each reference type. //! 
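Dropping the `tokio_track_caller` cfg is possible because `#[track_caller]` is stable from Rust 1.46, the new minimum supported version; it is what lets the spawn tracing spans record the user's call site unconditionally. A minimal sketch of the mechanism (the function name is hypothetical):

```rust
use std::panic::Location;

// With #[track_caller], Location::caller() reports the location of the
// *caller* of this function instead of the line below.
#[track_caller]
fn record_spawn_site() -> &'static Location<'static> {
    Location::caller()
}

fn main() {
    let loc = record_spawn_site();
    println!("spawned from {}:{}:{}", loc.file(), loc.line(), loc.column());
}
```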
diff --git a/tokio/src/runtime/tests/loom_basic_scheduler.rs b/tokio/src/runtime/tests/loom_basic_scheduler.rs index d2894b9b27e..a772603f711 100644 --- a/tokio/src/runtime/tests/loom_basic_scheduler.rs +++ b/tokio/src/runtime/tests/loom_basic_scheduler.rs @@ -34,20 +34,22 @@ fn assert_at_most_num_polls(rt: Arc, at_most_polls: usize) { #[test] fn block_on_num_polls() { loom::model(|| { - // we expect at most 3 number of polls because there are - // three points at which we poll the future. At any of these - // points it can be ready: + // we expect at most 4 number of polls because there are three points at + // which we poll the future and an opportunity for a false-positive.. At + // any of these points it can be ready: // - // - when we fail to steal the parker and we block on a - // notification that it is available. + // - when we fail to steal the parker and we block on a notification + // that it is available. // // - when we steal the parker and we schedule the future // - // - when the future is woken up and we have ran the max - // number of tasks for the current tick or there are no - // more tasks to run. + // - when the future is woken up and we have ran the max number of tasks + // for the current tick or there are no more tasks to run. // - let at_most = 3; + // - a thread is notified that the parker is available but a third + // thread acquires it before the notified thread can. + // + let at_most = 4; let rt1 = Arc::new(Builder::new_current_thread().build().unwrap()); let rt2 = rt1.clone(); diff --git a/tokio/src/runtime/thread_pool/idle.rs b/tokio/src/runtime/thread_pool/idle.rs index 6b7ee1289ce..a57bf6a0b13 100644 --- a/tokio/src/runtime/thread_pool/idle.rs +++ b/tokio/src/runtime/thread_pool/idle.rs @@ -64,7 +64,7 @@ impl Idle { // A worker should be woken up, atomically increment the number of // searching workers as well as the number of unparked workers. - State::unpark_one(&self.state); + State::unpark_one(&self.state, 1); // Get the worker to unpark let ret = sleepers.pop(); @@ -111,7 +111,9 @@ impl Idle { /// Unpark a specific worker. This happens if tasks are submitted from /// within the worker's park routine. - pub(super) fn unpark_worker_by_id(&self, worker_id: usize) { + /// + /// Returns `true` if the worker was parked before calling the method. + pub(super) fn unpark_worker_by_id(&self, worker_id: usize) -> bool { let mut sleepers = self.sleepers.lock(); for index in 0..sleepers.len() { @@ -119,11 +121,13 @@ impl Idle { sleepers.swap_remove(index); // Update the state accordingly while the lock is held. - State::unpark_one(&self.state); + State::unpark_one(&self.state, 0); - return; + return true; } } + + false } /// Returns `true` if `worker_id` is contained in the sleep set. @@ -151,8 +155,8 @@ impl State { State(cell.load(ordering)) } - fn unpark_one(cell: &AtomicUsize) { - cell.fetch_add(1 | (1 << UNPARK_SHIFT), SeqCst); + fn unpark_one(cell: &AtomicUsize, num_searching: usize) { + cell.fetch_add(num_searching | (1 << UNPARK_SHIFT), SeqCst); } fn inc_num_searching(cell: &AtomicUsize, ordering: Ordering) { diff --git a/tokio/src/runtime/thread_pool/mod.rs b/tokio/src/runtime/thread_pool/mod.rs index 82e34c78d28..3e1ce448215 100644 --- a/tokio/src/runtime/thread_pool/mod.rs +++ b/tokio/src/runtime/thread_pool/mod.rs @@ -1,8 +1,5 @@ //! 
Threadpool -mod atomic_cell; -use atomic_cell::AtomicCell; - mod idle; use self::idle::Idle; diff --git a/tokio/src/runtime/thread_pool/worker.rs b/tokio/src/runtime/thread_pool/worker.rs index ae8efe6724f..f499e14b834 100644 --- a/tokio/src/runtime/thread_pool/worker.rs +++ b/tokio/src/runtime/thread_pool/worker.rs @@ -66,8 +66,9 @@ use crate::runtime::enter::EnterContext; use crate::runtime::park::{Parker, Unparker}; use crate::runtime::stats::{RuntimeStats, WorkerStatsBatcher}; use crate::runtime::task::{Inject, JoinHandle, OwnedTasks}; -use crate::runtime::thread_pool::{AtomicCell, Idle}; +use crate::runtime::thread_pool::Idle; use crate::runtime::{queue, task, Callback}; +use crate::util::atomic_cell::AtomicCell; use crate::util::FastRand; use std::cell::RefCell; @@ -510,8 +511,9 @@ impl Context { // Place `park` back in `core` core.park = Some(park); - // If there are tasks available to steal, notify a worker - if core.run_queue.is_stealable() { + // If there are tasks available to steal, but this worker is not + // looking for tasks to steal, notify another worker. + if !core.is_searching && core.run_queue.is_stealable() { self.worker.shared.notify_parked(); } @@ -620,8 +622,11 @@ impl Core { // If a task is in the lifo slot, then we must unpark regardless of // being notified if self.lifo_slot.is_some() { - worker.shared.idle.unpark_worker_by_id(worker.index); - self.is_searching = true; + // When a worker wakes, it should only transition to the "searching" + // state when the wake originates from another worker *or* a new task + // is pushed. We do *not* want the worker to transition to "searching" + // when it wakes when the I/O driver receives new events. + self.is_searching = !worker.shared.idle.unpark_worker_by_id(worker.index); return true; } diff --git a/tokio/src/sync/barrier.rs b/tokio/src/sync/barrier.rs index 0e39dac8bb5..dfc76a40ebf 100644 --- a/tokio/src/sync/barrier.rs +++ b/tokio/src/sync/barrier.rs @@ -1,5 +1,7 @@ use crate::loom::sync::Mutex; use crate::sync::watch; +#[cfg(all(tokio_unstable, feature = "tracing"))] +use crate::util::trace; /// A barrier enables multiple tasks to synchronize the beginning of some computation. /// @@ -41,6 +43,8 @@ pub struct Barrier { state: Mutex, wait: watch::Receiver, n: usize, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, } #[derive(Debug)] @@ -55,6 +59,7 @@ impl Barrier { /// /// A barrier will block `n`-1 tasks which call [`Barrier::wait`] and then wake up all /// tasks at once when the `n`th task calls `wait`. 
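As the surrounding `Barrier` docs state, `Barrier::new(n)` parks the first `n - 1` callers of `wait` and releases all of them when the `n`-th arrives, with exactly one waiter seeing `is_leader() == true`. A short usage sketch (assumes the `sync`, `rt` and `macros` features):

```rust
use std::sync::Arc;
use tokio::sync::Barrier;

#[tokio::main]
async fn main() {
    let barrier = Arc::new(Barrier::new(3));

    let mut handles = Vec::new();
    for _ in 0..3 {
        let barrier = barrier.clone();
        handles.push(tokio::spawn(async move { barrier.wait().await }));
    }

    // Exactly one of the three wait() results reports itself as the leader.
    let mut leaders = 0;
    for handle in handles {
        if handle.await.unwrap().is_leader() {
            leaders += 1;
        }
    }
    assert_eq!(leaders, 1);
}
```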
+ #[track_caller] pub fn new(mut n: usize) -> Barrier { let (waker, wait) = crate::sync::watch::channel(0); @@ -65,6 +70,32 @@ impl Barrier { n = 1; } + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = { + let location = std::panic::Location::caller(); + let resource_span = tracing::trace_span!( + "runtime.resource", + concrete_type = "Barrier", + kind = "Sync", + loc.file = location.file(), + loc.line = location.line(), + loc.col = location.column(), + ); + + resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + size = n, + ); + + tracing::trace!( + target: "runtime::resource::state_update", + arrived = 0, + ) + }); + resource_span + }; + Barrier { state: Mutex::new(BarrierState { waker, @@ -73,6 +104,8 @@ impl Barrier { }), n, wait, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: resource_span, } } @@ -85,6 +118,20 @@ impl Barrier { /// [`BarrierWaitResult::is_leader`] when returning from this function, and all other tasks /// will receive a result that will return `false` from `is_leader`. pub async fn wait(&self) -> BarrierWaitResult { + #[cfg(all(tokio_unstable, feature = "tracing"))] + return trace::async_op( + || self.wait_internal(), + self.resource_span.clone(), + "Barrier::wait", + "poll", + false, + ) + .await; + + #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] + return self.wait_internal().await; + } + async fn wait_internal(&self) -> BarrierWaitResult { // NOTE: we are taking a _synchronous_ lock here. // It is okay to do so because the critical section is fast and never yields, so it cannot // deadlock even if another future is concurrently holding the lock. @@ -96,7 +143,23 @@ impl Barrier { let mut state = self.state.lock(); let generation = state.generation; state.arrived += 1; + #[cfg(all(tokio_unstable, feature = "tracing"))] + tracing::trace!( + target: "runtime::resource::state_update", + arrived = 1, + arrived.op = "add", + ); + #[cfg(all(tokio_unstable, feature = "tracing"))] + tracing::trace!( + target: "runtime::resource::async_op::state_update", + arrived = true, + ); if state.arrived == self.n { + #[cfg(all(tokio_unstable, feature = "tracing"))] + tracing::trace!( + target: "runtime::resource::async_op::state_update", + is_leader = true, + ); // we are the leader for this generation // wake everyone, increment the generation, and return state diff --git a/tokio/src/sync/batch_semaphore.rs b/tokio/src/sync/batch_semaphore.rs index b5c39d2a05d..4f5effff319 100644 --- a/tokio/src/sync/batch_semaphore.rs +++ b/tokio/src/sync/batch_semaphore.rs @@ -19,6 +19,8 @@ use crate::loom::cell::UnsafeCell; use crate::loom::sync::atomic::AtomicUsize; use crate::loom::sync::{Mutex, MutexGuard}; use crate::util::linked_list::{self, LinkedList}; +#[cfg(all(tokio_unstable, feature = "tracing"))] +use crate::util::trace; use crate::util::WakeList; use std::future::Future; @@ -35,6 +37,8 @@ pub(crate) struct Semaphore { waiters: Mutex, /// The current number of available permits in the semaphore. permits: AtomicUsize, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, } struct Waitlist { @@ -101,6 +105,9 @@ struct Waiter { /// use `UnsafeCell` internally. pointers: linked_list::Pointers, + #[cfg(all(tokio_unstable, feature = "tracing"))] + ctx: trace::AsyncOpTracingCtx, + /// Should not be `Unpin`. 
_p: PhantomPinned, } @@ -129,12 +136,34 @@ impl Semaphore { "a semaphore may not have more than MAX_PERMITS permits ({})", Self::MAX_PERMITS ); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = { + let resource_span = tracing::trace_span!( + "runtime.resource", + concrete_type = "Semaphore", + kind = "Sync", + is_internal = true + ); + + resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + permits = permits, + permits.op = "override", + ) + }); + resource_span + }; + Self { permits: AtomicUsize::new(permits << Self::PERMIT_SHIFT), waiters: Mutex::new(Waitlist { queue: LinkedList::new(), closed: false, }), + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } @@ -156,6 +185,8 @@ impl Semaphore { queue: LinkedList::new(), closed: false, }), + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span::none(), } } @@ -224,7 +255,10 @@ impl Semaphore { let next = curr - num_permits; match self.permits.compare_exchange(curr, next, AcqRel, Acquire) { - Ok(_) => return Ok(()), + Ok(_) => { + // TODO: Instrument once issue has been solved} + return Ok(()); + } Err(actual) => curr = actual, } } @@ -283,6 +317,17 @@ impl Semaphore { rem, Self::MAX_PERMITS ); + + // add remaining permits back + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + permits = rem, + permits.op = "add", + ) + }); + rem = 0; } @@ -347,6 +392,20 @@ impl Semaphore { acquired += acq; if remaining == 0 { if !queued { + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + permits = acquired, + permits.op = "sub", + ); + tracing::trace!( + target: "runtime::resource::async_op::state_update", + permits_obtained = acquired, + permits.op = "add", + ) + }); + return Ready(Ok(())); } else if lock.is_none() { break self.waiters.lock(); @@ -362,6 +421,15 @@ impl Semaphore { return Ready(Err(AcquireError::closed())); } + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + permits = acquired, + permits.op = "sub", + ) + }); + if node.assign_permits(&mut acquired) { self.add_permits_locked(acquired, waiters); return Ready(Ok(())); @@ -406,11 +474,16 @@ impl fmt::Debug for Semaphore { } impl Waiter { - fn new(num_permits: u32) -> Self { + fn new( + num_permits: u32, + #[cfg(all(tokio_unstable, feature = "tracing"))] ctx: trace::AsyncOpTracingCtx, + ) -> Self { Waiter { waker: UnsafeCell::new(None), state: AtomicUsize::new(num_permits as usize), pointers: linked_list::Pointers::new(), + #[cfg(all(tokio_unstable, feature = "tracing"))] + ctx, _p: PhantomPinned, } } @@ -426,6 +499,14 @@ impl Waiter { match self.state.compare_exchange(curr, next, AcqRel, Acquire) { Ok(_) => { *n -= assign; + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.ctx.async_op_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::async_op::state_update", + permits_obtained = assign, + permits.op = "add", + ); + }); return next == 0; } Err(actual) => curr = actual, @@ -438,12 +519,26 @@ impl Future for Acquire<'_> { type Output = Result<(), AcquireError>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - // First, ensure the current task has enough budget to proceed. 
- let coop = ready!(crate::coop::poll_proceed(cx)); + #[cfg(all(tokio_unstable, feature = "tracing"))] + let _resource_span = self.node.ctx.resource_span.clone().entered(); + #[cfg(all(tokio_unstable, feature = "tracing"))] + let _async_op_span = self.node.ctx.async_op_span.clone().entered(); + #[cfg(all(tokio_unstable, feature = "tracing"))] + let _async_op_poll_span = self.node.ctx.async_op_poll_span.clone().entered(); let (node, semaphore, needed, queued) = self.project(); - match semaphore.poll_acquire(cx, needed, node, *queued) { + // First, ensure the current task has enough budget to proceed. + #[cfg(all(tokio_unstable, feature = "tracing"))] + let coop = ready!(trace_poll_op!( + "poll_acquire", + crate::coop::poll_proceed(cx), + )); + + #[cfg(not(all(tokio_unstable, feature = "tracing")))] + let coop = ready!(crate::coop::poll_proceed(cx)); + + let result = match semaphore.poll_acquire(cx, needed, node, *queued) { Pending => { *queued = true; Pending @@ -454,18 +549,59 @@ impl Future for Acquire<'_> { *queued = false; Ready(Ok(())) } - } + }; + + #[cfg(all(tokio_unstable, feature = "tracing"))] + return trace_poll_op!("poll_acquire", result); + + #[cfg(not(all(tokio_unstable, feature = "tracing")))] + return result; } } impl<'a> Acquire<'a> { fn new(semaphore: &'a Semaphore, num_permits: u32) -> Self { - Self { + #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] + return Self { node: Waiter::new(num_permits), semaphore, num_permits, queued: false, - } + }; + + #[cfg(all(tokio_unstable, feature = "tracing"))] + return semaphore.resource_span.in_scope(|| { + let async_op_span = + tracing::trace_span!("runtime.resource.async_op", source = "Acquire::new"); + let async_op_poll_span = async_op_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::async_op::state_update", + permits_requested = num_permits, + permits.op = "override", + ); + + tracing::trace!( + target: "runtime::resource::async_op::state_update", + permits_obtained = 0 as usize, + permits.op = "override", + ); + + tracing::trace_span!("runtime.resource.async_op.poll") + }); + + let ctx = trace::AsyncOpTracingCtx { + async_op_span, + async_op_poll_span, + resource_span: semaphore.resource_span.clone(), + }; + + Self { + node: Waiter::new(num_permits, ctx), + semaphore, + num_permits, + queued: false, + } + }); } fn project(self: Pin<&mut Self>) -> (Pin<&mut Waiter>, &Semaphore, u32, &mut bool) { diff --git a/tokio/src/sync/mutex.rs b/tokio/src/sync/mutex.rs index 4d9f9886d76..2476726bdea 100644 --- a/tokio/src/sync/mutex.rs +++ b/tokio/src/sync/mutex.rs @@ -1,6 +1,8 @@ #![cfg_attr(not(feature = "sync"), allow(unreachable_pub, dead_code))] use crate::sync::batch_semaphore as semaphore; +#[cfg(all(tokio_unstable, feature = "tracing"))] +use crate::util::trace; use std::cell::UnsafeCell; use std::error::Error; @@ -124,6 +126,8 @@ use std::{fmt, marker, mem}; /// [`Send`]: trait@std::marker::Send /// [`lock`]: method@Mutex::lock pub struct Mutex { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, s: semaphore::Semaphore, c: UnsafeCell, } @@ -138,6 +142,8 @@ pub struct Mutex { /// The lock is automatically released whenever the guard is dropped, at which /// point `lock` will succeed yet again. 
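As the guard docs above describe, the lock is held only while the guard is alive; dropping the guard releases the mutex. A brief sketch (assumes the `sync` feature):

```rust
use tokio::sync::Mutex;

#[tokio::main]
async fn main() {
    let mutex = Mutex::new(0u32);

    {
        let mut guard = mutex.lock().await;
        *guard += 1;
    } // guard dropped here, lock released

    // The mutex is free again, so try_lock succeeds immediately.
    assert_eq!(*mutex.try_lock().unwrap(), 1);
}
```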
pub struct MutexGuard<'a, T: ?Sized> { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, lock: &'a Mutex, } @@ -157,6 +163,8 @@ pub struct MutexGuard<'a, T: ?Sized> { /// /// [`Arc`]: std::sync::Arc pub struct OwnedMutexGuard { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, lock: Arc>, } @@ -242,13 +250,42 @@ impl Mutex { /// /// let lock = Mutex::new(5); /// ``` + #[track_caller] pub fn new(t: T) -> Self where T: Sized, { + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = { + let location = std::panic::Location::caller(); + + tracing::trace_span!( + "runtime.resource", + concrete_type = "Mutex", + kind = "Sync", + loc.file = location.file(), + loc.line = location.line(), + loc.col = location.column(), + ) + }; + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let s = resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + locked = false, + ); + semaphore::Semaphore::new(1) + }); + + #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] + let s = semaphore::Semaphore::new(1); + Self { c: UnsafeCell::new(t), - s: semaphore::Semaphore::new(1), + s, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } @@ -270,6 +307,8 @@ impl Mutex { Self { c: UnsafeCell::new(t), s: semaphore::Semaphore::const_new(1), + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span::none(), } } @@ -297,8 +336,32 @@ impl Mutex { /// } /// ``` pub async fn lock(&self) -> MutexGuard<'_, T> { + #[cfg(all(tokio_unstable, feature = "tracing"))] + trace::async_op( + || self.acquire(), + self.resource_span.clone(), + "Mutex::lock", + "poll", + false, + ) + .await; + + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + locked = true, + ); + }); + + #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] self.acquire().await; - MutexGuard { lock: self } + + MutexGuard { + lock: self, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: self.resource_span.clone(), + } } /// Blocking lock this mutex. 
When the lock has been acquired, function returns a @@ -368,8 +431,35 @@ impl Mutex { /// /// [`Arc`]: std::sync::Arc pub async fn lock_owned(self: Arc) -> OwnedMutexGuard { + #[cfg(all(tokio_unstable, feature = "tracing"))] + trace::async_op( + || self.acquire(), + self.resource_span.clone(), + "Mutex::lock_owned", + "poll", + false, + ) + .await; + + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + locked = true, + ); + }); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = self.resource_span.clone(); + + #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] self.acquire().await; - OwnedMutexGuard { lock: self } + + OwnedMutexGuard { + lock: self, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, + } } async fn acquire(&self) { @@ -399,7 +489,21 @@ impl Mutex { /// ``` pub fn try_lock(&self) -> Result, TryLockError> { match self.s.try_acquire(1) { - Ok(_) => Ok(MutexGuard { lock: self }), + Ok(_) => { + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + locked = true, + ); + }); + + Ok(MutexGuard { + lock: self, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: self.resource_span.clone(), + }) + } Err(_) => Err(TryLockError(())), } } @@ -454,7 +558,24 @@ impl Mutex { /// # } pub fn try_lock_owned(self: Arc) -> Result, TryLockError> { match self.s.try_acquire(1) { - Ok(_) => Ok(OwnedMutexGuard { lock: self }), + Ok(_) => { + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + locked = true, + ); + }); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = self.resource_span.clone(); + + Ok(OwnedMutexGuard { + lock: self, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, + }) + } Err(_) => Err(TryLockError(())), } } @@ -637,7 +758,14 @@ impl<'a, T: ?Sized> MutexGuard<'a, T> { impl Drop for MutexGuard<'_, T> { fn drop(&mut self) { - self.lock.s.release(1) + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + locked = false, + ); + }); + self.lock.s.release(1); } } @@ -699,6 +827,13 @@ impl OwnedMutexGuard { impl Drop for OwnedMutexGuard { fn drop(&mut self) { + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + locked = false, + ); + }); self.lock.s.release(1) } } diff --git a/tokio/src/sync/oneshot.rs b/tokio/src/sync/oneshot.rs index 08a2d9e49d7..2240074e733 100644 --- a/tokio/src/sync/oneshot.rs +++ b/tokio/src/sync/oneshot.rs @@ -122,6 +122,8 @@ use crate::loom::cell::UnsafeCell; use crate::loom::sync::atomic::AtomicUsize; use crate::loom::sync::Arc; +#[cfg(all(tokio_unstable, feature = "tracing"))] +use crate::util::trace; use std::fmt; use std::future::Future; @@ -215,6 +217,8 @@ use std::task::{Context, Poll, Waker}; #[derive(Debug)] pub struct Sender { inner: Option>>, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, } /// Receives a value from the associated [`Sender`]. 
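The owned-guard methods exist so a guard can outlive the borrow of the mutex, for example when it has to move into a spawned task. A brief sketch using `lock_owned` on an `Arc<Mutex<_>>`:

```rust
use std::sync::Arc;
use tokio::sync::Mutex;

#[tokio::main]
async fn main() {
    let data = Arc::new(Mutex::new(Vec::<u32>::new()));

    // OwnedMutexGuard is 'static, so it can be moved into the task.
    let mut guard = data.clone().lock_owned().await;
    let worker = tokio::spawn(async move {
        guard.push(1);
        // guard dropped at the end of the task, releasing the lock
    });

    worker.await.unwrap();
    assert_eq!(data.lock().await.len(), 1);
}
```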
@@ -305,6 +309,12 @@ pub struct Sender { #[derive(Debug)] pub struct Receiver { inner: Option>>, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, + #[cfg(all(tokio_unstable, feature = "tracing"))] + async_op_span: tracing::Span, + #[cfg(all(tokio_unstable, feature = "tracing"))] + async_op_poll_span: tracing::Span, } pub mod error { @@ -442,7 +452,56 @@ struct State(usize); /// } /// } /// ``` +#[track_caller] pub fn channel() -> (Sender, Receiver) { + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = { + let location = std::panic::Location::caller(); + + let resource_span = tracing::trace_span!( + "runtime.resource", + concrete_type = "Sender|Receiver", + kind = "Sync", + loc.file = location.file(), + loc.line = location.line(), + loc.col = location.column(), + ); + + resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + tx_dropped = false, + tx_dropped.op = "override", + ) + }); + + resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + rx_dropped = false, + rx_dropped.op = "override", + ) + }); + + resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + value_sent = false, + value_sent.op = "override", + ) + }); + + resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + value_received = false, + value_received.op = "override", + ) + }); + + resource_span + }; + let inner = Arc::new(Inner { state: AtomicUsize::new(State::new().as_usize()), value: UnsafeCell::new(None), @@ -452,8 +511,27 @@ pub fn channel() -> (Sender, Receiver) { let tx = Sender { inner: Some(inner.clone()), + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: resource_span.clone(), + }; + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let async_op_span = resource_span + .in_scope(|| tracing::trace_span!("runtime.resource.async_op", source = "Receiver::await")); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let async_op_poll_span = + async_op_span.in_scope(|| tracing::trace_span!("runtime.resource.async_op.poll")); + + let rx = Receiver { + inner: Some(inner), + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: resource_span, + #[cfg(all(tokio_unstable, feature = "tracing"))] + async_op_span, + #[cfg(all(tokio_unstable, feature = "tracing"))] + async_op_poll_span, }; - let rx = Receiver { inner: Some(inner) }; (tx, rx) } @@ -525,6 +603,15 @@ impl Sender { } } + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + value_sent = true, + value_sent.op = "override", + ) + }); + Ok(()) } @@ -598,7 +685,20 @@ impl Sender { pub async fn closed(&mut self) { use crate::future::poll_fn; - poll_fn(|cx| self.poll_closed(cx)).await + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = self.resource_span.clone(); + #[cfg(all(tokio_unstable, feature = "tracing"))] + let closed = trace::async_op( + || poll_fn(|cx| self.poll_closed(cx)), + resource_span, + "Sender::closed", + "poll_closed", + false, + ); + #[cfg(not(all(tokio_unstable, feature = "tracing")))] + let closed = poll_fn(|cx| self.poll_closed(cx)); + + closed.await } /// Returns `true` if the associated [`Receiver`] handle has been dropped. 
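The spans and events above are compiled in only when Tokio is built with `RUSTFLAGS="--cfg tokio_unstable"` and the `tracing` feature; with a suitable subscriber they can then be observed from user code. A hedged sketch, assuming the `tracing-subscriber` crate with its `env-filter` feature (not part of this patch):

```rust
use tokio::sync::oneshot;

#[tokio::main]
async fn main() {
    // Lower the filter for the instrumentation targets so the trace-level
    // state updates emitted by this patch become visible.
    tracing_subscriber::fmt()
        .with_env_filter("runtime::resource=trace")
        .init();

    let (tx, rx) = oneshot::channel();
    tx.send("hello").unwrap(); // records value_sent = true on the resource span
    assert_eq!(rx.await.unwrap(), "hello"); // records value_received = true
}
```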
@@ -728,6 +828,14 @@ impl Drop for Sender { fn drop(&mut self) { if let Some(inner) = self.inner.as_ref() { inner.complete(); + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + tx_dropped = true, + tx_dropped.op = "override", + ) + }); } } } @@ -795,6 +903,14 @@ impl Receiver { pub fn close(&mut self) { if let Some(inner) = self.inner.as_ref() { inner.close(); + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + rx_dropped = true, + rx_dropped.op = "override", + ) + }); } } @@ -872,7 +988,17 @@ impl Receiver { // `UnsafeCell`. Therefore, it is now safe for us to access the // cell. match unsafe { inner.consume_value() } { - Some(value) => Ok(value), + Some(value) => { + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + value_received = true, + value_received.op = "override", + ) + }); + Ok(value) + } None => Err(TryRecvError::Closed), } } else if state.is_closed() { @@ -888,12 +1014,50 @@ impl Receiver { self.inner = None; result } + + /// Blocking receive to call outside of asynchronous contexts. + /// + /// # Panics + /// + /// This function panics if called within an asynchronous execution + /// context. + /// + /// # Examples + /// + /// ``` + /// use std::thread; + /// use tokio::sync::oneshot; + /// + /// #[tokio::main] + /// async fn main() { + /// let (tx, rx) = oneshot::channel::(); + /// + /// let sync_code = thread::spawn(move || { + /// assert_eq!(Ok(10), rx.blocking_recv()); + /// }); + /// + /// let _ = tx.send(10); + /// sync_code.join().unwrap(); + /// } + /// ``` + #[cfg(feature = "sync")] + pub fn blocking_recv(self) -> Result { + crate::future::block_on(self) + } } impl Drop for Receiver { fn drop(&mut self) { if let Some(inner) = self.inner.as_ref() { inner.close(); + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + rx_dropped = true, + rx_dropped.op = "override", + ) + }); } } } @@ -903,8 +1067,21 @@ impl Future for Receiver { fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { // If `inner` is `None`, then `poll()` has already completed. + #[cfg(all(tokio_unstable, feature = "tracing"))] + let _res_span = self.resource_span.clone().entered(); + #[cfg(all(tokio_unstable, feature = "tracing"))] + let _ao_span = self.async_op_span.clone().entered(); + #[cfg(all(tokio_unstable, feature = "tracing"))] + let _ao_poll_span = self.async_op_poll_span.clone().entered(); + let ret = if let Some(inner) = self.as_ref().get_ref().inner.as_ref() { - ready!(inner.poll_recv(cx))? 
+ #[cfg(all(tokio_unstable, feature = "tracing"))] + let res = ready!(trace_poll_op!("poll_recv", inner.poll_recv(cx)))?; + + #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] + let res = ready!(inner.poll_recv(cx))?; + + res } else { panic!("called after complete"); }; diff --git a/tokio/src/sync/rwlock.rs b/tokio/src/sync/rwlock.rs index 120bc72b848..6991f0e8b6a 100644 --- a/tokio/src/sync/rwlock.rs +++ b/tokio/src/sync/rwlock.rs @@ -1,5 +1,7 @@ use crate::sync::batch_semaphore::{Semaphore, TryAcquireError}; use crate::sync::mutex::TryLockError; +#[cfg(all(tokio_unstable, feature = "tracing"))] +use crate::util::trace; use std::cell::UnsafeCell; use std::marker; use std::marker::PhantomData; @@ -86,6 +88,9 @@ const MAX_READS: u32 = 10; /// [_write-preferring_]: https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock#Priority_policies #[derive(Debug)] pub struct RwLock { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, + // maximum number of concurrent readers mr: u32, @@ -197,14 +202,55 @@ impl RwLock { /// /// let lock = RwLock::new(5); /// ``` + #[track_caller] pub fn new(value: T) -> RwLock where T: Sized, { + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = { + let location = std::panic::Location::caller(); + let resource_span = tracing::trace_span!( + "runtime.resource", + concrete_type = "RwLock", + kind = "Sync", + loc.file = location.file(), + loc.line = location.line(), + loc.col = location.column(), + ); + + resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + max_readers = MAX_READS, + ); + + tracing::trace!( + target: "runtime::resource::state_update", + write_locked = false, + ); + + tracing::trace!( + target: "runtime::resource::state_update", + current_readers = 0, + ); + }); + + resource_span + }; + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let s = resource_span.in_scope(|| Semaphore::new(MAX_READS as usize)); + + #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] + let s = Semaphore::new(MAX_READS as usize); + RwLock { mr: MAX_READS, c: UnsafeCell::new(value), - s: Semaphore::new(MAX_READS as usize), + s, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } @@ -222,6 +268,7 @@ impl RwLock { /// # Panics /// /// Panics if `max_reads` is more than `u32::MAX >> 3`. 
+ #[track_caller] pub fn with_max_readers(value: T, max_reads: u32) -> RwLock where T: Sized, @@ -231,10 +278,52 @@ impl RwLock { "a RwLock may not be created with more than {} readers", MAX_READS ); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = { + let location = std::panic::Location::caller(); + + let resource_span = tracing::trace_span!( + "runtime.resource", + concrete_type = "RwLock", + kind = "Sync", + loc.file = location.file(), + loc.line = location.line(), + loc.col = location.column(), + ); + + resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + max_readers = max_reads, + ); + + tracing::trace!( + target: "runtime::resource::state_update", + write_locked = false, + ); + + tracing::trace!( + target: "runtime::resource::state_update", + current_readers = 0, + ); + }); + + resource_span + }; + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let s = resource_span.in_scope(|| Semaphore::new(max_reads as usize)); + + #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] + let s = Semaphore::new(max_reads as usize); + RwLock { mr: max_reads, c: UnsafeCell::new(value), - s: Semaphore::new(max_reads as usize), + s, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } @@ -257,6 +346,8 @@ impl RwLock { mr: MAX_READS, c: UnsafeCell::new(value), s: Semaphore::const_new(MAX_READS as usize), + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span::none(), } } @@ -281,6 +372,8 @@ impl RwLock { mr: max_reads, c: UnsafeCell::new(value), s: Semaphore::const_new(max_reads as usize), + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span::none(), } } @@ -330,15 +423,39 @@ impl RwLock { ///} /// ``` pub async fn read(&self) -> RwLockReadGuard<'_, T> { - self.s.acquire(1).await.unwrap_or_else(|_| { + #[cfg(all(tokio_unstable, feature = "tracing"))] + let inner = trace::async_op( + || self.s.acquire(1), + self.resource_span.clone(), + "RwLock::read", + "poll", + false, + ); + + #[cfg(not(all(tokio_unstable, feature = "tracing")))] + let inner = self.s.acquire(1); + + inner.await.unwrap_or_else(|_| { // The semaphore was closed. but, we never explicitly close it, and we have a // handle to it through the Arc, which means that this can never happen. unreachable!() }); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + current_readers = 1, + current_readers.op = "add", + ) + }); + RwLockReadGuard { s: &self.s, data: self.c.get(), marker: marker::PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: self.resource_span.clone(), } } @@ -394,15 +511,42 @@ impl RwLock { ///} /// ``` pub async fn read_owned(self: Arc) -> OwnedRwLockReadGuard { - self.s.acquire(1).await.unwrap_or_else(|_| { + #[cfg(all(tokio_unstable, feature = "tracing"))] + let inner = trace::async_op( + || self.s.acquire(1), + self.resource_span.clone(), + "RwLock::read_owned", + "poll", + false, + ); + + #[cfg(not(all(tokio_unstable, feature = "tracing")))] + let inner = self.s.acquire(1); + + inner.await.unwrap_or_else(|_| { // The semaphore was closed. but, we never explicitly close it, and we have a // handle to it through the Arc, which means that this can never happen. 
unreachable!() }); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + current_readers = 1, + current_readers.op = "add", + ) + }); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = self.resource_span.clone(); + OwnedRwLockReadGuard { data: self.c.get(), lock: ManuallyDrop::new(self), _p: PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } @@ -445,10 +589,21 @@ impl RwLock { Err(TryAcquireError::Closed) => unreachable!(), } + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + current_readers = 1, + current_readers.op = "add", + ) + }); + Ok(RwLockReadGuard { s: &self.s, data: self.c.get(), marker: marker::PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: self.resource_span.clone(), }) } @@ -497,10 +652,24 @@ impl RwLock { Err(TryAcquireError::Closed) => unreachable!(), } + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + current_readers = 1, + current_readers.op = "add", + ) + }); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = self.resource_span.clone(); + Ok(OwnedRwLockReadGuard { data: self.c.get(), lock: ManuallyDrop::new(self), _p: PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, }) } @@ -533,16 +702,40 @@ impl RwLock { ///} /// ``` pub async fn write(&self) -> RwLockWriteGuard<'_, T> { - self.s.acquire(self.mr).await.unwrap_or_else(|_| { + #[cfg(all(tokio_unstable, feature = "tracing"))] + let inner = trace::async_op( + || self.s.acquire(self.mr), + self.resource_span.clone(), + "RwLock::write", + "poll", + false, + ); + + #[cfg(not(all(tokio_unstable, feature = "tracing")))] + let inner = self.s.acquire(self.mr); + + inner.await.unwrap_or_else(|_| { // The semaphore was closed. but, we never explicitly close it, and we have a // handle to it through the Arc, which means that this can never happen. unreachable!() }); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + write_locked = true, + write_locked.op = "override", + ) + }); + RwLockWriteGuard { permits_acquired: self.mr, s: &self.s, data: self.c.get(), marker: marker::PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: self.resource_span.clone(), } } @@ -582,16 +775,43 @@ impl RwLock { ///} /// ``` pub async fn write_owned(self: Arc) -> OwnedRwLockWriteGuard { - self.s.acquire(self.mr).await.unwrap_or_else(|_| { + #[cfg(all(tokio_unstable, feature = "tracing"))] + let inner = trace::async_op( + || self.s.acquire(self.mr), + self.resource_span.clone(), + "RwLock::write_owned", + "poll", + false, + ); + + #[cfg(not(all(tokio_unstable, feature = "tracing")))] + let inner = self.s.acquire(self.mr); + + inner.await.unwrap_or_else(|_| { // The semaphore was closed. but, we never explicitly close it, and we have a // handle to it through the Arc, which means that this can never happen. 
unreachable!() }); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + write_locked = true, + write_locked.op = "override", + ) + }); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = self.resource_span.clone(); + OwnedRwLockWriteGuard { permits_acquired: self.mr, data: self.c.get(), lock: ManuallyDrop::new(self), _p: PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } @@ -625,11 +845,22 @@ impl RwLock { Err(TryAcquireError::Closed) => unreachable!(), } + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + write_locked = true, + write_locked.op = "override", + ) + }); + Ok(RwLockWriteGuard { permits_acquired: self.mr, s: &self.s, data: self.c.get(), marker: marker::PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: self.resource_span.clone(), }) } @@ -670,11 +901,25 @@ impl RwLock { Err(TryAcquireError::Closed) => unreachable!(), } + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + write_locked = true, + write_locked.op = "override", + ) + }); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = self.resource_span.clone(); + Ok(OwnedRwLockWriteGuard { permits_acquired: self.mr, data: self.c.get(), lock: ManuallyDrop::new(self), _p: PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, }) } diff --git a/tokio/src/sync/rwlock/owned_read_guard.rs b/tokio/src/sync/rwlock/owned_read_guard.rs index 1881295846e..27b71bd988b 100644 --- a/tokio/src/sync/rwlock/owned_read_guard.rs +++ b/tokio/src/sync/rwlock/owned_read_guard.rs @@ -15,6 +15,8 @@ use std::sync::Arc; /// [`read_owned`]: method@crate::sync::RwLock::read_owned /// [`RwLock`]: struct@crate::sync::RwLock pub struct OwnedRwLockReadGuard { + #[cfg(all(tokio_unstable, feature = "tracing"))] + pub(super) resource_span: tracing::Span, // ManuallyDrop allows us to destructure into this field without running the destructor. pub(super) lock: ManuallyDrop>>, pub(super) data: *const U, @@ -56,12 +58,17 @@ impl OwnedRwLockReadGuard { { let data = f(&*this) as *const V; let lock = unsafe { ManuallyDrop::take(&mut this.lock) }; + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = this.resource_span.clone(); // NB: Forget to avoid drop impl from being called. mem::forget(this); + OwnedRwLockReadGuard { lock: ManuallyDrop::new(lock), data, _p: PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } @@ -105,12 +112,17 @@ impl OwnedRwLockReadGuard { None => return Err(this), }; let lock = unsafe { ManuallyDrop::take(&mut this.lock) }; + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = this.resource_span.clone(); // NB: Forget to avoid drop impl from being called. 
mem::forget(this); + Ok(OwnedRwLockReadGuard { lock: ManuallyDrop::new(lock), data, _p: PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, }) } } @@ -145,5 +157,14 @@ impl Drop for OwnedRwLockReadGuard { fn drop(&mut self) { self.lock.s.release(1); unsafe { ManuallyDrop::drop(&mut self.lock) }; + + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + current_readers = 1, + current_readers.op = "sub", + ) + }); } } diff --git a/tokio/src/sync/rwlock/owned_write_guard.rs b/tokio/src/sync/rwlock/owned_write_guard.rs index 0a78d28e903..dbedab4cbb2 100644 --- a/tokio/src/sync/rwlock/owned_write_guard.rs +++ b/tokio/src/sync/rwlock/owned_write_guard.rs @@ -16,6 +16,8 @@ use std::sync::Arc; /// [`write_owned`]: method@crate::sync::RwLock::write_owned /// [`RwLock`]: struct@crate::sync::RwLock pub struct OwnedRwLockWriteGuard { + #[cfg(all(tokio_unstable, feature = "tracing"))] + pub(super) resource_span: tracing::Span, pub(super) permits_acquired: u32, // ManuallyDrop allows us to destructure into this field without running the destructor. pub(super) lock: ManuallyDrop>>, @@ -64,13 +66,18 @@ impl OwnedRwLockWriteGuard { let data = f(&mut *this) as *mut U; let lock = unsafe { ManuallyDrop::take(&mut this.lock) }; let permits_acquired = this.permits_acquired; + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = this.resource_span.clone(); // NB: Forget to avoid drop impl from being called. mem::forget(this); + OwnedRwLockMappedWriteGuard { permits_acquired, lock: ManuallyDrop::new(lock), data, _p: PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } @@ -123,13 +130,19 @@ impl OwnedRwLockWriteGuard { }; let permits_acquired = this.permits_acquired; let lock = unsafe { ManuallyDrop::take(&mut this.lock) }; + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = this.resource_span.clone(); + // NB: Forget to avoid drop impl from being called. mem::forget(this); + Ok(OwnedRwLockMappedWriteGuard { permits_acquired, lock: ManuallyDrop::new(lock), data, _p: PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, }) } @@ -181,15 +194,39 @@ impl OwnedRwLockWriteGuard { pub fn downgrade(mut self) -> OwnedRwLockReadGuard { let lock = unsafe { ManuallyDrop::take(&mut self.lock) }; let data = self.data; + let to_release = (self.permits_acquired - 1) as usize; // Release all but one of the permits held by the write guard - lock.s.release((self.permits_acquired - 1) as usize); + lock.s.release(to_release); + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + write_locked = false, + write_locked.op = "override", + ) + }); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + current_readers = 1, + current_readers.op = "add", + ) + }); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = self.resource_span.clone(); // NB: Forget to avoid drop impl from being called. 
mem::forget(self); + OwnedRwLockReadGuard { lock: ManuallyDrop::new(lock), data, _p: PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } } @@ -229,6 +266,14 @@ where impl Drop for OwnedRwLockWriteGuard { fn drop(&mut self) { self.lock.s.release(self.permits_acquired as usize); + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + write_locked = false, + write_locked.op = "override", + ) + }); unsafe { ManuallyDrop::drop(&mut self.lock) }; } } diff --git a/tokio/src/sync/rwlock/owned_write_guard_mapped.rs b/tokio/src/sync/rwlock/owned_write_guard_mapped.rs index d88ee01e1fd..55a24d96ac3 100644 --- a/tokio/src/sync/rwlock/owned_write_guard_mapped.rs +++ b/tokio/src/sync/rwlock/owned_write_guard_mapped.rs @@ -15,6 +15,8 @@ use std::sync::Arc; /// [mapping]: method@crate::sync::OwnedRwLockWriteGuard::map /// [`OwnedRwLockWriteGuard`]: struct@crate::sync::OwnedRwLockWriteGuard pub struct OwnedRwLockMappedWriteGuard { + #[cfg(all(tokio_unstable, feature = "tracing"))] + pub(super) resource_span: tracing::Span, pub(super) permits_acquired: u32, // ManuallyDrop allows us to destructure into this field without running the destructor. pub(super) lock: ManuallyDrop>>, @@ -63,13 +65,18 @@ impl OwnedRwLockMappedWriteGuard { let data = f(&mut *this) as *mut V; let lock = unsafe { ManuallyDrop::take(&mut this.lock) }; let permits_acquired = this.permits_acquired; + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = this.resource_span.clone(); // NB: Forget to avoid drop impl from being called. mem::forget(this); + OwnedRwLockMappedWriteGuard { permits_acquired, lock: ManuallyDrop::new(lock), data, _p: PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } @@ -120,13 +127,18 @@ impl OwnedRwLockMappedWriteGuard { }; let lock = unsafe { ManuallyDrop::take(&mut this.lock) }; let permits_acquired = this.permits_acquired; + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = this.resource_span.clone(); // NB: Forget to avoid drop impl from being called. 
mem::forget(this); + Ok(OwnedRwLockMappedWriteGuard { permits_acquired, lock: ManuallyDrop::new(lock), data, _p: PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, }) } } @@ -166,6 +178,14 @@ where impl Drop for OwnedRwLockMappedWriteGuard { fn drop(&mut self) { self.lock.s.release(self.permits_acquired as usize); + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + write_locked = false, + write_locked.op = "override", + ) + }); unsafe { ManuallyDrop::drop(&mut self.lock) }; } } diff --git a/tokio/src/sync/rwlock/read_guard.rs b/tokio/src/sync/rwlock/read_guard.rs index 090b297e4af..36921319923 100644 --- a/tokio/src/sync/rwlock/read_guard.rs +++ b/tokio/src/sync/rwlock/read_guard.rs @@ -13,6 +13,8 @@ use std::ops; /// [`read`]: method@crate::sync::RwLock::read /// [`RwLock`]: struct@crate::sync::RwLock pub struct RwLockReadGuard<'a, T: ?Sized> { + #[cfg(all(tokio_unstable, feature = "tracing"))] + pub(super) resource_span: tracing::Span, pub(super) s: &'a Semaphore, pub(super) data: *const T, pub(super) marker: marker::PhantomData<&'a T>, @@ -59,12 +61,17 @@ impl<'a, T: ?Sized> RwLockReadGuard<'a, T> { { let data = f(&*this) as *const U; let s = this.s; + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = this.resource_span.clone(); // NB: Forget to avoid drop impl from being called. mem::forget(this); + RwLockReadGuard { s, data, marker: marker::PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } @@ -113,12 +120,17 @@ impl<'a, T: ?Sized> RwLockReadGuard<'a, T> { None => return Err(this), }; let s = this.s; + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = this.resource_span.clone(); // NB: Forget to avoid drop impl from being called. mem::forget(this); + Ok(RwLockReadGuard { s, data, marker: marker::PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, }) } } @@ -152,5 +164,14 @@ where impl<'a, T: ?Sized> Drop for RwLockReadGuard<'a, T> { fn drop(&mut self) { self.s.release(1); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + current_readers = 1, + current_readers.op = "sub", + ) + }); } } diff --git a/tokio/src/sync/rwlock/write_guard.rs b/tokio/src/sync/rwlock/write_guard.rs index 8c80ee70db4..7cadd74c60d 100644 --- a/tokio/src/sync/rwlock/write_guard.rs +++ b/tokio/src/sync/rwlock/write_guard.rs @@ -15,6 +15,8 @@ use std::ops; /// [`write`]: method@crate::sync::RwLock::write /// [`RwLock`]: struct@crate::sync::RwLock pub struct RwLockWriteGuard<'a, T: ?Sized> { + #[cfg(all(tokio_unstable, feature = "tracing"))] + pub(super) resource_span: tracing::Span, pub(super) permits_acquired: u32, pub(super) s: &'a Semaphore, pub(super) data: *mut T, @@ -66,6 +68,8 @@ impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> { let data = f(&mut *this) as *mut U; let s = this.s; let permits_acquired = this.permits_acquired; + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = this.resource_span.clone(); // NB: Forget to avoid drop impl from being called. 
mem::forget(this); RwLockMappedWriteGuard { @@ -73,6 +77,8 @@ impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> { s, data, marker: marker::PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } @@ -129,6 +135,8 @@ impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> { }; let s = this.s; let permits_acquired = this.permits_acquired; + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = this.resource_span.clone(); // NB: Forget to avoid drop impl from being called. mem::forget(this); Ok(RwLockMappedWriteGuard { @@ -136,6 +144,8 @@ impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> { s, data, marker: marker::PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, }) } @@ -188,15 +198,38 @@ impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> { /// [`RwLock`]: struct@crate::sync::RwLock pub fn downgrade(self) -> RwLockReadGuard<'a, T> { let RwLockWriteGuard { s, data, .. } = self; - + let to_release = (self.permits_acquired - 1) as usize; // Release all but one of the permits held by the write guard - s.release((self.permits_acquired - 1) as usize); + s.release(to_release); + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + write_locked = false, + write_locked.op = "override", + ) + }); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + current_readers = 1, + current_readers.op = "add", + ) + }); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = self.resource_span.clone(); // NB: Forget to avoid drop impl from being called. mem::forget(self); + RwLockReadGuard { s, data, marker: marker::PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } } @@ -236,5 +269,14 @@ where impl<'a, T: ?Sized> Drop for RwLockWriteGuard<'a, T> { fn drop(&mut self) { self.s.release(self.permits_acquired as usize); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + write_locked = false, + write_locked.op = "override", + ) + }); } } diff --git a/tokio/src/sync/rwlock/write_guard_mapped.rs b/tokio/src/sync/rwlock/write_guard_mapped.rs index 3cf69de4bdd..b5c644a9e83 100644 --- a/tokio/src/sync/rwlock/write_guard_mapped.rs +++ b/tokio/src/sync/rwlock/write_guard_mapped.rs @@ -14,6 +14,8 @@ use std::ops; /// [mapping]: method@crate::sync::RwLockWriteGuard::map /// [`RwLockWriteGuard`]: struct@crate::sync::RwLockWriteGuard pub struct RwLockMappedWriteGuard<'a, T: ?Sized> { + #[cfg(all(tokio_unstable, feature = "tracing"))] + pub(super) resource_span: tracing::Span, pub(super) permits_acquired: u32, pub(super) s: &'a Semaphore, pub(super) data: *mut T, @@ -64,13 +66,18 @@ impl<'a, T: ?Sized> RwLockMappedWriteGuard<'a, T> { let data = f(&mut *this) as *mut U; let s = this.s; let permits_acquired = this.permits_acquired; + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = this.resource_span.clone(); // NB: Forget to avoid drop impl from being called. 
mem::forget(this); + RwLockMappedWriteGuard { permits_acquired, s, data, marker: marker::PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } @@ -126,13 +133,18 @@ impl<'a, T: ?Sized> RwLockMappedWriteGuard<'a, T> { }; let s = this.s; let permits_acquired = this.permits_acquired; + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = this.resource_span.clone(); // NB: Forget to avoid drop impl from being called. mem::forget(this); + Ok(RwLockMappedWriteGuard { permits_acquired, s, data, marker: marker::PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, }) } } @@ -172,5 +184,14 @@ where impl<'a, T: ?Sized> Drop for RwLockMappedWriteGuard<'a, T> { fn drop(&mut self) { self.s.release(self.permits_acquired as usize); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + write_locked = false, + write_locked.op = "override", + ) + }); } } diff --git a/tokio/src/sync/semaphore.rs b/tokio/src/sync/semaphore.rs index 839b523c4ce..860f46f3998 100644 --- a/tokio/src/sync/semaphore.rs +++ b/tokio/src/sync/semaphore.rs @@ -1,5 +1,7 @@ use super::batch_semaphore as ll; // low level implementation use super::{AcquireError, TryAcquireError}; +#[cfg(all(tokio_unstable, feature = "tracing"))] +use crate::util::trace; use std::sync::Arc; /// Counting semaphore performing asynchronous permit acquisition. @@ -77,6 +79,8 @@ use std::sync::Arc; pub struct Semaphore { /// The low level semaphore ll_sem: ll::Semaphore, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, } /// A permit from the semaphore. @@ -120,9 +124,33 @@ fn bounds() { impl Semaphore { /// Creates a new semaphore with the initial number of permits. + #[track_caller] pub fn new(permits: usize) -> Self { + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = { + let location = std::panic::Location::caller(); + + tracing::trace_span!( + "runtime.resource", + concrete_type = "Semaphore", + kind = "Sync", + loc.file = location.file(), + loc.line = location.line(), + loc.col = location.column(), + inherits_child_attrs = true, + ) + }; + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let ll_sem = resource_span.in_scope(|| ll::Semaphore::new(permits)); + + #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] + let ll_sem = ll::Semaphore::new(permits); + Self { - ll_sem: ll::Semaphore::new(permits), + ll_sem, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } @@ -139,9 +167,16 @@ impl Semaphore { #[cfg(all(feature = "parking_lot", not(all(loom, test))))] #[cfg_attr(docsrs, doc(cfg(feature = "parking_lot")))] pub const fn const_new(permits: usize) -> Self { - Self { + #[cfg(all(tokio_unstable, feature = "tracing"))] + return Self { ll_sem: ll::Semaphore::const_new(permits), - } + resource_span: tracing::Span::none(), + }; + + #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] + return Self { + ll_sem: ll::Semaphore::const_new(permits), + }; } /// Returns the current number of available permits. 
@@ -191,7 +226,18 @@ impl Semaphore { /// [`AcquireError`]: crate::sync::AcquireError /// [`SemaphorePermit`]: crate::sync::SemaphorePermit pub async fn acquire(&self) -> Result, AcquireError> { - self.ll_sem.acquire(1).await?; + #[cfg(all(tokio_unstable, feature = "tracing"))] + let inner = trace::async_op( + || self.ll_sem.acquire(1), + self.resource_span.clone(), + "Semaphore::acquire", + "poll", + true, + ); + #[cfg(not(all(tokio_unstable, feature = "tracing")))] + let inner = self.ll_sem.acquire(1); + + inner.await?; Ok(SemaphorePermit { sem: self, permits: 1, @@ -227,7 +273,19 @@ impl Semaphore { /// [`AcquireError`]: crate::sync::AcquireError /// [`SemaphorePermit`]: crate::sync::SemaphorePermit pub async fn acquire_many(&self, n: u32) -> Result, AcquireError> { + #[cfg(all(tokio_unstable, feature = "tracing"))] + trace::async_op( + || self.ll_sem.acquire(n), + self.resource_span.clone(), + "Semaphore::acquire_many", + "poll", + true, + ) + .await?; + + #[cfg(not(all(tokio_unstable, feature = "tracing")))] self.ll_sem.acquire(n).await?; + Ok(SemaphorePermit { sem: self, permits: n, @@ -350,7 +408,18 @@ impl Semaphore { /// [`AcquireError`]: crate::sync::AcquireError /// [`OwnedSemaphorePermit`]: crate::sync::OwnedSemaphorePermit pub async fn acquire_owned(self: Arc) -> Result { - self.ll_sem.acquire(1).await?; + #[cfg(all(tokio_unstable, feature = "tracing"))] + let inner = trace::async_op( + || self.ll_sem.acquire(1), + self.resource_span.clone(), + "Semaphore::acquire_owned", + "poll", + true, + ); + #[cfg(not(all(tokio_unstable, feature = "tracing")))] + let inner = self.ll_sem.acquire(1); + + inner.await?; Ok(OwnedSemaphorePermit { sem: self, permits: 1, @@ -403,7 +472,18 @@ impl Semaphore { self: Arc, n: u32, ) -> Result { - self.ll_sem.acquire(n).await?; + #[cfg(all(tokio_unstable, feature = "tracing"))] + let inner = trace::async_op( + || self.ll_sem.acquire(n), + self.resource_span.clone(), + "Semaphore::acquire_many_owned", + "poll", + true, + ); + #[cfg(not(all(tokio_unstable, feature = "tracing")))] + let inner = self.ll_sem.acquire(n); + + inner.await?; Ok(OwnedSemaphorePermit { sem: self, permits: n, diff --git a/tokio/src/sync/watch.rs b/tokio/src/sync/watch.rs index 7e45c116c82..5673e0fca78 100644 --- a/tokio/src/sync/watch.rs +++ b/tokio/src/sync/watch.rs @@ -91,6 +91,23 @@ pub struct Sender { /// Outstanding borrows hold a read lock on the inner value. This means that /// long lived borrows could cause the produce half to block. It is recommended /// to keep the borrow as short lived as possible. +/// +/// The priority policy of the lock is dependent on the underlying lock +/// implementation, and this type does not guarantee that any particular policy +/// will be used. In particular, a producer which is waiting to acquire the lock +/// in `send` might or might not block concurrent calls to `borrow`, e.g.: +/// +///

<details><summary>Potential deadlock example</summary>
+///
+/// ```text
+/// // Task 1 (on thread A)    |  // Task 2 (on thread B)
+/// let _ref1 = rx.borrow();   |
+///                            |  // will block
+///                            |  let _ = tx.send(());
+/// // may deadlock            |
+/// let _ref2 = rx.borrow();   |
+/// ```
+///
+/// </details>
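A hedged sketch of the guidance above: copy the value out in a single statement so the `Ref` guard is dropped immediately, leaving nothing for a concurrent `send` to block on (the values are illustrative):

```rust
use tokio::sync::watch;

#[tokio::main]
async fn main() {
    let (tx, rx) = watch::channel(0u32);

    // Dereference and copy inside one statement; the read guard is dropped
    // right away instead of being held across later work.
    let snapshot = *rx.borrow();
    assert_eq!(snapshot, 0);

    // With no outstanding borrow, the producer cannot be blocked here.
    tx.send(1).unwrap();
    assert_eq!(*rx.borrow(), 1);
}
```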
#[derive(Debug)] pub struct Ref<'a, T> { inner: RwLockReadGuard<'a, T>, @@ -285,6 +302,23 @@ impl Receiver { /// could cause the send half to block. It is recommended to keep the borrow /// as short lived as possible. /// + /// The priority policy of the lock is dependent on the underlying lock + /// implementation, and this type does not guarantee that any particular policy + /// will be used. In particular, a producer which is waiting to acquire the lock + /// in `send` might or might not block concurrent calls to `borrow`, e.g.: + /// + ///
Potential deadlock example + /// + /// ```text + /// // Task 1 (on thread A) | // Task 2 (on thread B) + /// let _ref1 = rx.borrow(); | + /// | // will block + /// | let _ = tx.send(()); + /// // may deadlock | + /// let _ref2 = rx.borrow(); | + /// ``` + ///
+ /// /// [`changed`]: Receiver::changed /// /// # Examples @@ -311,6 +345,23 @@ impl Receiver { /// could cause the send half to block. It is recommended to keep the borrow /// as short lived as possible. /// + /// The priority policy of the lock is dependent on the underlying lock + /// implementation, and this type does not guarantee that any particular policy + /// will be used. In particular, a producer which is waiting to acquire the lock + /// in `send` might or might not block concurrent calls to `borrow`, e.g.: + /// + ///
Potential deadlock example + /// + /// ```text + /// // Task 1 (on thread A) | // Task 2 (on thread B) + /// let _ref1 = rx1.borrow_and_update(); | + /// | // will block + /// | let _ = tx.send(()); + /// // may deadlock | + /// let _ref2 = rx2.borrow_and_update(); | + /// ``` + ///
+ /// /// [`changed`]: Receiver::changed pub fn borrow_and_update(&mut self) -> Ref<'_, T> { let inner = self.shared.value.read().unwrap(); @@ -318,6 +369,48 @@ impl Receiver { Ref { inner } } + /// Checks if this channel contains a message that this receiver has not yet + /// seen. The new value is not marked as seen. + /// + /// Although this method is called `has_changed`, it does not check new + /// messages for equality, so this call will return true even if the new + /// message is equal to the old message. + /// + /// Returns an error if the channel has been closed. + /// # Examples + /// + /// ``` + /// use tokio::sync::watch; + /// + /// #[tokio::main] + /// async fn main() { + /// let (tx, mut rx) = watch::channel("hello"); + /// + /// tx.send("goodbye").unwrap(); + /// + /// assert!(rx.has_changed().unwrap()); + /// assert_eq!(*rx.borrow_and_update(), "goodbye"); + /// + /// // The value has been marked as seen + /// assert!(!rx.has_changed().unwrap()); + /// + /// drop(tx); + /// // The `tx` handle has been dropped + /// assert!(rx.has_changed().is_err()); + /// } + /// ``` + pub fn has_changed(&self) -> Result { + // Load the version from the state + let state = self.shared.state.load(); + if state.is_closed() { + // The sender has dropped. + return Err(error::RecvError(())); + } + let new_version = state.version(); + + Ok(self.version != new_version) + } + /// Waits for a change notification, then marks the newest value as seen. /// /// If the newest value in the channel has not yet been marked seen when diff --git a/tokio/src/task/blocking.rs b/tokio/src/task/blocking.rs index 825f25f8c66..5fe358f3e50 100644 --- a/tokio/src/task/blocking.rs +++ b/tokio/src/task/blocking.rs @@ -188,7 +188,7 @@ cfg_rt! { /// worker.await.unwrap(); /// # } /// ``` - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub fn spawn_blocking(f: F) -> JoinHandle where F: FnOnce() -> R + Send + 'static, diff --git a/tokio/src/task/builder.rs b/tokio/src/task/builder.rs index f991fc65e2a..0a7fe3c371a 100644 --- a/tokio/src/task/builder.rs +++ b/tokio/src/task/builder.rs @@ -4,6 +4,10 @@ use std::future::Future; /// Factory which is used to configure the properties of a new task. /// +/// **Note**: This is an [unstable API][unstable]. The public API of this type +/// may break in 1.x releases. See [the documentation on unstable +/// features][unstable] for details. +/// /// Methods can be chained in order to configure it. /// /// Currently, there is only one configuration option: @@ -45,7 +49,13 @@ use std::future::Future; /// } /// } /// ``` +/// [unstable API]: crate#unstable-features +/// [`name`]: Builder::name +/// [`spawn_local`]: Builder::spawn_local +/// [`spawn`]: Builder::spawn +/// [`spawn_blocking`]: Builder::spawn_blocking #[derive(Default, Debug)] +#[cfg_attr(docsrs, doc(cfg(all(tokio_unstable, feature = "tracing"))))] pub struct Builder<'a> { name: Option<&'a str>, } @@ -65,7 +75,7 @@ impl<'a> Builder<'a> { /// /// See [`task::spawn`](crate::task::spawn) for /// more details. - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub fn spawn(self, future: Fut) -> JoinHandle where Fut: Future + Send + 'static, @@ -78,7 +88,7 @@ impl<'a> Builder<'a> { /// /// See [`task::spawn_local`](crate::task::spawn_local) /// for more details. 
- #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub fn spawn_local(self, future: Fut) -> JoinHandle where Fut: Future + 'static, @@ -91,7 +101,7 @@ impl<'a> Builder<'a> { /// /// See [`task::spawn_blocking`](crate::task::spawn_blocking) /// for more details. - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub fn spawn_blocking(self, function: Function) -> JoinHandle where Function: FnOnce() -> Output + Send + 'static, diff --git a/tokio/src/task/local.rs b/tokio/src/task/local.rs index 4a5d313c6e4..1beee6891ba 100644 --- a/tokio/src/task/local.rs +++ b/tokio/src/task/local.rs @@ -286,7 +286,7 @@ cfg_rt! { /// }).await; /// } /// ``` - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub fn spawn_local(future: F) -> JoinHandle where F: Future + 'static, @@ -377,7 +377,7 @@ impl LocalSet { /// } /// ``` /// [`spawn_local`]: fn@spawn_local - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub fn spawn_local(&self, future: F) -> JoinHandle where F: Future + 'static, diff --git a/tokio/src/task/spawn.rs b/tokio/src/task/spawn.rs index 065d38f54b5..a9d736674c0 100644 --- a/tokio/src/task/spawn.rs +++ b/tokio/src/task/spawn.rs @@ -121,7 +121,7 @@ cfg_rt! { /// ```text /// error[E0391]: cycle detected when processing `main` /// ``` - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub fn spawn(future: T) -> JoinHandle where T: Future + Send + 'static, @@ -136,7 +136,7 @@ cfg_rt! { } } - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub(super) fn spawn_inner(future: T, name: Option<&str>) -> JoinHandle where T: Future + Send + 'static, diff --git a/tokio/src/task/task_local.rs b/tokio/src/task/task_local.rs index b6e7df43e18..949bbca3eee 100644 --- a/tokio/src/task/task_local.rs +++ b/tokio/src/task/task_local.rs @@ -258,14 +258,14 @@ impl TaskLocalFuture { } } - let mut project = self.project(); + let project = self.project(); let val = project.slot.take(); let prev = project.local.inner.with(|c| c.replace(val)); let _guard = Guard { prev, - slot: &mut project.slot, + slot: project.slot, local: *project.local, }; diff --git a/tokio/src/task/yield_now.rs b/tokio/src/task/yield_now.rs index 5eeb46a8983..148e3dc0c87 100644 --- a/tokio/src/task/yield_now.rs +++ b/tokio/src/task/yield_now.rs @@ -33,7 +33,6 @@ use std::task::{Context, Poll}; /// which order the runtime polls your tasks in. /// /// [`tokio::select!`]: macro@crate::select -#[must_use = "yield_now does nothing unless polled/`await`-ed"] #[cfg_attr(docsrs, doc(cfg(feature = "rt")))] pub async fn yield_now() { /// Yield implementation diff --git a/tokio/src/time/driver/entry.rs b/tokio/src/time/driver/entry.rs index 9e9f0dc8592..1beee57604b 100644 --- a/tokio/src/time/driver/entry.rs +++ b/tokio/src/time/driver/entry.rs @@ -5,9 +5,9 @@ //! //! # Ground rules //! -//! The heart of the timer implementation here is the `TimerShared` structure, -//! shared between the `TimerEntry` and the driver. Generally, we permit access -//! to `TimerShared` ONLY via either 1) a mutable reference to `TimerEntry` or +//! The heart of the timer implementation here is the [`TimerShared`] structure, +//! shared between the [`TimerEntry`] and the driver. Generally, we permit access +//! to [`TimerShared`] ONLY via either 1) a mutable reference to [`TimerEntry`] or //! 2) a held driver lock. //! //! It follows from this that any changes made while holding BOTH 1 and 2 will @@ -49,8 +49,10 @@ //! There is of course a race condition between timer reset and timer //! 
expiration. If the driver fails to observe the updated expiration time, it //! could trigger expiration of the timer too early. However, because -//! `mark_pending` performs a compare-and-swap, it will identify this race and +//! [`mark_pending`][mark_pending] performs a compare-and-swap, it will identify this race and //! refuse to mark the timer as pending. +//! +//! [mark_pending]: TimerHandle::mark_pending use crate::loom::cell::UnsafeCell; use crate::loom::sync::atomic::AtomicU64; diff --git a/tokio/src/time/driver/handle.rs b/tokio/src/time/driver/handle.rs index 7aaf65a79e4..b61c0476e15 100644 --- a/tokio/src/time/driver/handle.rs +++ b/tokio/src/time/driver/handle.rs @@ -40,16 +40,19 @@ cfg_rt! { /// /// This function panics if there is no current timer set. /// - /// It can be triggered when `Builder::enable_time()` or - /// `Builder::enable_all()` are not included in the builder. + /// It can be triggered when [`Builder::enable_time`] or + /// [`Builder::enable_all`] are not included in the builder. /// /// It can also panic whenever a timer is created outside of a - /// Tokio runtime. That is why `rt.block_on(delay_for(...))` will panic, + /// Tokio runtime. That is why `rt.block_on(sleep(...))` will panic, /// since the function is executed outside of the runtime. - /// Whereas `rt.block_on(async {delay_for(...).await})` doesn't panic. + /// Whereas `rt.block_on(async {sleep(...).await})` doesn't panic. /// And this is because wrapping the function on an async makes it lazy, /// and so gets executed inside the runtime successfully without /// panicking. + /// + /// [`Builder::enable_time`]: crate::runtime::Builder::enable_time + /// [`Builder::enable_all`]: crate::runtime::Builder::enable_all pub(crate) fn current() -> Self { crate::runtime::context::time_handle() .expect("A Tokio 1.x context was found, but timers are disabled. Call `enable_time` on the runtime builder to enable timers.") @@ -65,16 +68,19 @@ cfg_not_rt! { /// /// This function panics if there is no current timer set. /// - /// It can be triggered when `Builder::enable_time()` or - /// `Builder::enable_all()` are not included in the builder. + /// It can be triggered when [`Builder::enable_time`] or + /// [`Builder::enable_all`] are not included in the builder. /// - /// It can also panic whenever a timer is created outside of a Tokio - /// runtime. That is why `rt.block_on(delay_for(...))` will panic, + /// It can also panic whenever a timer is created outside of a + /// Tokio runtime. That is why `rt.block_on(sleep(...))` will panic, /// since the function is executed outside of the runtime. - /// Whereas `rt.block_on(async {delay_for(...).await})` doesn't - /// panic. And this is because wrapping the function on an async makes it - /// lazy, and so outside executed inside the runtime successfully without + /// Whereas `rt.block_on(async {sleep(...).await})` doesn't panic. + /// And this is because wrapping the function on an async makes it lazy, + /// and so gets executed inside the runtime successfully without /// panicking. 
+ /// + /// [`Builder::enable_time`]: crate::runtime::Builder::enable_time + /// [`Builder::enable_all`]: crate::runtime::Builder::enable_all pub(crate) fn current() -> Self { panic!("{}", crate::util::error::CONTEXT_MISSING_ERROR) } diff --git a/tokio/src/time/driver/sleep.rs b/tokio/src/time/driver/sleep.rs index 43ff694ffc6..7f27ef201f7 100644 --- a/tokio/src/time/driver/sleep.rs +++ b/tokio/src/time/driver/sleep.rs @@ -1,3 +1,5 @@ +#[cfg(all(tokio_unstable, feature = "tracing"))] +use crate::time::driver::ClockTime; use crate::time::driver::{Handle, TimerEntry}; use crate::time::{error::Error, Duration, Instant}; use crate::util::trace; @@ -8,10 +10,6 @@ use std::panic::Location; use std::pin::Pin; use std::task::{self, Poll}; -cfg_trace! { - use crate::time::driver::ClockTime; -} - /// Waits until `deadline` is reached. /// /// No work is performed while awaiting on the sleep future to complete. `Sleep` @@ -41,11 +39,28 @@ cfg_trace! { /// /// See the documentation for the [`Sleep`] type for more examples. /// +/// # Panics +/// +/// This function panics if there is no current timer set. +/// +/// It can be triggered when [`Builder::enable_time`] or +/// [`Builder::enable_all`] are not included in the builder. +/// +/// It can also panic whenever a timer is created outside of a +/// Tokio runtime. That is why `rt.block_on(sleep(...))` will panic, +/// since the function is executed outside of the runtime. +/// Whereas `rt.block_on(async {sleep(...).await})` doesn't panic. +/// And this is because wrapping the function on an async makes it lazy, +/// and so gets executed inside the runtime successfully without +/// panicking. +/// /// [`Sleep`]: struct@crate::time::Sleep /// [`interval`]: crate::time::interval() +/// [`Builder::enable_time`]: crate::runtime::Builder::enable_time +/// [`Builder::enable_all`]: crate::runtime::Builder::enable_all // Alias for old name in 0.x #[cfg_attr(docsrs, doc(alias = "delay_until"))] -#[cfg_attr(tokio_track_caller, track_caller)] +#[track_caller] pub fn sleep_until(deadline: Instant) -> Sleep { return Sleep::new_timeout(deadline, trace::caller_location()); } @@ -84,12 +99,29 @@ pub fn sleep_until(deadline: Instant) -> Sleep { /// /// See the documentation for the [`Sleep`] type for more examples. /// +/// # Panics +/// +/// This function panics if there is no current timer set. +/// +/// It can be triggered when [`Builder::enable_time`] or +/// [`Builder::enable_all`] are not included in the builder. +/// +/// It can also panic whenever a timer is created outside of a +/// Tokio runtime. That is why `rt.block_on(sleep(...))` will panic, +/// since the function is executed outside of the runtime. +/// Whereas `rt.block_on(async {sleep(...).await})` doesn't panic. +/// And this is because wrapping the function on an async makes it lazy, +/// and so gets executed inside the runtime successfully without +/// panicking. +/// /// [`Sleep`]: struct@crate::time::Sleep /// [`interval`]: crate::time::interval() +/// [`Builder::enable_time`]: crate::runtime::Builder::enable_time +/// [`Builder::enable_all`]: crate::runtime::Builder::enable_all // Alias for old name in 0.x #[cfg_attr(docsrs, doc(alias = "delay_for"))] #[cfg_attr(docsrs, doc(alias = "wait"))] -#[cfg_attr(tokio_track_caller, track_caller)] +#[track_caller] pub fn sleep(duration: Duration) -> Sleep { let location = trace::caller_location(); @@ -204,8 +236,7 @@ cfg_trace! 
{ #[derive(Debug)] struct Inner { deadline: Instant, - resource_span: tracing::Span, - async_op_span: tracing::Span, + ctx: trace::AsyncOpTracingCtx, time_source: ClockTime, } } @@ -232,10 +263,7 @@ impl Sleep { let deadline_tick = time_source.deadline_to_tick(deadline); let duration = deadline_tick.checked_sub(time_source.now()).unwrap_or(0); - #[cfg(tokio_track_caller)] - let location = location.expect("should have location if tracking caller"); - - #[cfg(tokio_track_caller)] + let location = location.expect("should have location if tracing"); let resource_span = tracing::trace_span!( "runtime.resource", concrete_type = "Sleep", @@ -245,25 +273,29 @@ impl Sleep { loc.col = location.column(), ); - #[cfg(not(tokio_track_caller))] - let resource_span = - tracing::trace_span!("runtime.resource", concrete_type = "Sleep", kind = "timer"); + let async_op_span = resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + duration = duration, + duration.unit = "ms", + duration.op = "override", + ); - let async_op_span = - tracing::trace_span!("runtime.resource.async_op", source = "Sleep::new_timeout"); + tracing::trace_span!("runtime.resource.async_op", source = "Sleep::new_timeout") + }); - tracing::trace!( - target: "runtime::resource::state_update", - parent: resource_span.id(), - duration = duration, - duration.unit = "ms", - duration.op = "override", - ); + let async_op_poll_span = + async_op_span.in_scope(|| tracing::trace_span!("runtime.resource.async_op.poll")); + + let ctx = trace::AsyncOpTracingCtx { + async_op_span, + async_op_poll_span, + resource_span, + }; Inner { deadline, - resource_span, - async_op_span, + ctx, time_source, } }; @@ -330,54 +362,52 @@ impl Sleep { #[cfg(all(tokio_unstable, feature = "tracing"))] { - me.inner.async_op_span = + let _resource_enter = me.inner.ctx.resource_span.enter(); + me.inner.ctx.async_op_span = tracing::trace_span!("runtime.resource.async_op", source = "Sleep::reset"); + let _async_op_enter = me.inner.ctx.async_op_span.enter(); + + me.inner.ctx.async_op_poll_span = + tracing::trace_span!("runtime.resource.async_op.poll"); + + let duration = { + let now = me.inner.time_source.now(); + let deadline_tick = me.inner.time_source.deadline_to_tick(deadline); + deadline_tick.checked_sub(now).unwrap_or(0) + }; tracing::trace!( target: "runtime::resource::state_update", - parent: me.inner.resource_span.id(), - duration = { - let now = me.inner.time_source.now(); - let deadline_tick = me.inner.time_source.deadline_to_tick(deadline); - deadline_tick.checked_sub(now).unwrap_or(0) - }, + duration = duration, duration.unit = "ms", duration.op = "override", ); } } - cfg_not_trace! { - fn poll_elapsed(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { - let me = self.project(); + fn poll_elapsed(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { + let me = self.project(); - // Keep track of task budget - let coop = ready!(crate::coop::poll_proceed(cx)); + // Keep track of task budget + #[cfg(all(tokio_unstable, feature = "tracing"))] + let coop = ready!(trace_poll_op!( + "poll_elapsed", + crate::coop::poll_proceed(cx), + )); - me.entry.poll_elapsed(cx).map(move |r| { - coop.made_progress(); - r - }) - } - } + #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] + let coop = ready!(crate::coop::poll_proceed(cx)); - cfg_trace! 
{ - fn poll_elapsed(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { - let me = self.project(); - // Keep track of task budget - let coop = ready!(trace_poll_op!( - "poll_elapsed", - crate::coop::poll_proceed(cx), - me.inner.resource_span.id(), - )); - - let result = me.entry.poll_elapsed(cx).map(move |r| { - coop.made_progress(); - r - }); + let result = me.entry.poll_elapsed(cx).map(move |r| { + coop.made_progress(); + r + }); - trace_poll_op!("poll_elapsed", result, me.inner.resource_span.id()) - } + #[cfg(all(tokio_unstable, feature = "tracing"))] + return trace_poll_op!("poll_elapsed", result); + + #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] + return result; } } @@ -395,8 +425,11 @@ impl Future for Sleep { // really do much better if we passed the error onwards. fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { #[cfg(all(tokio_unstable, feature = "tracing"))] - let _span = self.inner.async_op_span.clone().entered(); - + let _res_span = self.inner.ctx.resource_span.clone().entered(); + #[cfg(all(tokio_unstable, feature = "tracing"))] + let _ao_span = self.inner.ctx.async_op_span.clone().entered(); + #[cfg(all(tokio_unstable, feature = "tracing"))] + let _ao_poll_span = self.inner.ctx.async_op_poll_span.clone().entered(); match ready!(self.as_mut().poll_elapsed(cx)) { Ok(()) => Poll::Ready(()), Err(e) => panic!("timer error: {}", e), diff --git a/tokio/src/time/driver/wheel/level.rs b/tokio/src/time/driver/wheel/level.rs index 34d31766ce1..7fd2266bb8e 100644 --- a/tokio/src/time/driver/wheel/level.rs +++ b/tokio/src/time/driver/wheel/level.rs @@ -53,7 +53,7 @@ impl Level { // However, that is only supported for arrays of size // 32 or fewer. So in our case we have to explicitly // invoke the constructor for each array element. - let ctor = || EntryList::default(); + let ctor = EntryList::default; Level { level, diff --git a/tokio/src/time/interval.rs b/tokio/src/time/interval.rs index 7e07e51267e..8ecb15b389e 100644 --- a/tokio/src/time/interval.rs +++ b/tokio/src/time/interval.rs @@ -1,6 +1,8 @@ use crate::future::poll_fn; use crate::time::{sleep_until, Duration, Instant, Sleep}; +use crate::util::trace; +use std::panic::Location; use std::pin::Pin; use std::task::{Context, Poll}; use std::{convert::TryInto, future::Future}; @@ -68,10 +70,10 @@ use std::{convert::TryInto, future::Future}; /// /// [`sleep`]: crate::time::sleep() /// [`.tick().await`]: Interval::tick +#[track_caller] pub fn interval(period: Duration) -> Interval { assert!(period > Duration::new(0, 0), "`period` must be non-zero."); - - interval_at(Instant::now(), period) + internal_interval_at(Instant::now(), period, trace::caller_location()) } /// Creates new [`Interval`] that yields with interval of `period` with the @@ -103,13 +105,44 @@ pub fn interval(period: Duration) -> Interval { /// // approximately 70ms have elapsed. 
/// } /// ``` +#[track_caller] pub fn interval_at(start: Instant, period: Duration) -> Interval { assert!(period > Duration::new(0, 0), "`period` must be non-zero."); + internal_interval_at(start, period, trace::caller_location()) +} + +#[cfg_attr(not(all(tokio_unstable, feature = "tracing")), allow(unused_variables))] +fn internal_interval_at( + start: Instant, + period: Duration, + location: Option<&'static Location<'static>>, +) -> Interval { + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = { + let location = location.expect("should have location if tracing"); + + tracing::trace_span!( + "runtime.resource", + concrete_type = "Interval", + kind = "timer", + loc.file = location.file(), + loc.line = location.line(), + loc.col = location.column(), + ) + }; + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let delay = resource_span.in_scope(|| Box::pin(sleep_until(start))); + + #[cfg(not(all(tokio_unstable, feature = "tracing")))] + let delay = Box::pin(sleep_until(start)); Interval { - delay: Box::pin(sleep_until(start)), + delay, period, missed_tick_behavior: Default::default(), + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } @@ -124,7 +157,7 @@ pub fn interval_at(start: Instant, period: Duration) -> Interval { /// /// #[tokio::main] /// async fn main() { -/// // ticks every 2 seconds +/// // ticks every 2 milliseconds /// let mut interval = time::interval(Duration::from_millis(2)); /// for _ in 0..5 { /// interval.tick().await; @@ -362,6 +395,9 @@ pub struct Interval { /// The strategy `Interval` should use when a tick is missed. missed_tick_behavior: MissedTickBehavior, + + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, } impl Interval { @@ -391,7 +427,20 @@ impl Interval { /// } /// ``` pub async fn tick(&mut self) -> Instant { - poll_fn(|cx| self.poll_tick(cx)).await + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = self.resource_span.clone(); + #[cfg(all(tokio_unstable, feature = "tracing"))] + let instant = trace::async_op( + || poll_fn(|cx| self.poll_tick(cx)), + resource_span, + "Interval::tick", + "poll_tick", + false, + ); + #[cfg(not(all(tokio_unstable, feature = "tracing")))] + let instant = poll_fn(|cx| self.poll_tick(cx)); + + instant.await } /// Polls for the next instant in the interval to be reached. @@ -435,6 +484,36 @@ impl Interval { Poll::Ready(timeout) } + /// Resets the interval to complete one period after the current time. + /// + /// This method ignores [`MissedTickBehavior`] strategy. + /// + /// # Examples + /// + /// ``` + /// use tokio::time; + /// + /// use std::time::Duration; + /// + /// #[tokio::main] + /// async fn main() { + /// let mut interval = time::interval(Duration::from_millis(100)); + /// + /// interval.tick().await; + /// + /// time::sleep(Duration::from_millis(50)).await; + /// interval.reset(); + /// + /// interval.tick().await; + /// interval.tick().await; + /// + /// // approximately 250ms have elapsed. + /// } + /// ``` + pub fn reset(&mut self) { + self.delay.as_mut().reset(Instant::now() + self.period); + } + /// Returns the [`MissedTickBehavior`] strategy currently being used. pub fn missed_tick_behavior(&self) -> MissedTickBehavior { self.missed_tick_behavior diff --git a/tokio/src/time/timeout.rs b/tokio/src/time/timeout.rs index 6725caa09f8..4a93089e8e8 100644 --- a/tokio/src/time/timeout.rs +++ b/tokio/src/time/timeout.rs @@ -5,6 +5,7 @@ //! 
[`Timeout`]: struct@Timeout

use crate::{
+    coop,
    time::{error::Elapsed, sleep_until, Duration, Instant, Sleep},
    util::trace,
};
@@ -48,7 +49,25 @@ use std::task::{self, Poll};
/// }
/// # }
/// ```
-#[cfg_attr(tokio_track_caller, track_caller)]
+///
+/// # Panics
+///
+/// This function panics if there is no current timer set.
+///
+/// It can be triggered when [`Builder::enable_time`] or
+/// [`Builder::enable_all`] are not included in the builder.
+///
+/// It can also panic whenever a timer is created outside of a
+/// Tokio runtime. That is why `rt.block_on(sleep(...))` will panic,
+/// since the function is executed outside of the runtime, whereas
+/// `rt.block_on(async { sleep(...).await })` doesn't panic, because
+/// wrapping the call in an async block makes it lazy, so it is
+/// evaluated inside the runtime without panicking.
+///
+/// [`Builder::enable_time`]: crate::runtime::Builder::enable_time
+/// [`Builder::enable_all`]: crate::runtime::Builder::enable_all
+#[track_caller]
pub fn timeout<T>(duration: Duration, future: T) -> Timeout<T>
where
    T: Future,
@@ -151,15 +170,33 @@ where
    fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
        let me = self.project();

+        let had_budget_before = coop::has_budget_remaining();
+
        // First, try polling the future
        if let Poll::Ready(v) = me.value.poll(cx) {
            return Poll::Ready(Ok(v));
        }

-        // Now check the timer
-        match me.delay.poll(cx) {
-            Poll::Ready(()) => Poll::Ready(Err(Elapsed::new())),
-            Poll::Pending => Poll::Pending,
+        let has_budget_now = coop::has_budget_remaining();
+
+        let delay = me.delay;
+
+        let poll_delay = || -> Poll<Self::Output> {
+            match delay.poll(cx) {
+                Poll::Ready(()) => Poll::Ready(Err(Elapsed::new())),
+                Poll::Pending => Poll::Pending,
+            }
+        };
+
+        if let (true, false) = (had_budget_before, has_budget_now) {
+            // if it is the underlying future that exhausted the budget, we poll
+            // the `delay` with an unconstrained one. This prevents pathological
+            // cases where the underlying future always exhausts the budget and
+            // we never get a chance to evaluate whether the timeout was hit or
+            // not.
+            coop::with_unconstrained(poll_delay)
+        } else {
+            poll_delay()
        }
    }
}
diff --git a/tokio/src/runtime/thread_pool/atomic_cell.rs b/tokio/src/util/atomic_cell.rs
similarity index 77%
rename from tokio/src/runtime/thread_pool/atomic_cell.rs
rename to tokio/src/util/atomic_cell.rs
index 98847e6ffa1..07e37303a7b 100644
--- a/tokio/src/runtime/thread_pool/atomic_cell.rs
+++ b/tokio/src/util/atomic_cell.rs
@@ -3,7 +3,7 @@ use crate::loom::sync::atomic::AtomicPtr;
use std::ptr;
use std::sync::atomic::Ordering::AcqRel;

-pub(super) struct AtomicCell<T> {
+pub(crate) struct AtomicCell<T> {
    data: AtomicPtr<T>,
}

@@ -11,22 +11,22 @@ unsafe impl<T: Send> Send for AtomicCell<T> {}
unsafe impl<T: Send> Sync for AtomicCell<T> {}

impl<T> AtomicCell<T> {
-    pub(super) fn new(data: Option<Box<T>>) -> AtomicCell<T> {
+    pub(crate) fn new(data: Option<Box<T>>) -> AtomicCell<T> {
        AtomicCell {
            data: AtomicPtr::new(to_raw(data)),
        }
    }

-    pub(super) fn swap(&self, val: Option<Box<T>>) -> Option<Box<T>> {
+    pub(crate) fn swap(&self, val: Option<Box<T>>) -> Option<Box<T>> {
        let old = self.data.swap(to_raw(val), AcqRel);
        from_raw(old)
    }

-    pub(super) fn set(&self, val: Box<T>) {
+    pub(crate) fn set(&self, val: Box<T>) {
        let _ = self.swap(Some(val));
    }

-    pub(super) fn take(&self) -> Option<Box<T>> {
+    pub(crate) fn take(&self) -> Option<Box<T>> {
        self.swap(None)
    }
}
diff --git a/tokio/src/util/mod.rs b/tokio/src/util/mod.rs
index df30f2b86a9..f0a79a7cca9 100644
--- a/tokio/src/util/mod.rs
+++ b/tokio/src/util/mod.rs
@@ -3,6 +3,9 @@ cfg_io_driver! {
    pub(crate) mod slab;
}

+#[cfg(feature = "rt")]
+pub(crate) mod atomic_cell;
+
#[cfg(any(
    // io driver uses `WakeList` directly
    feature = "net",
diff --git a/tokio/src/util/trace.rs b/tokio/src/util/trace.rs
index e3c26f9d666..6080e2358ae 100644
--- a/tokio/src/util/trace.rs
+++ b/tokio/src/util/trace.rs
@@ -1,14 +1,18 @@
cfg_trace! {
    cfg_rt! {
+        use core::{
+            pin::Pin,
+            task::{Context, Poll},
+        };
+        use pin_project_lite::pin_project;
+        use std::future::Future;
        pub(crate) use tracing::instrument::Instrumented;

        #[inline]
-        #[cfg_attr(tokio_track_caller, track_caller)]
+        #[track_caller]
        pub(crate) fn task<F>(task: F, kind: &'static str, name: Option<&str>) -> Instrumented<F> {
            use tracing::instrument::Instrument;
-            #[cfg(tokio_track_caller)]
            let location = std::panic::Location::caller();
-            #[cfg(tokio_track_caller)]
            let span = tracing::trace_span!(
                target: "tokio::task",
                "runtime.spawn",
                %kind,
                task.name = %name.unwrap_or_default(),
                loc.file = location.file(),
                loc.line = location.line(),
                loc.col = location.column(),
            );
-            #[cfg(not(tokio_track_caller))]
-            let span = tracing::trace_span!(
-                target: "tokio::task",
-                "runtime.spawn",
-                %kind,
-                task.name = %name.unwrap_or_default(),
-            );
            task.instrument(span)
        }
+
+        pub(crate) fn async_op<P, F>(inner: P, resource_span: tracing::Span, source: &str, poll_op_name: &'static str, inherits_child_attrs: bool) -> InstrumentedAsyncOp<F>
+        where P: FnOnce() -> F {
+            resource_span.in_scope(|| {
+                let async_op_span = tracing::trace_span!("runtime.resource.async_op", source = source, inherits_child_attrs = inherits_child_attrs);
+                let enter = async_op_span.enter();
+                let async_op_poll_span = tracing::trace_span!("runtime.resource.async_op.poll");
+                let inner = inner();
+                drop(enter);
+                let tracing_ctx = AsyncOpTracingCtx {
+                    async_op_span,
+                    async_op_poll_span,
+                    resource_span: resource_span.clone(),
+                };
+                InstrumentedAsyncOp {
+                    inner,
+                    tracing_ctx,
+                    poll_op_name,
+                }
+            })
+        }
+
+        #[derive(Debug, Clone)]
+        pub(crate) struct AsyncOpTracingCtx {
+            pub(crate) async_op_span: tracing::Span,
+            pub(crate) async_op_poll_span: tracing::Span,
+            pub(crate) resource_span: tracing::Span,
+        }
+
+
+        pin_project! {
+            #[derive(Debug, Clone)]
+            pub(crate) struct InstrumentedAsyncOp<F> {
+                #[pin]
+                pub(crate) inner: F,
+                pub(crate) tracing_ctx: AsyncOpTracingCtx,
+                pub(crate) poll_op_name: &'static str
+            }
+        }
+
+        impl<F: Future> Future for InstrumentedAsyncOp<F> {
+            type Output = F::Output;
+
+            fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+                let this = self.project();
+                let poll_op_name = &*this.poll_op_name;
+                let _res_enter = this.tracing_ctx.resource_span.enter();
+                let _async_op_enter = this.tracing_ctx.async_op_span.enter();
+                let _async_op_poll_enter = this.tracing_ctx.async_op_poll_span.enter();
+                trace_poll_op!(poll_op_name, this.inner.poll(cx))
+            }
+        }
    }
}

cfg_time! {
-    #[cfg_attr(tokio_track_caller, track_caller)]
+    #[track_caller]
    pub(crate) fn caller_location() -> Option<&'static std::panic::Location<'static>> {
-        #[cfg(all(tokio_track_caller, tokio_unstable, feature = "tracing"))]
+        #[cfg(all(tokio_unstable, feature = "tracing"))]
        return Some(std::panic::Location::caller());
-        #[cfg(not(all(tokio_track_caller, tokio_unstable, feature = "tracing")))]
+        #[cfg(not(all(tokio_unstable, feature = "tracing")))]
        None
    }
}
diff --git a/tokio/tests/io_util_empty.rs b/tokio/tests/io_util_empty.rs
new file mode 100644
index 00000000000..e49cd17fcd5
--- /dev/null
+++ b/tokio/tests/io_util_empty.rs
@@ -0,0 +1,32 @@
+#![cfg(feature = "full")]
+use tokio::io::{AsyncBufReadExt, AsyncReadExt};
+
+#[tokio::test]
+async fn empty_read_is_cooperative() {
+    tokio::select! {
+        biased;
+
+        _ = async {
+            loop {
+                let mut buf = [0u8; 4096];
+                let _ = tokio::io::empty().read(&mut buf).await;
+            }
+        } => {},
+        _ = tokio::task::yield_now() => {}
+    }
+}
+
+#[tokio::test]
+async fn empty_buf_reads_are_cooperative() {
+    tokio::select! {
+        biased;
+
+        _ = async {
+            loop {
+                let mut buf = String::new();
+                let _ = tokio::io::empty().read_line(&mut buf).await;
+            }
+        } => {},
+        _ = tokio::task::yield_now() => {}
+    }
+}
diff --git a/tokio/tests/macros_test.rs b/tokio/tests/macros_test.rs
index bca2c9198a0..043ee6c78f6 100644
--- a/tokio/tests/macros_test.rs
+++ b/tokio/tests/macros_test.rs
@@ -46,3 +46,25 @@ pub async fn issue_4175_test() -> std::io::Result<()> {
    return Ok(());
    panic!();
}
+
+// https://github.com/tokio-rs/tokio/issues/4175
+pub mod clippy_semicolon_if_nothing_returned {
+    #![deny(clippy::semicolon_if_nothing_returned)]
+
+    #[tokio::main]
+    pub async fn local() {
+        let _x = ();
+    }
+    #[tokio::main]
+    pub async fn item() {
+        fn _f() {}
+    }
+    #[tokio::main]
+    pub async fn semi() {
+        panic!();
+    }
+    #[tokio::main]
+    pub async fn empty() {
+        // To trigger clippy::semicolon_if_nothing_returned lint, the block needs to contain newline.
+    }
+}
diff --git a/tokio/tests/net_types_unwind.rs b/tokio/tests/net_types_unwind.rs
new file mode 100644
index 00000000000..4eb4a87fd7a
--- /dev/null
+++ b/tokio/tests/net_types_unwind.rs
@@ -0,0 +1,32 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use std::panic::{RefUnwindSafe, UnwindSafe};
+
+#[test]
+fn net_types_are_unwind_safe() {
+    is_unwind_safe::<tokio::net::TcpListener>();
+    is_unwind_safe::<tokio::net::TcpSocket>();
+    is_unwind_safe::<tokio::net::TcpStream>();
+    is_unwind_safe::<tokio::net::UdpSocket>();
+}
+
+#[test]
+#[cfg(unix)]
+fn unix_net_types_are_unwind_safe() {
+    is_unwind_safe::<tokio::net::UnixDatagram>();
+    is_unwind_safe::<tokio::net::UnixListener>();
+    is_unwind_safe::<tokio::net::UnixStream>();
+}
+
+#[test]
+#[cfg(windows)]
+fn windows_net_types_are_unwind_safe() {
+    use tokio::net::windows::named_pipe::NamedPipeClient;
+    use tokio::net::windows::named_pipe::NamedPipeServer;
+
+    is_unwind_safe::<NamedPipeClient>();
+    is_unwind_safe::<NamedPipeServer>();
+}
+
+fn is_unwind_safe<T: UnwindSafe + RefUnwindSafe>() {}
diff --git a/tokio/tests/rt_basic.rs b/tokio/tests/rt_basic.rs
index 70056b16f01..149b3bfaad7 100644
--- a/tokio/tests/rt_basic.rs
+++ b/tokio/tests/rt_basic.rs
@@ -168,6 +168,35 @@ fn drop_tasks_in_context() {
    assert!(SUCCESS.load(Ordering::SeqCst));
}

+#[test]
+#[should_panic(expected = "boom")]
+fn wake_in_drop_after_panic() {
+    let (tx, rx) = oneshot::channel::<()>();
+
+    struct WakeOnDrop(Option<oneshot::Sender<()>>);
+
+    impl Drop for WakeOnDrop {
+        fn drop(&mut self) {
+            self.0.take().unwrap().send(()).unwrap();
+        }
+    }
+
+    let rt = rt();
+
+    rt.spawn(async move {
+        let _wake_on_drop = WakeOnDrop(Some(tx));
+        // wait forever
+        futures::future::pending::<()>().await;
+    });
+
+    let _join = rt.spawn(async move { rx.await });
+
+    rt.block_on(async {
+        tokio::task::yield_now().await;
+        panic!("boom");
+    });
+}
+
#[test]
#[should_panic(
    expected = "A Tokio 1.x context was found, but timers are disabled. Call `enable_time` on the runtime builder to enable timers."
diff --git a/tokio/tests/rt_handle_block_on.rs b/tokio/tests/rt_handle_block_on.rs
index 17878c8d239..5c1d533a010 100644
--- a/tokio/tests/rt_handle_block_on.rs
+++ b/tokio/tests/rt_handle_block_on.rs
@@ -135,7 +135,7 @@ rt_test!
{ let contents = Handle::current() .block_on(fs::read_to_string("Cargo.toml")) .unwrap(); - assert!(contents.contains("Cargo.toml")); + assert!(contents.contains("https://tokio.rs")); } #[test] diff --git a/tokio/tests/sync_watch.rs b/tokio/tests/sync_watch.rs index b7bbaf721c1..b982324262c 100644 --- a/tokio/tests/sync_watch.rs +++ b/tokio/tests/sync_watch.rs @@ -174,17 +174,24 @@ fn poll_close() { fn borrow_and_update() { let (tx, mut rx) = watch::channel("one"); + assert!(!rx.has_changed().unwrap()); + tx.send("two").unwrap(); + assert!(rx.has_changed().unwrap()); assert_ready!(spawn(rx.changed()).poll()).unwrap(); assert_pending!(spawn(rx.changed()).poll()); + assert!(!rx.has_changed().unwrap()); tx.send("three").unwrap(); + assert!(rx.has_changed().unwrap()); assert_eq!(*rx.borrow_and_update(), "three"); assert_pending!(spawn(rx.changed()).poll()); + assert!(!rx.has_changed().unwrap()); drop(tx); assert_eq!(*rx.borrow_and_update(), "three"); assert_ready!(spawn(rx.changed()).poll()).unwrap_err(); + assert!(rx.has_changed().is_err()); } #[test] diff --git a/tokio/tests/tcp_socket.rs b/tokio/tests/tcp_socket.rs index 9258864d416..3030416502a 100644 --- a/tokio/tests/tcp_socket.rs +++ b/tokio/tests/tcp_socket.rs @@ -1,6 +1,7 @@ #![warn(rust_2018_idioms)] #![cfg(feature = "full")] +use std::time::Duration; use tokio::net::TcpSocket; use tokio_test::assert_ok; @@ -58,3 +59,16 @@ async fn bind_before_connect() { // Accept let _ = assert_ok!(srv.accept().await); } + +#[tokio::test] +async fn basic_linger() { + // Create server + let addr = assert_ok!("127.0.0.1:0".parse()); + let srv = assert_ok!(TcpSocket::new_v4()); + assert_ok!(srv.bind(addr)); + + assert!(srv.linger().unwrap().is_none()); + + srv.set_linger(Some(Duration::new(0, 0))).unwrap(); + assert_eq!(srv.linger().unwrap(), Some(Duration::new(0, 0))); +} diff --git a/tokio/tests/time_interval.rs b/tokio/tests/time_interval.rs index 5f7bf55f254..186582e2e52 100644 --- a/tokio/tests/time_interval.rs +++ b/tokio/tests/time_interval.rs @@ -166,6 +166,42 @@ async fn skip() { check_interval_poll!(i, start, 1800); } +#[tokio::test(start_paused = true)] +async fn reset() { + let start = Instant::now(); + + // This is necessary because the timer is only so granular, and in order for + // all our ticks to resolve, the time needs to be 1ms ahead of what we + // expect, so that the runtime will see that it is time to resolve the timer + time::advance(ms(1)).await; + + let mut i = task::spawn(time::interval_at(start, ms(300))); + + check_interval_poll!(i, start, 0); + + time::advance(ms(100)).await; + check_interval_poll!(i, start); + + time::advance(ms(200)).await; + check_interval_poll!(i, start, 300); + + time::advance(ms(100)).await; + check_interval_poll!(i, start); + + i.reset(); + + time::advance(ms(250)).await; + check_interval_poll!(i, start); + + time::advance(ms(50)).await; + // We add one because when using `reset` method, `Interval` adds the + // `period` from `Instant::now()`, which will always be off by one + check_interval_poll!(i, start, 701); + + time::advance(ms(300)).await; + check_interval_poll!(i, start, 1001); +} + fn poll_next(interval: &mut task::Spawn) -> Poll { interval.enter(|cx, mut interval| interval.poll_tick(cx)) } diff --git a/tokio/tests/time_timeout.rs b/tokio/tests/time_timeout.rs index dbd80eb8a6a..a1ff51e7d27 100644 --- a/tokio/tests/time_timeout.rs +++ b/tokio/tests/time_timeout.rs @@ -135,3 +135,16 @@ async fn deadline_future_elapses() { fn ms(n: u64) -> Duration { Duration::from_millis(n) } + 
+#[tokio::test] +async fn timeout_is_not_exhausted_by_future() { + let fut = timeout(ms(1), async { + let mut buffer = [0u8; 1]; + loop { + use tokio::io::AsyncReadExt; + let _ = tokio::io::empty().read(&mut buffer).await; + } + }); + + assert!(fut.await.is_err()); +}
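
The timer instrumentation above attaches three nested spans to each resource: a `runtime.resource` span, a `runtime.resource.async_op` span created inside it, and a `runtime.resource.async_op.poll` span created inside that. The sketch below is not Tokio's internal code; it only illustrates the nesting that `AsyncOpTracingCtx` captures, using public `tracing` APIs, with a hypothetical `nested_spans_sketch` helper and the span names taken from the diff.

```rust
use tracing::trace_span;

// Illustrative only: the span hierarchy used by the timer instrumentation.
// `runtime.resource` owns `runtime.resource.async_op`, which owns
// `runtime.resource.async_op.poll`; each poll re-enters all three,
// outermost first.
fn nested_spans_sketch() {
    let resource_span = trace_span!("runtime.resource", concrete_type = "Sleep", kind = "timer");

    let async_op_span = resource_span
        .in_scope(|| trace_span!("runtime.resource.async_op", source = "Sleep::new_timeout"));

    let async_op_poll_span =
        async_op_span.in_scope(|| trace_span!("runtime.resource.async_op.poll"));

    // On every poll: enter resource, then async_op, then async_op.poll.
    let _res = resource_span.enter();
    let _op = async_op_span.enter();
    let _poll = async_op_poll_span.enter();
}
```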
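The new assertions in `sync_watch.rs` exercise `watch::Receiver::has_changed()`. As a usage sketch built only on the public `tokio::sync::watch` API: `has_changed()` reports, without awaiting, whether an unseen value is pending, and returns an error once the sender has been dropped.

```rust
use tokio::sync::watch;

#[tokio::main]
async fn main() {
    let (tx, mut rx) = watch::channel("idle");

    // No new value has been sent yet.
    assert!(!rx.has_changed().unwrap());

    tx.send("running").unwrap();

    // A value was sent that this receiver has not yet observed.
    assert!(rx.has_changed().unwrap());

    // `borrow_and_update` marks the current value as seen.
    assert_eq!(*rx.borrow_and_update(), "running");
    assert!(!rx.has_changed().unwrap());

    // Once the sender is dropped, `has_changed` reports an error.
    drop(tx);
    assert!(rx.has_changed().is_err());
}
```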
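Beyond the doc example added above, `Interval::reset()` is useful for "postpone on activity" patterns such as keepalives. The following is a minimal sketch, assuming the `reset` method introduced in this diff; it reschedules the next tick to one full `period` after the current instant, ignoring the configured `MissedTickBehavior`.

```rust
use tokio::time::{self, Duration};

#[tokio::main]
async fn main() {
    let mut keepalive = time::interval(Duration::from_millis(100));
    // The first tick completes immediately.
    keepalive.tick().await;

    // Some unrelated activity happens...
    time::sleep(Duration::from_millis(30)).await;

    // Push the next keepalive out to a full period from now.
    let before = time::Instant::now();
    keepalive.reset();
    keepalive.tick().await;

    // The tick after `reset()` fires roughly one period later.
    assert!(before.elapsed() >= Duration::from_millis(100));
}
```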