From 8b6542fc3e7784ea4f73cb439038ba1d9f9933d0 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Mon, 22 Nov 2021 06:13:14 +0900 Subject: [PATCH 01/59] chore: update nix to 0.23 (#4255) --- .cargo/audit.toml | 3 +++ tokio/Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.cargo/audit.toml b/.cargo/audit.toml index 4fd083d9481..d03b022ef30 100644 --- a/.cargo/audit.toml +++ b/.cargo/audit.toml @@ -4,4 +4,7 @@ ignore = [ # https://github.com/tokio-rs/tokio/issues/4177 "RUSTSEC-2020-0159", + # We depend on nix 0.22 only via mio-aio, a dev-dependency. + # https://github.com/tokio-rs/tokio/pull/4255#issuecomment-974786349 + "RUSTSEC-2021-0119", ] diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index 348ec46576a..5b18d409543 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -110,7 +110,7 @@ signal-hook-registry = { version = "1.1.1", optional = true } [target.'cfg(unix)'.dev-dependencies] libc = { version = "0.2.42" } -nix = { version = "0.22.0" } +nix = { version = "0.23" } [target.'cfg(windows)'.dependencies.winapi] version = "0.3.8" From fe770dc509b19c5159ece7e38f353c5e30df3d6c Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Mon, 22 Nov 2021 18:40:57 +0900 Subject: [PATCH 02/59] chore: fix newly added warnings (#4253) --- .github/workflows/ci.yml | 4 +--- benches/fs.rs | 2 +- examples/tinydb.rs | 2 +- tokio-stream/src/stream_ext/collect.rs | 8 ++++---- tokio/src/io/async_fd.rs | 2 +- tokio/src/io/util/buf_reader.rs | 3 +-- tokio/src/io/util/read_exact.rs | 4 ++-- tokio/src/task/task_local.rs | 4 ++-- tokio/src/task/yield_now.rs | 1 - tokio/src/time/driver/wheel/level.rs | 2 +- 10 files changed, 14 insertions(+), 18 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9fec5837e3c..4e07f18c049 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -267,8 +267,6 @@ jobs: - name: Install Rust run: rustup update stable - uses: Swatinem/rust-cache@v1 - - name: Install rustfmt - 
run: rustup component add rustfmt # Check fmt - name: "rustfmt --check" @@ -285,7 +283,7 @@ jobs: steps: - uses: actions/checkout@v2 - name: Install Rust - run: rustup update 1.52.1 && rustup default 1.52.1 + run: rustup update 1.56 && rustup default 1.56 - uses: Swatinem/rust-cache@v1 - name: Install clippy run: rustup component add clippy diff --git a/benches/fs.rs b/benches/fs.rs index 305668f9a54..026814ff468 100644 --- a/benches/fs.rs +++ b/benches/fs.rs @@ -21,7 +21,7 @@ fn rt() -> tokio::runtime::Runtime { const BLOCK_COUNT: usize = 1_000; const BUFFER_SIZE: usize = 4096; -const DEV_ZERO: &'static str = "/dev/zero"; +const DEV_ZERO: &str = "/dev/zero"; fn async_read_codec(b: &mut Bencher) { let rt = rt(); diff --git a/examples/tinydb.rs b/examples/tinydb.rs index 9da429ace69..5a1983df6b4 100644 --- a/examples/tinydb.rs +++ b/examples/tinydb.rs @@ -149,7 +149,7 @@ async fn main() -> Result<(), Box> { } fn handle_request(line: &str, db: &Arc) -> Response { - let request = match Request::parse(&line) { + let request = match Request::parse(line) { Ok(req) => req, Err(e) => return Response::Error { msg: e }, }; diff --git a/tokio-stream/src/stream_ext/collect.rs b/tokio-stream/src/stream_ext/collect.rs index a33a6d6692a..4b157a9aacc 100644 --- a/tokio-stream/src/stream_ext/collect.rs +++ b/tokio-stream/src/stream_ext/collect.rs @@ -66,17 +66,17 @@ where use Poll::Ready; loop { - let mut me = self.as_mut().project(); + let me = self.as_mut().project(); let item = match ready!(me.stream.poll_next(cx)) { Some(item) => item, None => { - return Ready(U::finalize(sealed::Internal, &mut me.collection)); + return Ready(U::finalize(sealed::Internal, me.collection)); } }; - if !U::extend(sealed::Internal, &mut me.collection, item) { - return Ready(U::finalize(sealed::Internal, &mut me.collection)); + if !U::extend(sealed::Internal, me.collection, item) { + return Ready(U::finalize(sealed::Internal, me.collection)); } } } diff --git a/tokio/src/io/async_fd.rs 
b/tokio/src/io/async_fd.rs index 9ec5b7f2387..e352843398a 100644 --- a/tokio/src/io/async_fd.rs +++ b/tokio/src/io/async_fd.rs @@ -598,7 +598,7 @@ impl<'a, Inner: AsRawFd> AsyncFdReadyMutGuard<'a, Inner> { &mut self, f: impl FnOnce(&mut AsyncFd) -> io::Result, ) -> Result, TryIoError> { - let result = f(&mut self.async_fd); + let result = f(self.async_fd); if let Err(e) = result.as_ref() { if e.kind() == io::ErrorKind::WouldBlock { diff --git a/tokio/src/io/util/buf_reader.rs b/tokio/src/io/util/buf_reader.rs index 7df610b143a..60879c0fdc2 100644 --- a/tokio/src/io/util/buf_reader.rs +++ b/tokio/src/io/util/buf_reader.rs @@ -204,7 +204,6 @@ impl AsyncSeek for BufReader { self.as_mut() .get_pin_mut() .start_seek(SeekFrom::Current(offset))?; - self.as_mut().get_pin_mut().poll_complete(cx)? } else { // seek backwards by our remainder, and then by the offset self.as_mut() @@ -221,8 +220,8 @@ impl AsyncSeek for BufReader { self.as_mut() .get_pin_mut() .start_seek(SeekFrom::Current(n))?; - self.as_mut().get_pin_mut().poll_complete(cx)? } + self.as_mut().get_pin_mut().poll_complete(cx)? } SeekState::PendingOverflowed(n) => { if self.as_mut().get_pin_mut().poll_complete(cx)?.is_pending() { diff --git a/tokio/src/io/util/read_exact.rs b/tokio/src/io/util/read_exact.rs index 1e8150eb20f..dbdd58bae99 100644 --- a/tokio/src/io/util/read_exact.rs +++ b/tokio/src/io/util/read_exact.rs @@ -51,13 +51,13 @@ where type Output = io::Result; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut me = self.project(); + let me = self.project(); loop { // if our buffer is empty, then we need to read some data to continue. 
let rem = me.buf.remaining(); if rem != 0 { - ready!(Pin::new(&mut *me.reader).poll_read(cx, &mut me.buf))?; + ready!(Pin::new(&mut *me.reader).poll_read(cx, me.buf))?; if me.buf.remaining() == rem { return Err(eof()).into(); } diff --git a/tokio/src/task/task_local.rs b/tokio/src/task/task_local.rs index b6e7df43e18..949bbca3eee 100644 --- a/tokio/src/task/task_local.rs +++ b/tokio/src/task/task_local.rs @@ -258,14 +258,14 @@ impl TaskLocalFuture { } } - let mut project = self.project(); + let project = self.project(); let val = project.slot.take(); let prev = project.local.inner.with(|c| c.replace(val)); let _guard = Guard { prev, - slot: &mut project.slot, + slot: project.slot, local: *project.local, }; diff --git a/tokio/src/task/yield_now.rs b/tokio/src/task/yield_now.rs index 5eeb46a8983..148e3dc0c87 100644 --- a/tokio/src/task/yield_now.rs +++ b/tokio/src/task/yield_now.rs @@ -33,7 +33,6 @@ use std::task::{Context, Poll}; /// which order the runtime polls your tasks in. /// /// [`tokio::select!`]: macro@crate::select -#[must_use = "yield_now does nothing unless polled/`await`-ed"] #[cfg_attr(docsrs, doc(cfg(feature = "rt")))] pub async fn yield_now() { /// Yield implementation diff --git a/tokio/src/time/driver/wheel/level.rs b/tokio/src/time/driver/wheel/level.rs index 34d31766ce1..7fd2266bb8e 100644 --- a/tokio/src/time/driver/wheel/level.rs +++ b/tokio/src/time/driver/wheel/level.rs @@ -53,7 +53,7 @@ impl Level { // However, that is only supported for arrays of size // 32 or fewer. So in our case we have to explicitly // invoke the constructor for each array element. 
- let ctor = || EntryList::default(); + let ctor = EntryList::default; Level { level, From 8943e8aeef0b33f371d6dc69f62b38da390b5d5f Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Mon, 22 Nov 2021 18:41:17 +0900 Subject: [PATCH 03/59] macros: address remainging clippy::semicolon_if_nothing_returned warning (#4252) --- tokio-macros/src/entry.rs | 10 +++++----- tokio/tests/macros_test.rs | 22 ++++++++++++++++++++++ 2 files changed, 27 insertions(+), 5 deletions(-) diff --git a/tokio-macros/src/entry.rs b/tokio-macros/src/entry.rs index 01f8ee4c1eb..5cb4a49b430 100644 --- a/tokio-macros/src/entry.rs +++ b/tokio-macros/src/entry.rs @@ -339,17 +339,17 @@ fn parse_knobs(mut input: syn::ItemFn, is_test: bool, config: FinalConfig) -> To let body = &input.block; let brace_token = input.block.brace_token; let (tail_return, tail_semicolon) = match body.stmts.last() { - Some(syn::Stmt::Semi(expr, _)) => match expr { - syn::Expr::Return(_) => (quote! { return }, quote! { ; }), - _ => match &input.sig.output { + Some(syn::Stmt::Semi(syn::Expr::Return(_), _)) => (quote! { return }, quote! { ; }), + Some(syn::Stmt::Semi(..)) | Some(syn::Stmt::Local(..)) | None => { + match &input.sig.output { syn::ReturnType::Type(_, ty) if matches!(&**ty, syn::Type::Tuple(ty) if ty.elems.is_empty()) => { (quote! {}, quote! { ; }) // unit } syn::ReturnType::Default => (quote! {}, quote! { ; }), // unit syn::ReturnType::Type(..) => (quote! {}, quote! {}), // ! or another - }, - }, + } + } _ => (quote! {}, quote! {}), }; input.block = syn::parse2(quote_spanned! 
{last_stmt_end_span=> diff --git a/tokio/tests/macros_test.rs b/tokio/tests/macros_test.rs index bca2c9198a0..043ee6c78f6 100644 --- a/tokio/tests/macros_test.rs +++ b/tokio/tests/macros_test.rs @@ -46,3 +46,25 @@ pub async fn issue_4175_test() -> std::io::Result<()> { return Ok(()); panic!(); } + +// https://github.com/tokio-rs/tokio/issues/4175 +pub mod clippy_semicolon_if_nothing_returned { + #![deny(clippy::semicolon_if_nothing_returned)] + + #[tokio::main] + pub async fn local() { + let _x = (); + } + #[tokio::main] + pub async fn item() { + fn _f() {} + } + #[tokio::main] + pub async fn semi() { + panic!(); + } + #[tokio::main] + pub async fn empty() { + // To trigger clippy::semicolon_if_nothing_returned lint, the block needs to contain newline. + } +} From cf3206842c0d94ecdaaeb421a58b1c963b627c3d Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Tue, 23 Nov 2021 12:09:24 +0900 Subject: [PATCH 04/59] chore: bump MSRV to 1.46 (#4254) --- .clippy.toml | 2 +- .github/workflows/ci.yml | 2 +- README.md | 2 +- tokio-macros/Cargo.toml | 1 + tokio-stream/Cargo.toml | 1 + tokio-test/Cargo.toml | 1 + tokio-util/Cargo.toml | 1 + tokio/Cargo.toml | 4 +--- tokio/README.md | 2 +- tokio/build.rs | 22 ---------------------- tokio/src/runtime/handle.rs | 18 ++++-------------- tokio/src/runtime/mod.rs | 6 +++--- tokio/src/task/blocking.rs | 2 +- tokio/src/task/builder.rs | 6 +++--- tokio/src/task/local.rs | 4 ++-- tokio/src/task/spawn.rs | 4 ++-- tokio/src/time/driver/sleep.rs | 10 ++-------- tokio/src/time/timeout.rs | 2 +- tokio/src/util/trace.rs | 17 ++++------------- 19 files changed, 31 insertions(+), 76 deletions(-) delete mode 100644 tokio/build.rs diff --git a/.clippy.toml b/.clippy.toml index 1cf14c6d01e..eb66960ac8d 100644 --- a/.clippy.toml +++ b/.clippy.toml @@ -1 +1 @@ -msrv = "1.45" +msrv = "1.46" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4e07f18c049..b3df113d220 100644 --- a/.github/workflows/ci.yml +++ 
b/.github/workflows/ci.yml @@ -10,7 +10,7 @@ env: RUSTFLAGS: -Dwarnings RUST_BACKTRACE: 1 nightly: nightly-2021-10-25 - minrust: 1.45.2 + minrust: 1.46 jobs: # Depends on all action sthat are required for a "successful" CI run. diff --git a/README.md b/README.md index 19f049cba78..1bc1850087d 100644 --- a/README.md +++ b/README.md @@ -164,7 +164,7 @@ several other libraries, including: ## Supported Rust Versions Tokio is built against the latest stable release. The minimum supported version -is 1.45. The current Tokio version is not guaranteed to build on Rust versions +is 1.46. The current Tokio version is not guaranteed to build on Rust versions earlier than the minimum supported version. ## Release schedule diff --git a/tokio-macros/Cargo.toml b/tokio-macros/Cargo.toml index 2114cd2942c..34ca4b0a368 100644 --- a/tokio-macros/Cargo.toml +++ b/tokio-macros/Cargo.toml @@ -8,6 +8,7 @@ name = "tokio-macros" # - Create "tokio-macros-1.0.x" git tag. version = "1.6.0" edition = "2018" +rust-version = "1.46" authors = ["Tokio Contributors "] license = "MIT" repository = "https://github.com/tokio-rs/tokio" diff --git a/tokio-stream/Cargo.toml b/tokio-stream/Cargo.toml index 83f8551826c..41c1a71c32b 100644 --- a/tokio-stream/Cargo.toml +++ b/tokio-stream/Cargo.toml @@ -8,6 +8,7 @@ name = "tokio-stream" # - Create "tokio-stream-0.1.x" git tag. version = "0.1.8" edition = "2018" +rust-version = "1.46" authors = ["Tokio Contributors "] license = "MIT" repository = "https://github.com/tokio-rs/tokio" diff --git a/tokio-test/Cargo.toml b/tokio-test/Cargo.toml index 55d5aafc557..09fdf06b5ef 100644 --- a/tokio-test/Cargo.toml +++ b/tokio-test/Cargo.toml @@ -8,6 +8,7 @@ name = "tokio-test" # - Create "tokio-test-0.4.x" git tag. 
version = "0.4.2" edition = "2018" +rust-version = "1.46" authors = ["Tokio Contributors "] license = "MIT" repository = "https://github.com/tokio-rs/tokio" diff --git a/tokio-util/Cargo.toml b/tokio-util/Cargo.toml index 676b0e2ec94..2bf6ac9a049 100644 --- a/tokio-util/Cargo.toml +++ b/tokio-util/Cargo.toml @@ -8,6 +8,7 @@ name = "tokio-util" # - Create "tokio-util-0.6.x" git tag. version = "0.6.9" edition = "2018" +rust-version = "1.46" authors = ["Tokio Contributors "] license = "MIT" repository = "https://github.com/tokio-rs/tokio" diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index 5b18d409543..0a5cfea0389 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -9,6 +9,7 @@ name = "tokio" # - Create "v1.0.x" git tag. version = "1.14.0" edition = "2018" +rust-version = "1.46" authors = ["Tokio Contributors "] license = "MIT" readme = "README.md" @@ -137,9 +138,6 @@ mio-aio = { version = "0.6.0", features = ["tokio"] } [target.'cfg(loom)'.dev-dependencies] loom = { version = "0.5", features = ["futures", "checkpoint"] } -[build-dependencies] -autocfg = "1" # Needed for conditionally enabling `track-caller` - [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] diff --git a/tokio/README.md b/tokio/README.md index 19f049cba78..1bc1850087d 100644 --- a/tokio/README.md +++ b/tokio/README.md @@ -164,7 +164,7 @@ several other libraries, including: ## Supported Rust Versions Tokio is built against the latest stable release. The minimum supported version -is 1.45. The current Tokio version is not guaranteed to build on Rust versions +is 1.46. The current Tokio version is not guaranteed to build on Rust versions earlier than the minimum supported version. 
## Release schedule diff --git a/tokio/build.rs b/tokio/build.rs deleted file mode 100644 index fe5c8300560..00000000000 --- a/tokio/build.rs +++ /dev/null @@ -1,22 +0,0 @@ -use autocfg::AutoCfg; - -fn main() { - match AutoCfg::new() { - Ok(ac) => { - // The #[track_caller] attribute was stabilized in rustc 1.46.0. - if ac.probe_rustc_version(1, 46) { - autocfg::emit("tokio_track_caller") - } - } - - Err(e) => { - // If we couldn't detect the compiler version and features, just - // print a warning. This isn't a fatal error: we can still build - // Tokio, we just can't enable cfgs automatically. - println!( - "cargo:warning=tokio: failed to detect compiler features: {}", - e - ); - } - } -} diff --git a/tokio/src/runtime/handle.rs b/tokio/src/runtime/handle.rs index cd1cb760a3b..ba9a9eaf7c3 100644 --- a/tokio/src/runtime/handle.rs +++ b/tokio/src/runtime/handle.rs @@ -157,7 +157,7 @@ impl Handle { /// }); /// # } /// ``` - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub fn spawn(&self, future: F) -> JoinHandle where F: Future + Send + 'static, @@ -187,7 +187,7 @@ impl Handle { /// println!("now running on a worker thread"); /// }); /// # } - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub fn spawn_blocking(&self, func: F) -> JoinHandle where F: FnOnce() -> R + Send + 'static, @@ -200,7 +200,7 @@ impl Handle { } } - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub(crate) fn spawn_blocking_inner(&self, func: F, name: Option<&str>) -> JoinHandle where F: FnOnce() -> R + Send + 'static, @@ -211,9 +211,7 @@ impl Handle { #[cfg(all(tokio_unstable, feature = "tracing"))] let fut = { use tracing::Instrument; - #[cfg(tokio_track_caller)] let location = std::panic::Location::caller(); - #[cfg(tokio_track_caller)] let span = tracing::trace_span!( target: "tokio::task::blocking", "runtime.spawn", @@ -222,14 +220,6 @@ impl Handle { "fn" = %std::any::type_name::(), spawn.location = %format_args!("{}:{}:{}", 
location.file(), location.line(), location.column()), ); - #[cfg(not(tokio_track_caller))] - let span = tracing::trace_span!( - target: "tokio::task::blocking", - "runtime.spawn", - kind = %"blocking", - task.name = %name.unwrap_or_default(), - "fn" = %std::any::type_name::(), - ); fut.instrument(span) }; @@ -311,7 +301,7 @@ impl Handle { /// [`tokio::fs`]: crate::fs /// [`tokio::net`]: crate::net /// [`tokio::time`]: crate::time - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub fn block_on(&self, future: F) -> F::Output { #[cfg(all(tokio_unstable, feature = "tracing"))] let future = crate::util::trace::task(future, "block_on", None); diff --git a/tokio/src/runtime/mod.rs b/tokio/src/runtime/mod.rs index 96bb47c1ded..a4fa8924208 100644 --- a/tokio/src/runtime/mod.rs +++ b/tokio/src/runtime/mod.rs @@ -375,7 +375,7 @@ cfg_rt! { /// }); /// # } /// ``` - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub fn spawn(&self, future: F) -> JoinHandle where F: Future + Send + 'static, @@ -400,7 +400,7 @@ cfg_rt! { /// println!("now running on a worker thread"); /// }); /// # } - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub fn spawn_blocking(&self, func: F) -> JoinHandle where F: FnOnce() -> R + Send + 'static, @@ -450,7 +450,7 @@ cfg_rt! { /// ``` /// /// [handle]: fn@Handle::block_on - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub fn block_on(&self, future: F) -> F::Output { #[cfg(all(tokio_unstable, feature = "tracing"))] let future = crate::util::trace::task(future, "block_on", None); diff --git a/tokio/src/task/blocking.rs b/tokio/src/task/blocking.rs index 825f25f8c66..5fe358f3e50 100644 --- a/tokio/src/task/blocking.rs +++ b/tokio/src/task/blocking.rs @@ -188,7 +188,7 @@ cfg_rt! 
{ /// worker.await.unwrap(); /// # } /// ``` - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub fn spawn_blocking(f: F) -> JoinHandle where F: FnOnce() -> R + Send + 'static, diff --git a/tokio/src/task/builder.rs b/tokio/src/task/builder.rs index f991fc65e2a..dae334928e7 100644 --- a/tokio/src/task/builder.rs +++ b/tokio/src/task/builder.rs @@ -65,7 +65,7 @@ impl<'a> Builder<'a> { /// /// See [`task::spawn`](crate::task::spawn) for /// more details. - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub fn spawn(self, future: Fut) -> JoinHandle where Fut: Future + Send + 'static, @@ -78,7 +78,7 @@ impl<'a> Builder<'a> { /// /// See [`task::spawn_local`](crate::task::spawn_local) /// for more details. - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub fn spawn_local(self, future: Fut) -> JoinHandle where Fut: Future + 'static, @@ -91,7 +91,7 @@ impl<'a> Builder<'a> { /// /// See [`task::spawn_blocking`](crate::task::spawn_blocking) /// for more details. - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub fn spawn_blocking(self, function: Function) -> JoinHandle where Function: FnOnce() -> Output + Send + 'static, diff --git a/tokio/src/task/local.rs b/tokio/src/task/local.rs index 4a5d313c6e4..1beee6891ba 100644 --- a/tokio/src/task/local.rs +++ b/tokio/src/task/local.rs @@ -286,7 +286,7 @@ cfg_rt! { /// }).await; /// } /// ``` - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub fn spawn_local(future: F) -> JoinHandle where F: Future + 'static, @@ -377,7 +377,7 @@ impl LocalSet { /// } /// ``` /// [`spawn_local`]: fn@spawn_local - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub fn spawn_local(&self, future: F) -> JoinHandle where F: Future + 'static, diff --git a/tokio/src/task/spawn.rs b/tokio/src/task/spawn.rs index 065d38f54b5..a9d736674c0 100644 --- a/tokio/src/task/spawn.rs +++ b/tokio/src/task/spawn.rs @@ -121,7 +121,7 @@ cfg_rt! 
{ /// ```text /// error[E0391]: cycle detected when processing `main` /// ``` - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub fn spawn(future: T) -> JoinHandle where T: Future + Send + 'static, @@ -136,7 +136,7 @@ cfg_rt! { } } - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub(super) fn spawn_inner(future: T, name: Option<&str>) -> JoinHandle where T: Future + Send + 'static, diff --git a/tokio/src/time/driver/sleep.rs b/tokio/src/time/driver/sleep.rs index 43ff694ffc6..7a2327be6db 100644 --- a/tokio/src/time/driver/sleep.rs +++ b/tokio/src/time/driver/sleep.rs @@ -45,7 +45,7 @@ cfg_trace! { /// [`interval`]: crate::time::interval() // Alias for old name in 0.x #[cfg_attr(docsrs, doc(alias = "delay_until"))] -#[cfg_attr(tokio_track_caller, track_caller)] +#[track_caller] pub fn sleep_until(deadline: Instant) -> Sleep { return Sleep::new_timeout(deadline, trace::caller_location()); } @@ -89,7 +89,7 @@ pub fn sleep_until(deadline: Instant) -> Sleep { // Alias for old name in 0.x #[cfg_attr(docsrs, doc(alias = "delay_for"))] #[cfg_attr(docsrs, doc(alias = "wait"))] -#[cfg_attr(tokio_track_caller, track_caller)] +#[track_caller] pub fn sleep(duration: Duration) -> Sleep { let location = trace::caller_location(); @@ -232,10 +232,8 @@ impl Sleep { let deadline_tick = time_source.deadline_to_tick(deadline); let duration = deadline_tick.checked_sub(time_source.now()).unwrap_or(0); - #[cfg(tokio_track_caller)] let location = location.expect("should have location if tracking caller"); - #[cfg(tokio_track_caller)] let resource_span = tracing::trace_span!( "runtime.resource", concrete_type = "Sleep", @@ -245,10 +243,6 @@ impl Sleep { loc.col = location.column(), ); - #[cfg(not(tokio_track_caller))] - let resource_span = - tracing::trace_span!("runtime.resource", concrete_type = "Sleep", kind = "timer"); - let async_op_span = tracing::trace_span!("runtime.resource.async_op", source = "Sleep::new_timeout"); diff --git 
a/tokio/src/time/timeout.rs b/tokio/src/time/timeout.rs index 6725caa09f8..cf90540bfd5 100644 --- a/tokio/src/time/timeout.rs +++ b/tokio/src/time/timeout.rs @@ -48,7 +48,7 @@ use std::task::{self, Poll}; /// } /// # } /// ``` -#[cfg_attr(tokio_track_caller, track_caller)] +#[track_caller] pub fn timeout(duration: Duration, future: T) -> Timeout where T: Future, diff --git a/tokio/src/util/trace.rs b/tokio/src/util/trace.rs index e3c26f9d666..74ae739354b 100644 --- a/tokio/src/util/trace.rs +++ b/tokio/src/util/trace.rs @@ -3,12 +3,10 @@ cfg_trace! { pub(crate) use tracing::instrument::Instrumented; #[inline] - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub(crate) fn task(task: F, kind: &'static str, name: Option<&str>) -> Instrumented { use tracing::instrument::Instrument; - #[cfg(tokio_track_caller)] let location = std::panic::Location::caller(); - #[cfg(tokio_track_caller)] let span = tracing::trace_span!( target: "tokio::task", "runtime.spawn", @@ -18,23 +16,16 @@ cfg_trace! { loc.line = location.line(), loc.col = location.column(), ); - #[cfg(not(tokio_track_caller))] - let span = tracing::trace_span!( - target: "tokio::task", - "runtime.spawn", - %kind, - task.name = %name.unwrap_or_default(), - ); task.instrument(span) } } } cfg_time! 
{ - #[cfg_attr(tokio_track_caller, track_caller)] + #[track_caller] pub(crate) fn caller_location() -> Option<&'static std::panic::Location<'static>> { - #[cfg(all(tokio_track_caller, tokio_unstable, feature = "tracing"))] + #[cfg(all(tokio_unstable, feature = "tracing"))] return Some(std::panic::Location::caller()); - #[cfg(not(all(tokio_track_caller, tokio_unstable, feature = "tracing")))] + #[cfg(not(all(tokio_unstable, feature = "tracing")))] None } } From a8b662f643b22a2d5cecb56ea67e2a1d202323ea Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Tue, 23 Nov 2021 19:29:57 +0900 Subject: [PATCH 05/59] ci: upgrade to new nightly (#4268) --- .cirrus.yml | 2 +- .github/workflows/ci.yml | 2 +- tokio-macros/src/lib.rs | 1 - tokio-stream/src/lib.rs | 1 - tokio-test/src/lib.rs | 1 - tokio-util/src/lib.rs | 1 - tokio/src/lib.rs | 5 ----- tokio/src/macros/cfg.rs | 1 - 8 files changed, 2 insertions(+), 12 deletions(-) diff --git a/.cirrus.yml b/.cirrus.yml index 4bef869c24f..73d77abfa1d 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -29,7 +29,7 @@ task: setup_script: - pkg install -y bash curl - curl https://sh.rustup.rs -sSf --output rustup.sh - - sh rustup.sh -y --profile minimal --default-toolchain nightly-2021-10-25 + - sh rustup.sh -y --profile minimal --default-toolchain nightly-2021-11-23 - . 
$HOME/.cargo/env - | echo "~~~~ rustc --version ~~~~" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b3df113d220..4474e9667bd 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -9,7 +9,7 @@ name: CI env: RUSTFLAGS: -Dwarnings RUST_BACKTRACE: 1 - nightly: nightly-2021-10-25 + nightly: nightly-2021-11-23 minrust: 1.46 jobs: diff --git a/tokio-macros/src/lib.rs b/tokio-macros/src/lib.rs index f8ba8eab18a..38638a1df8a 100644 --- a/tokio-macros/src/lib.rs +++ b/tokio-macros/src/lib.rs @@ -5,7 +5,6 @@ rust_2018_idioms, unreachable_pub )] -#![cfg_attr(docsrs, deny(rustdoc::broken_intra_doc_links))] #![doc(test( no_crate_inject, attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables)) diff --git a/tokio-stream/src/lib.rs b/tokio-stream/src/lib.rs index b7f232fdadc..f600ccb8d36 100644 --- a/tokio-stream/src/lib.rs +++ b/tokio-stream/src/lib.rs @@ -10,7 +10,6 @@ unreachable_pub )] #![cfg_attr(docsrs, feature(doc_cfg))] -#![cfg_attr(docsrs, deny(rustdoc::broken_intra_doc_links))] #![doc(test( no_crate_inject, attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables)) diff --git a/tokio-test/src/lib.rs b/tokio-test/src/lib.rs index 7bba3eedac8..de3f0864a94 100644 --- a/tokio-test/src/lib.rs +++ b/tokio-test/src/lib.rs @@ -4,7 +4,6 @@ rust_2018_idioms, unreachable_pub )] -#![cfg_attr(docsrs, deny(rustdoc::broken_intra_doc_links))] #![doc(test( no_crate_inject, attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables)) diff --git a/tokio-util/src/lib.rs b/tokio-util/src/lib.rs index 0b3e5962343..3786a4002db 100644 --- a/tokio-util/src/lib.rs +++ b/tokio-util/src/lib.rs @@ -5,7 +5,6 @@ rust_2018_idioms, unreachable_pub )] -#![cfg_attr(docsrs, deny(rustdoc::broken_intra_doc_links))] #![doc(test( no_crate_inject, attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables)) diff --git a/tokio/src/lib.rs b/tokio/src/lib.rs index 9821c1a62f5..22f1ece3d8b 100644 --- 
a/tokio/src/lib.rs +++ b/tokio/src/lib.rs @@ -10,16 +10,11 @@ unreachable_pub )] #![deny(unused_must_use)] -#![cfg_attr(docsrs, deny(rustdoc::broken_intra_doc_links))] #![doc(test( no_crate_inject, attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables)) ))] #![cfg_attr(docsrs, feature(doc_cfg))] -#![cfg_attr(docsrs, feature(doc_cfg_hide))] -#![cfg_attr(docsrs, doc(cfg_hide(docsrs)))] -#![cfg_attr(docsrs, doc(cfg_hide(loom)))] -#![cfg_attr(docsrs, doc(cfg_hide(not(loom))))] #![cfg_attr(docsrs, allow(unused_attributes))] //! A runtime for writing reliable network applications without compromising speed. diff --git a/tokio/src/macros/cfg.rs b/tokio/src/macros/cfg.rs index 606bce7689d..3afc0402374 100644 --- a/tokio/src/macros/cfg.rs +++ b/tokio/src/macros/cfg.rs @@ -99,7 +99,6 @@ macro_rules! cfg_io_driver_impl { feature = "process", all(unix, feature = "signal"), ))] - #[cfg_attr(docsrs, doc(cfg(all())))] $item )* } From 1a423b3322bd32afc761a2a39fc986909aab633d Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Tue, 23 Nov 2021 19:53:32 +0900 Subject: [PATCH 06/59] chore: remove doc URL from Cargo.toml (#4251) https://doc.rust-lang.org/cargo/reference/manifest.html#the-documentation-field > If no URL is specified in the manifest file, crates.io will > automatically link your crate to the corresponding docs.rs page. --- tokio-macros/Cargo.toml | 3 --- tokio-stream/Cargo.toml | 3 --- tokio-test/Cargo.toml | 3 --- tokio-util/Cargo.toml | 3 --- tokio/Cargo.toml | 2 -- tokio/tests/rt_handle_block_on.rs | 2 +- 6 files changed, 1 insertion(+), 15 deletions(-) diff --git a/tokio-macros/Cargo.toml b/tokio-macros/Cargo.toml index 34ca4b0a368..e96ba20c921 100644 --- a/tokio-macros/Cargo.toml +++ b/tokio-macros/Cargo.toml @@ -2,8 +2,6 @@ name = "tokio-macros" # When releasing to crates.io: # - Remove path dependencies -# - Update doc url -# - Cargo.toml # - Update CHANGELOG.md. # - Create "tokio-macros-1.0.x" git tag. 
version = "1.6.0" @@ -13,7 +11,6 @@ authors = ["Tokio Contributors "] license = "MIT" repository = "https://github.com/tokio-rs/tokio" homepage = "https://tokio.rs" -documentation = "https://docs.rs/tokio-macros/1.6.0/tokio_macros" description = """ Tokio's proc macros. """ diff --git a/tokio-stream/Cargo.toml b/tokio-stream/Cargo.toml index 41c1a71c32b..5b2535a3371 100644 --- a/tokio-stream/Cargo.toml +++ b/tokio-stream/Cargo.toml @@ -2,8 +2,6 @@ name = "tokio-stream" # When releasing to crates.io: # - Remove path dependencies -# - Update doc url -# - Cargo.toml # - Update CHANGELOG.md. # - Create "tokio-stream-0.1.x" git tag. version = "0.1.8" @@ -13,7 +11,6 @@ authors = ["Tokio Contributors "] license = "MIT" repository = "https://github.com/tokio-rs/tokio" homepage = "https://tokio.rs" -documentation = "https://docs.rs/tokio-stream/0.1.8/tokio_stream" description = """ Utilities to work with `Stream` and `tokio`. """ diff --git a/tokio-test/Cargo.toml b/tokio-test/Cargo.toml index 09fdf06b5ef..59af228daae 100644 --- a/tokio-test/Cargo.toml +++ b/tokio-test/Cargo.toml @@ -2,8 +2,6 @@ name = "tokio-test" # When releasing to crates.io: # - Remove path dependencies -# - Update doc url -# - Cargo.toml # - Update CHANGELOG.md. # - Create "tokio-test-0.4.x" git tag. version = "0.4.2" @@ -13,7 +11,6 @@ authors = ["Tokio Contributors "] license = "MIT" repository = "https://github.com/tokio-rs/tokio" homepage = "https://tokio.rs" -documentation = "https://docs.rs/tokio-test/0.4.2/tokio_test" description = """ Testing utilities for Tokio- and futures-based code """ diff --git a/tokio-util/Cargo.toml b/tokio-util/Cargo.toml index 2bf6ac9a049..85df32c2655 100644 --- a/tokio-util/Cargo.toml +++ b/tokio-util/Cargo.toml @@ -2,8 +2,6 @@ name = "tokio-util" # When releasing to crates.io: # - Remove path dependencies -# - Update doc url -# - Cargo.toml # - Update CHANGELOG.md. # - Create "tokio-util-0.6.x" git tag. 
version = "0.6.9" @@ -13,7 +11,6 @@ authors = ["Tokio Contributors "] license = "MIT" repository = "https://github.com/tokio-rs/tokio" homepage = "https://tokio.rs" -documentation = "https://docs.rs/tokio-util/0.6.9/tokio_util" description = """ Additional utilities for working with Tokio. """ diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index 0a5cfea0389..ee3a2260b29 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -3,7 +3,6 @@ name = "tokio" # When releasing to crates.io: # - Remove path dependencies # - Update doc url -# - Cargo.toml # - README.md # - Update CHANGELOG.md. # - Create "v1.0.x" git tag. @@ -13,7 +12,6 @@ rust-version = "1.46" authors = ["Tokio Contributors "] license = "MIT" readme = "README.md" -documentation = "https://docs.rs/tokio/1.14.0/tokio/" repository = "https://github.com/tokio-rs/tokio" homepage = "https://tokio.rs" description = """ diff --git a/tokio/tests/rt_handle_block_on.rs b/tokio/tests/rt_handle_block_on.rs index 17878c8d239..5c1d533a010 100644 --- a/tokio/tests/rt_handle_block_on.rs +++ b/tokio/tests/rt_handle_block_on.rs @@ -135,7 +135,7 @@ rt_test! { let contents = Handle::current() .block_on(fs::read_to_string("Cargo.toml")) .unwrap(); - assert!(contents.contains("Cargo.toml")); + assert!(contents.contains("https://tokio.rs")); } #[test] From 3b339024f0f7213f0b0cf59b119b7e1bc99de2e9 Mon Sep 17 00:00:00 2001 From: David Pedersen Date: Tue, 23 Nov 2021 11:54:06 +0100 Subject: [PATCH 07/59] stream: impl Extend for StreamMap (#4272) ## Motivation This allows `StreamMap` to be used with [`futures::stream::StreamExt::collect`][collect]. My use case is something like this: ```rust let stream_map: StreamMap<_, _> = things .into_iter() .map(|thing| make_stream(thing)) // iterator of futures .collect::>() // stream of streams .collect::>() // combine all the inner streams into one .await; async fn make_stream(thing: Thing) -> impl Stream { ... 
} ``` [collect]: https://docs.rs/futures/0.3.17/futures/stream/trait.StreamExt.html#method.collect ## Solution Add `Extend` impl that delegates to the inner `Vec`. --- tokio-stream/src/stream_map.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tokio-stream/src/stream_map.rs b/tokio-stream/src/stream_map.rs index 80a521ee17a..215980474b1 100644 --- a/tokio-stream/src/stream_map.rs +++ b/tokio-stream/src/stream_map.rs @@ -585,6 +585,15 @@ where } } +impl Extend<(K, V)> for StreamMap { + fn extend(&mut self, iter: T) + where + T: IntoIterator, + { + self.entries.extend(iter); + } +} + mod rand { use std::cell::Cell; From 2a614fba0d6af9660912cffa5615b464bad224a7 Mon Sep 17 00:00:00 2001 From: omjadas Date: Tue, 23 Nov 2021 22:11:42 +1100 Subject: [PATCH 08/59] docs: document that parking_lot is enabled by full (#4269) --- tokio/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio/src/lib.rs b/tokio/src/lib.rs index 22f1ece3d8b..35295d837a6 100644 --- a/tokio/src/lib.rs +++ b/tokio/src/lib.rs @@ -307,7 +307,7 @@ //! Beware though that this will pull in many extra dependencies that you may not //! need. //! -//! - `full`: Enables all Tokio public API features listed below except `test-util`. +//! - `full`: Enables all features listed below except `test-util` and `tracing`. //! - `rt`: Enables `tokio::spawn`, the basic (current thread) scheduler, //! and non-scheduler utilities. //! - `rt-multi-thread`: Enables the heavier, multi-threaded, work-stealing scheduler. 
From 2c0e5c97049cbd527754477709839fb1f52ed282 Mon Sep 17 00:00:00 2001 From: Shin Seunghun <36041278+seunghunee@users.noreply.github.com> Date: Tue, 23 Nov 2021 20:14:08 +0900 Subject: [PATCH 09/59] time: document missing timer panics (#4247) --- tokio/src/time/driver/handle.rs | 28 ++++++++++++++++----------- tokio/src/time/driver/sleep.rs | 34 +++++++++++++++++++++++++++++++++ tokio/src/time/timeout.rs | 18 +++++++++++++++++ 3 files changed, 69 insertions(+), 11 deletions(-) diff --git a/tokio/src/time/driver/handle.rs b/tokio/src/time/driver/handle.rs index 7aaf65a79e4..b61c0476e15 100644 --- a/tokio/src/time/driver/handle.rs +++ b/tokio/src/time/driver/handle.rs @@ -40,16 +40,19 @@ cfg_rt! { /// /// This function panics if there is no current timer set. /// - /// It can be triggered when `Builder::enable_time()` or - /// `Builder::enable_all()` are not included in the builder. + /// It can be triggered when [`Builder::enable_time`] or + /// [`Builder::enable_all`] are not included in the builder. /// /// It can also panic whenever a timer is created outside of a - /// Tokio runtime. That is why `rt.block_on(delay_for(...))` will panic, + /// Tokio runtime. That is why `rt.block_on(sleep(...))` will panic, /// since the function is executed outside of the runtime. - /// Whereas `rt.block_on(async {delay_for(...).await})` doesn't panic. + /// Whereas `rt.block_on(async {sleep(...).await})` doesn't panic. /// And this is because wrapping the function on an async makes it lazy, /// and so gets executed inside the runtime successfully without /// panicking. + /// + /// [`Builder::enable_time`]: crate::runtime::Builder::enable_time + /// [`Builder::enable_all`]: crate::runtime::Builder::enable_all pub(crate) fn current() -> Self { crate::runtime::context::time_handle() .expect("A Tokio 1.x context was found, but timers are disabled. Call `enable_time` on the runtime builder to enable timers.") @@ -65,16 +68,19 @@ cfg_not_rt! 
{ /// /// This function panics if there is no current timer set. /// - /// It can be triggered when `Builder::enable_time()` or - /// `Builder::enable_all()` are not included in the builder. + /// It can be triggered when [`Builder::enable_time`] or + /// [`Builder::enable_all`] are not included in the builder. /// - /// It can also panic whenever a timer is created outside of a Tokio - /// runtime. That is why `rt.block_on(delay_for(...))` will panic, + /// It can also panic whenever a timer is created outside of a + /// Tokio runtime. That is why `rt.block_on(sleep(...))` will panic, /// since the function is executed outside of the runtime. - /// Whereas `rt.block_on(async {delay_for(...).await})` doesn't - /// panic. And this is because wrapping the function on an async makes it - /// lazy, and so outside executed inside the runtime successfully without + /// Whereas `rt.block_on(async {sleep(...).await})` doesn't panic. + /// And this is because wrapping the function on an async makes it lazy, + /// and so gets executed inside the runtime successfully without /// panicking. + /// + /// [`Builder::enable_time`]: crate::runtime::Builder::enable_time + /// [`Builder::enable_all`]: crate::runtime::Builder::enable_all pub(crate) fn current() -> Self { panic!("{}", crate::util::error::CONTEXT_MISSING_ERROR) } diff --git a/tokio/src/time/driver/sleep.rs b/tokio/src/time/driver/sleep.rs index 7a2327be6db..d10639d191c 100644 --- a/tokio/src/time/driver/sleep.rs +++ b/tokio/src/time/driver/sleep.rs @@ -41,8 +41,25 @@ cfg_trace! { /// /// See the documentation for the [`Sleep`] type for more examples. /// +/// # Panics +/// +/// This function panics if there is no current timer set. +/// +/// It can be triggered when [`Builder::enable_time`] or +/// [`Builder::enable_all`] are not included in the builder. +/// +/// It can also panic whenever a timer is created outside of a +/// Tokio runtime. 
That is why `rt.block_on(sleep(...))` will panic, +/// since the function is executed outside of the runtime. +/// Whereas `rt.block_on(async {sleep(...).await})` doesn't panic. +/// And this is because wrapping the function on an async makes it lazy, +/// and so gets executed inside the runtime successfully without +/// panicking. +/// /// [`Sleep`]: struct@crate::time::Sleep /// [`interval`]: crate::time::interval() +/// [`Builder::enable_time`]: crate::runtime::Builder::enable_time +/// [`Builder::enable_all`]: crate::runtime::Builder::enable_all // Alias for old name in 0.x #[cfg_attr(docsrs, doc(alias = "delay_until"))] #[track_caller] @@ -84,8 +101,25 @@ pub fn sleep_until(deadline: Instant) -> Sleep { /// /// See the documentation for the [`Sleep`] type for more examples. /// +/// # Panics +/// +/// This function panics if there is no current timer set. +/// +/// It can be triggered when [`Builder::enable_time`] or +/// [`Builder::enable_all`] are not included in the builder. +/// +/// It can also panic whenever a timer is created outside of a +/// Tokio runtime. That is why `rt.block_on(sleep(...))` will panic, +/// since the function is executed outside of the runtime. +/// Whereas `rt.block_on(async {sleep(...).await})` doesn't panic. +/// And this is because wrapping the function on an async makes it lazy, +/// and so gets executed inside the runtime successfully without +/// panicking. 
+/// /// [`Sleep`]: struct@crate::time::Sleep /// [`interval`]: crate::time::interval() +/// [`Builder::enable_time`]: crate::runtime::Builder::enable_time +/// [`Builder::enable_all`]: crate::runtime::Builder::enable_all // Alias for old name in 0.x #[cfg_attr(docsrs, doc(alias = "delay_for"))] #[cfg_attr(docsrs, doc(alias = "wait"))] diff --git a/tokio/src/time/timeout.rs b/tokio/src/time/timeout.rs index cf90540bfd5..cc299161633 100644 --- a/tokio/src/time/timeout.rs +++ b/tokio/src/time/timeout.rs @@ -48,6 +48,24 @@ use std::task::{self, Poll}; /// } /// # } /// ``` +/// +/// # Panics +/// +/// This function panics if there is no current timer set. +/// +/// It can be triggered when [`Builder::enable_time`] or +/// [`Builder::enable_all`] are not included in the builder. +/// +/// It can also panic whenever a timer is created outside of a +/// Tokio runtime. That is why `rt.block_on(sleep(...))` will panic, +/// since the function is executed outside of the runtime. +/// Whereas `rt.block_on(async {sleep(...).await})` doesn't panic. +/// And this is because wrapping the function on an async makes it lazy, +/// and so gets executed inside the runtime successfully without +/// panicking. 
+/// +/// [`Builder::enable_time`]: crate::runtime::Builder::enable_time +/// [`Builder::enable_all`]: crate::runtime::Builder::enable_all #[track_caller] pub fn timeout(duration: Duration, future: T) -> Timeout where From 347c0cdaba27eea8e54b034db4195fa71652c5a1 Mon Sep 17 00:00:00 2001 From: oblique Date: Tue, 23 Nov 2021 14:51:07 +0200 Subject: [PATCH 10/59] time: add `Interval::reset` method (#4248) --- tokio/src/time/interval.rs | 30 ++++++++++++++++++++++++++++++ tokio/tests/time_interval.rs | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 66 insertions(+) diff --git a/tokio/src/time/interval.rs b/tokio/src/time/interval.rs index 7e07e51267e..6a89943741e 100644 --- a/tokio/src/time/interval.rs +++ b/tokio/src/time/interval.rs @@ -435,6 +435,36 @@ impl Interval { Poll::Ready(timeout) } + /// Resets the interval to complete one period after the current time. + /// + /// This method ignores [`MissedTickBehavior`] strategy. + /// + /// # Examples + /// + /// ``` + /// use tokio::time; + /// + /// use std::time::Duration; + /// + /// #[tokio::main] + /// async fn main() { + /// let mut interval = time::interval(Duration::from_millis(100)); + /// + /// interval.tick().await; + /// + /// time::sleep(Duration::from_millis(50)).await; + /// interval.reset(); + /// + /// interval.tick().await; + /// interval.tick().await; + /// + /// // approximately 250ms have elapsed. + /// } + /// ``` + pub fn reset(&mut self) { + self.delay.as_mut().reset(Instant::now() + self.period); + } + /// Returns the [`MissedTickBehavior`] strategy currently being used. 
pub fn missed_tick_behavior(&self) -> MissedTickBehavior { self.missed_tick_behavior diff --git a/tokio/tests/time_interval.rs b/tokio/tests/time_interval.rs index 5f7bf55f254..186582e2e52 100644 --- a/tokio/tests/time_interval.rs +++ b/tokio/tests/time_interval.rs @@ -166,6 +166,42 @@ async fn skip() { check_interval_poll!(i, start, 1800); } +#[tokio::test(start_paused = true)] +async fn reset() { + let start = Instant::now(); + + // This is necessary because the timer is only so granular, and in order for + // all our ticks to resolve, the time needs to be 1ms ahead of what we + // expect, so that the runtime will see that it is time to resolve the timer + time::advance(ms(1)).await; + + let mut i = task::spawn(time::interval_at(start, ms(300))); + + check_interval_poll!(i, start, 0); + + time::advance(ms(100)).await; + check_interval_poll!(i, start); + + time::advance(ms(200)).await; + check_interval_poll!(i, start, 300); + + time::advance(ms(100)).await; + check_interval_poll!(i, start); + + i.reset(); + + time::advance(ms(250)).await; + check_interval_poll!(i, start); + + time::advance(ms(50)).await; + // We add one because when using `reset` method, `Interval` adds the + // `period` from `Instant::now()`, which will always be off by one + check_interval_poll!(i, start, 701); + + time::advance(ms(300)).await; + check_interval_poll!(i, start, 1001); +} + fn poll_next(interval: &mut task::Spawn) -> Poll { interval.enter(|cx, mut interval| interval.poll_tick(cx)) } From a77b2fbab25f947942acd3fd1e9c97ec43814822 Mon Sep 17 00:00:00 2001 From: Axel Forsman Date: Tue, 23 Nov 2021 13:56:31 +0100 Subject: [PATCH 11/59] io: extend `AsyncFdReadyGuard` method lifetimes (#4267) The implicit elided lifetimes of the `AsyncFd` references in return types of methods on `AsyncFdReadyGuard` resolved to that of `&self`. However that lifetime is smaller than `'a` since `self` contains an `&'a AsyncFd` reference. 
This will not change so the change also does not lessen future proofing. --- tokio/src/io/async_fd.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tokio/src/io/async_fd.rs b/tokio/src/io/async_fd.rs index e352843398a..3d5bc632502 100644 --- a/tokio/src/io/async_fd.rs +++ b/tokio/src/io/async_fd.rs @@ -525,7 +525,7 @@ impl<'a, Inner: AsRawFd> AsyncFdReadyGuard<'a, Inner> { #[cfg_attr(docsrs, doc(alias = "with_io"))] pub fn try_io( &mut self, - f: impl FnOnce(&AsyncFd) -> io::Result, + f: impl FnOnce(&'a AsyncFd) -> io::Result, ) -> Result, TryIoError> { let result = f(self.async_fd); @@ -542,12 +542,12 @@ impl<'a, Inner: AsRawFd> AsyncFdReadyGuard<'a, Inner> { } /// Returns a shared reference to the inner [`AsyncFd`]. - pub fn get_ref(&self) -> &AsyncFd { + pub fn get_ref(&self) -> &'a AsyncFd { self.async_fd } /// Returns a shared reference to the backing object of the inner [`AsyncFd`]. - pub fn get_inner(&self) -> &Inner { + pub fn get_inner(&self) -> &'a Inner { self.get_ref().get_ref() } } From 65fb0210d587254cb9a81b342decbffaf6e54c4a Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Wed, 24 Nov 2021 09:18:50 +0100 Subject: [PATCH 12/59] tokio: add 1.14.x to LTS releases (#4273) --- README.md | 1 + tokio/README.md | 1 + 2 files changed, 2 insertions(+) diff --git a/README.md b/README.md index 1bc1850087d..5e226ff7116 100644 --- a/README.md +++ b/README.md @@ -181,6 +181,7 @@ released as a new patch release for each LTS minor version. Our current LTS releases are: * `1.8.x` - LTS release until February 2022. + * `1.14.x` - LTS release until June 2022. Each LTS release will continue to receive backported fixes for at least half a year. If you wish to use a fixed minor release in your project, we recommend diff --git a/tokio/README.md b/tokio/README.md index 1bc1850087d..5e226ff7116 100644 --- a/tokio/README.md +++ b/tokio/README.md @@ -181,6 +181,7 @@ released as a new patch release for each LTS minor version. 
Our current LTS releases are: * `1.8.x` - LTS release until February 2022. + * `1.14.x` - LTS release until June 2022. Each LTS release will continue to receive backported fixes for at least half a year. If you wish to use a fixed minor release in your project, we recommend From d764ba5816ac0c05bc6fe006bcc249a08c8fee42 Mon Sep 17 00:00:00 2001 From: kenmasu Date: Thu, 2 Dec 2021 19:01:23 +0900 Subject: [PATCH 13/59] io: call `tcp.set_nonblocking(true)` in `AsyncFd` example. (#4292) --- tokio/src/io/async_fd.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tokio/src/io/async_fd.rs b/tokio/src/io/async_fd.rs index 3d5bc632502..93f9cb458a7 100644 --- a/tokio/src/io/async_fd.rs +++ b/tokio/src/io/async_fd.rs @@ -81,6 +81,7 @@ use std::{task::Context, task::Poll}; /// /// impl AsyncTcpStream { /// pub fn new(tcp: TcpStream) -> io::Result { +/// tcp.set_nonblocking(true)?; /// Ok(Self { /// inner: AsyncFd::new(tcp)?, /// }) From 64da914d1798190bbfc82c2f8548bba5a5098538 Mon Sep 17 00:00:00 2001 From: Shin Seunghun <36041278+seunghunee@users.noreply.github.com> Date: Thu, 2 Dec 2021 23:27:46 +0900 Subject: [PATCH 14/59] time: add doc links in entry doc (#4293) --- tokio/src/time/driver/entry.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/tokio/src/time/driver/entry.rs b/tokio/src/time/driver/entry.rs index 9e9f0dc8592..1beee57604b 100644 --- a/tokio/src/time/driver/entry.rs +++ b/tokio/src/time/driver/entry.rs @@ -5,9 +5,9 @@ //! //! # Ground rules //! -//! The heart of the timer implementation here is the `TimerShared` structure, -//! shared between the `TimerEntry` and the driver. Generally, we permit access -//! to `TimerShared` ONLY via either 1) a mutable reference to `TimerEntry` or +//! The heart of the timer implementation here is the [`TimerShared`] structure, +//! shared between the [`TimerEntry`] and the driver. Generally, we permit access +//! to [`TimerShared`] ONLY via either 1) a mutable reference to [`TimerEntry`] or //! 
2) a held driver lock. //! //! It follows from this that any changes made while holding BOTH 1 and 2 will @@ -49,8 +49,10 @@ //! There is of course a race condition between timer reset and timer //! expiration. If the driver fails to observe the updated expiration time, it //! could trigger expiration of the timer too early. However, because -//! `mark_pending` performs a compare-and-swap, it will identify this race and +//! [`mark_pending`][mark_pending] performs a compare-and-swap, it will identify this race and //! refuse to mark the timer as pending. +//! +//! [mark_pending]: TimerHandle::mark_pending use crate::loom::cell::UnsafeCell; use crate::loom::sync::atomic::AtomicU64; From ee4b2ede83c661715c054d3cda170994a499c39f Mon Sep 17 00:00:00 2001 From: Ivan Petkov Date: Thu, 2 Dec 2021 23:51:27 -0800 Subject: [PATCH 15/59] process: add `as_std()` method to `Command` (#4295) --- tokio/src/process/mod.rs | 6 ++++++ tokio/src/runtime/handle.rs | 6 +++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/tokio/src/process/mod.rs b/tokio/src/process/mod.rs index 6eeefdbef71..8a0d9db25fd 100644 --- a/tokio/src/process/mod.rs +++ b/tokio/src/process/mod.rs @@ -264,6 +264,12 @@ impl Command { Self::from(StdCommand::new(program)) } + /// Cheaply convert to a `&std::process::Command` for places where the type from the standard + /// library is expected. + pub fn as_std(&self) -> &StdCommand { + &self.std + } + /// Adds an argument to pass to the program. /// /// Only one argument can be passed per use. 
So instead of: diff --git a/tokio/src/runtime/handle.rs b/tokio/src/runtime/handle.rs index ba9a9eaf7c3..612205cccfa 100644 --- a/tokio/src/runtime/handle.rs +++ b/tokio/src/runtime/handle.rs @@ -26,7 +26,11 @@ pub struct Handle { /// Handles to the signal drivers #[cfg_attr( - not(any(feature = "signal", all(unix, feature = "process"))), + any( + loom, + not(all(unix, feature = "signal")), + not(all(unix, feature = "process")), + ), allow(dead_code) )] pub(super) signal_handle: driver::SignalHandle, From f73ed1fdbaa0febd1f89b64603cd171865b26afc Mon Sep 17 00:00:00 2001 From: Shin Seunghun <36041278+seunghunee@users.noreply.github.com> Date: Tue, 7 Dec 2021 19:07:00 +0900 Subject: [PATCH 16/59] runtime: fix typo (#4303) --- tokio/src/runtime/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio/src/runtime/mod.rs b/tokio/src/runtime/mod.rs index a4fa8924208..847dd5972e1 100644 --- a/tokio/src/runtime/mod.rs +++ b/tokio/src/runtime/mod.rs @@ -582,7 +582,7 @@ cfg_rt! { match self::context::try_enter(self.handle.clone()) { Some(guard) => basic.set_context_guard(guard), None => { - // The context thread-local has alread been destroyed. + // The context thread-local has already been destroyed. // // We don't set the guard in this case. Calls to tokio::spawn in task // destructors would fail regardless if this happens. 
From 60ba634d60d8ee4ad5f0dc515660505e2194562d Mon Sep 17 00:00:00 2001 From: Naruto210 Date: Tue, 7 Dec 2021 22:49:45 +0800 Subject: [PATCH 17/59] time: fix typo in tokio-time document (#4304) --- tokio/src/time/interval.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio/src/time/interval.rs b/tokio/src/time/interval.rs index 6a89943741e..2052567ab82 100644 --- a/tokio/src/time/interval.rs +++ b/tokio/src/time/interval.rs @@ -124,7 +124,7 @@ pub fn interval_at(start: Instant, period: Duration) -> Interval { /// /// #[tokio::main] /// async fn main() { -/// // ticks every 2 seconds +/// // ticks every 2 milliseconds /// let mut interval = time::interval(Duration::from_millis(2)); /// for _ in 0..5 { /// interval.tick().await; From 4c571b55b10b39255548a995a876883260b0bfeb Mon Sep 17 00:00:00 2001 From: Shin Seunghun <36041278+seunghunee@users.noreply.github.com> Date: Wed, 8 Dec 2021 21:30:52 +0900 Subject: [PATCH 18/59] runtime: fix typo in the task modules (#4306) --- tokio/src/runtime/task/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio/src/runtime/task/mod.rs b/tokio/src/runtime/task/mod.rs index 1f18209a6fb..0592cca1a09 100644 --- a/tokio/src/runtime/task/mod.rs +++ b/tokio/src/runtime/task/mod.rs @@ -25,7 +25,7 @@ //! //! The task uses a reference count to keep track of how many active references //! exist. The Unowned reference type takes up two ref-counts. All other -//! reference types take pu a single ref-count. +//! reference types take up a single ref-count. //! //! Besides the waker type, each task has at most one of each reference type. //! 
From 0bc9160e25a998d5e5671a32e340e3ec19575372 Mon Sep 17 00:00:00 2001 From: Toby Lawrence Date: Thu, 9 Dec 2021 10:40:27 -0500 Subject: [PATCH 19/59] chore: update labeler.yml to drop filepath prefixes (#4308) - update labeler.yml to drop filepath prefixes - make sure labeler enforces labels over lifetime of PR --- .github/labeler.yml | 13 ++++++------- .github/workflows/labeler.yml | 1 + 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/labeler.yml b/.github/labeler.yml index 604d3631a27..6e53c92aaf7 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -1,9 +1,8 @@ R-loom: -- ./tokio/src/sync/* -- ./tokio/src/sync/**/* -- ./tokio-util/src/sync/* -- ./tokio-util/src/sync/**/* -- ./tokio/src/runtime/* -- ./tokio/src/runtime/**/* - +- tokio/src/sync/* +- tokio/src/sync/**/* +- tokio-util/src/sync/* +- tokio-util/src/sync/**/* +- tokio/src/runtime/* +- tokio/src/runtime/**/* diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml index 4386c381510..6d5dd6fbe1b 100644 --- a/.github/workflows/labeler.yml +++ b/.github/workflows/labeler.yml @@ -11,3 +11,4 @@ jobs: - uses: actions/labeler@v3 with: repo-token: "${{ secrets.GITHUB_TOKEN }}" + sync-labels: true From eb1af7f29caaa1cd9e84b9fcbd87b9d62b546de3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Braulio=20Valdivielso=20Mart=C3=ADnez?= Date: Fri, 10 Dec 2021 10:08:49 +0000 Subject: [PATCH 20/59] io: make `tokio::io::empty` cooperative (#4300) Reads and buffered reads from a `tokio::io::empty` were always marked as ready. That makes sense, given that there is nothing to wait for. However, doing repeated reads on the `empty` could stall the event loop and prevent other tasks from making progress. This change uses tokio's coop system to yield control back to the executor when appropriate. Note that the issue that originally triggered this PR is not fixed yet, because the `timeout` function will not poll the timer after empty::read runs out of budget. 
A different change will be needed to address that. Refs: #4291 --- tokio/src/io/util/empty.rs | 20 ++++++++++++++++++-- tokio/tests/io_util_empty.rs | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 50 insertions(+), 2 deletions(-) create mode 100644 tokio/tests/io_util_empty.rs diff --git a/tokio/src/io/util/empty.rs b/tokio/src/io/util/empty.rs index f964d18e6ef..77db60e40b4 100644 --- a/tokio/src/io/util/empty.rs +++ b/tokio/src/io/util/empty.rs @@ -50,16 +50,18 @@ impl AsyncRead for Empty { #[inline] fn poll_read( self: Pin<&mut Self>, - _: &mut Context<'_>, + cx: &mut Context<'_>, _: &mut ReadBuf<'_>, ) -> Poll> { + ready!(poll_proceed_and_make_progress(cx)); Poll::Ready(Ok(())) } } impl AsyncBufRead for Empty { #[inline] - fn poll_fill_buf(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + ready!(poll_proceed_and_make_progress(cx)); Poll::Ready(Ok(&[])) } @@ -73,6 +75,20 @@ impl fmt::Debug for Empty { } } +cfg_coop! { + fn poll_proceed_and_make_progress(cx: &mut Context<'_>) -> Poll<()> { + let coop = ready!(crate::coop::poll_proceed(cx)); + coop.made_progress(); + Poll::Ready(()) + } +} + +cfg_not_coop! { + fn poll_proceed_and_make_progress(_: &mut Context<'_>) -> Poll<()> { + Poll::Ready(()) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/tokio/tests/io_util_empty.rs b/tokio/tests/io_util_empty.rs new file mode 100644 index 00000000000..e49cd17fcd5 --- /dev/null +++ b/tokio/tests/io_util_empty.rs @@ -0,0 +1,32 @@ +#![cfg(feature = "full")] +use tokio::io::{AsyncBufReadExt, AsyncReadExt}; + +#[tokio::test] +async fn empty_read_is_cooperative() { + tokio::select! { + biased; + + _ = async { + loop { + let mut buf = [0u8; 4096]; + let _ = tokio::io::empty().read(&mut buf).await; + } + } => {}, + _ = tokio::task::yield_now() => {} + } +} + +#[tokio::test] +async fn empty_buf_reads_are_cooperative() { + tokio::select! 
{ + biased; + + _ = async { + loop { + let mut buf = String::new(); + let _ = tokio::io::empty().read_line(&mut buf).await; + } + } => {}, + _ = tokio::task::yield_now() => {} + } +} From 4b6bb1d9a790b0b81e56d4d852944a2a08b1f70d Mon Sep 17 00:00:00 2001 From: Toby Lawrence Date: Fri, 10 Dec 2021 13:16:17 -0500 Subject: [PATCH 21/59] chore(util): start v0.7 release cycle (#4313) * chore(util): start v0.7 release cycle Signed-off-by: Toby Lawrence --- benches/Cargo.toml | 2 +- examples/Cargo.toml | 2 +- tokio-stream/Cargo.toml | 2 +- tokio-util/Cargo.toml | 5 +++-- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/benches/Cargo.toml b/benches/Cargo.toml index eb2784dc50f..2b98cfd3934 100644 --- a/benches/Cargo.toml +++ b/benches/Cargo.toml @@ -9,7 +9,7 @@ tokio = { version = "1.5.0", path = "../tokio", features = ["full"] } bencher = "0.1.5" [dev-dependencies] -tokio-util = { version = "0.6.6", path = "../tokio-util", features = ["full"] } +tokio-util = { version = "0.7.0", path = "../tokio-util", features = ["full"] } tokio-stream = { path = "../tokio-stream" } [target.'cfg(unix)'.dependencies] diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 1d155a2b17b..7491c81c831 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -8,7 +8,7 @@ edition = "2018" # [dependencies] instead. 
[dev-dependencies] tokio = { version = "1.0.0", path = "../tokio",features = ["full", "tracing"] } -tokio-util = { version = "0.6.3", path = "../tokio-util",features = ["full"] } +tokio-util = { version = "0.7.0", path = "../tokio-util",features = ["full"] } tokio-stream = { version = "0.1", path = "../tokio-stream" } tracing = "0.1" diff --git a/tokio-stream/Cargo.toml b/tokio-stream/Cargo.toml index 5b2535a3371..91a86bbbba5 100644 --- a/tokio-stream/Cargo.toml +++ b/tokio-stream/Cargo.toml @@ -29,7 +29,7 @@ signal = ["tokio/signal"] futures-core = { version = "0.3.0" } pin-project-lite = "0.2.0" tokio = { version = "1.8.0", path = "../tokio", features = ["sync"] } -tokio-util = { version = "0.6.3", path = "../tokio-util", optional = true } +tokio-util = { version = "0.7.0", path = "../tokio-util", optional = true } [dev-dependencies] tokio = { version = "1.2.0", path = "../tokio", features = ["full", "test-util"] } diff --git a/tokio-util/Cargo.toml b/tokio-util/Cargo.toml index 85df32c2655..00fc018cdce 100644 --- a/tokio-util/Cargo.toml +++ b/tokio-util/Cargo.toml @@ -3,8 +3,8 @@ name = "tokio-util" # When releasing to crates.io: # - Remove path dependencies # - Update CHANGELOG.md. -# - Create "tokio-util-0.6.x" git tag. -version = "0.6.9" +# - Create "tokio-util-0.7.x" git tag. +version = "0.7.0" edition = "2018" rust-version = "1.46" authors = ["Tokio Contributors "] @@ -15,6 +15,7 @@ description = """ Additional utilities for working with Tokio. """ categories = ["asynchronous"] +publish = false [features] # No features on by default From 4e3268d222423e874f5bbfa67e20f773da3c025f Mon Sep 17 00:00:00 2001 From: Zahari Dichev Date: Wed, 15 Dec 2021 00:04:19 +0200 Subject: [PATCH 22/59] tracing: instrument more resources (#4302) This PR adds instrumentation to more resources from the sync package. The new instrumentation requires the `tokio_unstable` feature flag to enable. 
--- tokio/src/macros/cfg.rs | 2 +- tokio/src/macros/trace.rs | 9 +- tokio/src/sync/barrier.rs | 63 +++++ tokio/src/sync/batch_semaphore.rs | 152 ++++++++++- tokio/src/sync/mutex.rs | 147 +++++++++- tokio/src/sync/oneshot.rs | 155 ++++++++++- tokio/src/sync/rwlock.rs | 257 +++++++++++++++++- tokio/src/sync/rwlock/owned_read_guard.rs | 21 ++ tokio/src/sync/rwlock/owned_write_guard.rs | 47 +++- .../sync/rwlock/owned_write_guard_mapped.rs | 20 ++ tokio/src/sync/rwlock/read_guard.rs | 21 ++ tokio/src/sync/rwlock/write_guard.rs | 46 +++- tokio/src/sync/rwlock/write_guard_mapped.rs | 21 ++ tokio/src/sync/semaphore.rs | 92 ++++++- tokio/src/time/driver/sleep.rs | 115 ++++---- tokio/src/time/interval.rs | 57 +++- tokio/src/util/trace.rs | 58 ++++ 17 files changed, 1185 insertions(+), 98 deletions(-) diff --git a/tokio/src/macros/cfg.rs b/tokio/src/macros/cfg.rs index 3afc0402374..4ab13c2c11c 100644 --- a/tokio/src/macros/cfg.rs +++ b/tokio/src/macros/cfg.rs @@ -368,7 +368,7 @@ macro_rules! cfg_trace { #[cfg_attr(docsrs, doc(cfg(feature = "tracing")))] $item )* - } + }; } macro_rules! cfg_not_trace { diff --git a/tokio/src/macros/trace.rs b/tokio/src/macros/trace.rs index 31dde2f255a..80a257e1899 100644 --- a/tokio/src/macros/trace.rs +++ b/tokio/src/macros/trace.rs @@ -1,9 +1,8 @@ cfg_trace! { macro_rules! trace_op { - ($name:literal, $readiness:literal, $parent:expr) => { + ($name:expr, $readiness:literal) => { tracing::trace!( target: "runtime::resource::poll_op", - parent: $parent, op_name = $name, is_ready = $readiness ); @@ -11,14 +10,14 @@ cfg_trace! { } macro_rules! 
trace_poll_op { - ($name:literal, $poll:expr, $parent:expr $(,)*) => { + ($name:expr, $poll:expr $(,)*) => { match $poll { std::task::Poll::Ready(t) => { - trace_op!($name, true, $parent); + trace_op!($name, true); std::task::Poll::Ready(t) } std::task::Poll::Pending => { - trace_op!($name, false, $parent); + trace_op!($name, false); return std::task::Poll::Pending; } } diff --git a/tokio/src/sync/barrier.rs b/tokio/src/sync/barrier.rs index 0e39dac8bb5..dfc76a40ebf 100644 --- a/tokio/src/sync/barrier.rs +++ b/tokio/src/sync/barrier.rs @@ -1,5 +1,7 @@ use crate::loom::sync::Mutex; use crate::sync::watch; +#[cfg(all(tokio_unstable, feature = "tracing"))] +use crate::util::trace; /// A barrier enables multiple tasks to synchronize the beginning of some computation. /// @@ -41,6 +43,8 @@ pub struct Barrier { state: Mutex, wait: watch::Receiver, n: usize, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, } #[derive(Debug)] @@ -55,6 +59,7 @@ impl Barrier { /// /// A barrier will block `n`-1 tasks which call [`Barrier::wait`] and then wake up all /// tasks at once when the `n`th task calls `wait`. 
+ #[track_caller] pub fn new(mut n: usize) -> Barrier { let (waker, wait) = crate::sync::watch::channel(0); @@ -65,6 +70,32 @@ impl Barrier { n = 1; } + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = { + let location = std::panic::Location::caller(); + let resource_span = tracing::trace_span!( + "runtime.resource", + concrete_type = "Barrier", + kind = "Sync", + loc.file = location.file(), + loc.line = location.line(), + loc.col = location.column(), + ); + + resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + size = n, + ); + + tracing::trace!( + target: "runtime::resource::state_update", + arrived = 0, + ) + }); + resource_span + }; + Barrier { state: Mutex::new(BarrierState { waker, @@ -73,6 +104,8 @@ impl Barrier { }), n, wait, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: resource_span, } } @@ -85,6 +118,20 @@ impl Barrier { /// [`BarrierWaitResult::is_leader`] when returning from this function, and all other tasks /// will receive a result that will return `false` from `is_leader`. pub async fn wait(&self) -> BarrierWaitResult { + #[cfg(all(tokio_unstable, feature = "tracing"))] + return trace::async_op( + || self.wait_internal(), + self.resource_span.clone(), + "Barrier::wait", + "poll", + false, + ) + .await; + + #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] + return self.wait_internal().await; + } + async fn wait_internal(&self) -> BarrierWaitResult { // NOTE: we are taking a _synchronous_ lock here. // It is okay to do so because the critical section is fast and never yields, so it cannot // deadlock even if another future is concurrently holding the lock. 
@@ -96,7 +143,23 @@ impl Barrier { let mut state = self.state.lock(); let generation = state.generation; state.arrived += 1; + #[cfg(all(tokio_unstable, feature = "tracing"))] + tracing::trace!( + target: "runtime::resource::state_update", + arrived = 1, + arrived.op = "add", + ); + #[cfg(all(tokio_unstable, feature = "tracing"))] + tracing::trace!( + target: "runtime::resource::async_op::state_update", + arrived = true, + ); if state.arrived == self.n { + #[cfg(all(tokio_unstable, feature = "tracing"))] + tracing::trace!( + target: "runtime::resource::async_op::state_update", + is_leader = true, + ); // we are the leader for this generation // wake everyone, increment the generation, and return state diff --git a/tokio/src/sync/batch_semaphore.rs b/tokio/src/sync/batch_semaphore.rs index b5c39d2a05d..4f5effff319 100644 --- a/tokio/src/sync/batch_semaphore.rs +++ b/tokio/src/sync/batch_semaphore.rs @@ -19,6 +19,8 @@ use crate::loom::cell::UnsafeCell; use crate::loom::sync::atomic::AtomicUsize; use crate::loom::sync::{Mutex, MutexGuard}; use crate::util::linked_list::{self, LinkedList}; +#[cfg(all(tokio_unstable, feature = "tracing"))] +use crate::util::trace; use crate::util::WakeList; use std::future::Future; @@ -35,6 +37,8 @@ pub(crate) struct Semaphore { waiters: Mutex, /// The current number of available permits in the semaphore. permits: AtomicUsize, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, } struct Waitlist { @@ -101,6 +105,9 @@ struct Waiter { /// use `UnsafeCell` internally. pointers: linked_list::Pointers, + #[cfg(all(tokio_unstable, feature = "tracing"))] + ctx: trace::AsyncOpTracingCtx, + /// Should not be `Unpin`. 
_p: PhantomPinned, } @@ -129,12 +136,34 @@ impl Semaphore { "a semaphore may not have more than MAX_PERMITS permits ({})", Self::MAX_PERMITS ); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = { + let resource_span = tracing::trace_span!( + "runtime.resource", + concrete_type = "Semaphore", + kind = "Sync", + is_internal = true + ); + + resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + permits = permits, + permits.op = "override", + ) + }); + resource_span + }; + Self { permits: AtomicUsize::new(permits << Self::PERMIT_SHIFT), waiters: Mutex::new(Waitlist { queue: LinkedList::new(), closed: false, }), + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } @@ -156,6 +185,8 @@ impl Semaphore { queue: LinkedList::new(), closed: false, }), + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span::none(), } } @@ -224,7 +255,10 @@ impl Semaphore { let next = curr - num_permits; match self.permits.compare_exchange(curr, next, AcqRel, Acquire) { - Ok(_) => return Ok(()), + Ok(_) => { + // TODO: Instrument once issue has been solved} + return Ok(()); + } Err(actual) => curr = actual, } } @@ -283,6 +317,17 @@ impl Semaphore { rem, Self::MAX_PERMITS ); + + // add remaining permits back + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + permits = rem, + permits.op = "add", + ) + }); + rem = 0; } @@ -347,6 +392,20 @@ impl Semaphore { acquired += acq; if remaining == 0 { if !queued { + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + permits = acquired, + permits.op = "sub", + ); + tracing::trace!( + target: "runtime::resource::async_op::state_update", + permits_obtained = acquired, + permits.op = "add", + ) + }); + return Ready(Ok(())); } else if lock.is_none() { break 
self.waiters.lock(); @@ -362,6 +421,15 @@ impl Semaphore { return Ready(Err(AcquireError::closed())); } + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + permits = acquired, + permits.op = "sub", + ) + }); + if node.assign_permits(&mut acquired) { self.add_permits_locked(acquired, waiters); return Ready(Ok(())); @@ -406,11 +474,16 @@ impl fmt::Debug for Semaphore { } impl Waiter { - fn new(num_permits: u32) -> Self { + fn new( + num_permits: u32, + #[cfg(all(tokio_unstable, feature = "tracing"))] ctx: trace::AsyncOpTracingCtx, + ) -> Self { Waiter { waker: UnsafeCell::new(None), state: AtomicUsize::new(num_permits as usize), pointers: linked_list::Pointers::new(), + #[cfg(all(tokio_unstable, feature = "tracing"))] + ctx, _p: PhantomPinned, } } @@ -426,6 +499,14 @@ impl Waiter { match self.state.compare_exchange(curr, next, AcqRel, Acquire) { Ok(_) => { *n -= assign; + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.ctx.async_op_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::async_op::state_update", + permits_obtained = assign, + permits.op = "add", + ); + }); return next == 0; } Err(actual) => curr = actual, @@ -438,12 +519,26 @@ impl Future for Acquire<'_> { type Output = Result<(), AcquireError>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - // First, ensure the current task has enough budget to proceed. 
- let coop = ready!(crate::coop::poll_proceed(cx)); + #[cfg(all(tokio_unstable, feature = "tracing"))] + let _resource_span = self.node.ctx.resource_span.clone().entered(); + #[cfg(all(tokio_unstable, feature = "tracing"))] + let _async_op_span = self.node.ctx.async_op_span.clone().entered(); + #[cfg(all(tokio_unstable, feature = "tracing"))] + let _async_op_poll_span = self.node.ctx.async_op_poll_span.clone().entered(); let (node, semaphore, needed, queued) = self.project(); - match semaphore.poll_acquire(cx, needed, node, *queued) { + // First, ensure the current task has enough budget to proceed. + #[cfg(all(tokio_unstable, feature = "tracing"))] + let coop = ready!(trace_poll_op!( + "poll_acquire", + crate::coop::poll_proceed(cx), + )); + + #[cfg(not(all(tokio_unstable, feature = "tracing")))] + let coop = ready!(crate::coop::poll_proceed(cx)); + + let result = match semaphore.poll_acquire(cx, needed, node, *queued) { Pending => { *queued = true; Pending @@ -454,18 +549,59 @@ impl Future for Acquire<'_> { *queued = false; Ready(Ok(())) } - } + }; + + #[cfg(all(tokio_unstable, feature = "tracing"))] + return trace_poll_op!("poll_acquire", result); + + #[cfg(not(all(tokio_unstable, feature = "tracing")))] + return result; } } impl<'a> Acquire<'a> { fn new(semaphore: &'a Semaphore, num_permits: u32) -> Self { - Self { + #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] + return Self { node: Waiter::new(num_permits), semaphore, num_permits, queued: false, - } + }; + + #[cfg(all(tokio_unstable, feature = "tracing"))] + return semaphore.resource_span.in_scope(|| { + let async_op_span = + tracing::trace_span!("runtime.resource.async_op", source = "Acquire::new"); + let async_op_poll_span = async_op_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::async_op::state_update", + permits_requested = num_permits, + permits.op = "override", + ); + + tracing::trace!( + target: "runtime::resource::async_op::state_update", + permits_obtained = 0 as 
usize, + permits.op = "override", + ); + + tracing::trace_span!("runtime.resource.async_op.poll") + }); + + let ctx = trace::AsyncOpTracingCtx { + async_op_span, + async_op_poll_span, + resource_span: semaphore.resource_span.clone(), + }; + + Self { + node: Waiter::new(num_permits, ctx), + semaphore, + num_permits, + queued: false, + } + }); } fn project(self: Pin<&mut Self>) -> (Pin<&mut Waiter>, &Semaphore, u32, &mut bool) { diff --git a/tokio/src/sync/mutex.rs b/tokio/src/sync/mutex.rs index 4d9f9886d76..2476726bdea 100644 --- a/tokio/src/sync/mutex.rs +++ b/tokio/src/sync/mutex.rs @@ -1,6 +1,8 @@ #![cfg_attr(not(feature = "sync"), allow(unreachable_pub, dead_code))] use crate::sync::batch_semaphore as semaphore; +#[cfg(all(tokio_unstable, feature = "tracing"))] +use crate::util::trace; use std::cell::UnsafeCell; use std::error::Error; @@ -124,6 +126,8 @@ use std::{fmt, marker, mem}; /// [`Send`]: trait@std::marker::Send /// [`lock`]: method@Mutex::lock pub struct Mutex { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, s: semaphore::Semaphore, c: UnsafeCell, } @@ -138,6 +142,8 @@ pub struct Mutex { /// The lock is automatically released whenever the guard is dropped, at which /// point `lock` will succeed yet again. 
pub struct MutexGuard<'a, T: ?Sized> { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, lock: &'a Mutex, } @@ -157,6 +163,8 @@ pub struct MutexGuard<'a, T: ?Sized> { /// /// [`Arc`]: std::sync::Arc pub struct OwnedMutexGuard { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, lock: Arc>, } @@ -242,13 +250,42 @@ impl Mutex { /// /// let lock = Mutex::new(5); /// ``` + #[track_caller] pub fn new(t: T) -> Self where T: Sized, { + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = { + let location = std::panic::Location::caller(); + + tracing::trace_span!( + "runtime.resource", + concrete_type = "Mutex", + kind = "Sync", + loc.file = location.file(), + loc.line = location.line(), + loc.col = location.column(), + ) + }; + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let s = resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + locked = false, + ); + semaphore::Semaphore::new(1) + }); + + #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] + let s = semaphore::Semaphore::new(1); + Self { c: UnsafeCell::new(t), - s: semaphore::Semaphore::new(1), + s, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } @@ -270,6 +307,8 @@ impl Mutex { Self { c: UnsafeCell::new(t), s: semaphore::Semaphore::const_new(1), + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span::none(), } } @@ -297,8 +336,32 @@ impl Mutex { /// } /// ``` pub async fn lock(&self) -> MutexGuard<'_, T> { + #[cfg(all(tokio_unstable, feature = "tracing"))] + trace::async_op( + || self.acquire(), + self.resource_span.clone(), + "Mutex::lock", + "poll", + false, + ) + .await; + + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + locked = true, + ); + }); + + #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] 
self.acquire().await; - MutexGuard { lock: self } + + MutexGuard { + lock: self, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: self.resource_span.clone(), + } } /// Blocking lock this mutex. When the lock has been acquired, function returns a @@ -368,8 +431,35 @@ impl Mutex { /// /// [`Arc`]: std::sync::Arc pub async fn lock_owned(self: Arc) -> OwnedMutexGuard { + #[cfg(all(tokio_unstable, feature = "tracing"))] + trace::async_op( + || self.acquire(), + self.resource_span.clone(), + "Mutex::lock_owned", + "poll", + false, + ) + .await; + + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + locked = true, + ); + }); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = self.resource_span.clone(); + + #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] self.acquire().await; - OwnedMutexGuard { lock: self } + + OwnedMutexGuard { + lock: self, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, + } } async fn acquire(&self) { @@ -399,7 +489,21 @@ impl Mutex { /// ``` pub fn try_lock(&self) -> Result, TryLockError> { match self.s.try_acquire(1) { - Ok(_) => Ok(MutexGuard { lock: self }), + Ok(_) => { + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + locked = true, + ); + }); + + Ok(MutexGuard { + lock: self, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: self.resource_span.clone(), + }) + } Err(_) => Err(TryLockError(())), } } @@ -454,7 +558,24 @@ impl Mutex { /// # } pub fn try_lock_owned(self: Arc) -> Result, TryLockError> { match self.s.try_acquire(1) { - Ok(_) => Ok(OwnedMutexGuard { lock: self }), + Ok(_) => { + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + locked = true, + ); + 
}); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = self.resource_span.clone(); + + Ok(OwnedMutexGuard { + lock: self, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, + }) + } Err(_) => Err(TryLockError(())), } } @@ -637,7 +758,14 @@ impl<'a, T: ?Sized> MutexGuard<'a, T> { impl Drop for MutexGuard<'_, T> { fn drop(&mut self) { - self.lock.s.release(1) + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + locked = false, + ); + }); + self.lock.s.release(1); } } @@ -699,6 +827,13 @@ impl OwnedMutexGuard { impl Drop for OwnedMutexGuard { fn drop(&mut self) { + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + locked = false, + ); + }); self.lock.s.release(1) } } diff --git a/tokio/src/sync/oneshot.rs b/tokio/src/sync/oneshot.rs index 08a2d9e49d7..cfc92259d39 100644 --- a/tokio/src/sync/oneshot.rs +++ b/tokio/src/sync/oneshot.rs @@ -122,6 +122,8 @@ use crate::loom::cell::UnsafeCell; use crate::loom::sync::atomic::AtomicUsize; use crate::loom::sync::Arc; +#[cfg(all(tokio_unstable, feature = "tracing"))] +use crate::util::trace; use std::fmt; use std::future::Future; @@ -215,6 +217,8 @@ use std::task::{Context, Poll, Waker}; #[derive(Debug)] pub struct Sender { inner: Option>>, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, } /// Receives a value from the associated [`Sender`]. 
@@ -305,6 +309,12 @@ pub struct Sender { #[derive(Debug)] pub struct Receiver { inner: Option>>, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, + #[cfg(all(tokio_unstable, feature = "tracing"))] + async_op_span: tracing::Span, + #[cfg(all(tokio_unstable, feature = "tracing"))] + async_op_poll_span: tracing::Span, } pub mod error { @@ -442,7 +452,56 @@ struct State(usize); /// } /// } /// ``` +#[track_caller] pub fn channel() -> (Sender, Receiver) { + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = { + let location = std::panic::Location::caller(); + + let resource_span = tracing::trace_span!( + "runtime.resource", + concrete_type = "Sender|Receiver", + kind = "Sync", + loc.file = location.file(), + loc.line = location.line(), + loc.col = location.column(), + ); + + resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + tx_dropped = false, + tx_dropped.op = "override", + ) + }); + + resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + rx_dropped = false, + rx_dropped.op = "override", + ) + }); + + resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + value_sent = false, + value_sent.op = "override", + ) + }); + + resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + value_received = false, + value_received.op = "override", + ) + }); + + resource_span + }; + let inner = Arc::new(Inner { state: AtomicUsize::new(State::new().as_usize()), value: UnsafeCell::new(None), @@ -452,8 +511,27 @@ pub fn channel() -> (Sender, Receiver) { let tx = Sender { inner: Some(inner.clone()), + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: resource_span.clone(), + }; + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let async_op_span = resource_span + .in_scope(|| tracing::trace_span!("runtime.resource.async_op", source = "Receiver::await")); + + 
#[cfg(all(tokio_unstable, feature = "tracing"))] + let async_op_poll_span = + async_op_span.in_scope(|| tracing::trace_span!("runtime.resource.async_op.poll")); + + let rx = Receiver { + inner: Some(inner), + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: resource_span, + #[cfg(all(tokio_unstable, feature = "tracing"))] + async_op_span, + #[cfg(all(tokio_unstable, feature = "tracing"))] + async_op_poll_span, }; - let rx = Receiver { inner: Some(inner) }; (tx, rx) } @@ -525,6 +603,15 @@ impl Sender { } } + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + value_sent = true, + value_sent.op = "override", + ) + }); + Ok(()) } @@ -598,7 +685,20 @@ impl Sender { pub async fn closed(&mut self) { use crate::future::poll_fn; - poll_fn(|cx| self.poll_closed(cx)).await + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = self.resource_span.clone(); + #[cfg(all(tokio_unstable, feature = "tracing"))] + let closed = trace::async_op( + || poll_fn(|cx| self.poll_closed(cx)), + resource_span, + "Sender::closed", + "poll_closed", + false, + ); + #[cfg(not(all(tokio_unstable, feature = "tracing")))] + let closed = poll_fn(|cx| self.poll_closed(cx)); + + closed.await } /// Returns `true` if the associated [`Receiver`] handle has been dropped. 
@@ -728,6 +828,14 @@ impl Drop for Sender { fn drop(&mut self) { if let Some(inner) = self.inner.as_ref() { inner.complete(); + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + tx_dropped = true, + tx_dropped.op = "override", + ) + }); } } } @@ -795,6 +903,14 @@ impl Receiver { pub fn close(&mut self) { if let Some(inner) = self.inner.as_ref() { inner.close(); + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + rx_dropped = true, + rx_dropped.op = "override", + ) + }); } } @@ -872,7 +988,17 @@ impl Receiver { // `UnsafeCell`. Therefore, it is now safe for us to access the // cell. match unsafe { inner.consume_value() } { - Some(value) => Ok(value), + Some(value) => { + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + value_received = true, + value_received.op = "override", + ) + }); + Ok(value) + } None => Err(TryRecvError::Closed), } } else if state.is_closed() { @@ -894,6 +1020,14 @@ impl Drop for Receiver { fn drop(&mut self) { if let Some(inner) = self.inner.as_ref() { inner.close(); + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + rx_dropped = true, + rx_dropped.op = "override", + ) + }); } } } @@ -903,8 +1037,21 @@ impl Future for Receiver { fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { // If `inner` is `None`, then `poll()` has already completed. 
+ #[cfg(all(tokio_unstable, feature = "tracing"))] + let _res_span = self.resource_span.clone().entered(); + #[cfg(all(tokio_unstable, feature = "tracing"))] + let _ao_span = self.async_op_span.clone().entered(); + #[cfg(all(tokio_unstable, feature = "tracing"))] + let _ao_poll_span = self.async_op_poll_span.clone().entered(); + let ret = if let Some(inner) = self.as_ref().get_ref().inner.as_ref() { - ready!(inner.poll_recv(cx))? + #[cfg(all(tokio_unstable, feature = "tracing"))] + let res = ready!(trace_poll_op!("poll_recv", inner.poll_recv(cx)))?; + + #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] + let res = ready!(inner.poll_recv(cx))?; + + res } else { panic!("called after complete"); }; diff --git a/tokio/src/sync/rwlock.rs b/tokio/src/sync/rwlock.rs index 120bc72b848..6991f0e8b6a 100644 --- a/tokio/src/sync/rwlock.rs +++ b/tokio/src/sync/rwlock.rs @@ -1,5 +1,7 @@ use crate::sync::batch_semaphore::{Semaphore, TryAcquireError}; use crate::sync::mutex::TryLockError; +#[cfg(all(tokio_unstable, feature = "tracing"))] +use crate::util::trace; use std::cell::UnsafeCell; use std::marker; use std::marker::PhantomData; @@ -86,6 +88,9 @@ const MAX_READS: u32 = 10; /// [_write-preferring_]: https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock#Priority_policies #[derive(Debug)] pub struct RwLock { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, + // maximum number of concurrent readers mr: u32, @@ -197,14 +202,55 @@ impl RwLock { /// /// let lock = RwLock::new(5); /// ``` + #[track_caller] pub fn new(value: T) -> RwLock where T: Sized, { + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = { + let location = std::panic::Location::caller(); + let resource_span = tracing::trace_span!( + "runtime.resource", + concrete_type = "RwLock", + kind = "Sync", + loc.file = location.file(), + loc.line = location.line(), + loc.col = location.column(), + ); + + resource_span.in_scope(|| { + tracing::trace!( + 
target: "runtime::resource::state_update", + max_readers = MAX_READS, + ); + + tracing::trace!( + target: "runtime::resource::state_update", + write_locked = false, + ); + + tracing::trace!( + target: "runtime::resource::state_update", + current_readers = 0, + ); + }); + + resource_span + }; + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let s = resource_span.in_scope(|| Semaphore::new(MAX_READS as usize)); + + #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] + let s = Semaphore::new(MAX_READS as usize); + RwLock { mr: MAX_READS, c: UnsafeCell::new(value), - s: Semaphore::new(MAX_READS as usize), + s, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } @@ -222,6 +268,7 @@ impl RwLock { /// # Panics /// /// Panics if `max_reads` is more than `u32::MAX >> 3`. + #[track_caller] pub fn with_max_readers(value: T, max_reads: u32) -> RwLock where T: Sized, @@ -231,10 +278,52 @@ impl RwLock { "a RwLock may not be created with more than {} readers", MAX_READS ); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = { + let location = std::panic::Location::caller(); + + let resource_span = tracing::trace_span!( + "runtime.resource", + concrete_type = "RwLock", + kind = "Sync", + loc.file = location.file(), + loc.line = location.line(), + loc.col = location.column(), + ); + + resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + max_readers = max_reads, + ); + + tracing::trace!( + target: "runtime::resource::state_update", + write_locked = false, + ); + + tracing::trace!( + target: "runtime::resource::state_update", + current_readers = 0, + ); + }); + + resource_span + }; + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let s = resource_span.in_scope(|| Semaphore::new(max_reads as usize)); + + #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] + let s = Semaphore::new(max_reads as usize); + RwLock { mr: max_reads, c: UnsafeCell::new(value), - s: 
Semaphore::new(max_reads as usize), + s, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } @@ -257,6 +346,8 @@ impl RwLock { mr: MAX_READS, c: UnsafeCell::new(value), s: Semaphore::const_new(MAX_READS as usize), + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span::none(), } } @@ -281,6 +372,8 @@ impl RwLock { mr: max_reads, c: UnsafeCell::new(value), s: Semaphore::const_new(max_reads as usize), + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span::none(), } } @@ -330,15 +423,39 @@ impl RwLock { ///} /// ``` pub async fn read(&self) -> RwLockReadGuard<'_, T> { - self.s.acquire(1).await.unwrap_or_else(|_| { + #[cfg(all(tokio_unstable, feature = "tracing"))] + let inner = trace::async_op( + || self.s.acquire(1), + self.resource_span.clone(), + "RwLock::read", + "poll", + false, + ); + + #[cfg(not(all(tokio_unstable, feature = "tracing")))] + let inner = self.s.acquire(1); + + inner.await.unwrap_or_else(|_| { // The semaphore was closed. but, we never explicitly close it, and we have a // handle to it through the Arc, which means that this can never happen. 
unreachable!() }); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + current_readers = 1, + current_readers.op = "add", + ) + }); + RwLockReadGuard { s: &self.s, data: self.c.get(), marker: marker::PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: self.resource_span.clone(), } } @@ -394,15 +511,42 @@ impl RwLock { ///} /// ``` pub async fn read_owned(self: Arc) -> OwnedRwLockReadGuard { - self.s.acquire(1).await.unwrap_or_else(|_| { + #[cfg(all(tokio_unstable, feature = "tracing"))] + let inner = trace::async_op( + || self.s.acquire(1), + self.resource_span.clone(), + "RwLock::read_owned", + "poll", + false, + ); + + #[cfg(not(all(tokio_unstable, feature = "tracing")))] + let inner = self.s.acquire(1); + + inner.await.unwrap_or_else(|_| { // The semaphore was closed. but, we never explicitly close it, and we have a // handle to it through the Arc, which means that this can never happen. 
unreachable!() }); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + current_readers = 1, + current_readers.op = "add", + ) + }); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = self.resource_span.clone(); + OwnedRwLockReadGuard { data: self.c.get(), lock: ManuallyDrop::new(self), _p: PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } @@ -445,10 +589,21 @@ impl RwLock { Err(TryAcquireError::Closed) => unreachable!(), } + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + current_readers = 1, + current_readers.op = "add", + ) + }); + Ok(RwLockReadGuard { s: &self.s, data: self.c.get(), marker: marker::PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: self.resource_span.clone(), }) } @@ -497,10 +652,24 @@ impl RwLock { Err(TryAcquireError::Closed) => unreachable!(), } + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + current_readers = 1, + current_readers.op = "add", + ) + }); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = self.resource_span.clone(); + Ok(OwnedRwLockReadGuard { data: self.c.get(), lock: ManuallyDrop::new(self), _p: PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, }) } @@ -533,16 +702,40 @@ impl RwLock { ///} /// ``` pub async fn write(&self) -> RwLockWriteGuard<'_, T> { - self.s.acquire(self.mr).await.unwrap_or_else(|_| { + #[cfg(all(tokio_unstable, feature = "tracing"))] + let inner = trace::async_op( + || self.s.acquire(self.mr), + self.resource_span.clone(), + "RwLock::write", + "poll", + false, + ); + + #[cfg(not(all(tokio_unstable, feature = "tracing")))] + let inner = self.s.acquire(self.mr); + + 
inner.await.unwrap_or_else(|_| { // The semaphore was closed. but, we never explicitly close it, and we have a // handle to it through the Arc, which means that this can never happen. unreachable!() }); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + write_locked = true, + write_locked.op = "override", + ) + }); + RwLockWriteGuard { permits_acquired: self.mr, s: &self.s, data: self.c.get(), marker: marker::PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: self.resource_span.clone(), } } @@ -582,16 +775,43 @@ impl RwLock { ///} /// ``` pub async fn write_owned(self: Arc) -> OwnedRwLockWriteGuard { - self.s.acquire(self.mr).await.unwrap_or_else(|_| { + #[cfg(all(tokio_unstable, feature = "tracing"))] + let inner = trace::async_op( + || self.s.acquire(self.mr), + self.resource_span.clone(), + "RwLock::write_owned", + "poll", + false, + ); + + #[cfg(not(all(tokio_unstable, feature = "tracing")))] + let inner = self.s.acquire(self.mr); + + inner.await.unwrap_or_else(|_| { // The semaphore was closed. but, we never explicitly close it, and we have a // handle to it through the Arc, which means that this can never happen. 
unreachable!() }); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + write_locked = true, + write_locked.op = "override", + ) + }); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = self.resource_span.clone(); + OwnedRwLockWriteGuard { permits_acquired: self.mr, data: self.c.get(), lock: ManuallyDrop::new(self), _p: PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } @@ -625,11 +845,22 @@ impl RwLock { Err(TryAcquireError::Closed) => unreachable!(), } + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + write_locked = true, + write_locked.op = "override", + ) + }); + Ok(RwLockWriteGuard { permits_acquired: self.mr, s: &self.s, data: self.c.get(), marker: marker::PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: self.resource_span.clone(), }) } @@ -670,11 +901,25 @@ impl RwLock { Err(TryAcquireError::Closed) => unreachable!(), } + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + write_locked = true, + write_locked.op = "override", + ) + }); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = self.resource_span.clone(); + Ok(OwnedRwLockWriteGuard { permits_acquired: self.mr, data: self.c.get(), lock: ManuallyDrop::new(self), _p: PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, }) } diff --git a/tokio/src/sync/rwlock/owned_read_guard.rs b/tokio/src/sync/rwlock/owned_read_guard.rs index 1881295846e..27b71bd988b 100644 --- a/tokio/src/sync/rwlock/owned_read_guard.rs +++ b/tokio/src/sync/rwlock/owned_read_guard.rs @@ -15,6 +15,8 @@ use std::sync::Arc; /// [`read_owned`]: method@crate::sync::RwLock::read_owned /// [`RwLock`]: 
struct@crate::sync::RwLock pub struct OwnedRwLockReadGuard { + #[cfg(all(tokio_unstable, feature = "tracing"))] + pub(super) resource_span: tracing::Span, // ManuallyDrop allows us to destructure into this field without running the destructor. pub(super) lock: ManuallyDrop>>, pub(super) data: *const U, @@ -56,12 +58,17 @@ impl OwnedRwLockReadGuard { { let data = f(&*this) as *const V; let lock = unsafe { ManuallyDrop::take(&mut this.lock) }; + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = this.resource_span.clone(); // NB: Forget to avoid drop impl from being called. mem::forget(this); + OwnedRwLockReadGuard { lock: ManuallyDrop::new(lock), data, _p: PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } @@ -105,12 +112,17 @@ impl OwnedRwLockReadGuard { None => return Err(this), }; let lock = unsafe { ManuallyDrop::take(&mut this.lock) }; + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = this.resource_span.clone(); // NB: Forget to avoid drop impl from being called. 
mem::forget(this); + Ok(OwnedRwLockReadGuard { lock: ManuallyDrop::new(lock), data, _p: PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, }) } } @@ -145,5 +157,14 @@ impl Drop for OwnedRwLockReadGuard { fn drop(&mut self) { self.lock.s.release(1); unsafe { ManuallyDrop::drop(&mut self.lock) }; + + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + current_readers = 1, + current_readers.op = "sub", + ) + }); } } diff --git a/tokio/src/sync/rwlock/owned_write_guard.rs b/tokio/src/sync/rwlock/owned_write_guard.rs index 0a78d28e903..dbedab4cbb2 100644 --- a/tokio/src/sync/rwlock/owned_write_guard.rs +++ b/tokio/src/sync/rwlock/owned_write_guard.rs @@ -16,6 +16,8 @@ use std::sync::Arc; /// [`write_owned`]: method@crate::sync::RwLock::write_owned /// [`RwLock`]: struct@crate::sync::RwLock pub struct OwnedRwLockWriteGuard { + #[cfg(all(tokio_unstable, feature = "tracing"))] + pub(super) resource_span: tracing::Span, pub(super) permits_acquired: u32, // ManuallyDrop allows us to destructure into this field without running the destructor. pub(super) lock: ManuallyDrop>>, @@ -64,13 +66,18 @@ impl OwnedRwLockWriteGuard { let data = f(&mut *this) as *mut U; let lock = unsafe { ManuallyDrop::take(&mut this.lock) }; let permits_acquired = this.permits_acquired; + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = this.resource_span.clone(); // NB: Forget to avoid drop impl from being called. 
mem::forget(this); + OwnedRwLockMappedWriteGuard { permits_acquired, lock: ManuallyDrop::new(lock), data, _p: PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } @@ -123,13 +130,19 @@ impl OwnedRwLockWriteGuard { }; let permits_acquired = this.permits_acquired; let lock = unsafe { ManuallyDrop::take(&mut this.lock) }; + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = this.resource_span.clone(); + // NB: Forget to avoid drop impl from being called. mem::forget(this); + Ok(OwnedRwLockMappedWriteGuard { permits_acquired, lock: ManuallyDrop::new(lock), data, _p: PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, }) } @@ -181,15 +194,39 @@ impl OwnedRwLockWriteGuard { pub fn downgrade(mut self) -> OwnedRwLockReadGuard { let lock = unsafe { ManuallyDrop::take(&mut self.lock) }; let data = self.data; + let to_release = (self.permits_acquired - 1) as usize; // Release all but one of the permits held by the write guard - lock.s.release((self.permits_acquired - 1) as usize); + lock.s.release(to_release); + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + write_locked = false, + write_locked.op = "override", + ) + }); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + current_readers = 1, + current_readers.op = "add", + ) + }); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = self.resource_span.clone(); // NB: Forget to avoid drop impl from being called. 
mem::forget(self); + OwnedRwLockReadGuard { lock: ManuallyDrop::new(lock), data, _p: PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } } @@ -229,6 +266,14 @@ where impl Drop for OwnedRwLockWriteGuard { fn drop(&mut self) { self.lock.s.release(self.permits_acquired as usize); + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + write_locked = false, + write_locked.op = "override", + ) + }); unsafe { ManuallyDrop::drop(&mut self.lock) }; } } diff --git a/tokio/src/sync/rwlock/owned_write_guard_mapped.rs b/tokio/src/sync/rwlock/owned_write_guard_mapped.rs index d88ee01e1fd..55a24d96ac3 100644 --- a/tokio/src/sync/rwlock/owned_write_guard_mapped.rs +++ b/tokio/src/sync/rwlock/owned_write_guard_mapped.rs @@ -15,6 +15,8 @@ use std::sync::Arc; /// [mapping]: method@crate::sync::OwnedRwLockWriteGuard::map /// [`OwnedRwLockWriteGuard`]: struct@crate::sync::OwnedRwLockWriteGuard pub struct OwnedRwLockMappedWriteGuard { + #[cfg(all(tokio_unstable, feature = "tracing"))] + pub(super) resource_span: tracing::Span, pub(super) permits_acquired: u32, // ManuallyDrop allows us to destructure into this field without running the destructor. pub(super) lock: ManuallyDrop>>, @@ -63,13 +65,18 @@ impl OwnedRwLockMappedWriteGuard { let data = f(&mut *this) as *mut V; let lock = unsafe { ManuallyDrop::take(&mut this.lock) }; let permits_acquired = this.permits_acquired; + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = this.resource_span.clone(); // NB: Forget to avoid drop impl from being called. 
mem::forget(this); + OwnedRwLockMappedWriteGuard { permits_acquired, lock: ManuallyDrop::new(lock), data, _p: PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } @@ -120,13 +127,18 @@ impl OwnedRwLockMappedWriteGuard { }; let lock = unsafe { ManuallyDrop::take(&mut this.lock) }; let permits_acquired = this.permits_acquired; + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = this.resource_span.clone(); // NB: Forget to avoid drop impl from being called. mem::forget(this); + Ok(OwnedRwLockMappedWriteGuard { permits_acquired, lock: ManuallyDrop::new(lock), data, _p: PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, }) } } @@ -166,6 +178,14 @@ where impl Drop for OwnedRwLockMappedWriteGuard { fn drop(&mut self) { self.lock.s.release(self.permits_acquired as usize); + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + write_locked = false, + write_locked.op = "override", + ) + }); unsafe { ManuallyDrop::drop(&mut self.lock) }; } } diff --git a/tokio/src/sync/rwlock/read_guard.rs b/tokio/src/sync/rwlock/read_guard.rs index 090b297e4af..36921319923 100644 --- a/tokio/src/sync/rwlock/read_guard.rs +++ b/tokio/src/sync/rwlock/read_guard.rs @@ -13,6 +13,8 @@ use std::ops; /// [`read`]: method@crate::sync::RwLock::read /// [`RwLock`]: struct@crate::sync::RwLock pub struct RwLockReadGuard<'a, T: ?Sized> { + #[cfg(all(tokio_unstable, feature = "tracing"))] + pub(super) resource_span: tracing::Span, pub(super) s: &'a Semaphore, pub(super) data: *const T, pub(super) marker: marker::PhantomData<&'a T>, @@ -59,12 +61,17 @@ impl<'a, T: ?Sized> RwLockReadGuard<'a, T> { { let data = f(&*this) as *const U; let s = this.s; + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = this.resource_span.clone(); // NB: Forget to avoid drop impl from being called. 
mem::forget(this); + RwLockReadGuard { s, data, marker: marker::PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } @@ -113,12 +120,17 @@ impl<'a, T: ?Sized> RwLockReadGuard<'a, T> { None => return Err(this), }; let s = this.s; + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = this.resource_span.clone(); // NB: Forget to avoid drop impl from being called. mem::forget(this); + Ok(RwLockReadGuard { s, data, marker: marker::PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, }) } } @@ -152,5 +164,14 @@ where impl<'a, T: ?Sized> Drop for RwLockReadGuard<'a, T> { fn drop(&mut self) { self.s.release(1); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + current_readers = 1, + current_readers.op = "sub", + ) + }); } } diff --git a/tokio/src/sync/rwlock/write_guard.rs b/tokio/src/sync/rwlock/write_guard.rs index 8c80ee70db4..7cadd74c60d 100644 --- a/tokio/src/sync/rwlock/write_guard.rs +++ b/tokio/src/sync/rwlock/write_guard.rs @@ -15,6 +15,8 @@ use std::ops; /// [`write`]: method@crate::sync::RwLock::write /// [`RwLock`]: struct@crate::sync::RwLock pub struct RwLockWriteGuard<'a, T: ?Sized> { + #[cfg(all(tokio_unstable, feature = "tracing"))] + pub(super) resource_span: tracing::Span, pub(super) permits_acquired: u32, pub(super) s: &'a Semaphore, pub(super) data: *mut T, @@ -66,6 +68,8 @@ impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> { let data = f(&mut *this) as *mut U; let s = this.s; let permits_acquired = this.permits_acquired; + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = this.resource_span.clone(); // NB: Forget to avoid drop impl from being called. 
mem::forget(this); RwLockMappedWriteGuard { @@ -73,6 +77,8 @@ impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> { s, data, marker: marker::PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } @@ -129,6 +135,8 @@ impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> { }; let s = this.s; let permits_acquired = this.permits_acquired; + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = this.resource_span.clone(); // NB: Forget to avoid drop impl from being called. mem::forget(this); Ok(RwLockMappedWriteGuard { @@ -136,6 +144,8 @@ impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> { s, data, marker: marker::PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, }) } @@ -188,15 +198,38 @@ impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> { /// [`RwLock`]: struct@crate::sync::RwLock pub fn downgrade(self) -> RwLockReadGuard<'a, T> { let RwLockWriteGuard { s, data, .. } = self; - + let to_release = (self.permits_acquired - 1) as usize; // Release all but one of the permits held by the write guard - s.release((self.permits_acquired - 1) as usize); + s.release(to_release); + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + write_locked = false, + write_locked.op = "override", + ) + }); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + current_readers = 1, + current_readers.op = "add", + ) + }); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = self.resource_span.clone(); // NB: Forget to avoid drop impl from being called. 
mem::forget(self); + RwLockReadGuard { s, data, marker: marker::PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } } @@ -236,5 +269,14 @@ where impl<'a, T: ?Sized> Drop for RwLockWriteGuard<'a, T> { fn drop(&mut self) { self.s.release(self.permits_acquired as usize); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + write_locked = false, + write_locked.op = "override", + ) + }); } } diff --git a/tokio/src/sync/rwlock/write_guard_mapped.rs b/tokio/src/sync/rwlock/write_guard_mapped.rs index 3cf69de4bdd..b5c644a9e83 100644 --- a/tokio/src/sync/rwlock/write_guard_mapped.rs +++ b/tokio/src/sync/rwlock/write_guard_mapped.rs @@ -14,6 +14,8 @@ use std::ops; /// [mapping]: method@crate::sync::RwLockWriteGuard::map /// [`RwLockWriteGuard`]: struct@crate::sync::RwLockWriteGuard pub struct RwLockMappedWriteGuard<'a, T: ?Sized> { + #[cfg(all(tokio_unstable, feature = "tracing"))] + pub(super) resource_span: tracing::Span, pub(super) permits_acquired: u32, pub(super) s: &'a Semaphore, pub(super) data: *mut T, @@ -64,13 +66,18 @@ impl<'a, T: ?Sized> RwLockMappedWriteGuard<'a, T> { let data = f(&mut *this) as *mut U; let s = this.s; let permits_acquired = this.permits_acquired; + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = this.resource_span.clone(); // NB: Forget to avoid drop impl from being called. mem::forget(this); + RwLockMappedWriteGuard { permits_acquired, s, data, marker: marker::PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } @@ -126,13 +133,18 @@ impl<'a, T: ?Sized> RwLockMappedWriteGuard<'a, T> { }; let s = this.s; let permits_acquired = this.permits_acquired; + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = this.resource_span.clone(); // NB: Forget to avoid drop impl from being called. 
mem::forget(this); + Ok(RwLockMappedWriteGuard { permits_acquired, s, data, marker: marker::PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, }) } } @@ -172,5 +184,14 @@ where impl<'a, T: ?Sized> Drop for RwLockMappedWriteGuard<'a, T> { fn drop(&mut self) { self.s.release(self.permits_acquired as usize); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + write_locked = false, + write_locked.op = "override", + ) + }); } } diff --git a/tokio/src/sync/semaphore.rs b/tokio/src/sync/semaphore.rs index 839b523c4ce..860f46f3998 100644 --- a/tokio/src/sync/semaphore.rs +++ b/tokio/src/sync/semaphore.rs @@ -1,5 +1,7 @@ use super::batch_semaphore as ll; // low level implementation use super::{AcquireError, TryAcquireError}; +#[cfg(all(tokio_unstable, feature = "tracing"))] +use crate::util::trace; use std::sync::Arc; /// Counting semaphore performing asynchronous permit acquisition. @@ -77,6 +79,8 @@ use std::sync::Arc; pub struct Semaphore { /// The low level semaphore ll_sem: ll::Semaphore, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, } /// A permit from the semaphore. @@ -120,9 +124,33 @@ fn bounds() { impl Semaphore { /// Creates a new semaphore with the initial number of permits. 
+ #[track_caller] pub fn new(permits: usize) -> Self { + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = { + let location = std::panic::Location::caller(); + + tracing::trace_span!( + "runtime.resource", + concrete_type = "Semaphore", + kind = "Sync", + loc.file = location.file(), + loc.line = location.line(), + loc.col = location.column(), + inherits_child_attrs = true, + ) + }; + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let ll_sem = resource_span.in_scope(|| ll::Semaphore::new(permits)); + + #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] + let ll_sem = ll::Semaphore::new(permits); + Self { - ll_sem: ll::Semaphore::new(permits), + ll_sem, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } @@ -139,9 +167,16 @@ impl Semaphore { #[cfg(all(feature = "parking_lot", not(all(loom, test))))] #[cfg_attr(docsrs, doc(cfg(feature = "parking_lot")))] pub const fn const_new(permits: usize) -> Self { - Self { + #[cfg(all(tokio_unstable, feature = "tracing"))] + return Self { ll_sem: ll::Semaphore::const_new(permits), - } + resource_span: tracing::Span::none(), + }; + + #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] + return Self { + ll_sem: ll::Semaphore::const_new(permits), + }; } /// Returns the current number of available permits. 
@@ -191,7 +226,18 @@ impl Semaphore { /// [`AcquireError`]: crate::sync::AcquireError /// [`SemaphorePermit`]: crate::sync::SemaphorePermit pub async fn acquire(&self) -> Result, AcquireError> { - self.ll_sem.acquire(1).await?; + #[cfg(all(tokio_unstable, feature = "tracing"))] + let inner = trace::async_op( + || self.ll_sem.acquire(1), + self.resource_span.clone(), + "Semaphore::acquire", + "poll", + true, + ); + #[cfg(not(all(tokio_unstable, feature = "tracing")))] + let inner = self.ll_sem.acquire(1); + + inner.await?; Ok(SemaphorePermit { sem: self, permits: 1, @@ -227,7 +273,19 @@ impl Semaphore { /// [`AcquireError`]: crate::sync::AcquireError /// [`SemaphorePermit`]: crate::sync::SemaphorePermit pub async fn acquire_many(&self, n: u32) -> Result, AcquireError> { + #[cfg(all(tokio_unstable, feature = "tracing"))] + trace::async_op( + || self.ll_sem.acquire(n), + self.resource_span.clone(), + "Semaphore::acquire_many", + "poll", + true, + ) + .await?; + + #[cfg(not(all(tokio_unstable, feature = "tracing")))] self.ll_sem.acquire(n).await?; + Ok(SemaphorePermit { sem: self, permits: n, @@ -350,7 +408,18 @@ impl Semaphore { /// [`AcquireError`]: crate::sync::AcquireError /// [`OwnedSemaphorePermit`]: crate::sync::OwnedSemaphorePermit pub async fn acquire_owned(self: Arc) -> Result { - self.ll_sem.acquire(1).await?; + #[cfg(all(tokio_unstable, feature = "tracing"))] + let inner = trace::async_op( + || self.ll_sem.acquire(1), + self.resource_span.clone(), + "Semaphore::acquire_owned", + "poll", + true, + ); + #[cfg(not(all(tokio_unstable, feature = "tracing")))] + let inner = self.ll_sem.acquire(1); + + inner.await?; Ok(OwnedSemaphorePermit { sem: self, permits: 1, @@ -403,7 +472,18 @@ impl Semaphore { self: Arc, n: u32, ) -> Result { - self.ll_sem.acquire(n).await?; + #[cfg(all(tokio_unstable, feature = "tracing"))] + let inner = trace::async_op( + || self.ll_sem.acquire(n), + self.resource_span.clone(), + "Semaphore::acquire_many_owned", + "poll", + true, + ); + 
#[cfg(not(all(tokio_unstable, feature = "tracing")))] + let inner = self.ll_sem.acquire(n); + + inner.await?; Ok(OwnedSemaphorePermit { sem: self, permits: n, diff --git a/tokio/src/time/driver/sleep.rs b/tokio/src/time/driver/sleep.rs index d10639d191c..7f27ef201f7 100644 --- a/tokio/src/time/driver/sleep.rs +++ b/tokio/src/time/driver/sleep.rs @@ -1,3 +1,5 @@ +#[cfg(all(tokio_unstable, feature = "tracing"))] +use crate::time::driver::ClockTime; use crate::time::driver::{Handle, TimerEntry}; use crate::time::{error::Error, Duration, Instant}; use crate::util::trace; @@ -8,10 +10,6 @@ use std::panic::Location; use std::pin::Pin; use std::task::{self, Poll}; -cfg_trace! { - use crate::time::driver::ClockTime; -} - /// Waits until `deadline` is reached. /// /// No work is performed while awaiting on the sleep future to complete. `Sleep` @@ -238,8 +236,7 @@ cfg_trace! { #[derive(Debug)] struct Inner { deadline: Instant, - resource_span: tracing::Span, - async_op_span: tracing::Span, + ctx: trace::AsyncOpTracingCtx, time_source: ClockTime, } } @@ -266,8 +263,7 @@ impl Sleep { let deadline_tick = time_source.deadline_to_tick(deadline); let duration = deadline_tick.checked_sub(time_source.now()).unwrap_or(0); - let location = location.expect("should have location if tracking caller"); - + let location = location.expect("should have location if tracing"); let resource_span = tracing::trace_span!( "runtime.resource", concrete_type = "Sleep", @@ -277,21 +273,29 @@ impl Sleep { loc.col = location.column(), ); - let async_op_span = - tracing::trace_span!("runtime.resource.async_op", source = "Sleep::new_timeout"); + let async_op_span = resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + duration = duration, + duration.unit = "ms", + duration.op = "override", + ); - tracing::trace!( - target: "runtime::resource::state_update", - parent: resource_span.id(), - duration = duration, - duration.unit = "ms", - duration.op = "override", - ); 
+ tracing::trace_span!("runtime.resource.async_op", source = "Sleep::new_timeout") + }); + + let async_op_poll_span = + async_op_span.in_scope(|| tracing::trace_span!("runtime.resource.async_op.poll")); + + let ctx = trace::AsyncOpTracingCtx { + async_op_span, + async_op_poll_span, + resource_span, + }; Inner { deadline, - resource_span, - async_op_span, + ctx, time_source, } }; @@ -358,54 +362,52 @@ impl Sleep { #[cfg(all(tokio_unstable, feature = "tracing"))] { - me.inner.async_op_span = + let _resource_enter = me.inner.ctx.resource_span.enter(); + me.inner.ctx.async_op_span = tracing::trace_span!("runtime.resource.async_op", source = "Sleep::reset"); + let _async_op_enter = me.inner.ctx.async_op_span.enter(); + + me.inner.ctx.async_op_poll_span = + tracing::trace_span!("runtime.resource.async_op.poll"); + + let duration = { + let now = me.inner.time_source.now(); + let deadline_tick = me.inner.time_source.deadline_to_tick(deadline); + deadline_tick.checked_sub(now).unwrap_or(0) + }; tracing::trace!( target: "runtime::resource::state_update", - parent: me.inner.resource_span.id(), - duration = { - let now = me.inner.time_source.now(); - let deadline_tick = me.inner.time_source.deadline_to_tick(deadline); - deadline_tick.checked_sub(now).unwrap_or(0) - }, + duration = duration, duration.unit = "ms", duration.op = "override", ); } } - cfg_not_trace! 
{ - fn poll_elapsed(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { - let me = self.project(); + fn poll_elapsed(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { + let me = self.project(); - // Keep track of task budget - let coop = ready!(crate::coop::poll_proceed(cx)); + // Keep track of task budget + #[cfg(all(tokio_unstable, feature = "tracing"))] + let coop = ready!(trace_poll_op!( + "poll_elapsed", + crate::coop::poll_proceed(cx), + )); - me.entry.poll_elapsed(cx).map(move |r| { - coop.made_progress(); - r - }) - } - } + #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] + let coop = ready!(crate::coop::poll_proceed(cx)); - cfg_trace! { - fn poll_elapsed(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { - let me = self.project(); - // Keep track of task budget - let coop = ready!(trace_poll_op!( - "poll_elapsed", - crate::coop::poll_proceed(cx), - me.inner.resource_span.id(), - )); - - let result = me.entry.poll_elapsed(cx).map(move |r| { - coop.made_progress(); - r - }); + let result = me.entry.poll_elapsed(cx).map(move |r| { + coop.made_progress(); + r + }); - trace_poll_op!("poll_elapsed", result, me.inner.resource_span.id()) - } + #[cfg(all(tokio_unstable, feature = "tracing"))] + return trace_poll_op!("poll_elapsed", result); + + #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] + return result; } } @@ -423,8 +425,11 @@ impl Future for Sleep { // really do much better if we passed the error onwards. 
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { #[cfg(all(tokio_unstable, feature = "tracing"))] - let _span = self.inner.async_op_span.clone().entered(); - + let _res_span = self.inner.ctx.resource_span.clone().entered(); + #[cfg(all(tokio_unstable, feature = "tracing"))] + let _ao_span = self.inner.ctx.async_op_span.clone().entered(); + #[cfg(all(tokio_unstable, feature = "tracing"))] + let _ao_poll_span = self.inner.ctx.async_op_poll_span.clone().entered(); match ready!(self.as_mut().poll_elapsed(cx)) { Ok(()) => Poll::Ready(()), Err(e) => panic!("timer error: {}", e), diff --git a/tokio/src/time/interval.rs b/tokio/src/time/interval.rs index 2052567ab82..8ecb15b389e 100644 --- a/tokio/src/time/interval.rs +++ b/tokio/src/time/interval.rs @@ -1,6 +1,8 @@ use crate::future::poll_fn; use crate::time::{sleep_until, Duration, Instant, Sleep}; +use crate::util::trace; +use std::panic::Location; use std::pin::Pin; use std::task::{Context, Poll}; use std::{convert::TryInto, future::Future}; @@ -68,10 +70,10 @@ use std::{convert::TryInto, future::Future}; /// /// [`sleep`]: crate::time::sleep() /// [`.tick().await`]: Interval::tick +#[track_caller] pub fn interval(period: Duration) -> Interval { assert!(period > Duration::new(0, 0), "`period` must be non-zero."); - - interval_at(Instant::now(), period) + internal_interval_at(Instant::now(), period, trace::caller_location()) } /// Creates new [`Interval`] that yields with interval of `period` with the @@ -103,13 +105,44 @@ pub fn interval(period: Duration) -> Interval { /// // approximately 70ms have elapsed. 
/// } /// ``` +#[track_caller] pub fn interval_at(start: Instant, period: Duration) -> Interval { assert!(period > Duration::new(0, 0), "`period` must be non-zero."); + internal_interval_at(start, period, trace::caller_location()) +} + +#[cfg_attr(not(all(tokio_unstable, feature = "tracing")), allow(unused_variables))] +fn internal_interval_at( + start: Instant, + period: Duration, + location: Option<&'static Location<'static>>, +) -> Interval { + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = { + let location = location.expect("should have location if tracing"); + + tracing::trace_span!( + "runtime.resource", + concrete_type = "Interval", + kind = "timer", + loc.file = location.file(), + loc.line = location.line(), + loc.col = location.column(), + ) + }; + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let delay = resource_span.in_scope(|| Box::pin(sleep_until(start))); + + #[cfg(not(all(tokio_unstable, feature = "tracing")))] + let delay = Box::pin(sleep_until(start)); Interval { - delay: Box::pin(sleep_until(start)), + delay, period, missed_tick_behavior: Default::default(), + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span, } } @@ -362,6 +395,9 @@ pub struct Interval { /// The strategy `Interval` should use when a tick is missed. 
missed_tick_behavior: MissedTickBehavior, + + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, } impl Interval { @@ -391,7 +427,20 @@ impl Interval { /// } /// ``` pub async fn tick(&mut self) -> Instant { - poll_fn(|cx| self.poll_tick(cx)).await + #[cfg(all(tokio_unstable, feature = "tracing"))] + let resource_span = self.resource_span.clone(); + #[cfg(all(tokio_unstable, feature = "tracing"))] + let instant = trace::async_op( + || poll_fn(|cx| self.poll_tick(cx)), + resource_span, + "Interval::tick", + "poll_tick", + false, + ); + #[cfg(not(all(tokio_unstable, feature = "tracing")))] + let instant = poll_fn(|cx| self.poll_tick(cx)); + + instant.await } /// Polls for the next instant in the interval to be reached. diff --git a/tokio/src/util/trace.rs b/tokio/src/util/trace.rs index 74ae739354b..6080e2358ae 100644 --- a/tokio/src/util/trace.rs +++ b/tokio/src/util/trace.rs @@ -1,5 +1,11 @@ cfg_trace! { cfg_rt! { + use core::{ + pin::Pin, + task::{Context, Poll}, + }; + use pin_project_lite::pin_project; + use std::future::Future; pub(crate) use tracing::instrument::Instrumented; #[inline] @@ -18,6 +24,58 @@ cfg_trace! 
{ ); task.instrument(span) } + + pub(crate) fn async_op(inner: P, resource_span: tracing::Span, source: &str, poll_op_name: &'static str, inherits_child_attrs: bool) -> InstrumentedAsyncOp + where P: FnOnce() -> F { + resource_span.in_scope(|| { + let async_op_span = tracing::trace_span!("runtime.resource.async_op", source = source, inherits_child_attrs = inherits_child_attrs); + let enter = async_op_span.enter(); + let async_op_poll_span = tracing::trace_span!("runtime.resource.async_op.poll"); + let inner = inner(); + drop(enter); + let tracing_ctx = AsyncOpTracingCtx { + async_op_span, + async_op_poll_span, + resource_span: resource_span.clone(), + }; + InstrumentedAsyncOp { + inner, + tracing_ctx, + poll_op_name, + } + }) + } + + #[derive(Debug, Clone)] + pub(crate) struct AsyncOpTracingCtx { + pub(crate) async_op_span: tracing::Span, + pub(crate) async_op_poll_span: tracing::Span, + pub(crate) resource_span: tracing::Span, + } + + + pin_project! { + #[derive(Debug, Clone)] + pub(crate) struct InstrumentedAsyncOp { + #[pin] + pub(crate) inner: F, + pub(crate) tracing_ctx: AsyncOpTracingCtx, + pub(crate) poll_op_name: &'static str + } + } + + impl Future for InstrumentedAsyncOp { + type Output = F::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + let poll_op_name = &*this.poll_op_name; + let _res_enter = this.tracing_ctx.resource_span.enter(); + let _async_op_enter = this.tracing_ctx.async_op_span.enter(); + let _async_op_poll_enter = this.tracing_ctx.async_op_poll_span.enter(); + trace_poll_op!(poll_op_name, this.inner.poll(cx)) + } + } } } cfg_time! 
{ From 54e6693dff86e6e9d687ed3952bbc2c68bb6db8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Braulio=20Valdivielso=20Mart=C3=ADnez?= Date: Wed, 15 Dec 2021 10:59:21 +0000 Subject: [PATCH 23/59] time: make timeout robust against budget-depleting tasks (#4314) --- tokio/src/coop.rs | 18 +++++++----------- tokio/src/time/timeout.rs | 27 +++++++++++++++++++++++---- tokio/tests/time_timeout.rs | 13 +++++++++++++ 3 files changed, 43 insertions(+), 15 deletions(-) diff --git a/tokio/src/coop.rs b/tokio/src/coop.rs index 256e9620e75..145e703971b 100644 --- a/tokio/src/coop.rs +++ b/tokio/src/coop.rs @@ -59,13 +59,9 @@ impl Budget { const fn unconstrained() -> Budget { Budget(None) } -} -cfg_rt_multi_thread! { - impl Budget { - fn has_remaining(self) -> bool { - self.0.map(|budget| budget > 0).unwrap_or(true) - } + fn has_remaining(self) -> bool { + self.0.map(|budget| budget > 0).unwrap_or(true) } } @@ -107,16 +103,16 @@ fn with_budget(budget: Budget, f: impl FnOnce() -> R) -> R { }) } +#[inline(always)] +pub(crate) fn has_budget_remaining() -> bool { + CURRENT.with(|cell| cell.get().has_remaining()) +} + cfg_rt_multi_thread! { /// Sets the current task's budget. pub(crate) fn set(budget: Budget) { CURRENT.with(|cell| cell.set(budget)) } - - #[inline(always)] - pub(crate) fn has_budget_remaining() -> bool { - CURRENT.with(|cell| cell.get().has_remaining()) - } } cfg_rt! { diff --git a/tokio/src/time/timeout.rs b/tokio/src/time/timeout.rs index cc299161633..4a93089e8e8 100644 --- a/tokio/src/time/timeout.rs +++ b/tokio/src/time/timeout.rs @@ -5,6 +5,7 @@ //! 
[`Timeout`]: struct@Timeout use crate::{ + coop, time::{error::Elapsed, sleep_until, Duration, Instant, Sleep}, util::trace, }; @@ -169,15 +170,33 @@ where fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { let me = self.project(); + let had_budget_before = coop::has_budget_remaining(); + // First, try polling the future if let Poll::Ready(v) = me.value.poll(cx) { return Poll::Ready(Ok(v)); } - // Now check the timer - match me.delay.poll(cx) { - Poll::Ready(()) => Poll::Ready(Err(Elapsed::new())), - Poll::Pending => Poll::Pending, + let has_budget_now = coop::has_budget_remaining(); + + let delay = me.delay; + + let poll_delay = || -> Poll { + match delay.poll(cx) { + Poll::Ready(()) => Poll::Ready(Err(Elapsed::new())), + Poll::Pending => Poll::Pending, + } + }; + + if let (true, false) = (had_budget_before, has_budget_now) { + // if it is the underlying future that exhausted the budget, we poll + // the `delay` with an unconstrained one. This prevents pathological + // cases where the underlying future always exhausts the budget and + // we never get a chance to evaluate whether the timeout was hit or + // not. 
+ coop::with_unconstrained(poll_delay) + } else { + poll_delay() } } } diff --git a/tokio/tests/time_timeout.rs b/tokio/tests/time_timeout.rs index dbd80eb8a6a..a1ff51e7d27 100644 --- a/tokio/tests/time_timeout.rs +++ b/tokio/tests/time_timeout.rs @@ -135,3 +135,16 @@ async fn deadline_future_elapses() { fn ms(n: u64) -> Duration { Duration::from_millis(n) } + +#[tokio::test] +async fn timeout_is_not_exhausted_by_future() { + let fut = timeout(ms(1), async { + let mut buffer = [0u8; 1]; + loop { + use tokio::io::AsyncReadExt; + let _ = tokio::io::empty().read(&mut buffer).await; + } + }); + + assert!(fut.await.is_err()); +} From f64673580dfc649954eb744eb2734f2f118baa47 Mon Sep 17 00:00:00 2001 From: Carl Lerche Date: Wed, 15 Dec 2021 10:36:09 -0800 Subject: [PATCH 24/59] chore: prepare Tokio v1.15.0 release (#4320) Includes `tokio-macros` v1.7.0 --- README.md | 2 +- tokio-macros/CHANGELOG.md | 6 ++++++ tokio-macros/Cargo.toml | 2 +- tokio/CHANGELOG.md | 28 ++++++++++++++++++++++++++++ tokio/Cargo.toml | 4 ++-- tokio/README.md | 2 +- 6 files changed, 39 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 5e226ff7116..ad192fec706 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.14.0", features = ["full"] } +tokio = { version = "1.15.0", features = ["full"] } ``` Then, on your main.rs: diff --git a/tokio-macros/CHANGELOG.md b/tokio-macros/CHANGELOG.md index eb5504cc5da..633b43fbbdb 100644 --- a/tokio-macros/CHANGELOG.md +++ b/tokio-macros/CHANGELOG.md @@ -1,3 +1,9 @@ +# 1.7.0 (December 15th, 2021) + +- macros: address remainging clippy::semicolon_if_nothing_returned warning ([#4252]) + +[#4252]: https://github.com/tokio-rs/tokio/pull/4252 + # 1.6.0 (November 16th, 2021) - macros: fix mut patterns in `select!` macro ([#4211]) diff --git a/tokio-macros/Cargo.toml b/tokio-macros/Cargo.toml index 
e96ba20c921..d9b05795bf9 100644 --- a/tokio-macros/Cargo.toml +++ b/tokio-macros/Cargo.toml @@ -4,7 +4,7 @@ name = "tokio-macros" # - Remove path dependencies # - Update CHANGELOG.md. # - Create "tokio-macros-1.0.x" git tag. -version = "1.6.0" +version = "1.7.0" edition = "2018" rust-version = "1.46" authors = ["Tokio Contributors "] diff --git a/tokio/CHANGELOG.md b/tokio/CHANGELOG.md index afa8bf0ce18..a17ffa9b8cd 100644 --- a/tokio/CHANGELOG.md +++ b/tokio/CHANGELOG.md @@ -1,3 +1,31 @@ +# 1.15.0 (December 15, 2021) + +### Fixed + +- io: add cooperative yielding support to `io::empty()` ([#4300]) +- time: make timeout robust against budget-depleting tasks ([#4314]) + +### Changed + +- update minimum supported Rust version to 1.46. + +### Added + +- time: add `Interval::reset()` ([#4248]) +- io: add explicit lifetimes to `AsyncFdReadyGuard` ([#4267]) +- process: add `Command::as_std()` ([#4295]) + +### Added (unstable) + +- tracing: instrument `tokio::sync` types ([#4302]) + +[#4302]: https://github.com/tokio-rs/tokio/pull/4302 +[#4300]: https://github.com/tokio-rs/tokio/pull/4300 +[#4295]: https://github.com/tokio-rs/tokio/pull/4295 +[#4267]: https://github.com/tokio-rs/tokio/pull/4267 +[#4248]: https://github.com/tokio-rs/tokio/pull/4248 +[#4314]: https://github.com/tokio-rs/tokio/pull/4314 + # 1.14.0 (November 15, 2021) ### Fixed diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index ee3a2260b29..06ef7074070 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -6,7 +6,7 @@ name = "tokio" # - README.md # - Update CHANGELOG.md. # - Create "v1.0.x" git tag. 
-version = "1.14.0" +version = "1.15.0" edition = "2018" rust-version = "1.46" authors = ["Tokio Contributors "] @@ -86,7 +86,7 @@ test-util = ["rt", "sync", "time"] time = [] [dependencies] -tokio-macros = { version = "1.6.0", path = "../tokio-macros", optional = true } +tokio-macros = { version = "1.7.0", path = "../tokio-macros", optional = true } pin-project-lite = "0.2.0" diff --git a/tokio/README.md b/tokio/README.md index 5e226ff7116..ad192fec706 100644 --- a/tokio/README.md +++ b/tokio/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.14.0", features = ["full"] } +tokio = { version = "1.15.0", features = ["full"] } ``` Then, on your main.rs: From 22e6aef6e776b00ce36f8c7e003f475d9bd18242 Mon Sep 17 00:00:00 2001 From: Fabien Gaud Date: Thu, 16 Dec 2021 11:34:00 -0800 Subject: [PATCH 25/59] net: allow to set linger on `TcpSocket` (#4324) For now, this is only allowed on TcpStream. This is a problem when one want to disable lingering (i.e. set it to Duration(0, 0)). Without being able to set it prior to the connect call, if the connect future is dropped it would leave sockets in a TIME_WAIT state. Co-authored-by: Fabien Gaud --- tokio/src/net/tcp/socket.rs | 23 +++++++++++++++++++++++ tokio/tests/tcp_socket.rs | 14 ++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/tokio/src/net/tcp/socket.rs b/tokio/src/net/tcp/socket.rs index fb75f75a5b8..3c6870221c2 100644 --- a/tokio/src/net/tcp/socket.rs +++ b/tokio/src/net/tcp/socket.rs @@ -8,6 +8,7 @@ use std::net::SocketAddr; use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; #[cfg(windows)] use std::os::windows::io::{AsRawSocket, FromRawSocket, IntoRawSocket, RawSocket}; +use std::time::Duration; cfg_net! 
{ /// A TCP socket that has not yet been converted to a `TcpStream` or @@ -349,6 +350,28 @@ impl TcpSocket { self.inner.get_recv_buffer_size() } + /// Sets the linger duration of this socket by setting the SO_LINGER option. + /// + /// This option controls the action taken when a stream has unsent messages and the stream is + /// closed. If SO_LINGER is set, the system shall block the process until it can transmit the + /// data or until the time expires. + /// + /// If SO_LINGER is not specified, and the socket is closed, the system handles the call in a + /// way that allows the process to continue as quickly as possible. + pub fn set_linger(&self, dur: Option) -> io::Result<()> { + self.inner.set_linger(dur) + } + + /// Reads the linger duration for this socket by getting the `SO_LINGER` + /// option. + /// + /// For more information about this option, see [`set_linger`]. + /// + /// [`set_linger`]: TcpSocket::set_linger + pub fn linger(&self) -> io::Result> { + self.inner.get_linger() + } + /// Gets the local address of this socket. /// /// Will fail on windows if called before `bind`. 
diff --git a/tokio/tests/tcp_socket.rs b/tokio/tests/tcp_socket.rs index 9258864d416..3030416502a 100644 --- a/tokio/tests/tcp_socket.rs +++ b/tokio/tests/tcp_socket.rs @@ -1,6 +1,7 @@ #![warn(rust_2018_idioms)] #![cfg(feature = "full")] +use std::time::Duration; use tokio::net::TcpSocket; use tokio_test::assert_ok; @@ -58,3 +59,16 @@ async fn bind_before_connect() { // Accept let _ = assert_ok!(srv.accept().await); } + +#[tokio::test] +async fn basic_linger() { + // Create server + let addr = assert_ok!("127.0.0.1:0".parse()); + let srv = assert_ok!(TcpSocket::new_v4()); + assert_ok!(srv.bind(addr)); + + assert!(srv.linger().unwrap().is_none()); + + srv.set_linger(Some(Duration::new(0, 0))).unwrap(); + assert_eq!(srv.linger().unwrap(), Some(Duration::new(0, 0))); +} From c3fbaba1f94f116d8bd0013e21fe6cabba521b6a Mon Sep 17 00:00:00 2001 From: Cyborus04 <87248184+Cyborus04@users.noreply.github.com> Date: Fri, 17 Dec 2021 14:00:24 -0500 Subject: [PATCH 26/59] io: replace use of `transmute` with pointer manipulations (#4307) --- tokio/src/io/read_buf.rs | 38 ++++++++++++++++++++++---------------- 1 file changed, 22 insertions(+), 16 deletions(-) diff --git a/tokio/src/io/read_buf.rs b/tokio/src/io/read_buf.rs index ad58cbe757b..8c34ae6c817 100644 --- a/tokio/src/io/read_buf.rs +++ b/tokio/src/io/read_buf.rs @@ -1,9 +1,5 @@ -// This lint claims ugly casting is somehow safer than transmute, but there's -// no evidence that is the case. Shush. -#![allow(clippy::transmute_ptr_to_ptr)] - use std::fmt; -use std::mem::{self, MaybeUninit}; +use std::mem::MaybeUninit; /// A wrapper around a byte buffer that is incrementally filled and initialized. 
/// @@ -35,7 +31,7 @@ impl<'a> ReadBuf<'a> { #[inline] pub fn new(buf: &'a mut [u8]) -> ReadBuf<'a> { let initialized = buf.len(); - let buf = unsafe { mem::transmute::<&mut [u8], &mut [MaybeUninit]>(buf) }; + let buf = unsafe { slice_to_uninit_mut(buf) }; ReadBuf { buf, filled: 0, @@ -67,8 +63,7 @@ impl<'a> ReadBuf<'a> { let slice = &self.buf[..self.filled]; // safety: filled describes how far into the buffer that the // user has filled with bytes, so it's been initialized. - // TODO: This could use `MaybeUninit::slice_get_ref` when it is stable. - unsafe { mem::transmute::<&[MaybeUninit], &[u8]>(slice) } + unsafe { slice_assume_init(slice) } } /// Returns a mutable reference to the filled portion of the buffer. @@ -77,8 +72,7 @@ impl<'a> ReadBuf<'a> { let slice = &mut self.buf[..self.filled]; // safety: filled describes how far into the buffer that the // user has filled with bytes, so it's been initialized. - // TODO: This could use `MaybeUninit::slice_get_mut` when it is stable. - unsafe { mem::transmute::<&mut [MaybeUninit], &mut [u8]>(slice) } + unsafe { slice_assume_init_mut(slice) } } /// Returns a new `ReadBuf` comprised of the unfilled section up to `n`. @@ -97,8 +91,7 @@ impl<'a> ReadBuf<'a> { let slice = &self.buf[..self.initialized]; // safety: initialized describes how far into the buffer that the // user has at some point initialized with bytes. - // TODO: This could use `MaybeUninit::slice_get_ref` when it is stable. - unsafe { mem::transmute::<&[MaybeUninit], &[u8]>(slice) } + unsafe { slice_assume_init(slice) } } /// Returns a mutable reference to the initialized portion of the buffer. @@ -109,15 +102,14 @@ impl<'a> ReadBuf<'a> { let slice = &mut self.buf[..self.initialized]; // safety: initialized describes how far into the buffer that the // user has at some point initialized with bytes. - // TODO: This could use `MaybeUninit::slice_get_mut` when it is stable. 
- unsafe { mem::transmute::<&mut [MaybeUninit], &mut [u8]>(slice) } + unsafe { slice_assume_init_mut(slice) } } /// Returns a mutable reference to the entire buffer, without ensuring that it has been fully /// initialized. /// /// The elements between 0 and `self.filled().len()` are filled, and those between 0 and - /// `self.initialized().len()` are initialized (and so can be transmuted to a `&mut [u8]`). + /// `self.initialized().len()` are initialized (and so can be converted to a `&mut [u8]`). /// /// The caller of this method must ensure that these invariants are upheld. For example, if the /// caller initializes some of the uninitialized section of the buffer, it must call @@ -178,7 +170,7 @@ impl<'a> ReadBuf<'a> { let slice = &mut self.buf[self.filled..end]; // safety: just above, we checked that the end of the buf has // been initialized to some value. - unsafe { mem::transmute::<&mut [MaybeUninit], &mut [u8]>(slice) } + unsafe { slice_assume_init_mut(slice) } } /// Returns the number of bytes at the end of the slice that have not yet been filled. @@ -283,3 +275,17 @@ impl fmt::Debug for ReadBuf<'_> { .finish() } } + +unsafe fn slice_to_uninit_mut(slice: &mut [u8]) -> &mut [MaybeUninit] { + &mut *(slice as *mut [u8] as *mut [MaybeUninit]) +} + +// TODO: This could use `MaybeUninit::slice_assume_init` when it is stable. +unsafe fn slice_assume_init(slice: &[MaybeUninit]) -> &[u8] { + &*(slice as *const [MaybeUninit] as *const [u8]) +} + +// TODO: This could use `MaybeUninit::slice_assume_init_mut` when it is stable. 
+unsafe fn slice_assume_init_mut(slice: &mut [MaybeUninit]) -> &mut [u8] { + &mut *(slice as *mut [MaybeUninit] as *mut [u8]) +} From 8582363b4e2e98cef94bf2cbce222da6fdba6728 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Sat, 18 Dec 2021 13:40:24 +0100 Subject: [PATCH 27/59] stats: mark stats feature unstable in lib.rs (#4327) --- tokio/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tokio/src/lib.rs b/tokio/src/lib.rs index 35295d837a6..01878524e13 100644 --- a/tokio/src/lib.rs +++ b/tokio/src/lib.rs @@ -346,6 +346,7 @@ //! `RUSTFLAGS="--cfg tokio_unstable"`. //! //! - `tracing`: Enables tracing events. +//! - `stats`: Enables runtime stats collection. ([RFC](https://github.com/tokio-rs/tokio/pull/3845)) //! //! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section From e55f3d4398c01fb81ff57d3600634000b6ab662d Mon Sep 17 00:00:00 2001 From: Jinhua Tan <312841925@qq.com> Date: Tue, 21 Dec 2021 21:02:18 +0800 Subject: [PATCH 28/59] examples: make the introduction in examples/Cargo.toml more clear (#4333) --- examples/Cargo.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 7491c81c831..b37adff09d6 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -5,10 +5,10 @@ publish = false edition = "2018" # If you copy one of the examples into a new project, you should be using -# [dependencies] instead. +# [dependencies] instead, and delete the **path**. 
[dev-dependencies] -tokio = { version = "1.0.0", path = "../tokio",features = ["full", "tracing"] } -tokio-util = { version = "0.7.0", path = "../tokio-util",features = ["full"] } +tokio = { version = "1.0.0", path = "../tokio", features = ["full", "tracing"] } +tokio-util = { version = "0.7.0", path = "../tokio-util", features = ["full"] } tokio-stream = { version = "0.1", path = "../tokio-stream" } tracing = "0.1" From 78e0f0b42a4f7a50f3986f576703e5a3cb473b79 Mon Sep 17 00:00:00 2001 From: Eliza Weisman Date: Tue, 21 Dec 2021 11:11:48 -0800 Subject: [PATCH 29/59] docs: improve RustDoc for unstable features (#4331) Currently, the docs.rs documentation for tokio is built without --cfg tokio_unstable set. This means that unstable features are not shown in the API docs, making them difficutl to discover. Clearly, we do want to document the existence of unstable APIs, given that there's a section in the lib.rs documentation listing them, so it would be better if it was also possible to determine what APIs an unstable feature enables when reading the RustDoc documentation. This branch changes the docs.rs metadata to also pass --cfg tokio_unstable when building the documentation. It turns out that it's necessary to separately pass the cfg flag to both RustDoc and rustc, or else the tracing dependency, which is only enabled in target.cfg(tokio_unstable).dependencies, will be missing and the build will fail. In addition, I made some minor improvements to the docs for unstable features. Some links in the task::Builder docs were broken, and the required tokio_unstable cfg was missing from the doc(cfg(...)) attributes. Furthermore, I added a note in the top-level docs for unstable APIs, stating that they are unstable and linking back to the section in the crate-level docs that explains how to enable unstable features. 
Fixes #4328 --- CONTRIBUTING.md | 8 ++++++++ tokio/Cargo.toml | 6 +++++- tokio/src/macros/cfg.rs | 4 ++-- tokio/src/runtime/stats/mod.rs | 6 ++++++ tokio/src/runtime/stats/stats.rs | 12 ++++++++++++ tokio/src/task/builder.rs | 10 ++++++++++ 6 files changed, 43 insertions(+), 3 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 66ec614e585..289e069a3cf 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -139,6 +139,14 @@ correctly, use this command: RUSTDOCFLAGS="--cfg docsrs" cargo +nightly doc --all-features ``` +To build documentation including Tokio's unstable features, it is necessary to +pass `--cfg tokio_unstable` to both RustDoc *and* rustc. To build the +documentation for unstable features, use this command: + +``` +RUSTDOCFLAGS="--cfg docsrs --cfg tokio_unstable" RUSTFLAGS="--cfg tokio_unstable" cargo +nightly doc --all-features +``` + There is currently a [bug in cargo] that means documentation cannot be built from the root of the workspace. If you `cd` into the `tokio` subdirectory the command shown above will work. diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index 06ef7074070..2a88b766c0a 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -138,7 +138,11 @@ loom = { version = "0.5", features = ["futures", "checkpoint"] } [package.metadata.docs.rs] all-features = true -rustdoc-args = ["--cfg", "docsrs"] +# enable unstable features in the documentation +rustdoc-args = ["--cfg", "docsrs", "--cfg", "tokio_unstable"] +# it's necessary to _also_ pass `--cfg tokio_unstable` to rustc, or else +# dependencies will not be enabled, and the docs build will fail. +rustc-args = ["--cfg", "tokio_unstable"] [package.metadata.playground] features = ["full", "test-util"] diff --git a/tokio/src/macros/cfg.rs b/tokio/src/macros/cfg.rs index 4ab13c2c11c..9fa30ca27d6 100644 --- a/tokio/src/macros/cfg.rs +++ b/tokio/src/macros/cfg.rs @@ -178,7 +178,7 @@ macro_rules! 
cfg_stats { ($($item:item)*) => { $( #[cfg(all(tokio_unstable, feature = "stats"))] - #[cfg_attr(docsrs, doc(cfg(feature = "stats")))] + #[cfg_attr(docsrs, doc(cfg(all(tokio_unstable, feature = "stats"))))] $item )* } @@ -365,7 +365,7 @@ macro_rules! cfg_trace { ($($item:item)*) => { $( #[cfg(all(tokio_unstable, feature = "tracing"))] - #[cfg_attr(docsrs, doc(cfg(feature = "tracing")))] + #[cfg_attr(docsrs, doc(cfg(all(tokio_unstable, feature = "tracing"))))] $item )* }; diff --git a/tokio/src/runtime/stats/mod.rs b/tokio/src/runtime/stats/mod.rs index 5e08e8ec4d9..355e400602d 100644 --- a/tokio/src/runtime/stats/mod.rs +++ b/tokio/src/runtime/stats/mod.rs @@ -1,5 +1,11 @@ //! This module contains information need to view information about how the //! runtime is performing. +//! +//! **Note**: This is an [unstable API][unstable]. The public API of types in +//! this module may break in 1.x releases. See [the documentation on unstable +//! features][unstable] for details. +//! +//! [unstable]: crate#unstable-features #![allow(clippy::module_inception)] cfg_stats! { diff --git a/tokio/src/runtime/stats/stats.rs b/tokio/src/runtime/stats/stats.rs index b2bcaccaa73..375786300e7 100644 --- a/tokio/src/runtime/stats/stats.rs +++ b/tokio/src/runtime/stats/stats.rs @@ -5,12 +5,24 @@ use std::convert::TryFrom; use std::time::{Duration, Instant}; /// This type contains methods to retrieve stats from a Tokio runtime. +/// +/// **Note**: This is an [unstable API][unstable]. The public API of this type +/// may break in 1.x releases. See [the documentation on unstable +/// features][unstable] for details. +/// +/// [unstable]: crate#unstable-features #[derive(Debug)] pub struct RuntimeStats { workers: Box<[WorkerStats]>, } /// This type contains methods to retrieve stats from a worker thread on a Tokio runtime. +/// +/// **Note**: This is an [unstable API][unstable]. The public API of this type +/// may break in 1.x releases. 
See [the documentation on unstable +/// features][unstable] for details. +/// +/// [unstable]: crate#unstable-features #[derive(Debug)] #[repr(align(128))] pub struct WorkerStats { diff --git a/tokio/src/task/builder.rs b/tokio/src/task/builder.rs index dae334928e7..0a7fe3c371a 100644 --- a/tokio/src/task/builder.rs +++ b/tokio/src/task/builder.rs @@ -4,6 +4,10 @@ use std::future::Future; /// Factory which is used to configure the properties of a new task. /// +/// **Note**: This is an [unstable API][unstable]. The public API of this type +/// may break in 1.x releases. See [the documentation on unstable +/// features][unstable] for details. +/// /// Methods can be chained in order to configure it. /// /// Currently, there is only one configuration option: @@ -45,7 +49,13 @@ use std::future::Future; /// } /// } /// ``` +/// [unstable API]: crate#unstable-features +/// [`name`]: Builder::name +/// [`spawn_local`]: Builder::spawn_local +/// [`spawn`]: Builder::spawn +/// [`spawn_blocking`]: Builder::spawn_blocking #[derive(Default, Debug)] +#[cfg_attr(docsrs, doc(cfg(all(tokio_unstable, feature = "tracing"))))] pub struct Builder<'a> { name: Option<&'a str>, } From dc1894105bfacb541d219804d7678b5108f54359 Mon Sep 17 00:00:00 2001 From: David Kleingeld Date: Tue, 28 Dec 2021 15:08:37 +0100 Subject: [PATCH 30/59] codec: improve `Builder::max_frame_length` docs (#4352) --- tokio-util/src/codec/length_delimited.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tokio-util/src/codec/length_delimited.rs b/tokio-util/src/codec/length_delimited.rs index 7135d7ce47c..de0eb4e9201 100644 --- a/tokio-util/src/codec/length_delimited.rs +++ b/tokio-util/src/codec/length_delimited.rs @@ -746,7 +746,7 @@ impl Builder { } } - /// Sets the max frame length + /// Sets the max frame length in bytes /// /// This configuration option applies to both encoding and decoding. The /// default value is 8MB. 
@@ -767,7 +767,7 @@ impl Builder { /// /// # fn bind_read(io: T) { /// LengthDelimitedCodec::builder() - /// .max_frame_length(8 * 1024) + /// .max_frame_length(8 * 1024 * 1024) /// .new_read(io); /// # } /// # pub fn main() {} From dda8da75d031f919a1dba9a50ce8cc1cc7207f99 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Thu, 30 Dec 2021 15:28:13 +0100 Subject: [PATCH 31/59] stream: add `StreamExt::then` (#4355) --- tokio-stream/src/stream_ext.rs | 55 +++++++++++++++++-- tokio-stream/src/stream_ext/then.rs | 83 +++++++++++++++++++++++++++++ 2 files changed, 135 insertions(+), 3 deletions(-) create mode 100644 tokio-stream/src/stream_ext/then.rs diff --git a/tokio-stream/src/stream_ext.rs b/tokio-stream/src/stream_ext.rs index 1157c9ee353..cc4841e629e 100644 --- a/tokio-stream/src/stream_ext.rs +++ b/tokio-stream/src/stream_ext.rs @@ -1,3 +1,4 @@ +use core::future::Future; use futures_core::Stream; mod all; @@ -39,15 +40,18 @@ use skip::Skip; mod skip_while; use skip_while::SkipWhile; -mod try_next; -use try_next::TryNext; - mod take; use take::Take; mod take_while; use take_while::TakeWhile; +mod then; +use then::Then; + +mod try_next; +use try_next::TryNext; + cfg_time! { mod timeout; use timeout::Timeout; @@ -197,6 +201,51 @@ pub trait StreamExt: Stream { Map::new(self, f) } + /// Maps this stream's items asynchronously to a different type, returning a + /// new stream of the resulting type. + /// + /// The provided closure is executed over all elements of this stream as + /// they are made available, and the returned future is executed. Only one + /// future is executed at the time. + /// + /// Note that this function consumes the stream passed into it and returns a + /// wrapped version of it, similar to the existing `then` methods in the + /// standard library. + /// + /// Be aware that if the future is not `Unpin`, then neither is the `Stream` + /// returned by this method. 
To handle this, you can use `tokio::pin!` as in + /// the example below or put the stream in a `Box` with `Box::pin(stream)`. + /// + /// # Examples + /// + /// ``` + /// # #[tokio::main] + /// # async fn main() { + /// use tokio_stream::{self as stream, StreamExt}; + /// + /// async fn do_async_work(value: i32) -> i32 { + /// value + 3 + /// } + /// + /// let stream = stream::iter(1..=3); + /// let stream = stream.then(do_async_work); + /// + /// tokio::pin!(stream); + /// + /// assert_eq!(stream.next().await, Some(4)); + /// assert_eq!(stream.next().await, Some(5)); + /// assert_eq!(stream.next().await, Some(6)); + /// # } + /// ``` + fn then(self, f: F) -> Then + where + F: FnMut(Self::Item) -> Fut, + Fut: Future, + Self: Sized, + { + Then::new(self, f) + } + /// Combine two streams into one by interleaving the output of both as it /// is produced. /// diff --git a/tokio-stream/src/stream_ext/then.rs b/tokio-stream/src/stream_ext/then.rs new file mode 100644 index 00000000000..7f6b5a2394f --- /dev/null +++ b/tokio-stream/src/stream_ext/then.rs @@ -0,0 +1,83 @@ +use crate::Stream; + +use core::fmt; +use core::future::Future; +use core::pin::Pin; +use core::task::{Context, Poll}; +use pin_project_lite::pin_project; + +pin_project! { + /// Stream for the [`then`](super::StreamExt::then) method. 
+ #[must_use = "streams do nothing unless polled"] + pub struct Then { + #[pin] + stream: St, + #[pin] + future: Option, + f: F, + } +} + +impl fmt::Debug for Then +where + St: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Then") + .field("stream", &self.stream) + .finish() + } +} + +impl Then { + pub(super) fn new(stream: St, f: F) -> Self { + Then { + stream, + future: None, + f, + } + } +} + +impl Stream for Then +where + St: Stream, + Fut: Future, + F: FnMut(St::Item) -> Fut, +{ + type Item = Fut::Output; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut me = self.project(); + + loop { + if let Some(future) = me.future.as_mut().as_pin_mut() { + match future.poll(cx) { + Poll::Ready(item) => { + me.future.set(None); + return Poll::Ready(Some(item)); + } + Poll::Pending => return Poll::Pending, + } + } + + match me.stream.as_mut().poll_next(cx) { + Poll::Ready(Some(item)) => { + me.future.set(Some((me.f)(item))); + } + Poll::Ready(None) => return Poll::Ready(None), + Poll::Pending => return Poll::Pending, + } + } + } + + fn size_hint(&self) -> (usize, Option) { + let future_len = if self.future.is_some() { 1 } else { 0 }; + let (lower, upper) = self.stream.size_hint(); + + let lower = lower.saturating_add(future_len); + let upper = upper.and_then(|upper| upper.checked_add(future_len)); + + (lower, upper) + } +} From 47feaa7a8957b47dc1c4b91fc9085ddebd11e5ed Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Thu, 30 Dec 2021 15:31:11 +0100 Subject: [PATCH 32/59] io: fix clippy lint in `write_all` (#4358) --- .github/workflows/ci.yml | 2 +- tokio/src/io/util/write_all.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4474e9667bd..92769a3f88b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -283,7 +283,7 @@ jobs: steps: - uses: actions/checkout@v2 - name: Install Rust - run: rustup update 
1.56 && rustup default 1.56 + run: rustup update 1.57 && rustup default 1.57 - uses: Swatinem/rust-cache@v1 - name: Install clippy run: rustup component add clippy diff --git a/tokio/src/io/util/write_all.rs b/tokio/src/io/util/write_all.rs index e59d41e4d7b..abd3e39d310 100644 --- a/tokio/src/io/util/write_all.rs +++ b/tokio/src/io/util/write_all.rs @@ -42,7 +42,7 @@ where while !me.buf.is_empty() { let n = ready!(Pin::new(&mut *me.writer).poll_write(cx, me.buf))?; { - let (_, rest) = mem::replace(&mut *me.buf, &[]).split_at(n); + let (_, rest) = mem::take(&mut *me.buf).split_at(n); *me.buf = rest; } if n == 0 { From ee0e811a362e4aeb8f47cb530cace2d352fb4b8a Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Fri, 31 Dec 2021 12:28:14 +0900 Subject: [PATCH 33/59] Update mio to 0.8 (#4270) --- tokio/Cargo.toml | 19 ++++++----- tokio/src/net/tcp/socket.rs | 64 ++++++++++++++++++++++++------------- tokio/src/net/tcp/stream.rs | 26 ++++----------- 3 files changed, 58 insertions(+), 51 deletions(-) diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index 2a88b766c0a..3945456520b 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -49,20 +49,19 @@ macros = ["tokio-macros"] stats = [] net = [ "libc", + "mio/net", + "mio/os-ext", "mio/os-poll", - "mio/os-util", - "mio/tcp", - "mio/udp", - "mio/uds", + "socket2/all", "winapi/namedpipeapi", ] process = [ "bytes", "once_cell", "libc", + "mio/net", + "mio/os-ext", "mio/os-poll", - "mio/os-util", - "mio/uds", "signal-hook-registry", "winapi/threadpoollegacyapiset", ] @@ -75,9 +74,9 @@ rt-multi-thread = [ signal = [ "once_cell", "libc", + "mio/net", + "mio/os-ext", "mio/os-poll", - "mio/uds", - "mio/os-util", "signal-hook-registry", "winapi/consoleapi", ] @@ -94,9 +93,10 @@ pin-project-lite = "0.2.0" bytes = { version = "1.0.0", optional = true } once_cell = { version = "1.5.2", optional = true } memchr = { version = "2.2", optional = true } -mio = { version = "0.7.6", optional = true } +mio = { version = "0.8.0", optional = true 
} num_cpus = { version = "1.8.0", optional = true } parking_lot = { version = "0.11.0", optional = true } +socket2 = { version = "0.4.2", optional = true } # Currently unstable. The API exposed by these features may be broken at any time. # Requires `--cfg tokio_unstable` to enable. @@ -128,7 +128,6 @@ proptest = "1" rand = "0.8.0" tempfile = "3.1.0" async-stream = "0.3" -socket2 = "0.4" [target.'cfg(target_os = "freebsd")'.dev-dependencies] mio-aio = { version = "0.6.0", features = ["tokio"] } diff --git a/tokio/src/net/tcp/socket.rs b/tokio/src/net/tcp/socket.rs index 3c6870221c2..5fb76454e0a 100644 --- a/tokio/src/net/tcp/socket.rs +++ b/tokio/src/net/tcp/socket.rs @@ -1,5 +1,6 @@ use crate::net::{TcpListener, TcpStream}; +use std::convert::TryInto; use std::fmt; use std::io; use std::net::SocketAddr; @@ -84,7 +85,7 @@ cfg_net! { /// [`socket2`]: https://docs.rs/socket2/ #[cfg_attr(docsrs, doc(alias = "connect_std"))] pub struct TcpSocket { - inner: mio::net::TcpSocket, + inner: socket2::Socket, } } @@ -119,7 +120,11 @@ impl TcpSocket { /// } /// ``` pub fn new_v4() -> io::Result { - let inner = mio::net::TcpSocket::new_v4()?; + let inner = socket2::Socket::new( + socket2::Domain::IPV4, + socket2::Type::STREAM, + Some(socket2::Protocol::TCP), + )?; Ok(TcpSocket { inner }) } @@ -153,7 +158,11 @@ impl TcpSocket { /// } /// ``` pub fn new_v6() -> io::Result { - let inner = mio::net::TcpSocket::new_v6()?; + let inner = socket2::Socket::new( + socket2::Domain::IPV6, + socket2::Type::STREAM, + Some(socket2::Protocol::TCP), + )?; Ok(TcpSocket { inner }) } @@ -184,7 +193,7 @@ impl TcpSocket { /// } /// ``` pub fn set_reuseaddr(&self, reuseaddr: bool) -> io::Result<()> { - self.inner.set_reuseaddr(reuseaddr) + self.inner.set_reuse_address(reuseaddr) } /// Retrieves the value set for `SO_REUSEADDR` on this socket. 
@@ -210,7 +219,7 @@ impl TcpSocket { /// } /// ``` pub fn reuseaddr(&self) -> io::Result { - self.inner.get_reuseaddr() + self.inner.reuse_address() } /// Allows the socket to bind to an in-use port. Only available for unix systems @@ -244,7 +253,7 @@ impl TcpSocket { doc(cfg(all(unix, not(target_os = "solaris"), not(target_os = "illumos")))) )] pub fn set_reuseport(&self, reuseport: bool) -> io::Result<()> { - self.inner.set_reuseport(reuseport) + self.inner.set_reuse_port(reuseport) } /// Allows the socket to bind to an in-use port. Only available for unix systems @@ -279,14 +288,14 @@ impl TcpSocket { doc(cfg(all(unix, not(target_os = "solaris"), not(target_os = "illumos")))) )] pub fn reuseport(&self) -> io::Result { - self.inner.get_reuseport() + self.inner.reuse_port() } /// Sets the size of the TCP send buffer on this socket. /// /// On most operating systems, this sets the `SO_SNDBUF` socket option. pub fn set_send_buffer_size(&self, size: u32) -> io::Result<()> { - self.inner.set_send_buffer_size(size) + self.inner.set_send_buffer_size(size as usize) } /// Returns the size of the TCP send buffer for this socket. @@ -313,14 +322,14 @@ impl TcpSocket { /// /// [`set_send_buffer_size`]: #method.set_send_buffer_size pub fn send_buffer_size(&self) -> io::Result { - self.inner.get_send_buffer_size() + self.inner.send_buffer_size().map(|n| n as u32) } /// Sets the size of the TCP receive buffer on this socket. /// /// On most operating systems, this sets the `SO_RCVBUF` socket option. pub fn set_recv_buffer_size(&self, size: u32) -> io::Result<()> { - self.inner.set_recv_buffer_size(size) + self.inner.set_recv_buffer_size(size as usize) } /// Returns the size of the TCP receive buffer for this socket. 
@@ -347,7 +356,7 @@ impl TcpSocket { /// /// [`set_recv_buffer_size`]: #method.set_recv_buffer_size pub fn recv_buffer_size(&self) -> io::Result { - self.inner.get_recv_buffer_size() + self.inner.recv_buffer_size().map(|n| n as u32) } /// Sets the linger duration of this socket by setting the SO_LINGER option. @@ -395,7 +404,9 @@ impl TcpSocket { /// } /// ``` pub fn local_addr(&self) -> io::Result { - self.inner.get_localaddr() + self.inner + .local_addr() + .map(|addr| addr.as_socket().unwrap()) } /// Binds the socket to the given address. @@ -427,7 +438,7 @@ impl TcpSocket { /// } /// ``` pub fn bind(&self, addr: SocketAddr) -> io::Result<()> { - self.inner.bind(addr) + self.inner.bind(&addr.into()) } /// Establishes a TCP connection with a peer at the specified socket address. @@ -463,7 +474,13 @@ impl TcpSocket { /// } /// ``` pub async fn connect(self, addr: SocketAddr) -> io::Result { - let mio = self.inner.connect(addr)?; + self.inner.connect(&addr.into())?; + + #[cfg(windows)] + let mio = unsafe { mio::net::TcpStream::from_raw_socket(self.inner.into_raw_socket()) }; + #[cfg(unix)] + let mio = unsafe { mio::net::TcpStream::from_raw_fd(self.inner.into_raw_fd()) }; + TcpStream::connect_mio(mio).await } @@ -503,7 +520,14 @@ impl TcpSocket { /// } /// ``` pub fn listen(self, backlog: u32) -> io::Result { - let mio = self.inner.listen(backlog)?; + let backlog = backlog.try_into().unwrap_or(i32::MAX); + self.inner.listen(backlog)?; + + #[cfg(windows)] + let mio = unsafe { mio::net::TcpListener::from_raw_socket(self.inner.into_raw_socket()) }; + #[cfg(unix)] + let mio = unsafe { mio::net::TcpListener::from_raw_fd(self.inner.into_raw_fd()) }; + TcpListener::new(mio) } @@ -523,7 +547,7 @@ impl TcpSocket { /// /// #[tokio::main] /// async fn main() -> std::io::Result<()> { - /// + /// /// let socket2_socket = Socket::new(Domain::IPV4, Type::STREAM, None)?; /// /// let socket = TcpSocket::from_std_stream(socket2_socket.into()); @@ -534,16 +558,12 @@ impl TcpSocket { 
pub fn from_std_stream(std_stream: std::net::TcpStream) -> TcpSocket { #[cfg(unix)] { - use std::os::unix::io::{FromRawFd, IntoRawFd}; - let raw_fd = std_stream.into_raw_fd(); unsafe { TcpSocket::from_raw_fd(raw_fd) } } #[cfg(windows)] { - use std::os::windows::io::{FromRawSocket, IntoRawSocket}; - let raw_socket = std_stream.into_raw_socket(); unsafe { TcpSocket::from_raw_socket(raw_socket) } } @@ -572,7 +592,7 @@ impl FromRawFd for TcpSocket { /// The caller is responsible for ensuring that the socket is in /// non-blocking mode. unsafe fn from_raw_fd(fd: RawFd) -> TcpSocket { - let inner = mio::net::TcpSocket::from_raw_fd(fd); + let inner = socket2::Socket::from_raw_fd(fd); TcpSocket { inner } } } @@ -607,7 +627,7 @@ impl FromRawSocket for TcpSocket { /// The caller is responsible for ensuring that the socket is in /// non-blocking mode. unsafe fn from_raw_socket(socket: RawSocket) -> TcpSocket { - let inner = mio::net::TcpSocket::from_raw_socket(socket); + let inner = socket2::Socket::from_raw_socket(socket); TcpSocket { inner } } } diff --git a/tokio/src/net/tcp/stream.rs b/tokio/src/net/tcp/stream.rs index 60d20fd74b2..b47d97d5564 100644 --- a/tokio/src/net/tcp/stream.rs +++ b/tokio/src/net/tcp/stream.rs @@ -387,7 +387,7 @@ impl TcpStream { /// // if the readiness event is a false positive. /// match stream.try_read(&mut data) { /// Ok(n) => { - /// println!("read {} bytes", n); + /// println!("read {} bytes", n); /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; @@ -1090,9 +1090,8 @@ impl TcpStream { /// # } /// ``` pub fn linger(&self) -> io::Result> { - let mio_socket = std::mem::ManuallyDrop::new(self.to_mio()); - - mio_socket.get_linger() + let socket = self.to_socket(); + socket.linger() } /// Sets the linger duration of this socket by setting the SO_LINGER option. 
@@ -1117,23 +1116,12 @@ impl TcpStream { /// # } /// ``` pub fn set_linger(&self, dur: Option) -> io::Result<()> { - let mio_socket = std::mem::ManuallyDrop::new(self.to_mio()); - - mio_socket.set_linger(dur) + let socket = self.to_socket(); + socket.set_linger(dur) } - fn to_mio(&self) -> mio::net::TcpSocket { - #[cfg(windows)] - { - use std::os::windows::io::{AsRawSocket, FromRawSocket}; - unsafe { mio::net::TcpSocket::from_raw_socket(self.as_raw_socket()) } - } - - #[cfg(unix)] - { - use std::os::unix::io::{AsRawFd, FromRawFd}; - unsafe { mio::net::TcpSocket::from_raw_fd(self.as_raw_fd()) } - } + fn to_socket(&self) -> socket2::SockRef<'_> { + socket2::SockRef::from(self) } /// Gets the value of the `IP_TTL` option for this socket. From 0190831ec1922047751b6d40554cc4a11cf2a82c Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Fri, 31 Dec 2021 19:21:23 +0900 Subject: [PATCH 34/59] net: fix build error on master (#4361) --- tokio/src/net/tcp/socket.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio/src/net/tcp/socket.rs b/tokio/src/net/tcp/socket.rs index 5fb76454e0a..fc240e0521d 100644 --- a/tokio/src/net/tcp/socket.rs +++ b/tokio/src/net/tcp/socket.rs @@ -378,7 +378,7 @@ impl TcpSocket { /// /// [`set_linger`]: TcpSocket::set_linger pub fn linger(&self) -> io::Result> { - self.inner.get_linger() + self.inner.linger() } /// Gets the local address of this socket. 
From a9d9bde0688cb88149272d78f8239a89b357974e Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Fri, 31 Dec 2021 19:23:04 +0900 Subject: [PATCH 35/59] net: add `UdpSocket::peer_addr` (#4362) --- tokio/src/net/udp.rs | 23 +++++++++++++++++++++++ tokio/tests/udp.rs | 10 ++++++++++ 2 files changed, 33 insertions(+) diff --git a/tokio/src/net/udp.rs b/tokio/src/net/udp.rs index 504d74eb491..7dc72af35bd 100644 --- a/tokio/src/net/udp.rs +++ b/tokio/src/net/udp.rs @@ -274,6 +274,29 @@ impl UdpSocket { self.io.local_addr() } + /// Returns the socket address of the remote peer this socket was connected + /// to. + /// + /// # Example + /// + /// ``` + /// use tokio::net::UdpSocket; + /// # use std::{io, net::SocketAddr}; + /// + /// # #[tokio::main] + /// # async fn main() -> io::Result<()> { + /// let addr = "127.0.0.1:0".parse::().unwrap(); + /// let peer_addr = "127.0.0.1:11100".parse::().unwrap(); + /// let sock = UdpSocket::bind(addr).await?; + /// sock.connect(peer_addr).await?; + /// assert_eq!(sock.peer_addr()?.ip(), peer_addr.ip()); + /// # Ok(()) + /// # } + /// ``` + pub fn peer_addr(&self) -> io::Result { + self.io.peer_addr() + } + /// Connects the UDP socket setting the default destination for send() and /// limiting packets that are read via recv from the address specified in /// `addr`. 
diff --git a/tokio/tests/udp.rs b/tokio/tests/udp.rs index ec2a1e96104..11a97276c1f 100644 --- a/tokio/tests/udp.rs +++ b/tokio/tests/udp.rs @@ -3,6 +3,7 @@ use futures::future::poll_fn; use std::io; +use std::net::SocketAddr; use std::sync::Arc; use tokio::{io::ReadBuf, net::UdpSocket}; use tokio_test::assert_ok; @@ -484,3 +485,12 @@ async fn poll_ready() { } } } + +#[tokio::test] +async fn peer_addr() { + let addr = "127.0.0.1:0".parse::().unwrap(); + let peer_addr = "127.0.0.1:11100".parse::().unwrap(); + let sock = UdpSocket::bind(addr).await.unwrap(); + sock.connect(peer_addr).await.unwrap(); + assert_eq!(sock.peer_addr().unwrap().ip(), peer_addr.ip()); +} From 96370ba4ce9ea5564f094354579d5539af8bbc9d Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Fri, 31 Dec 2021 19:25:50 +0900 Subject: [PATCH 36/59] net: add `TcpSocket::take_error` (#4364) --- tokio/src/net/tcp/socket.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tokio/src/net/tcp/socket.rs b/tokio/src/net/tcp/socket.rs index fc240e0521d..84c255b9be9 100644 --- a/tokio/src/net/tcp/socket.rs +++ b/tokio/src/net/tcp/socket.rs @@ -409,6 +409,11 @@ impl TcpSocket { .map(|addr| addr.as_socket().unwrap()) } + /// Returns the value of the `SO_ERROR` option. + pub fn take_error(&self) -> io::Result> { + self.inner.take_error() + } + /// Binds the socket to the given address. /// /// This calls the `bind(2)` operating-system function. 
Behavior is From 49a9dc6743a8d90c46a51a42706943acf39a5d85 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Fri, 31 Dec 2021 20:47:34 +0900 Subject: [PATCH 37/59] net: add buffer size methods to UdpSocket (#4363) This adds the following methods: - UdpSocket::set_send_buffer_size - UdpSocket::send_buffer_size - UdpSocket::set_recv_buffer_size - UdpSocket::recv_buffer_size --- tokio/src/net/tcp/stream.rs | 6 ++-- tokio/src/net/udp.rs | 72 +++++++++++++++++++++++++++++++++++++ 2 files changed, 75 insertions(+), 3 deletions(-) diff --git a/tokio/src/net/tcp/stream.rs b/tokio/src/net/tcp/stream.rs index b47d97d5564..abfc3c6612b 100644 --- a/tokio/src/net/tcp/stream.rs +++ b/tokio/src/net/tcp/stream.rs @@ -1090,7 +1090,7 @@ impl TcpStream { /// # } /// ``` pub fn linger(&self) -> io::Result> { - let socket = self.to_socket(); + let socket = self.as_socket(); socket.linger() } @@ -1116,11 +1116,11 @@ impl TcpStream { /// # } /// ``` pub fn set_linger(&self, dur: Option) -> io::Result<()> { - let socket = self.to_socket(); + let socket = self.as_socket(); socket.set_linger(dur) } - fn to_socket(&self) -> socket2::SockRef<'_> { + fn as_socket(&self) -> socket2::SockRef<'_> { socket2::SockRef::from(self) } diff --git a/tokio/src/net/udp.rs b/tokio/src/net/udp.rs index 7dc72af35bd..24de488aaa7 100644 --- a/tokio/src/net/udp.rs +++ b/tokio/src/net/udp.rs @@ -253,6 +253,78 @@ impl UdpSocket { } } + /// Sets the size of the UDP send buffer on this socket. + /// + /// On most operating systems, this sets the `SO_SNDBUF` socket option. + pub fn set_send_buffer_size(&self, size: u32) -> io::Result<()> { + self.as_socket().set_send_buffer_size(size as usize) + } + + /// Returns the size of the UDP send buffer for this socket. + /// + /// On most operating systems, this is the value of the `SO_SNDBUF` socket + /// option. 
+    ///
+    /// Note that if [`set_send_buffer_size`] has been called on this socket
+    /// previously, the value returned by this function may not be the same as
+    /// the argument provided to `set_send_buffer_size`. This is for the
+    /// following reasons:
+    ///
+    /// * Most operating systems have minimum and maximum allowed sizes for the
+    ///   send buffer, and will clamp the provided value if it is below the
+    ///   minimum or above the maximum. The minimum and maximum buffer sizes are
+    ///   OS-dependent.
+    /// * Linux will double the buffer size to account for internal bookkeeping
+    ///   data, and returns the doubled value from `getsockopt(2)`. As per `man
+    ///   7 socket`:
+    ///   > Sets or gets the maximum socket send buffer in bytes. The
+    ///   > kernel doubles this value (to allow space for bookkeeping
+    ///   > overhead) when it is set using `setsockopt(2)`, and this doubled
+    ///   > value is returned by `getsockopt(2)`.
+    ///
+    /// [`set_send_buffer_size`]: Self::set_send_buffer_size
+    pub fn send_buffer_size(&self) -> io::Result<u32> {
+        self.as_socket().send_buffer_size().map(|n| n as u32)
+    }
+
+    /// Sets the size of the UDP receive buffer on this socket.
+    ///
+    /// On most operating systems, this sets the `SO_RCVBUF` socket option.
+    pub fn set_recv_buffer_size(&self, size: u32) -> io::Result<()> {
+        self.as_socket().set_recv_buffer_size(size as usize)
+    }
+
+    /// Returns the size of the UDP receive buffer for this socket.
+    ///
+    /// On most operating systems, this is the value of the `SO_RCVBUF` socket
+    /// option.
+    ///
+    /// Note that if [`set_recv_buffer_size`] has been called on this socket
+    /// previously, the value returned by this function may not be the same as
+    /// the argument provided to `set_recv_buffer_size`. This is for the
+    /// following reasons:
+    ///
+    /// * Most operating systems have minimum and maximum allowed sizes for the
+    ///   receive buffer, and will clamp the provided value if it is below the
+    ///   minimum or above the maximum.
The minimum and maximum buffer sizes are + /// OS-dependent. + /// * Linux will double the buffer size to account for internal bookkeeping + /// data, and returns the doubled value from `getsockopt(2)`. As per `man + /// 7 socket`: + /// > Sets or gets the maximum socket send buffer in bytes. The + /// > kernel doubles this value (to allow space for bookkeeping + /// > overhead) when it is set using `setsockopt(2)`, and this doubled + /// > value is returned by `getsockopt(2)`. + /// + /// [`set_recv_buffer_size`]: Self::set_recv_buffer_size + pub fn recv_buffer_size(&self) -> io::Result { + self.as_socket().recv_buffer_size().map(|n| n as u32) + } + + fn as_socket(&self) -> socket2::SockRef<'_> { + socket2::SockRef::from(self) + } + /// Returns the local address that this socket is bound to. /// /// # Example From 43cdb2cb5004a68d28c4394664b9f9964f3d59e2 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Fri, 31 Dec 2021 21:19:14 +0900 Subject: [PATCH 38/59] net: add tos and set_tos methods to TCP and UDP sockets (#4366) --- tokio/src/net/tcp/socket.rs | 55 +++++++++++++++++++++++++++++++++++++ tokio/src/net/udp.rs | 55 +++++++++++++++++++++++++++++++++++++ 2 files changed, 110 insertions(+) diff --git a/tokio/src/net/tcp/socket.rs b/tokio/src/net/tcp/socket.rs index 84c255b9be9..ee9633611a1 100644 --- a/tokio/src/net/tcp/socket.rs +++ b/tokio/src/net/tcp/socket.rs @@ -381,6 +381,61 @@ impl TcpSocket { self.inner.linger() } + /// Gets the value of the `IP_TOS` option for this socket. + /// + /// For more information about this option, see [`set_tos`]. 
+    ///
+    /// **NOTE:** On Windows, `IP_TOS` is only supported on [Windows 8+ or
+    /// Windows Server 2012+.](https://docs.microsoft.com/en-us/windows/win32/winsock/ipproto-ip-socket-options)
+    ///
+    /// [`set_tos`]: Self::set_tos
+    // https://docs.rs/socket2/0.4.2/src/socket2/socket.rs.html#1178
+    #[cfg(not(any(
+        target_os = "fuchsia",
+        target_os = "redox",
+        target_os = "solaris",
+        target_os = "illumos",
+    )))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(not(any(
+            target_os = "fuchsia",
+            target_os = "redox",
+            target_os = "solaris",
+            target_os = "illumos",
+        ))))
+    )]
+    pub fn tos(&self) -> io::Result<u32> {
+        self.inner.tos()
+    }
+
+    /// Sets the value for the `IP_TOS` option on this socket.
+    ///
+    /// This value sets the type-of-service field that is used in every packet
+    /// sent from this socket.
+    ///
+    /// **NOTE:** On Windows, `IP_TOS` is only supported on [Windows 8+ or
+    /// Windows Server 2012+.](https://docs.microsoft.com/en-us/windows/win32/winsock/ipproto-ip-socket-options)
+    // https://docs.rs/socket2/0.4.2/src/socket2/socket.rs.html#1178
+    #[cfg(not(any(
+        target_os = "fuchsia",
+        target_os = "redox",
+        target_os = "solaris",
+        target_os = "illumos",
+    )))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(not(any(
+            target_os = "fuchsia",
+            target_os = "redox",
+            target_os = "solaris",
+            target_os = "illumos",
+        ))))
+    )]
+    pub fn set_tos(&self, tos: u32) -> io::Result<()> {
+        self.inner.set_tos(tos)
+    }
+
     /// Gets the local address of this socket.
     ///
     /// Will fail on windows if called before `bind`.
diff --git a/tokio/src/net/udp.rs b/tokio/src/net/udp.rs
index 24de488aaa7..68cc982390b 100644
--- a/tokio/src/net/udp.rs
+++ b/tokio/src/net/udp.rs
@@ -1575,6 +1575,61 @@ impl UdpSocket {
         self.io.set_ttl(ttl)
     }
 
+    /// Gets the value of the `IP_TOS` option for this socket.
+    ///
+    /// For more information about this option, see [`set_tos`].
+ /// + /// **NOTE:** On Windows, `IP_TOS` is only supported on [Windows 8+ or + /// Windows Server 2012+.](https://docs.microsoft.com/en-us/windows/win32/winsock/ipproto-ip-socket-options) + /// + /// [`set_tos`]: Self::set_tos + // https://docs.rs/socket2/0.4.2/src/socket2/socket.rs.html#1178 + #[cfg(not(any( + target_os = "fuchsia", + target_os = "redox", + target_os = "solaris", + target_os = "illumos", + )))] + #[cfg_attr( + docsrs, + doc(cfg(not(any( + target_os = "fuchsia", + target_os = "redox", + target_os = "solaris", + target_os = "illumos", + )))) + )] + pub fn tos(&self) -> io::Result { + self.as_socket().tos() + } + + /// Sets the value for the `IP_TOS` option on this socket. + /// + /// This value sets the time-to-live field that is used in every packet sent + /// from this socket. + /// + /// **NOTE:** On Windows, `IP_TOS` is only supported on [Windows 8+ or + /// Windows Server 2012+.](https://docs.microsoft.com/en-us/windows/win32/winsock/ipproto-ip-socket-options) + // https://docs.rs/socket2/0.4.2/src/socket2/socket.rs.html#1178 + #[cfg(not(any( + target_os = "fuchsia", + target_os = "redox", + target_os = "solaris", + target_os = "illumos", + )))] + #[cfg_attr( + docsrs, + doc(cfg(not(any( + target_os = "fuchsia", + target_os = "redox", + target_os = "solaris", + target_os = "illumos", + )))) + )] + pub fn set_tos(&self, tos: u32) -> io::Result<()> { + self.as_socket().set_tos(tos) + } + /// Executes an operation of the `IP_ADD_MEMBERSHIP` type. /// /// This function specifies a new multicast group for this socket to join. From fb35c839443acd228ddefdd2a6985c07f98da86b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Braulio=20Valdivielso=20Mart=C3=ADnez?= Date: Fri, 31 Dec 2021 13:53:09 +0000 Subject: [PATCH 39/59] tokio-stream: add `StreamExt::map_while` (#4351) Fixes #4337 Rust 1.57 stabilized the `Iterator::map_while` API. This PR adds the same functionality to the `StreamExt` trait, to keep parity. 
--- tokio-stream/src/stream_ext.rs | 45 ++++++++++++++++++++ tokio-stream/src/stream_ext/map_while.rs | 52 ++++++++++++++++++++++++ 2 files changed, 97 insertions(+) create mode 100644 tokio-stream/src/stream_ext/map_while.rs diff --git a/tokio-stream/src/stream_ext.rs b/tokio-stream/src/stream_ext.rs index cc4841e629e..b79883bd6e8 100644 --- a/tokio-stream/src/stream_ext.rs +++ b/tokio-stream/src/stream_ext.rs @@ -28,6 +28,9 @@ use fuse::Fuse; mod map; use map::Map; +mod map_while; +use map_while::MapWhile; + mod merge; use merge::Merge; @@ -201,6 +204,48 @@ pub trait StreamExt: Stream { Map::new(self, f) } + /// Map this stream's items to a different type for as long as determined by + /// the provided closure. A stream of the target type will be returned, + /// which will yield elements until the closure returns `None`. + /// + /// The provided closure is executed over all elements of this stream as + /// they are made available, until it returns `None`. It is executed inline + /// with calls to [`poll_next`](Stream::poll_next). Once `None` is returned, + /// the underlying stream will not be polled again. + /// + /// Note that this function consumes the stream passed into it and returns a + /// wrapped version of it, similar to the [`Iterator::map_while`] method in the + /// standard library. 
+ /// + /// # Examples + /// + /// ``` + /// # #[tokio::main] + /// # async fn main() { + /// use tokio_stream::{self as stream, StreamExt}; + /// + /// let stream = stream::iter(1..=10); + /// let mut stream = stream.map_while(|x| { + /// if x < 4 { + /// Some(x + 3) + /// } else { + /// None + /// } + /// }); + /// assert_eq!(stream.next().await, Some(4)); + /// assert_eq!(stream.next().await, Some(5)); + /// assert_eq!(stream.next().await, Some(6)); + /// assert_eq!(stream.next().await, None); + /// # } + /// ``` + fn map_while(self, f: F) -> MapWhile + where + F: FnMut(Self::Item) -> Option, + Self: Sized, + { + MapWhile::new(self, f) + } + /// Maps this stream's items asynchronously to a different type, returning a /// new stream of the resulting type. /// diff --git a/tokio-stream/src/stream_ext/map_while.rs b/tokio-stream/src/stream_ext/map_while.rs new file mode 100644 index 00000000000..d4fd8256560 --- /dev/null +++ b/tokio-stream/src/stream_ext/map_while.rs @@ -0,0 +1,52 @@ +use crate::Stream; + +use core::fmt; +use core::pin::Pin; +use core::task::{Context, Poll}; +use pin_project_lite::pin_project; + +pin_project! { + /// Stream for the [`map_while`](super::StreamExt::map_while) method. 
+ #[must_use = "streams do nothing unless polled"] + pub struct MapWhile { + #[pin] + stream: St, + f: F, + } +} + +impl fmt::Debug for MapWhile +where + St: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("MapWhile") + .field("stream", &self.stream) + .finish() + } +} + +impl MapWhile { + pub(super) fn new(stream: St, f: F) -> Self { + MapWhile { stream, f } + } +} + +impl Stream for MapWhile +where + St: Stream, + F: FnMut(St::Item) -> Option, +{ + type Item = T; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let me = self.project(); + let f = me.f; + me.stream.poll_next(cx).map(|opt| opt.and_then(f)) + } + + fn size_hint(&self) -> (usize, Option) { + let (_, upper) = self.stream.size_hint(); + (0, upper) + } +} From c301f6d83a9771f92a86e430d4fef86ae829ccdb Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Fri, 31 Dec 2021 15:57:56 +0100 Subject: [PATCH 40/59] sync: don't inherit Send from `parking_lot::*Guard` (#4359) --- .github/workflows/ci.yml | 22 ++++++ tokio/src/loom/std/parking_lot.rs | 116 +++++++++++++++++++++++++----- 2 files changed, 119 insertions(+), 19 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 92769a3f88b..247c6f62f03 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -20,6 +20,7 @@ jobs: needs: - test - test-unstable + - test-parking_lot - miri - cross - features @@ -77,6 +78,27 @@ jobs: # bench.yml workflow runs benchmarks only on linux. if: startsWith(matrix.os, 'ubuntu') + test-parking_lot: + # The parking_lot crate has a feature called send_guard which changes when + # some of its types are Send. Tokio has some measures in place to prevent + # this from affecting when Tokio types are Send, and this test exists to + # ensure that those measures are working. + # + # This relies on the potentially affected Tokio type being listed in + # `tokio/tokio/tests/async_send_sync.rs`. 
+ name: compile tests with parking lot send guards + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Install Rust + run: rustup update stable + - uses: Swatinem/rust-cache@v1 + - name: Enable parking_lot send_guard feature + # Inserts the line "plsend = ["parking_lot/send_guard"]" right after [features] + run: sed -i '/\[features\]/a plsend = ["parking_lot/send_guard"]' tokio/Cargo.toml + - name: Compile tests with all features enabled + run: cargo build --workspace --all-features --tests + valgrind: name: valgrind runs-on: ubuntu-latest diff --git a/tokio/src/loom/std/parking_lot.rs b/tokio/src/loom/std/parking_lot.rs index 8448bed53d7..034a0ce69a5 100644 --- a/tokio/src/loom/std/parking_lot.rs +++ b/tokio/src/loom/std/parking_lot.rs @@ -3,83 +3,143 @@ //! //! This can be extended to additional types/methods as required. +use std::fmt; +use std::marker::PhantomData; +use std::ops::{Deref, DerefMut}; use std::sync::LockResult; use std::time::Duration; +// All types in this file are marked with PhantomData to ensure that +// parking_lot's send_guard feature does not leak through and affect when Tokio +// types are Send. +// +// See for more info. + // Types that do not need wrapping -pub(crate) use parking_lot::{MutexGuard, RwLockReadGuard, RwLockWriteGuard, WaitTimeoutResult}; +pub(crate) use parking_lot::WaitTimeoutResult; + +#[derive(Debug)] +pub(crate) struct Mutex(PhantomData>, parking_lot::Mutex); + +#[derive(Debug)] +pub(crate) struct RwLock(PhantomData>, parking_lot::RwLock); + +#[derive(Debug)] +pub(crate) struct Condvar(PhantomData, parking_lot::Condvar); -/// Adapter for `parking_lot::Mutex` to the `std::sync::Mutex` interface. 
#[derive(Debug)] -pub(crate) struct Mutex(parking_lot::Mutex); +pub(crate) struct MutexGuard<'a, T: ?Sized>( + PhantomData>, + parking_lot::MutexGuard<'a, T>, +); #[derive(Debug)] -pub(crate) struct RwLock(parking_lot::RwLock); +pub(crate) struct RwLockReadGuard<'a, T: ?Sized>( + PhantomData>, + parking_lot::RwLockReadGuard<'a, T>, +); -/// Adapter for `parking_lot::Condvar` to the `std::sync::Condvar` interface. #[derive(Debug)] -pub(crate) struct Condvar(parking_lot::Condvar); +pub(crate) struct RwLockWriteGuard<'a, T: ?Sized>( + PhantomData>, + parking_lot::RwLockWriteGuard<'a, T>, +); impl Mutex { #[inline] pub(crate) fn new(t: T) -> Mutex { - Mutex(parking_lot::Mutex::new(t)) + Mutex(PhantomData, parking_lot::Mutex::new(t)) } #[inline] #[cfg(all(feature = "parking_lot", not(all(loom, test)),))] #[cfg_attr(docsrs, doc(cfg(all(feature = "parking_lot",))))] pub(crate) const fn const_new(t: T) -> Mutex { - Mutex(parking_lot::const_mutex(t)) + Mutex(PhantomData, parking_lot::const_mutex(t)) } #[inline] pub(crate) fn lock(&self) -> MutexGuard<'_, T> { - self.0.lock() + MutexGuard(PhantomData, self.1.lock()) } #[inline] pub(crate) fn try_lock(&self) -> Option> { - self.0.try_lock() + self.1 + .try_lock() + .map(|guard| MutexGuard(PhantomData, guard)) } #[inline] pub(crate) fn get_mut(&mut self) -> &mut T { - self.0.get_mut() + self.1.get_mut() } // Note: Additional methods `is_poisoned` and `into_inner`, can be // provided here as needed. 
} +impl<'a, T: ?Sized> Deref for MutexGuard<'a, T> { + type Target = T; + fn deref(&self) -> &T { + self.1.deref() + } +} + +impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T> { + fn deref_mut(&mut self) -> &mut T { + self.1.deref_mut() + } +} + impl RwLock { pub(crate) fn new(t: T) -> RwLock { - RwLock(parking_lot::RwLock::new(t)) + RwLock(PhantomData, parking_lot::RwLock::new(t)) } pub(crate) fn read(&self) -> LockResult> { - Ok(self.0.read()) + Ok(RwLockReadGuard(PhantomData, self.1.read())) } pub(crate) fn write(&self) -> LockResult> { - Ok(self.0.write()) + Ok(RwLockWriteGuard(PhantomData, self.1.write())) + } +} + +impl<'a, T: ?Sized> Deref for RwLockReadGuard<'a, T> { + type Target = T; + fn deref(&self) -> &T { + self.1.deref() + } +} + +impl<'a, T: ?Sized> Deref for RwLockWriteGuard<'a, T> { + type Target = T; + fn deref(&self) -> &T { + self.1.deref() + } +} + +impl<'a, T: ?Sized> DerefMut for RwLockWriteGuard<'a, T> { + fn deref_mut(&mut self) -> &mut T { + self.1.deref_mut() } } impl Condvar { #[inline] pub(crate) fn new() -> Condvar { - Condvar(parking_lot::Condvar::new()) + Condvar(PhantomData, parking_lot::Condvar::new()) } #[inline] pub(crate) fn notify_one(&self) { - self.0.notify_one(); + self.1.notify_one(); } #[inline] pub(crate) fn notify_all(&self) { - self.0.notify_all(); + self.1.notify_all(); } #[inline] @@ -87,7 +147,7 @@ impl Condvar { &self, mut guard: MutexGuard<'a, T>, ) -> LockResult> { - self.0.wait(&mut guard); + self.1.wait(&mut guard.1); Ok(guard) } @@ -97,10 +157,28 @@ impl Condvar { mut guard: MutexGuard<'a, T>, timeout: Duration, ) -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)> { - let wtr = self.0.wait_for(&mut guard, timeout); + let wtr = self.1.wait_for(&mut guard.1, timeout); Ok((guard, wtr)) } // Note: Additional methods `wait_timeout_ms`, `wait_timeout_until`, // `wait_until` can be provided here as needed. 
} + +impl<'a, T: ?Sized + fmt::Display> fmt::Display for MutexGuard<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(&self.1, f) + } +} + +impl<'a, T: ?Sized + fmt::Display> fmt::Display for RwLockReadGuard<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(&self.1, f) + } +} + +impl<'a, T: ?Sized + fmt::Display> fmt::Display for RwLockWriteGuard<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(&self.1, f) + } +} From 12dd06336d2af8c2d735d4d9e3dc0454ad7942a0 Mon Sep 17 00:00:00 2001 From: Elichai Turkel Date: Fri, 31 Dec 2021 17:23:29 +0200 Subject: [PATCH 41/59] sync: add a `has_changed` method to `watch::Receiver` (#4342) --- tokio/src/sync/watch.rs | 42 +++++++++++++++++++++++++++++++++++++++ tokio/tests/sync_watch.rs | 7 +++++++ 2 files changed, 49 insertions(+) diff --git a/tokio/src/sync/watch.rs b/tokio/src/sync/watch.rs index 7e45c116c82..5e827fdbb8c 100644 --- a/tokio/src/sync/watch.rs +++ b/tokio/src/sync/watch.rs @@ -318,6 +318,48 @@ impl Receiver { Ref { inner } } + /// Checks if this channel contains a message that this receiver has not yet + /// seen. The new value is not marked as seen. + /// + /// Although this method is called `has_changed`, it does not check new + /// messages for equality, so this call will return true even if the new + /// message is equal to the old message. + /// + /// Returns an error if the channel has been closed. 
+ /// # Examples + /// + /// ``` + /// use tokio::sync::watch; + /// + /// #[tokio::main] + /// async fn main() { + /// let (tx, mut rx) = watch::channel("hello"); + /// + /// tx.send("goodbye").unwrap(); + /// + /// assert!(rx.has_changed().unwrap()); + /// assert_eq!(*rx.borrow_and_update(), "goodbye"); + /// + /// // The value has been marked as seen + /// assert!(!rx.has_changed().unwrap()); + /// + /// drop(tx); + /// // The `tx` handle has been dropped + /// assert!(rx.has_changed().is_err()); + /// } + /// ``` + pub fn has_changed(&self) -> Result { + // Load the version from the state + let state = self.shared.state.load(); + if state.is_closed() { + // The sender has dropped. + return Err(error::RecvError(())); + } + let new_version = state.version(); + + Ok(self.version != new_version) + } + /// Waits for a change notification, then marks the newest value as seen. /// /// If the newest value in the channel has not yet been marked seen when diff --git a/tokio/tests/sync_watch.rs b/tokio/tests/sync_watch.rs index b7bbaf721c1..b982324262c 100644 --- a/tokio/tests/sync_watch.rs +++ b/tokio/tests/sync_watch.rs @@ -174,17 +174,24 @@ fn poll_close() { fn borrow_and_update() { let (tx, mut rx) = watch::channel("one"); + assert!(!rx.has_changed().unwrap()); + tx.send("two").unwrap(); + assert!(rx.has_changed().unwrap()); assert_ready!(spawn(rx.changed()).poll()).unwrap(); assert_pending!(spawn(rx.changed()).poll()); + assert!(!rx.has_changed().unwrap()); tx.send("three").unwrap(); + assert!(rx.has_changed().unwrap()); assert_eq!(*rx.borrow_and_update(), "three"); assert_pending!(spawn(rx.changed()).poll()); + assert!(!rx.has_changed().unwrap()); drop(tx); assert_eq!(*rx.borrow_and_update(), "three"); assert_ready!(spawn(rx.changed()).poll()).unwrap_err(); + assert!(rx.has_changed().is_err()); } #[test] From 4a12163d7c434c0267fd76d7ec27239edceefec4 Mon Sep 17 00:00:00 2001 From: Tom Dohrmann Date: Mon, 3 Jan 2022 22:21:43 +0100 Subject: [PATCH 42/59] util: add 
mutable reference getters for codecs to pinned `Framed` (#4372) --- tokio-util/src/codec/framed.rs | 9 +++++++++ tokio-util/src/codec/framed_read.rs | 5 +++++ tokio-util/src/codec/framed_write.rs | 5 +++++ 3 files changed, 19 insertions(+) diff --git a/tokio-util/src/codec/framed.rs b/tokio-util/src/codec/framed.rs index aff577f22cd..3f7e4207d31 100644 --- a/tokio-util/src/codec/framed.rs +++ b/tokio-util/src/codec/framed.rs @@ -204,6 +204,15 @@ impl Framed { &mut self.inner.codec } + /// Returns a mutable reference to the underlying codec wrapped by + /// `Framed`. + /// + /// Note that care should be taken to not tamper with the underlying codec + /// as it may corrupt the stream of frames otherwise being worked with. + pub fn codec_pin_mut(self: Pin<&mut Self>) -> &mut U { + self.project().inner.project().codec + } + /// Returns a reference to the read buffer. pub fn read_buffer(&self) -> &BytesMut { &self.inner.state.read.buffer diff --git a/tokio-util/src/codec/framed_read.rs b/tokio-util/src/codec/framed_read.rs index 502a073d0f7..d6e34dbafc4 100644 --- a/tokio-util/src/codec/framed_read.rs +++ b/tokio-util/src/codec/framed_read.rs @@ -108,6 +108,11 @@ impl FramedRead { &mut self.inner.codec } + /// Returns a mutable reference to the underlying decoder. + pub fn decoder_pin_mut(self: Pin<&mut Self>) -> &mut D { + self.project().inner.project().codec + } + /// Returns a reference to the read buffer. pub fn read_buffer(&self) -> &BytesMut { &self.inner.state.buffer diff --git a/tokio-util/src/codec/framed_write.rs b/tokio-util/src/codec/framed_write.rs index d2f6cb2d564..b827d9736ac 100644 --- a/tokio-util/src/codec/framed_write.rs +++ b/tokio-util/src/codec/framed_write.rs @@ -88,6 +88,11 @@ impl FramedWrite { &mut self.inner.codec } + /// Returns a mutable reference to the underlying encoder. + pub fn encoder_pin_mut(self: Pin<&mut Self>) -> &mut E { + self.project().inner.project().codec + } + /// Returns a reference to the write buffer. 
pub fn write_buffer(&self) -> &BytesMut { &self.inner.state.buffer From 25e5141c36a45f5690b711f617b0ef04d89517d8 Mon Sep 17 00:00:00 2001 From: Rob Ede Date: Tue, 4 Jan 2022 21:01:12 +0000 Subject: [PATCH 43/59] test: fix version requirement of tokio-stream (#4376) --- tokio-test/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-test/Cargo.toml b/tokio-test/Cargo.toml index 59af228daae..16d688b6ed5 100644 --- a/tokio-test/Cargo.toml +++ b/tokio-test/Cargo.toml @@ -18,7 +18,7 @@ categories = ["asynchronous", "testing"] [dependencies] tokio = { version = "1.2.0", path = "../tokio", features = ["rt", "sync", "time", "test-util"] } -tokio-stream = { version = "0.1", path = "../tokio-stream" } +tokio-stream = { version = "0.1.1", path = "../tokio-stream" } async-stream = "0.3" bytes = "1.0.0" From cc8ad367a0e5d8536f8be58fe560bfdea1a976a5 Mon Sep 17 00:00:00 2001 From: Carl Lerche Date: Thu, 6 Jan 2022 17:19:26 -0800 Subject: [PATCH 44/59] rt: refactor current-thread scheduler (#4377) This patch does some refactoring to the current-thread scheduler bringing it closer to the structure of the multi-threaded scheduler. More specifically, the core scheduler data is stored in a Core struct and that struct is passed around as a "token" indicating permission to do work. The Core structure is also stored in the thread-local context. This refactor is intended to support #4373, making it easier to track counters in more locations in the current-thread scheduler. 
--- tokio/src/runtime/basic_scheduler.rs | 419 ++++++++++-------- tokio/src/runtime/mod.rs | 2 +- .../src/runtime/tests/loom_basic_scheduler.rs | 20 +- tokio/src/runtime/thread_pool/mod.rs | 3 - tokio/src/runtime/thread_pool/worker.rs | 3 +- .../thread_pool => util}/atomic_cell.rs | 10 +- tokio/src/util/mod.rs | 3 + 7 files changed, 245 insertions(+), 215 deletions(-) rename tokio/src/{runtime/thread_pool => util}/atomic_cell.rs (77%) diff --git a/tokio/src/runtime/basic_scheduler.rs b/tokio/src/runtime/basic_scheduler.rs index 872d0d5b897..d873fcf4699 100644 --- a/tokio/src/runtime/basic_scheduler.rs +++ b/tokio/src/runtime/basic_scheduler.rs @@ -3,10 +3,12 @@ use crate::loom::sync::atomic::AtomicBool; use crate::loom::sync::Mutex; use crate::park::{Park, Unpark}; use crate::runtime::context::EnterGuard; +use crate::runtime::driver::Driver; use crate::runtime::stats::{RuntimeStats, WorkerStatsBatcher}; use crate::runtime::task::{self, JoinHandle, OwnedTasks, Schedule, Task}; use crate::runtime::Callback; use crate::sync::notify::Notify; +use crate::util::atomic_cell::AtomicCell; use crate::util::{waker_ref, Wake, WakerRef}; use std::cell::RefCell; @@ -19,13 +21,12 @@ use std::task::Poll::{Pending, Ready}; use std::time::Duration; /// Executes tasks on the current thread -pub(crate) struct BasicScheduler { - /// Inner state guarded by a mutex that is shared - /// between all `block_on` calls. - inner: Mutex>>, +pub(crate) struct BasicScheduler { + /// Core scheduler data is acquired by a thread entering `block_on`. + core: AtomicCell, /// Notifier for waking up other threads to steal the - /// parker. + /// driver. notify: Notify, /// Sendable task spawner @@ -38,15 +39,11 @@ pub(crate) struct BasicScheduler { context_guard: Option, } -/// The inner scheduler that owns the task queue and the main parker P. -struct Inner { +/// Data required for executing the scheduler. 
The struct is passed around to +/// a function that will perform the scheduling work and acts as a capability token. +struct Core { /// Scheduler run queue - /// - /// When the scheduler is executed, the queue is removed from `self` and - /// moved into `Context`. - /// - /// This indirection is to allow `BasicScheduler` to be `Send`. - tasks: Option, + tasks: VecDeque>>, /// Sendable task spawner spawner: Spawner, @@ -54,13 +51,10 @@ struct Inner { /// Current tick tick: u8, - /// Thread park handle - park: P, - - /// Callback for a worker parking itself - before_park: Option, - /// Callback for a worker unparking itself - after_unpark: Option, + /// Runtime driver + /// + /// The driver is removed before starting to park the thread + driver: Option, /// Stats batcher stats: WorkerStatsBatcher, @@ -71,13 +65,6 @@ pub(crate) struct Spawner { shared: Arc, } -struct Tasks { - /// Local run queue. - /// - /// Tasks notified from the current thread are pushed into this queue. - queue: VecDeque>>, -} - /// A remote scheduler entry. /// /// These are filled in by remote threads sending instructions to the scheduler. @@ -100,22 +87,29 @@ struct Shared { owned: OwnedTasks>, /// Unpark the blocked thread. - unpark: Box, + unpark: ::Unpark, /// Indicates whether the blocked on thread was woken. woken: AtomicBool, + /// Callback for a worker parking itself + before_park: Option, + + /// Callback for a worker unparking itself + after_unpark: Option, + /// Keeps track of various runtime stats. stats: RuntimeStats, } /// Thread-local context. struct Context { - /// Shared scheduler state - shared: Arc, + /// Handle to the spawner + spawner: Spawner, - /// Local queue - tasks: RefCell, + /// Scheduler core, enabling the holder of `Context` to execute the + /// scheduler. + core: RefCell>>, } /// Initial queue capacity. @@ -133,38 +127,36 @@ const REMOTE_FIRST_INTERVAL: u8 = 31; // Tracks the current BasicScheduler. 
scoped_thread_local!(static CURRENT: Context); -impl BasicScheduler

{ +impl BasicScheduler { pub(crate) fn new( - park: P, + driver: Driver, before_park: Option, after_unpark: Option, - ) -> BasicScheduler

{ - let unpark = Box::new(park.unpark()); + ) -> BasicScheduler { + let unpark = driver.unpark(); let spawner = Spawner { shared: Arc::new(Shared { queue: Mutex::new(Some(VecDeque::with_capacity(INITIAL_CAPACITY))), owned: OwnedTasks::new(), - unpark: unpark as Box, + unpark, woken: AtomicBool::new(false), + before_park, + after_unpark, stats: RuntimeStats::new(1), }), }; - let inner = Mutex::new(Some(Inner { - tasks: Some(Tasks { - queue: VecDeque::with_capacity(INITIAL_CAPACITY), - }), + let core = AtomicCell::new(Some(Box::new(Core { + tasks: VecDeque::with_capacity(INITIAL_CAPACITY), spawner: spawner.clone(), tick: 0, - park, - before_park, - after_unpark, + driver: Some(driver), stats: WorkerStatsBatcher::new(0), - })); + }))); BasicScheduler { - inner, + core, notify: Notify::new(), spawner, context_guard: None, @@ -178,12 +170,12 @@ impl BasicScheduler

{ pub(crate) fn block_on(&self, future: F) -> F::Output { pin!(future); - // Attempt to steal the dedicated parker and block_on the future if we can there, - // otherwise, lets select on a notification that the parker is available - // or the future is complete. + // Attempt to steal the scheduler core and block_on the future if we can + // there, otherwise, lets select on a notification that the core is + // available or the future is complete. loop { - if let Some(inner) = &mut self.take_inner() { - return inner.block_on(future); + if let Some(core) = self.take_core() { + return core.block_on(future); } else { let mut enter = crate::runtime::enter(false); @@ -210,11 +202,14 @@ impl BasicScheduler

{ } } - fn take_inner(&self) -> Option> { - let inner = self.inner.lock().take()?; + fn take_core(&self) -> Option> { + let core = self.core.take()?; - Some(InnerGuard { - inner: Some(inner), + Some(CoreGuard { + context: Context { + spawner: self.spawner.clone(), + core: RefCell::new(Some(core)), + }, basic_scheduler: self, }) } @@ -224,156 +219,109 @@ impl BasicScheduler

{ } } -impl Inner

{ - /// Blocks on the provided future and drives the runtime's driver. - fn block_on(&mut self, future: F) -> F::Output { - enter(self, |scheduler, context| { - let _enter = crate::runtime::enter(false); - let waker = scheduler.spawner.waker_ref(); - let mut cx = std::task::Context::from_waker(&waker); - - pin!(future); - - 'outer: loop { - if scheduler.spawner.reset_woken() { - scheduler.stats.incr_poll_count(); - if let Ready(v) = crate::coop::budget(|| future.as_mut().poll(&mut cx)) { - return v; - } - } +impl Context { + /// Execute the closure with the given scheduler core stored in the + /// thread-local context. + fn run_task(&self, mut core: Box, f: impl FnOnce() -> R) -> (Box, R) { + core.stats.incr_poll_count(); + self.enter(core, || crate::coop::budget(f)) + } - for _ in 0..MAX_TASKS_PER_TICK { - // Get and increment the current tick - let tick = scheduler.tick; - scheduler.tick = scheduler.tick.wrapping_add(1); + /// Blocks the current thread until an event is received by the driver, + /// including I/O events, timer events, ... + fn park(&self, mut core: Box) -> Box { + let mut driver = core.driver.take().expect("driver missing"); + + if let Some(f) = &self.spawner.shared.before_park { + // Incorrect lint, the closures are actually different types so `f` + // cannot be passed as an argument to `enter`. 
+ #[allow(clippy::redundant_closure)] + let (c, _) = self.enter(core, || f()); + core = c; + } - let entry = if tick % REMOTE_FIRST_INTERVAL == 0 { - scheduler.spawner.pop().or_else(|| { - context - .tasks - .borrow_mut() - .queue - .pop_front() - .map(RemoteMsg::Schedule) - }) - } else { - context - .tasks - .borrow_mut() - .queue - .pop_front() - .map(RemoteMsg::Schedule) - .or_else(|| scheduler.spawner.pop()) - }; + // This check will fail if `before_park` spawns a task for us to run + // instead of parking the thread + if core.tasks.is_empty() { + // Park until the thread is signaled + core.stats.about_to_park(); + core.stats.submit(&core.spawner.shared.stats); - let entry = match entry { - Some(entry) => entry, - None => { - if let Some(f) = &scheduler.before_park { - f(); - } - // This check will fail if `before_park` spawns a task for us to run - // instead of parking the thread - if context.tasks.borrow_mut().queue.is_empty() { - // Park until the thread is signaled - scheduler.stats.about_to_park(); - scheduler.stats.submit(&scheduler.spawner.shared.stats); - scheduler.park.park().expect("failed to park"); - scheduler.stats.returned_from_park(); - } - if let Some(f) = &scheduler.after_unpark { - f(); - } + let (c, _) = self.enter(core, || { + driver.park().expect("failed to park"); + }); - // Try polling the `block_on` future next - continue 'outer; - } - }; + core = c; + core.stats.returned_from_park(); + } - match entry { - RemoteMsg::Schedule(task) => { - scheduler.stats.incr_poll_count(); - let task = context.shared.owned.assert_owner(task); - crate::coop::budget(|| task.run()) - } - } - } + if let Some(f) = &self.spawner.shared.after_unpark { + // Incorrect lint, the closures are actually different types so `f` + // cannot be passed as an argument to `enter`. + #[allow(clippy::redundant_closure)] + let (c, _) = self.enter(core, || f()); + core = c; + } - // Yield to the park, this drives the timer and pulls any pending - // I/O events. 
- scheduler.stats.submit(&scheduler.spawner.shared.stats); - scheduler - .park - .park_timeout(Duration::from_millis(0)) - .expect("failed to park"); - } - }) + core.driver = Some(driver); + core } -} -/// Enters the scheduler context. This sets the queue and other necessary -/// scheduler state in the thread-local. -fn enter(scheduler: &mut Inner

, f: F) -> R -where - F: FnOnce(&mut Inner

, &Context) -> R, - P: Park, -{ - // Ensures the run queue is placed back in the `BasicScheduler` instance - // once `block_on` returns.` - struct Guard<'a, P: Park> { - context: Option, - scheduler: &'a mut Inner

, - } + /// Checks the driver for new events without blocking the thread. + fn park_yield(&self, mut core: Box) -> Box { + let mut driver = core.driver.take().expect("driver missing"); - impl Drop for Guard<'_, P> { - fn drop(&mut self) { - let Context { tasks, .. } = self.context.take().expect("context missing"); - self.scheduler.tasks = Some(tasks.into_inner()); - } - } + core.stats.submit(&core.spawner.shared.stats); + let (mut core, _) = self.enter(core, || { + driver + .park_timeout(Duration::from_millis(0)) + .expect("failed to park"); + }); - // Remove `tasks` from `self` and place it in a `Context`. - let tasks = scheduler.tasks.take().expect("invalid state"); + core.driver = Some(driver); + core + } - let guard = Guard { - context: Some(Context { - shared: scheduler.spawner.shared.clone(), - tasks: RefCell::new(tasks), - }), - scheduler, - }; + fn enter(&self, core: Box, f: impl FnOnce() -> R) -> (Box, R) { + // Store the scheduler core in the thread-local context + // + // A drop-guard is employed at a higher level. + *self.core.borrow_mut() = Some(core); - let context = guard.context.as_ref().unwrap(); - let scheduler = &mut *guard.scheduler; + // Execute the closure while tracking the execution budget + let ret = f(); - CURRENT.set(context, || f(scheduler, context)) + // Take the scheduler core back + let core = self.core.borrow_mut().take().expect("core missing"); + (core, ret) + } } -impl Drop for BasicScheduler

{ +impl Drop for BasicScheduler { fn drop(&mut self) { // Avoid a double panic if we are currently panicking and // the lock may be poisoned. - let mut inner = match self.inner.lock().take() { - Some(inner) => inner, + let core = match self.take_core() { + Some(core) => core, None if std::thread::panicking() => return, - None => panic!("Oh no! We never placed the Inner state back, this is a bug!"), + None => panic!("Oh no! We never placed the Core back, this is a bug!"), }; - enter(&mut inner, |scheduler, context| { + core.enter(|mut core, context| { // Drain the OwnedTasks collection. This call also closes the // collection, ensuring that no tasks are ever pushed after this // call returns. - context.shared.owned.close_and_shutdown_all(); + context.spawner.shared.owned.close_and_shutdown_all(); // Drain local queue // We already shut down every task, so we just need to drop the task. - for task in context.tasks.borrow_mut().queue.drain(..) { + while let Some(task) = core.tasks.pop_front() { drop(task); } // Drain remote queue and set it to None - let remote_queue = scheduler.spawner.shared.queue.lock().take(); + let remote_queue = core.spawner.shared.queue.lock().take(); // Using `Option::take` to replace the shared queue with `None`. // We already shut down every task, so we just need to drop the task. @@ -387,12 +335,14 @@ impl Drop for BasicScheduler

{ } } - assert!(context.shared.owned.is_empty()); + assert!(context.spawner.shared.owned.is_empty()); + + (core, ()) }); } } -impl fmt::Debug for BasicScheduler

{ +impl fmt::Debug for BasicScheduler { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("BasicScheduler").finish() } @@ -455,8 +405,13 @@ impl Schedule for Arc { fn schedule(&self, task: task::Notified) { CURRENT.with(|maybe_cx| match maybe_cx { - Some(cx) if Arc::ptr_eq(self, &cx.shared) => { - cx.tasks.borrow_mut().queue.push_back(task); + Some(cx) if Arc::ptr_eq(self, &cx.spawner.shared) => { + cx.core + .borrow_mut() + .as_mut() + .expect("core missing") + .tasks + .push_back(task); } _ => { // If the queue is None, then the runtime has shut down. We @@ -484,35 +439,107 @@ impl Wake for Shared { } } -// ===== InnerGuard ===== +// ===== CoreGuard ===== -/// Used to ensure we always place the Inner value -/// back into its slot in `BasicScheduler`, even if the -/// future panics. -struct InnerGuard<'a, P: Park> { - inner: Option>, - basic_scheduler: &'a BasicScheduler

, +/// Used to ensure we always place the `Core` value back into its slot in +/// `BasicScheduler`, even if the future panics. +struct CoreGuard<'a> { + context: Context, + basic_scheduler: &'a BasicScheduler, } -impl InnerGuard<'_, P> { - fn block_on(&mut self, future: F) -> F::Output { - // The only time inner gets set to `None` is if we have dropped - // already so this unwrap is safe. - self.inner.as_mut().unwrap().block_on(future) +impl CoreGuard<'_> { + fn block_on(self, future: F) -> F::Output { + self.enter(|mut core, context| { + let _enter = crate::runtime::enter(false); + let waker = context.spawner.waker_ref(); + let mut cx = std::task::Context::from_waker(&waker); + + pin!(future); + + 'outer: loop { + if core.spawner.reset_woken() { + let (c, res) = context.run_task(core, || future.as_mut().poll(&mut cx)); + + core = c; + + if let Ready(v) = res { + return (core, v); + } + } + + for _ in 0..MAX_TASKS_PER_TICK { + // Get and increment the current tick + let tick = core.tick; + core.tick = core.tick.wrapping_add(1); + + let entry = if tick % REMOTE_FIRST_INTERVAL == 0 { + core.spawner + .pop() + .or_else(|| core.tasks.pop_front().map(RemoteMsg::Schedule)) + } else { + core.tasks + .pop_front() + .map(RemoteMsg::Schedule) + .or_else(|| core.spawner.pop()) + }; + + let entry = match entry { + Some(entry) => entry, + None => { + core = context.park(core); + + // Try polling the `block_on` future next + continue 'outer; + } + }; + + match entry { + RemoteMsg::Schedule(task) => { + let task = context.spawner.shared.owned.assert_owner(task); + + let (c, _) = context.run_task(core, || { + task.run(); + }); + + core = c; + } + } + } + + // Yield to the driver, this drives the timer and pulls any + // pending I/O events. + core = context.park_yield(core); + } + }) + } + + /// Enters the scheduler context. This sets the queue and other necessary + /// scheduler state in the thread-local. 
+ fn enter(self, f: F) -> R + where + F: FnOnce(Box, &Context) -> (Box, R), + { + // Remove `core` from `context` to pass into the closure. + let core = self.context.core.borrow_mut().take().expect("core missing"); + + // Call the closure and place `core` back + let (core, ret) = CURRENT.set(&self.context, || f(core, &self.context)); + + *self.context.core.borrow_mut() = Some(core); + + ret } } -impl Drop for InnerGuard<'_, P> { +impl Drop for CoreGuard<'_> { fn drop(&mut self) { - if let Some(scheduler) = self.inner.take() { - let mut lock = self.basic_scheduler.inner.lock(); - + if let Some(core) = self.context.core.borrow_mut().take() { // Replace old scheduler back into the state to allow // other threads to pick it up and drive it. - lock.replace(scheduler); + self.basic_scheduler.core.set(core); - // Wake up other possible threads that could steal - // the dedicated parker P. + // Wake up other possible threads that could steal the driver. self.basic_scheduler.notify.notify_one() } } diff --git a/tokio/src/runtime/mod.rs b/tokio/src/runtime/mod.rs index 847dd5972e1..e77c5e3a0f8 100644 --- a/tokio/src/runtime/mod.rs +++ b/tokio/src/runtime/mod.rs @@ -283,7 +283,7 @@ cfg_rt! { #[derive(Debug)] enum Kind { /// Execute all tasks on the current-thread. - CurrentThread(BasicScheduler), + CurrentThread(BasicScheduler), /// Execute tasks across multiple threads. #[cfg(feature = "rt-multi-thread")] diff --git a/tokio/src/runtime/tests/loom_basic_scheduler.rs b/tokio/src/runtime/tests/loom_basic_scheduler.rs index d2894b9b27e..a772603f711 100644 --- a/tokio/src/runtime/tests/loom_basic_scheduler.rs +++ b/tokio/src/runtime/tests/loom_basic_scheduler.rs @@ -34,20 +34,22 @@ fn assert_at_most_num_polls(rt: Arc, at_most_polls: usize) { #[test] fn block_on_num_polls() { loom::model(|| { - // we expect at most 3 number of polls because there are - // three points at which we poll the future. 
At any of these - // points it can be ready: + // we expect at most 4 number of polls because there are three points at + // which we poll the future and an opportunity for a false-positive.. At + // any of these points it can be ready: // - // - when we fail to steal the parker and we block on a - // notification that it is available. + // - when we fail to steal the parker and we block on a notification + // that it is available. // // - when we steal the parker and we schedule the future // - // - when the future is woken up and we have ran the max - // number of tasks for the current tick or there are no - // more tasks to run. + // - when the future is woken up and we have ran the max number of tasks + // for the current tick or there are no more tasks to run. // - let at_most = 3; + // - a thread is notified that the parker is available but a third + // thread acquires it before the notified thread can. + // + let at_most = 4; let rt1 = Arc::new(Builder::new_current_thread().build().unwrap()); let rt2 = rt1.clone(); diff --git a/tokio/src/runtime/thread_pool/mod.rs b/tokio/src/runtime/thread_pool/mod.rs index 82e34c78d28..3e1ce448215 100644 --- a/tokio/src/runtime/thread_pool/mod.rs +++ b/tokio/src/runtime/thread_pool/mod.rs @@ -1,8 +1,5 @@ //! 
Threadpool -mod atomic_cell; -use atomic_cell::AtomicCell; - mod idle; use self::idle::Idle; diff --git a/tokio/src/runtime/thread_pool/worker.rs b/tokio/src/runtime/thread_pool/worker.rs index ae8efe6724f..27d0d5e7d32 100644 --- a/tokio/src/runtime/thread_pool/worker.rs +++ b/tokio/src/runtime/thread_pool/worker.rs @@ -66,8 +66,9 @@ use crate::runtime::enter::EnterContext; use crate::runtime::park::{Parker, Unparker}; use crate::runtime::stats::{RuntimeStats, WorkerStatsBatcher}; use crate::runtime::task::{Inject, JoinHandle, OwnedTasks}; -use crate::runtime::thread_pool::{AtomicCell, Idle}; +use crate::runtime::thread_pool::Idle; use crate::runtime::{queue, task, Callback}; +use crate::util::atomic_cell::AtomicCell; use crate::util::FastRand; use std::cell::RefCell; diff --git a/tokio/src/runtime/thread_pool/atomic_cell.rs b/tokio/src/util/atomic_cell.rs similarity index 77% rename from tokio/src/runtime/thread_pool/atomic_cell.rs rename to tokio/src/util/atomic_cell.rs index 98847e6ffa1..07e37303a7b 100644 --- a/tokio/src/runtime/thread_pool/atomic_cell.rs +++ b/tokio/src/util/atomic_cell.rs @@ -3,7 +3,7 @@ use crate::loom::sync::atomic::AtomicPtr; use std::ptr; use std::sync::atomic::Ordering::AcqRel; -pub(super) struct AtomicCell { +pub(crate) struct AtomicCell { data: AtomicPtr, } @@ -11,22 +11,22 @@ unsafe impl Send for AtomicCell {} unsafe impl Sync for AtomicCell {} impl AtomicCell { - pub(super) fn new(data: Option>) -> AtomicCell { + pub(crate) fn new(data: Option>) -> AtomicCell { AtomicCell { data: AtomicPtr::new(to_raw(data)), } } - pub(super) fn swap(&self, val: Option>) -> Option> { + pub(crate) fn swap(&self, val: Option>) -> Option> { let old = self.data.swap(to_raw(val), AcqRel); from_raw(old) } - pub(super) fn set(&self, val: Box) { + pub(crate) fn set(&self, val: Box) { let _ = self.swap(Some(val)); } - pub(super) fn take(&self) -> Option> { + pub(crate) fn take(&self) -> Option> { self.swap(None) } } diff --git a/tokio/src/util/mod.rs 
b/tokio/src/util/mod.rs index df30f2b86a9..f0a79a7cca9 100644 --- a/tokio/src/util/mod.rs +++ b/tokio/src/util/mod.rs @@ -3,6 +3,9 @@ cfg_io_driver! { pub(crate) mod slab; } +#[cfg(feature = "rt")] +pub(crate) mod atomic_cell; + #[cfg(any( // io driver uses `WakeList` directly feature = "net", From cb9a68eb1ac15a9b8c62915e3fed2ec3ef1e1e2c Mon Sep 17 00:00:00 2001 From: Eliza Weisman Date: Fri, 7 Jan 2022 20:13:28 -0800 Subject: [PATCH 45/59] examples: update `tracing-subscriber` to 0.3 (#4227) --- .cargo/audit.toml | 2 -- examples/Cargo.toml | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/.cargo/audit.toml b/.cargo/audit.toml index d03b022ef30..25e764be2b1 100644 --- a/.cargo/audit.toml +++ b/.cargo/audit.toml @@ -2,8 +2,6 @@ [advisories] ignore = [ - # https://github.com/tokio-rs/tokio/issues/4177 - "RUSTSEC-2020-0159", # We depend on nix 0.22 only via mio-aio, a dev-dependency. # https://github.com/tokio-rs/tokio/pull/4255#issuecomment-974786349 "RUSTSEC-2021-0119", diff --git a/examples/Cargo.toml b/examples/Cargo.toml index b37adff09d6..d2aca69d84a 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -12,7 +12,7 @@ tokio-util = { version = "0.7.0", path = "../tokio-util", features = ["full"] } tokio-stream = { version = "0.1", path = "../tokio-stream" } tracing = "0.1" -tracing-subscriber = { version = "0.2.7", default-features = false, features = ["fmt", "ansi", "env-filter", "chrono", "tracing-log"] } +tracing-subscriber = { version = "0.3.1", default-features = false, features = ["fmt", "ansi", "env-filter", "tracing-log"] } bytes = "1.0.0" futures = { version = "0.3.0", features = ["thread-pool"]} http = "0.2" From 553cc3b194df875cac8736473e1f01cf3e40a660 Mon Sep 17 00:00:00 2001 From: Trey Smith Date: Sat, 8 Jan 2022 07:21:11 -0500 Subject: [PATCH 46/59] net: document that port 0 picks a random port (#4386) --- tokio/src/net/udp.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tokio/src/net/udp.rs 
b/tokio/src/net/udp.rs index 68cc982390b..a6d80c6f760 100644 --- a/tokio/src/net/udp.rs +++ b/tokio/src/net/udp.rs @@ -128,6 +128,10 @@ impl UdpSocket { /// This function will create a new UDP socket and attempt to bind it to /// the `addr` provided. /// + /// Binding with a port number of 0 will request that the OS assigns a port + /// to this listener. The port allocated can be queried via the `local_addr` + /// method. + /// /// # Example /// /// ```no_run From ac2343d9842e1a12ec986a95a73148544ee91a1a Mon Sep 17 00:00:00 2001 From: Jamie Date: Sat, 8 Jan 2022 13:58:26 +0100 Subject: [PATCH 47/59] net: add `UnwindSafe` impl to `PollEvented` (#4384) --- tokio/src/io/poll_evented.rs | 5 +++++ tokio/tests/net_types_unwind.rs | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+) create mode 100644 tokio/tests/net_types_unwind.rs diff --git a/tokio/src/io/poll_evented.rs b/tokio/src/io/poll_evented.rs index 44e68a2a9a0..ce4c1426acc 100644 --- a/tokio/src/io/poll_evented.rs +++ b/tokio/src/io/poll_evented.rs @@ -4,6 +4,7 @@ use mio::event::Source; use std::fmt; use std::io; use std::ops::Deref; +use std::panic::{RefUnwindSafe, UnwindSafe}; cfg_io_driver! { /// Associates an I/O resource that implements the [`std::io::Read`] and/or @@ -185,6 +186,10 @@ feature! 
{ } } +impl UnwindSafe for PollEvented {} + +impl RefUnwindSafe for PollEvented {} + impl Deref for PollEvented { type Target = E; diff --git a/tokio/tests/net_types_unwind.rs b/tokio/tests/net_types_unwind.rs new file mode 100644 index 00000000000..4eb4a87fd7a --- /dev/null +++ b/tokio/tests/net_types_unwind.rs @@ -0,0 +1,32 @@ +#![warn(rust_2018_idioms)] +#![cfg(feature = "full")] + +use std::panic::{RefUnwindSafe, UnwindSafe}; + +#[test] +fn net_types_are_unwind_safe() { + is_unwind_safe::(); + is_unwind_safe::(); + is_unwind_safe::(); + is_unwind_safe::(); +} + +#[test] +#[cfg(unix)] +fn unix_net_types_are_unwind_safe() { + is_unwind_safe::(); + is_unwind_safe::(); + is_unwind_safe::(); +} + +#[test] +#[cfg(windows)] +fn windows_net_types_are_unwind_safe() { + use tokio::net::windows::named_pipe::NamedPipeClient; + use tokio::net::windows::named_pipe::NamedPipeServer; + + is_unwind_safe::(); + is_unwind_safe::(); +} + +fn is_unwind_safe() {} From c800deaaccf1dd5caeb08d9416c4c570939c0c85 Mon Sep 17 00:00:00 2001 From: b-naber Date: Sun, 9 Jan 2022 12:41:30 +0100 Subject: [PATCH 48/59] util: add `shrink_to_fit` and `compact` methods to `DelayQueue` (#4170) --- tokio-util/Cargo.toml | 2 +- tokio-util/src/time/delay_queue.rs | 362 ++++++++++++++++++++++++--- tokio-util/src/time/wheel/mod.rs | 1 + tokio-util/src/time/wheel/stack.rs | 4 +- tokio-util/tests/time_delay_queue.rs | 173 +++++++++++++ 5 files changed, 503 insertions(+), 39 deletions(-) diff --git a/tokio-util/Cargo.toml b/tokio-util/Cargo.toml index 00fc018cdce..f3c19c1e3f3 100644 --- a/tokio-util/Cargo.toml +++ b/tokio-util/Cargo.toml @@ -44,7 +44,7 @@ futures-io = { version = "0.3.0", optional = true } futures-util = { version = "0.3.0", optional = true } log = "0.4" pin-project-lite = "0.2.0" -slab = { version = "0.4.1", optional = true } # Backs `DelayQueue` +slab = { version = "0.4.4", optional = true } # Backs `DelayQueue` [dev-dependencies] tokio = { version = "1.0.0", path = "../tokio", features = 
["full"] } diff --git a/tokio-util/src/time/delay_queue.rs b/tokio-util/src/time/delay_queue.rs index 6da83a574d0..697670d7581 100644 --- a/tokio-util/src/time/delay_queue.rs +++ b/tokio-util/src/time/delay_queue.rs @@ -9,8 +9,13 @@ use crate::time::wheel::{self, Wheel}; use futures_core::ready; use tokio::time::{error::Error, sleep_until, Duration, Instant, Sleep}; +use core::ops::{Index, IndexMut}; use slab::Slab; use std::cmp; +use std::collections::HashMap; +use std::convert::From; +use std::fmt; +use std::fmt::Debug; use std::future::Future; use std::marker::PhantomData; use std::pin::Pin; @@ -128,7 +133,7 @@ use std::task::{self, Poll, Waker}; #[derive(Debug)] pub struct DelayQueue { /// Stores data associated with entries - slab: Slab>, + slab: SlabStorage, /// Lookup structure tracking all delays in the queue wheel: Wheel>, @@ -152,6 +157,216 @@ pub struct DelayQueue { waker: Option, } +#[derive(Default)] +struct SlabStorage { + inner: Slab>, + + // A `compact` call requires a re-mapping of the `Key`s that were changed + // during the `compact` call of the `slab`. Since the keys that were given out + // cannot be changed retroactively we need to keep track of these re-mappings. + // The keys of `key_map` correspond to the old keys that were given out and + // the values to the `Key`s that were re-mapped by the `compact` call. + key_map: HashMap, + + // Index used to create new keys to hand out. + next_key_index: usize, + + // Whether `compact` has been called, necessary in order to decide whether + // to include keys in `key_map`. 
+ compact_called: bool, +} + +impl SlabStorage { + pub(crate) fn with_capacity(capacity: usize) -> SlabStorage { + SlabStorage { + inner: Slab::with_capacity(capacity), + key_map: HashMap::new(), + next_key_index: 0, + compact_called: false, + } + } + + // Inserts data into the inner slab and re-maps keys if necessary + pub(crate) fn insert(&mut self, val: Data) -> Key { + let mut key = KeyInternal::new(self.inner.insert(val)); + let key_contained = self.key_map.contains_key(&key.into()); + + if key_contained { + // It's possible that a `compact` call creates capacitiy in `self.inner` in + // such a way that a `self.inner.insert` call creates a `key` which was + // previously given out during an `insert` call prior to the `compact` call. + // If `key` is contained in `self.key_map`, we have encountered this exact situation, + // We need to create a new key `key_to_give_out` and include the relation + // `key_to_give_out` -> `key` in `self.key_map`. + let key_to_give_out = self.create_new_key(); + assert!(!self.key_map.contains_key(&key_to_give_out.into())); + self.key_map.insert(key_to_give_out.into(), key); + key = key_to_give_out; + } else if self.compact_called { + // Include an identity mapping in `self.key_map` in order to allow us to + // panic if a key that was handed out is removed more than once. + self.key_map.insert(key.into(), key); + } + + key.into() + } + + // Re-map the key in case compact was previously called. + // Note: Since we include identity mappings in key_map after compact was called, + // we have information about all keys that were handed out. In the case in which + // compact was called and we try to remove a Key that was previously removed + // we can detect invalid keys if no key is found in `key_map`. This is necessary + // in order to prevent situations in which a previously removed key + // corresponds to a re-mapped key internally and which would then be incorrectly + // removed from the slab. 
+ // + // Example to illuminate this problem: + // + // Let's assume our `key_map` is {1 -> 2, 2 -> 1} and we call remove(1). If we + // were to remove 1 again, we would not find it inside `key_map` anymore. + // If we were to imply from this that no re-mapping was necessary, we would + // incorrectly remove 1 from `self.slab.inner`, which corresponds to the + // handed-out key 2. + pub(crate) fn remove(&mut self, key: &Key) -> Data { + let remapped_key = if self.compact_called { + match self.key_map.remove(key) { + Some(key_internal) => key_internal, + None => panic!("invalid key"), + } + } else { + (*key).into() + }; + + self.inner.remove(remapped_key.index) + } + + pub(crate) fn shrink_to_fit(&mut self) { + self.inner.shrink_to_fit(); + self.key_map.shrink_to_fit(); + } + + pub(crate) fn compact(&mut self) { + if !self.compact_called { + for (key, _) in self.inner.iter() { + self.key_map.insert(Key::new(key), KeyInternal::new(key)); + } + } + + let mut remapping = HashMap::new(); + self.inner.compact(|_, from, to| { + remapping.insert(from, to); + true + }); + + // At this point `key_map` contains a mapping for every element. + for internal_key in self.key_map.values_mut() { + if let Some(new_internal_key) = remapping.get(&internal_key.index) { + *internal_key = KeyInternal::new(*new_internal_key); + } + } + + if self.key_map.capacity() > 2 * self.key_map.len() { + self.key_map.shrink_to_fit(); + } + + self.compact_called = true; + } + + // Tries to re-map a `Key` that was given out to the user to its + // corresponding internal key. 
+ fn remap_key(&self, key: &Key) -> Option { + let key_map = &self.key_map; + if self.compact_called { + key_map.get(&*key).copied() + } else { + Some((*key).into()) + } + } + + fn create_new_key(&mut self) -> KeyInternal { + while self.key_map.contains_key(&Key::new(self.next_key_index)) { + self.next_key_index = self.next_key_index.wrapping_add(1); + } + + KeyInternal::new(self.next_key_index) + } + + pub(crate) fn len(&self) -> usize { + self.inner.len() + } + + pub(crate) fn capacity(&self) -> usize { + self.inner.capacity() + } + + pub(crate) fn clear(&mut self) { + self.inner.clear(); + self.key_map.clear(); + self.compact_called = false; + } + + pub(crate) fn reserve(&mut self, additional: usize) { + self.inner.reserve(additional); + + if self.compact_called { + self.key_map.reserve(additional); + } + } + + pub(crate) fn is_empty(&self) -> bool { + self.inner.is_empty() + } + + pub(crate) fn contains(&self, key: &Key) -> bool { + let remapped_key = self.remap_key(key); + + match remapped_key { + Some(internal_key) => self.inner.contains(internal_key.index), + None => false, + } + } +} + +impl fmt::Debug for SlabStorage +where + T: fmt::Debug, +{ + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + if fmt.alternate() { + fmt.debug_map().entries(self.inner.iter()).finish() + } else { + fmt.debug_struct("Slab") + .field("len", &self.len()) + .field("cap", &self.capacity()) + .finish() + } + } +} + +impl Index for SlabStorage { + type Output = Data; + + fn index(&self, key: Key) -> &Self::Output { + let remapped_key = self.remap_key(&key); + + match remapped_key { + Some(internal_key) => &self.inner[internal_key.index], + None => panic!("Invalid index {}", key.index), + } + } +} + +impl IndexMut for SlabStorage { + fn index_mut(&mut self, key: Key) -> &mut Data { + let remapped_key = self.remap_key(&key); + + match remapped_key { + Some(internal_key) => &mut self.inner[internal_key.index], + None => panic!("Invalid index {}", key.index), + } + } +} + 
/// An entry in `DelayQueue` that has expired and been removed. /// /// Values are returned by [`DelayQueue::poll_expired`]. @@ -176,15 +391,23 @@ pub struct Expired { /// /// [`DelayQueue`]: struct@DelayQueue /// [`DelayQueue::insert`]: method@DelayQueue::insert -#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct Key { index: usize, } +// Whereas `Key` is given out to users that use `DelayQueue`, internally we use +// `KeyInternal` as the key type in order to make the logic of mapping between keys +// as a result of `compact` calls clearer. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +struct KeyInternal { + index: usize, +} + #[derive(Debug)] struct Stack { /// Head of the stack - head: Option, + head: Option, _p: PhantomData T>, } @@ -201,10 +424,10 @@ struct Data { expired: bool, /// Next entry in the stack - next: Option, + next: Option, /// Previous entry in the stack - prev: Option, + prev: Option, } /// Maximum number of entries the queue can handle @@ -253,7 +476,7 @@ impl DelayQueue { pub fn with_capacity(capacity: usize) -> DelayQueue { DelayQueue { wheel: Wheel::new(), - slab: Slab::with_capacity(capacity), + slab: SlabStorage::with_capacity(capacity), expired: Stack::default(), delay: None, wheel_now: 0, @@ -348,7 +571,7 @@ impl DelayQueue { } } - Key::new(key) + key } /// Attempts to pull out the next value of the delay queue, registering the @@ -369,13 +592,13 @@ impl DelayQueue { let item = ready!(self.poll_idx(cx)); Poll::Ready(item.map(|result| { - result.map(|idx| { - let data = self.slab.remove(idx); + result.map(|key| { + let data = self.slab.remove(&key); debug_assert!(data.next.is_none()); debug_assert!(data.prev.is_none()); Expired { - key: Key::new(idx), + key, data: data.inner, deadline: self.start + Duration::from_millis(data.when), } @@ -437,7 +660,7 @@ impl DelayQueue { self.insert_at(value, Instant::now() + timeout) } - fn insert_idx(&mut self, when: u64, key: usize) 
{ + fn insert_idx(&mut self, when: u64, key: Key) { use self::wheel::{InsertError, Stack}; // Register the deadline with the timer wheel @@ -462,10 +685,10 @@ impl DelayQueue { use crate::time::wheel::Stack; // Special case the `expired` queue - if self.slab[key.index].expired { - self.expired.remove(&key.index, &mut self.slab); + if self.slab[*key].expired { + self.expired.remove(key, &mut self.slab); } else { - self.wheel.remove(&key.index, &mut self.slab); + self.wheel.remove(key, &mut self.slab); } } @@ -501,7 +724,7 @@ impl DelayQueue { let prev_deadline = self.next_deadline(); self.remove_key(key); - let data = self.slab.remove(key.index); + let data = self.slab.remove(key); let next_deadline = self.next_deadline(); if prev_deadline != next_deadline { @@ -559,10 +782,10 @@ impl DelayQueue { // Normalize the deadline. Values cannot be set to expire in the past. let when = self.normalize_deadline(when); - self.slab[key.index].when = when; - self.slab[key.index].expired = false; + self.slab[*key].when = when; + self.slab[*key].expired = false; - self.insert_idx(when, key.index); + self.insert_idx(when, *key); let next_deadline = self.next_deadline(); if let (Some(ref mut delay), Some(deadline)) = (&mut self.delay, next_deadline) { @@ -571,6 +794,50 @@ impl DelayQueue { } } + /// Shrink the capacity of the slab, which `DelayQueue` uses internally for storage allocation. + /// This function is not guaranteed to, and in most cases, won't decrease the capacity of the slab + /// to the number of elements still contained in it, because elements cannot be moved to a different + /// index. To decrease the capacity to the size of the slab use [`compact`]. + /// + /// This function can take O(n) time even when the capacity cannot be reduced or the allocation is + /// shrunk in place. Repeated calls run in O(1) though. 
+ /// + /// [`compact`]: method@Self::compact + pub fn shrink_to_fit(&mut self) { + self.slab.shrink_to_fit(); + } + + /// Shrink the capacity of the slab, which `DelayQueue` uses internally for storage allocation, + /// to the number of elements that are contained in it. + /// + /// This methods runs in O(n). + /// + /// # Examples + /// + /// Basic usage + /// + /// ```rust + /// use tokio_util::time::DelayQueue; + /// use std::time::Duration; + /// + /// # #[tokio::main] + /// # async fn main() { + /// let mut delay_queue = DelayQueue::with_capacity(10); + /// + /// let key1 = delay_queue.insert(5, Duration::from_secs(5)); + /// let key2 = delay_queue.insert(10, Duration::from_secs(10)); + /// let key3 = delay_queue.insert(15, Duration::from_secs(15)); + /// + /// delay_queue.remove(&key2); + /// + /// delay_queue.compact(); + /// assert_eq!(delay_queue.capacity(), 2); + /// # } + /// ``` + pub fn compact(&mut self) { + self.slab.compact(); + } + /// Returns the next time to poll as determined by the wheel fn next_deadline(&mut self) -> Option { self.wheel @@ -750,7 +1017,7 @@ impl DelayQueue { /// should be returned. /// /// A slot should be returned when the associated deadline has been reached. 
- fn poll_idx(&mut self, cx: &mut task::Context<'_>) -> Poll>> { + fn poll_idx(&mut self, cx: &mut task::Context<'_>) -> Poll>> { use self::wheel::Stack; let expired = self.expired.pop(&mut self.slab); @@ -816,9 +1083,9 @@ impl futures_core::Stream for DelayQueue { } impl wheel::Stack for Stack { - type Owned = usize; - type Borrowed = usize; - type Store = Slab>; + type Owned = Key; + type Borrowed = Key; + type Store = SlabStorage; fn is_empty(&self) -> bool { self.head.is_none() @@ -837,28 +1104,29 @@ impl wheel::Stack for Stack { } store[item].next = old; - self.head = Some(item) + self.head = Some(item); } fn pop(&mut self, store: &mut Self::Store) -> Option { - if let Some(idx) = self.head { - self.head = store[idx].next; + if let Some(key) = self.head { + self.head = store[key].next; if let Some(idx) = self.head { store[idx].prev = None; } - store[idx].next = None; - debug_assert!(store[idx].prev.is_none()); + store[key].next = None; + debug_assert!(store[key].prev.is_none()); - Some(idx) + Some(key) } else { None } } fn remove(&mut self, item: &Self::Borrowed, store: &mut Self::Store) { - assert!(store.contains(*item)); + let key = *item; + assert!(store.contains(item)); // Ensure that the entry is in fact contained by the stack debug_assert!({ @@ -867,29 +1135,31 @@ impl wheel::Stack for Stack { let mut contains = false; while let Some(idx) = next { + let data = &store[idx]; + if idx == *item { debug_assert!(!contains); contains = true; } - next = store[idx].next; + next = data.next; } contains }); - if let Some(next) = store[*item].next { - store[next].prev = store[*item].prev; + if let Some(next) = store[key].next { + store[next].prev = store[key].prev; } - if let Some(prev) = store[*item].prev { - store[prev].next = store[*item].next; + if let Some(prev) = store[key].prev { + store[prev].next = store[key].next; } else { - self.head = store[*item].next; + self.head = store[key].next; } - store[*item].next = None; - store[*item].prev = None; + 
store[key].next = None; + store[key].prev = None; } fn when(item: &Self::Borrowed, store: &Self::Store) -> u64 { @@ -912,6 +1182,24 @@ impl Key { } } +impl KeyInternal { + pub(crate) fn new(index: usize) -> KeyInternal { + KeyInternal { index } + } +} + +impl From for KeyInternal { + fn from(item: Key) -> Self { + KeyInternal::new(item.index) + } +} + +impl From for Key { + fn from(item: KeyInternal) -> Self { + Key::new(item.index) + } +} + impl Expired { /// Returns a reference to the inner value. pub fn get_ref(&self) -> &T { diff --git a/tokio-util/src/time/wheel/mod.rs b/tokio-util/src/time/wheel/mod.rs index 8fed0bf431d..4191e401df4 100644 --- a/tokio-util/src/time/wheel/mod.rs +++ b/tokio-util/src/time/wheel/mod.rs @@ -6,6 +6,7 @@ mod stack; pub(crate) use self::stack::Stack; use std::borrow::Borrow; +use std::fmt::Debug; use std::usize; /// Timing wheel implementation. diff --git a/tokio-util/src/time/wheel/stack.rs b/tokio-util/src/time/wheel/stack.rs index 6e55c38ccda..c87adcafda8 100644 --- a/tokio-util/src/time/wheel/stack.rs +++ b/tokio-util/src/time/wheel/stack.rs @@ -1,4 +1,6 @@ use std::borrow::Borrow; +use std::cmp::Eq; +use std::hash::Hash; /// Abstracts the stack operations needed to track timeouts. 
pub(crate) trait Stack: Default { @@ -6,7 +8,7 @@ pub(crate) trait Stack: Default { type Owned: Borrow; /// Borrowed item - type Borrowed; + type Borrowed: Eq + Hash; /// Item storage, this allows a slab to be used instead of just the heap type Store; diff --git a/tokio-util/tests/time_delay_queue.rs b/tokio-util/tests/time_delay_queue.rs index 901afcaaa94..1c30446af59 100644 --- a/tokio-util/tests/time_delay_queue.rs +++ b/tokio-util/tests/time_delay_queue.rs @@ -109,6 +109,7 @@ async fn multi_delay_at_start() { let start = Instant::now(); for elapsed in 0..1200 { + println!("elapsed: {:?}", elapsed); let elapsed = elapsed + 1; tokio::time::sleep_until(start + ms(elapsed)).await; @@ -128,10 +129,12 @@ async fn multi_delay_at_start() { assert_pending!(poll!(queue)); } } + println!("finished multi_delay_start"); } #[tokio::test] async fn insert_in_past_fires_immediately() { + println!("running insert_in_past_fires_immediately"); time::pause(); let mut queue = task::spawn(DelayQueue::new()); @@ -142,6 +145,7 @@ async fn insert_in_past_fires_immediately() { queue.insert_at("foo", now); assert_ready!(poll!(queue)); + println!("finished insert_in_past_fires_immediately"); } #[tokio::test] @@ -640,6 +644,175 @@ async fn delay_queue_poll_expired_when_empty() { assert!(assert_ready!(poll!(delay_queue)).is_none()); } +#[tokio::test(start_paused = true)] +async fn compact_expire_empty() { + let mut queue = task::spawn(DelayQueue::new()); + + let now = Instant::now(); + + queue.insert_at("foo1", now + ms(10)); + queue.insert_at("foo2", now + ms(10)); + + sleep(ms(10)).await; + + let mut res = vec![]; + while res.len() < 2 { + let entry = assert_ready_ok!(poll!(queue)); + res.push(entry.into_inner()); + } + + queue.compact(); + + assert_eq!(queue.len(), 0); + assert_eq!(queue.capacity(), 0); +} + +#[tokio::test(start_paused = true)] +async fn compact_remove_empty() { + let mut queue = task::spawn(DelayQueue::new()); + + let now = Instant::now(); + + let key1 = 
queue.insert_at("foo1", now + ms(10)); + let key2 = queue.insert_at("foo2", now + ms(10)); + + queue.remove(&key1); + queue.remove(&key2); + + queue.compact(); + + assert_eq!(queue.len(), 0); + assert_eq!(queue.capacity(), 0); +} + +#[tokio::test(start_paused = true)] +// Trigger a re-mapping of keys in the slab due to a `compact` call and +// test removal of re-mapped keys +async fn compact_remove_remapped_keys() { + let mut queue = task::spawn(DelayQueue::new()); + + let now = Instant::now(); + + queue.insert_at("foo1", now + ms(10)); + queue.insert_at("foo2", now + ms(10)); + + // should be assigned indices 3 and 4 + let key3 = queue.insert_at("foo3", now + ms(20)); + let key4 = queue.insert_at("foo4", now + ms(20)); + + sleep(ms(10)).await; + + let mut res = vec![]; + while res.len() < 2 { + let entry = assert_ready_ok!(poll!(queue)); + res.push(entry.into_inner()); + } + + // items corresponding to `foo3` and `foo4` will be assigned + // new indices here + queue.compact(); + + queue.insert_at("foo5", now + ms(10)); + + // test removal of re-mapped keys + let expired3 = queue.remove(&key3); + let expired4 = queue.remove(&key4); + + assert_eq!(expired3.into_inner(), "foo3"); + assert_eq!(expired4.into_inner(), "foo4"); + + queue.compact(); + assert_eq!(queue.len(), 1); + assert_eq!(queue.capacity(), 1); +} + +#[tokio::test(start_paused = true)] +async fn compact_change_deadline() { + let mut queue = task::spawn(DelayQueue::new()); + + let mut now = Instant::now(); + + queue.insert_at("foo1", now + ms(10)); + queue.insert_at("foo2", now + ms(10)); + + // should be assigned indices 3 and 4 + queue.insert_at("foo3", now + ms(20)); + let key4 = queue.insert_at("foo4", now + ms(20)); + + sleep(ms(10)).await; + + let mut res = vec![]; + while res.len() < 2 { + let entry = assert_ready_ok!(poll!(queue)); + res.push(entry.into_inner()); + } + + // items corresponding to `foo3` and `foo4` should be assigned + // new indices + queue.compact(); + + now = Instant::now(); + 
+ queue.insert_at("foo5", now + ms(10)); + let key6 = queue.insert_at("foo6", now + ms(10)); + + queue.reset_at(&key4, now + ms(20)); + queue.reset_at(&key6, now + ms(20)); + + // foo3 and foo5 will expire + sleep(ms(10)).await; + + while res.len() < 4 { + let entry = assert_ready_ok!(poll!(queue)); + res.push(entry.into_inner()); + } + + sleep(ms(10)).await; + + while res.len() < 6 { + let entry = assert_ready_ok!(poll!(queue)); + res.push(entry.into_inner()); + } + + let entry = assert_ready!(poll!(queue)); + assert!(entry.is_none()); +} + +#[tokio::test(start_paused = true)] +async fn remove_after_compact() { + let now = Instant::now(); + let mut queue = DelayQueue::new(); + + let foo_key = queue.insert_at("foo", now + ms(10)); + queue.insert_at("bar", now + ms(20)); + queue.remove(&foo_key); + queue.compact(); + + let panic = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { + queue.remove(&foo_key); + })); + assert!(panic.is_err()); +} + +#[tokio::test(start_paused = true)] +async fn remove_after_compact_poll() { + let now = Instant::now(); + let mut queue = task::spawn(DelayQueue::new()); + + let foo_key = queue.insert_at("foo", now + ms(10)); + queue.insert_at("bar", now + ms(20)); + + sleep(ms(10)).await; + assert_eq!(assert_ready_ok!(poll!(queue)).key(), foo_key); + + queue.compact(); + + let panic = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { + queue.remove(&foo_key); + })); + assert!(panic.is_err()); +} + fn ms(n: u64) -> Duration { Duration::from_millis(n) } From 1601de119647c7a0413b0f7058dabddc3aae3e66 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Mon, 10 Jan 2022 11:40:28 +0100 Subject: [PATCH 49/59] process: drop pipe after child exits in `wait_with_output` (#4315) --- tokio/src/process/mod.rs | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/tokio/src/process/mod.rs b/tokio/src/process/mod.rs index 8a0d9db25fd..67d43b4d80e 100644 --- a/tokio/src/process/mod.rs +++ 
b/tokio/src/process/mod.rs @@ -1130,19 +1130,26 @@ impl Child { pub async fn wait_with_output(mut self) -> io::Result { use crate::future::try_join3; - async fn read_to_end(io: Option) -> io::Result> { + async fn read_to_end(io: &mut Option) -> io::Result> { let mut vec = Vec::new(); - if let Some(mut io) = io { - crate::io::util::read_to_end(&mut io, &mut vec).await?; + if let Some(io) = io.as_mut() { + crate::io::util::read_to_end(io, &mut vec).await?; } Ok(vec) } - let stdout_fut = read_to_end(self.stdout.take()); - let stderr_fut = read_to_end(self.stderr.take()); + let mut stdout_pipe = self.stdout.take(); + let mut stderr_pipe = self.stderr.take(); + + let stdout_fut = read_to_end(&mut stdout_pipe); + let stderr_fut = read_to_end(&mut stderr_pipe); let (status, stdout, stderr) = try_join3(self.wait(), stdout_fut, stderr_fut).await?; + // Drop happens after `try_join` due to + drop(stdout_pipe); + drop(stderr_pipe); + Ok(Output { status, stdout, From cec1bc151e30b7afe26c44e041c56cc826fda938 Mon Sep 17 00:00:00 2001 From: Matt Schulte Date: Mon, 10 Jan 2022 02:41:01 -0800 Subject: [PATCH 50/59] watch: document recursive `borrow` deadlock (#4360) Under the hood, the watch channel uses a RwLock to implement reading (borrow) and writing (send). This may cause a deadlock if a user has concurrent borrows on the same thread. This is most likely to occur due to a recursive borrow. This PR adds documentation to describe the deadlock so that future users of the watch channel will be aware. --- tokio/src/sync/watch.rs | 51 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/tokio/src/sync/watch.rs b/tokio/src/sync/watch.rs index 5e827fdbb8c..5673e0fca78 100644 --- a/tokio/src/sync/watch.rs +++ b/tokio/src/sync/watch.rs @@ -91,6 +91,23 @@ pub struct Sender { /// Outstanding borrows hold a read lock on the inner value. This means that /// long lived borrows could cause the produce half to block. 
It is recommended /// to keep the borrow as short lived as possible. +/// +/// The priority policy of the lock is dependent on the underlying lock +/// implementation, and this type does not guarantee that any particular policy +/// will be used. In particular, a producer which is waiting to acquire the lock +/// in `send` might or might not block concurrent calls to `borrow`, e.g.: +/// +///

Potential deadlock example +/// +/// ```text +/// // Task 1 (on thread A) | // Task 2 (on thread B) +/// let _ref1 = rx.borrow(); | +/// | // will block +/// | let _ = tx.send(()); +/// // may deadlock | +/// let _ref2 = rx.borrow(); | +/// ``` +///
#[derive(Debug)] pub struct Ref<'a, T> { inner: RwLockReadGuard<'a, T>, @@ -285,6 +302,23 @@ impl Receiver { /// could cause the send half to block. It is recommended to keep the borrow /// as short lived as possible. /// + /// The priority policy of the lock is dependent on the underlying lock + /// implementation, and this type does not guarantee that any particular policy + /// will be used. In particular, a producer which is waiting to acquire the lock + /// in `send` might or might not block concurrent calls to `borrow`, e.g.: + /// + ///
Potential deadlock example + /// + /// ```text + /// // Task 1 (on thread A) | // Task 2 (on thread B) + /// let _ref1 = rx.borrow(); | + /// | // will block + /// | let _ = tx.send(()); + /// // may deadlock | + /// let _ref2 = rx.borrow(); | + /// ``` + ///
+ /// /// [`changed`]: Receiver::changed /// /// # Examples @@ -311,6 +345,23 @@ impl Receiver { /// could cause the send half to block. It is recommended to keep the borrow /// as short lived as possible. /// + /// The priority policy of the lock is dependent on the underlying lock + /// implementation, and this type does not guarantee that any particular policy + /// will be used. In particular, a producer which is waiting to acquire the lock + /// in `send` might or might not block concurrent calls to `borrow`, e.g.: + /// + ///
Potential deadlock example + /// + /// ```text + /// // Task 1 (on thread A) | // Task 2 (on thread B) + /// let _ref1 = rx1.borrow_and_update(); | + /// | // will block + /// | let _ = tx.send(()); + /// // may deadlock | + /// let _ref2 = rx2.borrow_and_update(); | + /// ``` + ///
+ /// /// [`changed`]: Receiver::changed pub fn borrow_and_update(&mut self) -> Ref<'_, T> { let inner = self.shared.value.read().unwrap(); From bcb968af8494d2808c2986104d6e67d0b276b1fe Mon Sep 17 00:00:00 2001 From: 0xd34d10cc <0xd34d10cc@gmail.com> Date: Mon, 10 Jan 2022 13:42:16 +0000 Subject: [PATCH 51/59] sync: add `blocking_recv` to `oneshot::Receiver` (#4334) --- tokio/src/sync/oneshot.rs | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/tokio/src/sync/oneshot.rs b/tokio/src/sync/oneshot.rs index cfc92259d39..2240074e733 100644 --- a/tokio/src/sync/oneshot.rs +++ b/tokio/src/sync/oneshot.rs @@ -1014,6 +1014,36 @@ impl Receiver { self.inner = None; result } + + /// Blocking receive to call outside of asynchronous contexts. + /// + /// # Panics + /// + /// This function panics if called within an asynchronous execution + /// context. + /// + /// # Examples + /// + /// ``` + /// use std::thread; + /// use tokio::sync::oneshot; + /// + /// #[tokio::main] + /// async fn main() { + /// let (tx, rx) = oneshot::channel::(); + /// + /// let sync_code = thread::spawn(move || { + /// assert_eq!(Ok(10), rx.blocking_recv()); + /// }); + /// + /// let _ = tx.send(10); + /// sync_code.join().unwrap(); + /// } + /// ``` + #[cfg(feature = "sync")] + pub fn blocking_recv(self) -> Result { + crate::future::block_on(self) + } } impl Drop for Receiver { From aea26b322c9493f732894265810837bf17a330ca Mon Sep 17 00:00:00 2001 From: Carl Lerche Date: Tue, 11 Jan 2022 10:53:45 -0800 Subject: [PATCH 52/59] Revert "Update mio to 0.8 (#4270)" and dependent changes (#4392) This reverts commits: * ee0e811a362e4aeb8f47cb530cace2d352fb4b8a * 49a9dc6743a8d90c46a51a42706943acf39a5d85 * 0190831ec1922047751b6d40554cc4a11cf2a82c * 43cdb2cb5004a68d28c4394664b9f9964f3d59e2 * 96370ba4ce9ea5564f094354579d5539af8bbc9d * a9d9bde0688cb88149272d78f8239a89b357974e --- tokio/Cargo.toml | 19 ++--- tokio/src/net/tcp/socket.rs | 126 ++++++------------------------ 
tokio/src/net/tcp/stream.rs | 26 +++++-- tokio/src/net/udp.rs | 150 ------------------------------------ tokio/tests/udp.rs | 10 --- 5 files changed, 52 insertions(+), 279 deletions(-) diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index 3945456520b..2a88b766c0a 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -49,19 +49,20 @@ macros = ["tokio-macros"] stats = [] net = [ "libc", - "mio/net", - "mio/os-ext", "mio/os-poll", - "socket2/all", + "mio/os-util", + "mio/tcp", + "mio/udp", + "mio/uds", "winapi/namedpipeapi", ] process = [ "bytes", "once_cell", "libc", - "mio/net", - "mio/os-ext", "mio/os-poll", + "mio/os-util", + "mio/uds", "signal-hook-registry", "winapi/threadpoollegacyapiset", ] @@ -74,9 +75,9 @@ rt-multi-thread = [ signal = [ "once_cell", "libc", - "mio/net", - "mio/os-ext", "mio/os-poll", + "mio/uds", + "mio/os-util", "signal-hook-registry", "winapi/consoleapi", ] @@ -93,10 +94,9 @@ pin-project-lite = "0.2.0" bytes = { version = "1.0.0", optional = true } once_cell = { version = "1.5.2", optional = true } memchr = { version = "2.2", optional = true } -mio = { version = "0.8.0", optional = true } +mio = { version = "0.7.6", optional = true } num_cpus = { version = "1.8.0", optional = true } parking_lot = { version = "0.11.0", optional = true } -socket2 = { version = "0.4.2", optional = true } # Currently unstable. The API exposed by these features may be broken at any time. # Requires `--cfg tokio_unstable` to enable. 
@@ -128,6 +128,7 @@ proptest = "1" rand = "0.8.0" tempfile = "3.1.0" async-stream = "0.3" +socket2 = "0.4" [target.'cfg(target_os = "freebsd")'.dev-dependencies] mio-aio = { version = "0.6.0", features = ["tokio"] } diff --git a/tokio/src/net/tcp/socket.rs b/tokio/src/net/tcp/socket.rs index ee9633611a1..3c6870221c2 100644 --- a/tokio/src/net/tcp/socket.rs +++ b/tokio/src/net/tcp/socket.rs @@ -1,6 +1,5 @@ use crate::net::{TcpListener, TcpStream}; -use std::convert::TryInto; use std::fmt; use std::io; use std::net::SocketAddr; @@ -85,7 +84,7 @@ cfg_net! { /// [`socket2`]: https://docs.rs/socket2/ #[cfg_attr(docsrs, doc(alias = "connect_std"))] pub struct TcpSocket { - inner: socket2::Socket, + inner: mio::net::TcpSocket, } } @@ -120,11 +119,7 @@ impl TcpSocket { /// } /// ``` pub fn new_v4() -> io::Result { - let inner = socket2::Socket::new( - socket2::Domain::IPV4, - socket2::Type::STREAM, - Some(socket2::Protocol::TCP), - )?; + let inner = mio::net::TcpSocket::new_v4()?; Ok(TcpSocket { inner }) } @@ -158,11 +153,7 @@ impl TcpSocket { /// } /// ``` pub fn new_v6() -> io::Result { - let inner = socket2::Socket::new( - socket2::Domain::IPV6, - socket2::Type::STREAM, - Some(socket2::Protocol::TCP), - )?; + let inner = mio::net::TcpSocket::new_v6()?; Ok(TcpSocket { inner }) } @@ -193,7 +184,7 @@ impl TcpSocket { /// } /// ``` pub fn set_reuseaddr(&self, reuseaddr: bool) -> io::Result<()> { - self.inner.set_reuse_address(reuseaddr) + self.inner.set_reuseaddr(reuseaddr) } /// Retrieves the value set for `SO_REUSEADDR` on this socket. @@ -219,7 +210,7 @@ impl TcpSocket { /// } /// ``` pub fn reuseaddr(&self) -> io::Result { - self.inner.reuse_address() + self.inner.get_reuseaddr() } /// Allows the socket to bind to an in-use port. 
Only available for unix systems @@ -253,7 +244,7 @@ impl TcpSocket { doc(cfg(all(unix, not(target_os = "solaris"), not(target_os = "illumos")))) )] pub fn set_reuseport(&self, reuseport: bool) -> io::Result<()> { - self.inner.set_reuse_port(reuseport) + self.inner.set_reuseport(reuseport) } /// Allows the socket to bind to an in-use port. Only available for unix systems @@ -288,14 +279,14 @@ impl TcpSocket { doc(cfg(all(unix, not(target_os = "solaris"), not(target_os = "illumos")))) )] pub fn reuseport(&self) -> io::Result { - self.inner.reuse_port() + self.inner.get_reuseport() } /// Sets the size of the TCP send buffer on this socket. /// /// On most operating systems, this sets the `SO_SNDBUF` socket option. pub fn set_send_buffer_size(&self, size: u32) -> io::Result<()> { - self.inner.set_send_buffer_size(size as usize) + self.inner.set_send_buffer_size(size) } /// Returns the size of the TCP send buffer for this socket. @@ -322,14 +313,14 @@ impl TcpSocket { /// /// [`set_send_buffer_size`]: #method.set_send_buffer_size pub fn send_buffer_size(&self) -> io::Result { - self.inner.send_buffer_size().map(|n| n as u32) + self.inner.get_send_buffer_size() } /// Sets the size of the TCP receive buffer on this socket. /// /// On most operating systems, this sets the `SO_RCVBUF` socket option. pub fn set_recv_buffer_size(&self, size: u32) -> io::Result<()> { - self.inner.set_recv_buffer_size(size as usize) + self.inner.set_recv_buffer_size(size) } /// Returns the size of the TCP receive buffer for this socket. @@ -356,7 +347,7 @@ impl TcpSocket { /// /// [`set_recv_buffer_size`]: #method.set_recv_buffer_size pub fn recv_buffer_size(&self) -> io::Result { - self.inner.recv_buffer_size().map(|n| n as u32) + self.inner.get_recv_buffer_size() } /// Sets the linger duration of this socket by setting the SO_LINGER option. 
@@ -378,62 +369,7 @@ impl TcpSocket { /// /// [`set_linger`]: TcpSocket::set_linger pub fn linger(&self) -> io::Result> { - self.inner.linger() - } - - /// Gets the value of the `IP_TOS` option for this socket. - /// - /// For more information about this option, see [`set_tos`]. - /// - /// **NOTE:** On Windows, `IP_TOS` is only supported on [Windows 8+ or - /// Windows Server 2012+.](https://docs.microsoft.com/en-us/windows/win32/winsock/ipproto-ip-socket-options) - /// - /// [`set_tos`]: Self::set_tos - // https://docs.rs/socket2/0.4.2/src/socket2/socket.rs.html#1178 - #[cfg(not(any( - target_os = "fuchsia", - target_os = "redox", - target_os = "solaris", - target_os = "illumos", - )))] - #[cfg_attr( - docsrs, - doc(cfg(not(any( - target_os = "fuchsia", - target_os = "redox", - target_os = "solaris", - target_os = "illumos", - )))) - )] - pub fn tos(&self) -> io::Result { - self.inner.tos() - } - - /// Sets the value for the `IP_TOS` option on this socket. - /// - /// This value sets the time-to-live field that is used in every packet sent - /// from this socket. - /// - /// **NOTE:** On Windows, `IP_TOS` is only supported on [Windows 8+ or - /// Windows Server 2012+.](https://docs.microsoft.com/en-us/windows/win32/winsock/ipproto-ip-socket-options) - // https://docs.rs/socket2/0.4.2/src/socket2/socket.rs.html#1178 - #[cfg(not(any( - target_os = "fuchsia", - target_os = "redox", - target_os = "solaris", - target_os = "illumos", - )))] - #[cfg_attr( - docsrs, - doc(cfg(not(any( - target_os = "fuchsia", - target_os = "redox", - target_os = "solaris", - target_os = "illumos", - )))) - )] - pub fn set_tos(&self, tos: u32) -> io::Result<()> { - self.inner.set_tos(tos) + self.inner.get_linger() } /// Gets the local address of this socket. @@ -459,14 +395,7 @@ impl TcpSocket { /// } /// ``` pub fn local_addr(&self) -> io::Result { - self.inner - .local_addr() - .map(|addr| addr.as_socket().unwrap()) - } - - /// Returns the value of the `SO_ERROR` option. 
- pub fn take_error(&self) -> io::Result> { - self.inner.take_error() + self.inner.get_localaddr() } /// Binds the socket to the given address. @@ -498,7 +427,7 @@ impl TcpSocket { /// } /// ``` pub fn bind(&self, addr: SocketAddr) -> io::Result<()> { - self.inner.bind(&addr.into()) + self.inner.bind(addr) } /// Establishes a TCP connection with a peer at the specified socket address. @@ -534,13 +463,7 @@ impl TcpSocket { /// } /// ``` pub async fn connect(self, addr: SocketAddr) -> io::Result { - self.inner.connect(&addr.into())?; - - #[cfg(windows)] - let mio = unsafe { mio::net::TcpStream::from_raw_socket(self.inner.into_raw_socket()) }; - #[cfg(unix)] - let mio = unsafe { mio::net::TcpStream::from_raw_fd(self.inner.into_raw_fd()) }; - + let mio = self.inner.connect(addr)?; TcpStream::connect_mio(mio).await } @@ -580,14 +503,7 @@ impl TcpSocket { /// } /// ``` pub fn listen(self, backlog: u32) -> io::Result { - let backlog = backlog.try_into().unwrap_or(i32::MAX); - self.inner.listen(backlog)?; - - #[cfg(windows)] - let mio = unsafe { mio::net::TcpListener::from_raw_socket(self.inner.into_raw_socket()) }; - #[cfg(unix)] - let mio = unsafe { mio::net::TcpListener::from_raw_fd(self.inner.into_raw_fd()) }; - + let mio = self.inner.listen(backlog)?; TcpListener::new(mio) } @@ -607,7 +523,7 @@ impl TcpSocket { /// /// #[tokio::main] /// async fn main() -> std::io::Result<()> { - /// + /// /// let socket2_socket = Socket::new(Domain::IPV4, Type::STREAM, None)?; /// /// let socket = TcpSocket::from_std_stream(socket2_socket.into()); @@ -618,12 +534,16 @@ impl TcpSocket { pub fn from_std_stream(std_stream: std::net::TcpStream) -> TcpSocket { #[cfg(unix)] { + use std::os::unix::io::{FromRawFd, IntoRawFd}; + let raw_fd = std_stream.into_raw_fd(); unsafe { TcpSocket::from_raw_fd(raw_fd) } } #[cfg(windows)] { + use std::os::windows::io::{FromRawSocket, IntoRawSocket}; + let raw_socket = std_stream.into_raw_socket(); unsafe { TcpSocket::from_raw_socket(raw_socket) } } @@ 
-652,7 +572,7 @@ impl FromRawFd for TcpSocket { /// The caller is responsible for ensuring that the socket is in /// non-blocking mode. unsafe fn from_raw_fd(fd: RawFd) -> TcpSocket { - let inner = socket2::Socket::from_raw_fd(fd); + let inner = mio::net::TcpSocket::from_raw_fd(fd); TcpSocket { inner } } } @@ -687,7 +607,7 @@ impl FromRawSocket for TcpSocket { /// The caller is responsible for ensuring that the socket is in /// non-blocking mode. unsafe fn from_raw_socket(socket: RawSocket) -> TcpSocket { - let inner = socket2::Socket::from_raw_socket(socket); + let inner = mio::net::TcpSocket::from_raw_socket(socket); TcpSocket { inner } } } diff --git a/tokio/src/net/tcp/stream.rs b/tokio/src/net/tcp/stream.rs index abfc3c6612b..60d20fd74b2 100644 --- a/tokio/src/net/tcp/stream.rs +++ b/tokio/src/net/tcp/stream.rs @@ -387,7 +387,7 @@ impl TcpStream { /// // if the readiness event is a false positive. /// match stream.try_read(&mut data) { /// Ok(n) => { - /// println!("read {} bytes", n); + /// println!("read {} bytes", n); /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; @@ -1090,8 +1090,9 @@ impl TcpStream { /// # } /// ``` pub fn linger(&self) -> io::Result> { - let socket = self.as_socket(); - socket.linger() + let mio_socket = std::mem::ManuallyDrop::new(self.to_mio()); + + mio_socket.get_linger() } /// Sets the linger duration of this socket by setting the SO_LINGER option. 
@@ -1116,12 +1117,23 @@ impl TcpStream { /// # } /// ``` pub fn set_linger(&self, dur: Option) -> io::Result<()> { - let socket = self.as_socket(); - socket.set_linger(dur) + let mio_socket = std::mem::ManuallyDrop::new(self.to_mio()); + + mio_socket.set_linger(dur) } - fn as_socket(&self) -> socket2::SockRef<'_> { - socket2::SockRef::from(self) + fn to_mio(&self) -> mio::net::TcpSocket { + #[cfg(windows)] + { + use std::os::windows::io::{AsRawSocket, FromRawSocket}; + unsafe { mio::net::TcpSocket::from_raw_socket(self.as_raw_socket()) } + } + + #[cfg(unix)] + { + use std::os::unix::io::{AsRawFd, FromRawFd}; + unsafe { mio::net::TcpSocket::from_raw_fd(self.as_raw_fd()) } + } } /// Gets the value of the `IP_TTL` option for this socket. diff --git a/tokio/src/net/udp.rs b/tokio/src/net/udp.rs index a6d80c6f760..12af5152c28 100644 --- a/tokio/src/net/udp.rs +++ b/tokio/src/net/udp.rs @@ -257,78 +257,6 @@ impl UdpSocket { } } - /// Sets the size of the UDP send buffer on this socket. - /// - /// On most operating systems, this sets the `SO_SNDBUF` socket option. - pub fn set_send_buffer_size(&self, size: u32) -> io::Result<()> { - self.as_socket().set_send_buffer_size(size as usize) - } - - /// Returns the size of the UDP send buffer for this socket. - /// - /// On most operating systems, this is the value of the `SO_SNDBUF` socket - /// option. - /// - /// Note that if [`set_send_buffer_size`] has been called on this socket - /// previously, the value returned by this function may not be the same as - /// the argument provided to `set_send_buffer_size`. This is for the - /// following reasons: - /// - /// * Most operating systems have minimum and maximum allowed sizes for the - /// send buffer, and will clamp the provided value if it is below the - /// minimum or above the maximum. The minimum and maximum buffer sizes are - /// OS-dependent. 
- /// * Linux will double the buffer size to account for internal bookkeeping - /// data, and returns the doubled value from `getsockopt(2)`. As per `man - /// 7 socket`: - /// > Sets or gets the maximum socket send buffer in bytes. The - /// > kernel doubles this value (to allow space for bookkeeping - /// > overhead) when it is set using `setsockopt(2)`, and this doubled - /// > value is returned by `getsockopt(2)`. - /// - /// [`set_send_buffer_size`]: Self::set_send_buffer_size - pub fn send_buffer_size(&self) -> io::Result { - self.as_socket().send_buffer_size().map(|n| n as u32) - } - - /// Sets the size of the UDP receive buffer on this socket. - /// - /// On most operating systems, this sets the `SO_RCVBUF` socket option. - pub fn set_recv_buffer_size(&self, size: u32) -> io::Result<()> { - self.as_socket().set_recv_buffer_size(size as usize) - } - - /// Returns the size of the UDP receive buffer for this socket. - /// - /// On most operating systems, this is the value of the `SO_RCVBUF` socket - /// option. - /// - /// Note that if [`set_recv_buffer_size`] has been called on this socket - /// previously, the value returned by this function may not be the same as - /// the argument provided to `set_send_buffer_size`. This is for the - /// following reasons: - /// - /// * Most operating systems have minimum and maximum allowed sizes for the - /// receive buffer, and will clamp the provided value if it is below the - /// minimum or above the maximum. The minimum and maximum buffer sizes are - /// OS-dependent. - /// * Linux will double the buffer size to account for internal bookkeeping - /// data, and returns the doubled value from `getsockopt(2)`. As per `man - /// 7 socket`: - /// > Sets or gets the maximum socket send buffer in bytes. The - /// > kernel doubles this value (to allow space for bookkeeping - /// > overhead) when it is set using `setsockopt(2)`, and this doubled - /// > value is returned by `getsockopt(2)`. 
- /// - /// [`set_recv_buffer_size`]: Self::set_recv_buffer_size - pub fn recv_buffer_size(&self) -> io::Result { - self.as_socket().recv_buffer_size().map(|n| n as u32) - } - - fn as_socket(&self) -> socket2::SockRef<'_> { - socket2::SockRef::from(self) - } - /// Returns the local address that this socket is bound to. /// /// # Example @@ -350,29 +278,6 @@ impl UdpSocket { self.io.local_addr() } - /// Returns the socket address of the remote peer this socket was connected - /// to. - /// - /// # Example - /// - /// ``` - /// use tokio::net::UdpSocket; - /// # use std::{io, net::SocketAddr}; - /// - /// # #[tokio::main] - /// # async fn main() -> io::Result<()> { - /// let addr = "127.0.0.1:0".parse::().unwrap(); - /// let peer_addr = "127.0.0.1:11100".parse::().unwrap(); - /// let sock = UdpSocket::bind(addr).await?; - /// sock.connect(peer_addr).await?; - /// assert_eq!(sock.peer_addr()?.ip(), peer_addr.ip()); - /// # Ok(()) - /// # } - /// ``` - pub fn peer_addr(&self) -> io::Result { - self.io.peer_addr() - } - /// Connects the UDP socket setting the default destination for send() and /// limiting packets that are read via recv from the address specified in /// `addr`. @@ -1579,61 +1484,6 @@ impl UdpSocket { self.io.set_ttl(ttl) } - /// Gets the value of the `IP_TOS` option for this socket. - /// - /// For more information about this option, see [`set_tos`]. 
- /// - /// **NOTE:** On Windows, `IP_TOS` is only supported on [Windows 8+ or - /// Windows Server 2012+.](https://docs.microsoft.com/en-us/windows/win32/winsock/ipproto-ip-socket-options) - /// - /// [`set_tos`]: Self::set_tos - // https://docs.rs/socket2/0.4.2/src/socket2/socket.rs.html#1178 - #[cfg(not(any( - target_os = "fuchsia", - target_os = "redox", - target_os = "solaris", - target_os = "illumos", - )))] - #[cfg_attr( - docsrs, - doc(cfg(not(any( - target_os = "fuchsia", - target_os = "redox", - target_os = "solaris", - target_os = "illumos", - )))) - )] - pub fn tos(&self) -> io::Result { - self.as_socket().tos() - } - - /// Sets the value for the `IP_TOS` option on this socket. - /// - /// This value sets the time-to-live field that is used in every packet sent - /// from this socket. - /// - /// **NOTE:** On Windows, `IP_TOS` is only supported on [Windows 8+ or - /// Windows Server 2012+.](https://docs.microsoft.com/en-us/windows/win32/winsock/ipproto-ip-socket-options) - // https://docs.rs/socket2/0.4.2/src/socket2/socket.rs.html#1178 - #[cfg(not(any( - target_os = "fuchsia", - target_os = "redox", - target_os = "solaris", - target_os = "illumos", - )))] - #[cfg_attr( - docsrs, - doc(cfg(not(any( - target_os = "fuchsia", - target_os = "redox", - target_os = "solaris", - target_os = "illumos", - )))) - )] - pub fn set_tos(&self, tos: u32) -> io::Result<()> { - self.as_socket().set_tos(tos) - } - /// Executes an operation of the `IP_ADD_MEMBERSHIP` type. /// /// This function specifies a new multicast group for this socket to join. 
diff --git a/tokio/tests/udp.rs b/tokio/tests/udp.rs index 11a97276c1f..ec2a1e96104 100644 --- a/tokio/tests/udp.rs +++ b/tokio/tests/udp.rs @@ -3,7 +3,6 @@ use futures::future::poll_fn; use std::io; -use std::net::SocketAddr; use std::sync::Arc; use tokio::{io::ReadBuf, net::UdpSocket}; use tokio_test::assert_ok; @@ -485,12 +484,3 @@ async fn poll_ready() { } } } - -#[tokio::test] -async fn peer_addr() { - let addr = "127.0.0.1:0".parse::().unwrap(); - let peer_addr = "127.0.0.1:11100".parse::().unwrap(); - let sock = UdpSocket::bind(addr).await.unwrap(); - sock.connect(peer_addr).await.unwrap(); - assert_eq!(sock.peer_addr().unwrap().ip(), peer_addr.ip()); -} From 867f137dc98ec41e7480d7f56158c3e7758b1a7e Mon Sep 17 00:00:00 2001 From: Carl Lerche Date: Tue, 11 Jan 2022 11:57:14 -0800 Subject: [PATCH 53/59] Revert "rt: refactor current-thread scheduler (#4377)" (#4394) This reverts commit cc8ad367a0e5d8536f8be58fe560bfdea1a976a5. --- tokio/src/runtime/basic_scheduler.rs | 419 ++++++++---------- tokio/src/runtime/mod.rs | 2 +- .../src/runtime/tests/loom_basic_scheduler.rs | 20 +- .../thread_pool}/atomic_cell.rs | 10 +- tokio/src/runtime/thread_pool/mod.rs | 3 + tokio/src/runtime/thread_pool/worker.rs | 3 +- tokio/src/util/mod.rs | 3 - 7 files changed, 215 insertions(+), 245 deletions(-) rename tokio/src/{util => runtime/thread_pool}/atomic_cell.rs (77%) diff --git a/tokio/src/runtime/basic_scheduler.rs b/tokio/src/runtime/basic_scheduler.rs index d873fcf4699..872d0d5b897 100644 --- a/tokio/src/runtime/basic_scheduler.rs +++ b/tokio/src/runtime/basic_scheduler.rs @@ -3,12 +3,10 @@ use crate::loom::sync::atomic::AtomicBool; use crate::loom::sync::Mutex; use crate::park::{Park, Unpark}; use crate::runtime::context::EnterGuard; -use crate::runtime::driver::Driver; use crate::runtime::stats::{RuntimeStats, WorkerStatsBatcher}; use crate::runtime::task::{self, JoinHandle, OwnedTasks, Schedule, Task}; use crate::runtime::Callback; use crate::sync::notify::Notify; -use 
crate::util::atomic_cell::AtomicCell; use crate::util::{waker_ref, Wake, WakerRef}; use std::cell::RefCell; @@ -21,12 +19,13 @@ use std::task::Poll::{Pending, Ready}; use std::time::Duration; /// Executes tasks on the current thread -pub(crate) struct BasicScheduler { - /// Core scheduler data is acquired by a thread entering `block_on`. - core: AtomicCell, +pub(crate) struct BasicScheduler { + /// Inner state guarded by a mutex that is shared + /// between all `block_on` calls. + inner: Mutex>>, /// Notifier for waking up other threads to steal the - /// driver. + /// parker. notify: Notify, /// Sendable task spawner @@ -39,11 +38,15 @@ pub(crate) struct BasicScheduler { context_guard: Option, } -/// Data required for executing the scheduler. The struct is passed around to -/// a function that will perform the scheduling work and acts as a capability token. -struct Core { +/// The inner scheduler that owns the task queue and the main parker P. +struct Inner { /// Scheduler run queue - tasks: VecDeque>>, + /// + /// When the scheduler is executed, the queue is removed from `self` and + /// moved into `Context`. + /// + /// This indirection is to allow `BasicScheduler` to be `Send`. + tasks: Option, /// Sendable task spawner spawner: Spawner, @@ -51,10 +54,13 @@ struct Core { /// Current tick tick: u8, - /// Runtime driver - /// - /// The driver is removed before starting to park the thread - driver: Option, + /// Thread park handle + park: P, + + /// Callback for a worker parking itself + before_park: Option, + /// Callback for a worker unparking itself + after_unpark: Option, /// Stats batcher stats: WorkerStatsBatcher, @@ -65,6 +71,13 @@ pub(crate) struct Spawner { shared: Arc, } +struct Tasks { + /// Local run queue. + /// + /// Tasks notified from the current thread are pushed into this queue. + queue: VecDeque>>, +} + /// A remote scheduler entry. /// /// These are filled in by remote threads sending instructions to the scheduler. 
@@ -87,29 +100,22 @@ struct Shared { owned: OwnedTasks>, /// Unpark the blocked thread. - unpark: ::Unpark, + unpark: Box, /// Indicates whether the blocked on thread was woken. woken: AtomicBool, - /// Callback for a worker parking itself - before_park: Option, - - /// Callback for a worker unparking itself - after_unpark: Option, - /// Keeps track of various runtime stats. stats: RuntimeStats, } /// Thread-local context. struct Context { - /// Handle to the spawner - spawner: Spawner, + /// Shared scheduler state + shared: Arc, - /// Scheduler core, enabling the holder of `Context` to execute the - /// scheduler. - core: RefCell>>, + /// Local queue + tasks: RefCell, } /// Initial queue capacity. @@ -127,36 +133,38 @@ const REMOTE_FIRST_INTERVAL: u8 = 31; // Tracks the current BasicScheduler. scoped_thread_local!(static CURRENT: Context); -impl BasicScheduler { +impl BasicScheduler

{ pub(crate) fn new( - driver: Driver, + park: P, before_park: Option, after_unpark: Option, - ) -> BasicScheduler { - let unpark = driver.unpark(); + ) -> BasicScheduler

{ + let unpark = Box::new(park.unpark()); let spawner = Spawner { shared: Arc::new(Shared { queue: Mutex::new(Some(VecDeque::with_capacity(INITIAL_CAPACITY))), owned: OwnedTasks::new(), - unpark, + unpark: unpark as Box, woken: AtomicBool::new(false), - before_park, - after_unpark, stats: RuntimeStats::new(1), }), }; - let core = AtomicCell::new(Some(Box::new(Core { - tasks: VecDeque::with_capacity(INITIAL_CAPACITY), + let inner = Mutex::new(Some(Inner { + tasks: Some(Tasks { + queue: VecDeque::with_capacity(INITIAL_CAPACITY), + }), spawner: spawner.clone(), tick: 0, - driver: Some(driver), + park, + before_park, + after_unpark, stats: WorkerStatsBatcher::new(0), - }))); + })); BasicScheduler { - core, + inner, notify: Notify::new(), spawner, context_guard: None, @@ -170,12 +178,12 @@ impl BasicScheduler { pub(crate) fn block_on(&self, future: F) -> F::Output { pin!(future); - // Attempt to steal the scheduler core and block_on the future if we can - // there, otherwise, lets select on a notification that the core is - // available or the future is complete. + // Attempt to steal the dedicated parker and block_on the future if we can there, + // otherwise, lets select on a notification that the parker is available + // or the future is complete. 
loop { - if let Some(core) = self.take_core() { - return core.block_on(future); + if let Some(inner) = &mut self.take_inner() { + return inner.block_on(future); } else { let mut enter = crate::runtime::enter(false); @@ -202,14 +210,11 @@ impl BasicScheduler { } } - fn take_core(&self) -> Option> { - let core = self.core.take()?; + fn take_inner(&self) -> Option> { + let inner = self.inner.lock().take()?; - Some(CoreGuard { - context: Context { - spawner: self.spawner.clone(), - core: RefCell::new(Some(core)), - }, + Some(InnerGuard { + inner: Some(inner), basic_scheduler: self, }) } @@ -219,109 +224,156 @@ impl BasicScheduler { } } -impl Context { - /// Execute the closure with the given scheduler core stored in the - /// thread-local context. - fn run_task(&self, mut core: Box, f: impl FnOnce() -> R) -> (Box, R) { - core.stats.incr_poll_count(); - self.enter(core, || crate::coop::budget(f)) - } +impl Inner

{ + /// Blocks on the provided future and drives the runtime's driver. + fn block_on(&mut self, future: F) -> F::Output { + enter(self, |scheduler, context| { + let _enter = crate::runtime::enter(false); + let waker = scheduler.spawner.waker_ref(); + let mut cx = std::task::Context::from_waker(&waker); - /// Blocks the current thread until an event is received by the driver, - /// including I/O events, timer events, ... - fn park(&self, mut core: Box) -> Box { - let mut driver = core.driver.take().expect("driver missing"); - - if let Some(f) = &self.spawner.shared.before_park { - // Incorrect lint, the closures are actually different types so `f` - // cannot be passed as an argument to `enter`. - #[allow(clippy::redundant_closure)] - let (c, _) = self.enter(core, || f()); - core = c; - } + pin!(future); - // This check will fail if `before_park` spawns a task for us to run - // instead of parking the thread - if core.tasks.is_empty() { - // Park until the thread is signaled - core.stats.about_to_park(); - core.stats.submit(&core.spawner.shared.stats); + 'outer: loop { + if scheduler.spawner.reset_woken() { + scheduler.stats.incr_poll_count(); + if let Ready(v) = crate::coop::budget(|| future.as_mut().poll(&mut cx)) { + return v; + } + } - let (c, _) = self.enter(core, || { - driver.park().expect("failed to park"); - }); + for _ in 0..MAX_TASKS_PER_TICK { + // Get and increment the current tick + let tick = scheduler.tick; + scheduler.tick = scheduler.tick.wrapping_add(1); - core = c; - core.stats.returned_from_park(); - } + let entry = if tick % REMOTE_FIRST_INTERVAL == 0 { + scheduler.spawner.pop().or_else(|| { + context + .tasks + .borrow_mut() + .queue + .pop_front() + .map(RemoteMsg::Schedule) + }) + } else { + context + .tasks + .borrow_mut() + .queue + .pop_front() + .map(RemoteMsg::Schedule) + .or_else(|| scheduler.spawner.pop()) + }; - if let Some(f) = &self.spawner.shared.after_unpark { - // Incorrect lint, the closures are actually different types so `f` 
- // cannot be passed as an argument to `enter`. - #[allow(clippy::redundant_closure)] - let (c, _) = self.enter(core, || f()); - core = c; - } + let entry = match entry { + Some(entry) => entry, + None => { + if let Some(f) = &scheduler.before_park { + f(); + } + // This check will fail if `before_park` spawns a task for us to run + // instead of parking the thread + if context.tasks.borrow_mut().queue.is_empty() { + // Park until the thread is signaled + scheduler.stats.about_to_park(); + scheduler.stats.submit(&scheduler.spawner.shared.stats); + scheduler.park.park().expect("failed to park"); + scheduler.stats.returned_from_park(); + } + if let Some(f) = &scheduler.after_unpark { + f(); + } - core.driver = Some(driver); - core - } + // Try polling the `block_on` future next + continue 'outer; + } + }; - /// Checks the driver for new events without blocking the thread. - fn park_yield(&self, mut core: Box) -> Box { - let mut driver = core.driver.take().expect("driver missing"); + match entry { + RemoteMsg::Schedule(task) => { + scheduler.stats.incr_poll_count(); + let task = context.shared.owned.assert_owner(task); + crate::coop::budget(|| task.run()) + } + } + } - core.stats.submit(&core.spawner.shared.stats); - let (mut core, _) = self.enter(core, || { - driver - .park_timeout(Duration::from_millis(0)) - .expect("failed to park"); - }); + // Yield to the park, this drives the timer and pulls any pending + // I/O events. + scheduler.stats.submit(&scheduler.spawner.shared.stats); + scheduler + .park + .park_timeout(Duration::from_millis(0)) + .expect("failed to park"); + } + }) + } +} - core.driver = Some(driver); - core +/// Enters the scheduler context. This sets the queue and other necessary +/// scheduler state in the thread-local. +fn enter(scheduler: &mut Inner

, f: F) -> R +where + F: FnOnce(&mut Inner

, &Context) -> R, + P: Park, +{ + // Ensures the run queue is placed back in the `BasicScheduler` instance + // once `block_on` returns.` + struct Guard<'a, P: Park> { + context: Option, + scheduler: &'a mut Inner

, } - fn enter(&self, core: Box, f: impl FnOnce() -> R) -> (Box, R) { - // Store the scheduler core in the thread-local context - // - // A drop-guard is employed at a higher level. - *self.core.borrow_mut() = Some(core); + impl Drop for Guard<'_, P> { + fn drop(&mut self) { + let Context { tasks, .. } = self.context.take().expect("context missing"); + self.scheduler.tasks = Some(tasks.into_inner()); + } + } - // Execute the closure while tracking the execution budget - let ret = f(); + // Remove `tasks` from `self` and place it in a `Context`. + let tasks = scheduler.tasks.take().expect("invalid state"); - // Take the scheduler core back - let core = self.core.borrow_mut().take().expect("core missing"); - (core, ret) - } + let guard = Guard { + context: Some(Context { + shared: scheduler.spawner.shared.clone(), + tasks: RefCell::new(tasks), + }), + scheduler, + }; + + let context = guard.context.as_ref().unwrap(); + let scheduler = &mut *guard.scheduler; + + CURRENT.set(context, || f(scheduler, context)) } -impl Drop for BasicScheduler { +impl Drop for BasicScheduler

{ fn drop(&mut self) { // Avoid a double panic if we are currently panicking and // the lock may be poisoned. - let core = match self.take_core() { - Some(core) => core, + let mut inner = match self.inner.lock().take() { + Some(inner) => inner, None if std::thread::panicking() => return, - None => panic!("Oh no! We never placed the Core back, this is a bug!"), + None => panic!("Oh no! We never placed the Inner state back, this is a bug!"), }; - core.enter(|mut core, context| { + enter(&mut inner, |scheduler, context| { // Drain the OwnedTasks collection. This call also closes the // collection, ensuring that no tasks are ever pushed after this // call returns. - context.spawner.shared.owned.close_and_shutdown_all(); + context.shared.owned.close_and_shutdown_all(); // Drain local queue // We already shut down every task, so we just need to drop the task. - while let Some(task) = core.tasks.pop_front() { + for task in context.tasks.borrow_mut().queue.drain(..) { drop(task); } // Drain remote queue and set it to None - let remote_queue = core.spawner.shared.queue.lock().take(); + let remote_queue = scheduler.spawner.shared.queue.lock().take(); // Using `Option::take` to replace the shared queue with `None`. // We already shut down every task, so we just need to drop the task. @@ -335,14 +387,12 @@ impl Drop for BasicScheduler { } } - assert!(context.spawner.shared.owned.is_empty()); - - (core, ()) + assert!(context.shared.owned.is_empty()); }); } } -impl fmt::Debug for BasicScheduler { +impl fmt::Debug for BasicScheduler

{ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("BasicScheduler").finish() } @@ -405,13 +455,8 @@ impl Schedule for Arc { fn schedule(&self, task: task::Notified) { CURRENT.with(|maybe_cx| match maybe_cx { - Some(cx) if Arc::ptr_eq(self, &cx.spawner.shared) => { - cx.core - .borrow_mut() - .as_mut() - .expect("core missing") - .tasks - .push_back(task); + Some(cx) if Arc::ptr_eq(self, &cx.shared) => { + cx.tasks.borrow_mut().queue.push_back(task); } _ => { // If the queue is None, then the runtime has shut down. We @@ -439,107 +484,35 @@ impl Wake for Shared { } } -// ===== CoreGuard ===== +// ===== InnerGuard ===== -/// Used to ensure we always place the `Core` value back into its slot in -/// `BasicScheduler`, even if the future panics. -struct CoreGuard<'a> { - context: Context, - basic_scheduler: &'a BasicScheduler, +/// Used to ensure we always place the Inner value +/// back into its slot in `BasicScheduler`, even if the +/// future panics. +struct InnerGuard<'a, P: Park> { + inner: Option>, + basic_scheduler: &'a BasicScheduler

, } -impl CoreGuard<'_> { - fn block_on(self, future: F) -> F::Output { - self.enter(|mut core, context| { - let _enter = crate::runtime::enter(false); - let waker = context.spawner.waker_ref(); - let mut cx = std::task::Context::from_waker(&waker); - - pin!(future); - - 'outer: loop { - if core.spawner.reset_woken() { - let (c, res) = context.run_task(core, || future.as_mut().poll(&mut cx)); - - core = c; - - if let Ready(v) = res { - return (core, v); - } - } - - for _ in 0..MAX_TASKS_PER_TICK { - // Get and increment the current tick - let tick = core.tick; - core.tick = core.tick.wrapping_add(1); - - let entry = if tick % REMOTE_FIRST_INTERVAL == 0 { - core.spawner - .pop() - .or_else(|| core.tasks.pop_front().map(RemoteMsg::Schedule)) - } else { - core.tasks - .pop_front() - .map(RemoteMsg::Schedule) - .or_else(|| core.spawner.pop()) - }; - - let entry = match entry { - Some(entry) => entry, - None => { - core = context.park(core); - - // Try polling the `block_on` future next - continue 'outer; - } - }; - - match entry { - RemoteMsg::Schedule(task) => { - let task = context.spawner.shared.owned.assert_owner(task); - - let (c, _) = context.run_task(core, || { - task.run(); - }); - - core = c; - } - } - } - - // Yield to the driver, this drives the timer and pulls any - // pending I/O events. - core = context.park_yield(core); - } - }) - } - - /// Enters the scheduler context. This sets the queue and other necessary - /// scheduler state in the thread-local. - fn enter(self, f: F) -> R - where - F: FnOnce(Box, &Context) -> (Box, R), - { - // Remove `core` from `context` to pass into the closure. 
- let core = self.context.core.borrow_mut().take().expect("core missing"); - - // Call the closure and place `core` back - let (core, ret) = CURRENT.set(&self.context, || f(core, &self.context)); - - *self.context.core.borrow_mut() = Some(core); - - ret +impl InnerGuard<'_, P> { + fn block_on(&mut self, future: F) -> F::Output { + // The only time inner gets set to `None` is if we have dropped + // already so this unwrap is safe. + self.inner.as_mut().unwrap().block_on(future) } } -impl Drop for CoreGuard<'_> { +impl Drop for InnerGuard<'_, P> { fn drop(&mut self) { - if let Some(core) = self.context.core.borrow_mut().take() { + if let Some(scheduler) = self.inner.take() { + let mut lock = self.basic_scheduler.inner.lock(); + // Replace old scheduler back into the state to allow // other threads to pick it up and drive it. - self.basic_scheduler.core.set(core); + lock.replace(scheduler); - // Wake up other possible threads that could steal the driver. + // Wake up other possible threads that could steal + // the dedicated parker P. self.basic_scheduler.notify.notify_one() } } diff --git a/tokio/src/runtime/mod.rs b/tokio/src/runtime/mod.rs index e77c5e3a0f8..847dd5972e1 100644 --- a/tokio/src/runtime/mod.rs +++ b/tokio/src/runtime/mod.rs @@ -283,7 +283,7 @@ cfg_rt! { #[derive(Debug)] enum Kind { /// Execute all tasks on the current-thread. - CurrentThread(BasicScheduler), + CurrentThread(BasicScheduler), /// Execute tasks across multiple threads. 
#[cfg(feature = "rt-multi-thread")] diff --git a/tokio/src/runtime/tests/loom_basic_scheduler.rs b/tokio/src/runtime/tests/loom_basic_scheduler.rs index a772603f711..d2894b9b27e 100644 --- a/tokio/src/runtime/tests/loom_basic_scheduler.rs +++ b/tokio/src/runtime/tests/loom_basic_scheduler.rs @@ -34,22 +34,20 @@ fn assert_at_most_num_polls(rt: Arc, at_most_polls: usize) { #[test] fn block_on_num_polls() { loom::model(|| { - // we expect at most 4 number of polls because there are three points at - // which we poll the future and an opportunity for a false-positive.. At - // any of these points it can be ready: + // we expect at most 3 number of polls because there are + // three points at which we poll the future. At any of these + // points it can be ready: // - // - when we fail to steal the parker and we block on a notification - // that it is available. + // - when we fail to steal the parker and we block on a + // notification that it is available. // // - when we steal the parker and we schedule the future // - // - when the future is woken up and we have ran the max number of tasks - // for the current tick or there are no more tasks to run. + // - when the future is woken up and we have ran the max + // number of tasks for the current tick or there are no + // more tasks to run. // - // - a thread is notified that the parker is available but a third - // thread acquires it before the notified thread can. 
- // - let at_most = 4; + let at_most = 3; let rt1 = Arc::new(Builder::new_current_thread().build().unwrap()); let rt2 = rt1.clone(); diff --git a/tokio/src/util/atomic_cell.rs b/tokio/src/runtime/thread_pool/atomic_cell.rs similarity index 77% rename from tokio/src/util/atomic_cell.rs rename to tokio/src/runtime/thread_pool/atomic_cell.rs index 07e37303a7b..98847e6ffa1 100644 --- a/tokio/src/util/atomic_cell.rs +++ b/tokio/src/runtime/thread_pool/atomic_cell.rs @@ -3,7 +3,7 @@ use crate::loom::sync::atomic::AtomicPtr; use std::ptr; use std::sync::atomic::Ordering::AcqRel; -pub(crate) struct AtomicCell { +pub(super) struct AtomicCell { data: AtomicPtr, } @@ -11,22 +11,22 @@ unsafe impl Send for AtomicCell {} unsafe impl Sync for AtomicCell {} impl AtomicCell { - pub(crate) fn new(data: Option>) -> AtomicCell { + pub(super) fn new(data: Option>) -> AtomicCell { AtomicCell { data: AtomicPtr::new(to_raw(data)), } } - pub(crate) fn swap(&self, val: Option>) -> Option> { + pub(super) fn swap(&self, val: Option>) -> Option> { let old = self.data.swap(to_raw(val), AcqRel); from_raw(old) } - pub(crate) fn set(&self, val: Box) { + pub(super) fn set(&self, val: Box) { let _ = self.swap(Some(val)); } - pub(crate) fn take(&self) -> Option> { + pub(super) fn take(&self) -> Option> { self.swap(None) } } diff --git a/tokio/src/runtime/thread_pool/mod.rs b/tokio/src/runtime/thread_pool/mod.rs index 3e1ce448215..82e34c78d28 100644 --- a/tokio/src/runtime/thread_pool/mod.rs +++ b/tokio/src/runtime/thread_pool/mod.rs @@ -1,5 +1,8 @@ //! 
Threadpool +mod atomic_cell; +use atomic_cell::AtomicCell; + mod idle; use self::idle::Idle; diff --git a/tokio/src/runtime/thread_pool/worker.rs b/tokio/src/runtime/thread_pool/worker.rs index 27d0d5e7d32..ae8efe6724f 100644 --- a/tokio/src/runtime/thread_pool/worker.rs +++ b/tokio/src/runtime/thread_pool/worker.rs @@ -66,9 +66,8 @@ use crate::runtime::enter::EnterContext; use crate::runtime::park::{Parker, Unparker}; use crate::runtime::stats::{RuntimeStats, WorkerStatsBatcher}; use crate::runtime::task::{Inject, JoinHandle, OwnedTasks}; -use crate::runtime::thread_pool::Idle; +use crate::runtime::thread_pool::{AtomicCell, Idle}; use crate::runtime::{queue, task, Callback}; -use crate::util::atomic_cell::AtomicCell; use crate::util::FastRand; use std::cell::RefCell; diff --git a/tokio/src/util/mod.rs b/tokio/src/util/mod.rs index f0a79a7cca9..df30f2b86a9 100644 --- a/tokio/src/util/mod.rs +++ b/tokio/src/util/mod.rs @@ -3,9 +3,6 @@ cfg_io_driver! { pub(crate) mod slab; } -#[cfg(feature = "rt")] -pub(crate) mod atomic_cell; - #[cfg(any( // io driver uses `WakeList` directly feature = "net", From 1d698b5a901537f80fe741c568e0fd64b47ed62d Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Wed, 12 Jan 2022 06:38:18 +0900 Subject: [PATCH 54/59] chore: test hyper on CI (#4393) --- .github/workflows/ci.yml | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 247c6f62f03..c0945165ae0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -12,6 +12,10 @@ env: nightly: nightly-2021-11-23 minrust: 1.46 +defaults: + run: + shell: bash + jobs: # Depends on all action sthat are required for a "successful" CI run. 
tests-pass: @@ -31,6 +35,7 @@ jobs: - valgrind - loom-compile - check-readme + - test-hyper steps: - run: exit 0 @@ -355,3 +360,34 @@ jobs: - name: Verify that Tokio version is up to date in README working-directory: tokio run: grep -q "$(sed '/^version = /!d' Cargo.toml | head -n1)" README.md + + test-hyper: + name: Test hyper + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: + - windows-latest + - ubuntu-latest + - macos-latest + steps: + - uses: actions/checkout@v2 + - name: Install Rust + run: rustup update stable + - uses: Swatinem/rust-cache@v1 + - name: Test hyper + run: | + set -x + git clone https://github.com/hyperium/hyper.git + cd hyper + # checkout the latest release because HEAD maybe contains breakage. + tag=$(git describe --abbrev=0 --tags) + git checkout "${tag}" + echo '[workspace]' >>Cargo.toml + echo '[patch.crates-io]' >>Cargo.toml + echo 'tokio = { path = "../tokio" }' >>Cargo.toml + echo 'tokio-util = { path = "../tokio-util" }' >>Cargo.toml + echo 'tokio-stream = { path = "../tokio-stream" }' >>Cargo.toml + echo 'tokio-test = { path = "../tokio-test" }' >>Cargo.toml + git diff + cargo test --features full From e951d55720689ae5058d8af97a71a66d063f87cd Mon Sep 17 00:00:00 2001 From: Carl Lerche Date: Tue, 11 Jan 2022 18:39:56 -0800 Subject: [PATCH 55/59] rt: refactor current-thread scheduler (take 2) (#4395) Re-applies #4377 and fixes the bug resulting in Hyper's double panic. Revert: #4394 Original PR: This PR does some refactoring to the current-thread scheduler bringing it closer to the structure of the multi-threaded scheduler. More specifically, the core scheduler data is stored in a Core struct and that struct is passed around as a "token" indicating permission to do work. The Core structure is also stored in the thread-local context. This refactor is intended to support #4373, making it easier to track counters in more locations in the current-thread scheduler. 
I tried to keep commits small, but the "set Core in thread-local context" is both the biggest commit and the key one. --- tokio/src/runtime/basic_scheduler.rs | 420 ++++++++++-------- tokio/src/runtime/mod.rs | 2 +- .../src/runtime/tests/loom_basic_scheduler.rs | 20 +- tokio/src/runtime/thread_pool/mod.rs | 3 - tokio/src/runtime/thread_pool/worker.rs | 3 +- .../thread_pool => util}/atomic_cell.rs | 10 +- tokio/src/util/mod.rs | 3 + tokio/tests/rt_basic.rs | 29 ++ 8 files changed, 275 insertions(+), 215 deletions(-) rename tokio/src/{runtime/thread_pool => util}/atomic_cell.rs (77%) diff --git a/tokio/src/runtime/basic_scheduler.rs b/tokio/src/runtime/basic_scheduler.rs index 872d0d5b897..f70fa656925 100644 --- a/tokio/src/runtime/basic_scheduler.rs +++ b/tokio/src/runtime/basic_scheduler.rs @@ -3,10 +3,12 @@ use crate::loom::sync::atomic::AtomicBool; use crate::loom::sync::Mutex; use crate::park::{Park, Unpark}; use crate::runtime::context::EnterGuard; +use crate::runtime::driver::Driver; use crate::runtime::stats::{RuntimeStats, WorkerStatsBatcher}; use crate::runtime::task::{self, JoinHandle, OwnedTasks, Schedule, Task}; use crate::runtime::Callback; use crate::sync::notify::Notify; +use crate::util::atomic_cell::AtomicCell; use crate::util::{waker_ref, Wake, WakerRef}; use std::cell::RefCell; @@ -19,13 +21,12 @@ use std::task::Poll::{Pending, Ready}; use std::time::Duration; /// Executes tasks on the current thread -pub(crate) struct BasicScheduler { - /// Inner state guarded by a mutex that is shared - /// between all `block_on` calls. - inner: Mutex>>, +pub(crate) struct BasicScheduler { + /// Core scheduler data is acquired by a thread entering `block_on`. + core: AtomicCell, /// Notifier for waking up other threads to steal the - /// parker. + /// driver. notify: Notify, /// Sendable task spawner @@ -38,15 +39,11 @@ pub(crate) struct BasicScheduler { context_guard: Option, } -/// The inner scheduler that owns the task queue and the main parker P. 
-struct Inner { +/// Data required for executing the scheduler. The struct is passed around to +/// a function that will perform the scheduling work and acts as a capability token. +struct Core { /// Scheduler run queue - /// - /// When the scheduler is executed, the queue is removed from `self` and - /// moved into `Context`. - /// - /// This indirection is to allow `BasicScheduler` to be `Send`. - tasks: Option, + tasks: VecDeque>>, /// Sendable task spawner spawner: Spawner, @@ -54,13 +51,10 @@ struct Inner { /// Current tick tick: u8, - /// Thread park handle - park: P, - - /// Callback for a worker parking itself - before_park: Option, - /// Callback for a worker unparking itself - after_unpark: Option, + /// Runtime driver + /// + /// The driver is removed before starting to park the thread + driver: Option, /// Stats batcher stats: WorkerStatsBatcher, @@ -71,13 +65,6 @@ pub(crate) struct Spawner { shared: Arc, } -struct Tasks { - /// Local run queue. - /// - /// Tasks notified from the current thread are pushed into this queue. - queue: VecDeque>>, -} - /// A remote scheduler entry. /// /// These are filled in by remote threads sending instructions to the scheduler. @@ -100,22 +87,29 @@ struct Shared { owned: OwnedTasks>, /// Unpark the blocked thread. - unpark: Box, + unpark: ::Unpark, /// Indicates whether the blocked on thread was woken. woken: AtomicBool, + /// Callback for a worker parking itself + before_park: Option, + + /// Callback for a worker unparking itself + after_unpark: Option, + /// Keeps track of various runtime stats. stats: RuntimeStats, } /// Thread-local context. struct Context { - /// Shared scheduler state - shared: Arc, + /// Handle to the spawner + spawner: Spawner, - /// Local queue - tasks: RefCell, + /// Scheduler core, enabling the holder of `Context` to execute the + /// scheduler. + core: RefCell>>, } /// Initial queue capacity. @@ -133,38 +127,36 @@ const REMOTE_FIRST_INTERVAL: u8 = 31; // Tracks the current BasicScheduler. 
scoped_thread_local!(static CURRENT: Context); -impl<P: Park> BasicScheduler<P> { +impl BasicScheduler { pub(crate) fn new( - park: P, + driver: Driver, before_park: Option<Callback>, after_unpark: Option<Callback>, - ) -> BasicScheduler<P>
{ - let unpark = Box::new(park.unpark()); + ) -> BasicScheduler { + let unpark = driver.unpark(); let spawner = Spawner { shared: Arc::new(Shared { queue: Mutex::new(Some(VecDeque::with_capacity(INITIAL_CAPACITY))), owned: OwnedTasks::new(), - unpark: unpark as Box, + unpark, woken: AtomicBool::new(false), + before_park, + after_unpark, stats: RuntimeStats::new(1), }), }; - let inner = Mutex::new(Some(Inner { - tasks: Some(Tasks { - queue: VecDeque::with_capacity(INITIAL_CAPACITY), - }), + let core = AtomicCell::new(Some(Box::new(Core { + tasks: VecDeque::with_capacity(INITIAL_CAPACITY), spawner: spawner.clone(), tick: 0, - park, - before_park, - after_unpark, + driver: Some(driver), stats: WorkerStatsBatcher::new(0), - })); + }))); BasicScheduler { - inner, + core, notify: Notify::new(), spawner, context_guard: None, @@ -178,12 +170,12 @@ impl BasicScheduler
<P>
{ pub(crate) fn block_on(&self, future: F) -> F::Output { pin!(future); - // Attempt to steal the dedicated parker and block_on the future if we can there, - // otherwise, lets select on a notification that the parker is available - // or the future is complete. + // Attempt to steal the scheduler core and block_on the future if we can + // there, otherwise, lets select on a notification that the core is + // available or the future is complete. loop { - if let Some(inner) = &mut self.take_inner() { - return inner.block_on(future); + if let Some(core) = self.take_core() { + return core.block_on(future); } else { let mut enter = crate::runtime::enter(false); @@ -210,11 +202,14 @@ impl BasicScheduler
<P>
{ } } - fn take_inner(&self) -> Option> { - let inner = self.inner.lock().take()?; + fn take_core(&self) -> Option> { + let core = self.core.take()?; - Some(InnerGuard { - inner: Some(inner), + Some(CoreGuard { + context: Context { + spawner: self.spawner.clone(), + core: RefCell::new(Some(core)), + }, basic_scheduler: self, }) } @@ -224,156 +219,109 @@ impl BasicScheduler
<P>
{ } } -impl<P: Park> Inner<P>
{ - /// Blocks on the provided future and drives the runtime's driver. - fn block_on(&mut self, future: F) -> F::Output { - enter(self, |scheduler, context| { - let _enter = crate::runtime::enter(false); - let waker = scheduler.spawner.waker_ref(); - let mut cx = std::task::Context::from_waker(&waker); - - pin!(future); - - 'outer: loop { - if scheduler.spawner.reset_woken() { - scheduler.stats.incr_poll_count(); - if let Ready(v) = crate::coop::budget(|| future.as_mut().poll(&mut cx)) { - return v; - } - } +impl Context { + /// Execute the closure with the given scheduler core stored in the + /// thread-local context. + fn run_task(&self, mut core: Box, f: impl FnOnce() -> R) -> (Box, R) { + core.stats.incr_poll_count(); + self.enter(core, || crate::coop::budget(f)) + } - for _ in 0..MAX_TASKS_PER_TICK { - // Get and increment the current tick - let tick = scheduler.tick; - scheduler.tick = scheduler.tick.wrapping_add(1); + /// Blocks the current thread until an event is received by the driver, + /// including I/O events, timer events, ... + fn park(&self, mut core: Box) -> Box { + let mut driver = core.driver.take().expect("driver missing"); + + if let Some(f) = &self.spawner.shared.before_park { + // Incorrect lint, the closures are actually different types so `f` + // cannot be passed as an argument to `enter`. 
+ #[allow(clippy::redundant_closure)] + let (c, _) = self.enter(core, || f()); + core = c; + } - let entry = if tick % REMOTE_FIRST_INTERVAL == 0 { - scheduler.spawner.pop().or_else(|| { - context - .tasks - .borrow_mut() - .queue - .pop_front() - .map(RemoteMsg::Schedule) - }) - } else { - context - .tasks - .borrow_mut() - .queue - .pop_front() - .map(RemoteMsg::Schedule) - .or_else(|| scheduler.spawner.pop()) - }; + // This check will fail if `before_park` spawns a task for us to run + // instead of parking the thread + if core.tasks.is_empty() { + // Park until the thread is signaled + core.stats.about_to_park(); + core.stats.submit(&core.spawner.shared.stats); - let entry = match entry { - Some(entry) => entry, - None => { - if let Some(f) = &scheduler.before_park { - f(); - } - // This check will fail if `before_park` spawns a task for us to run - // instead of parking the thread - if context.tasks.borrow_mut().queue.is_empty() { - // Park until the thread is signaled - scheduler.stats.about_to_park(); - scheduler.stats.submit(&scheduler.spawner.shared.stats); - scheduler.park.park().expect("failed to park"); - scheduler.stats.returned_from_park(); - } - if let Some(f) = &scheduler.after_unpark { - f(); - } + let (c, _) = self.enter(core, || { + driver.park().expect("failed to park"); + }); - // Try polling the `block_on` future next - continue 'outer; - } - }; + core = c; + core.stats.returned_from_park(); + } - match entry { - RemoteMsg::Schedule(task) => { - scheduler.stats.incr_poll_count(); - let task = context.shared.owned.assert_owner(task); - crate::coop::budget(|| task.run()) - } - } - } + if let Some(f) = &self.spawner.shared.after_unpark { + // Incorrect lint, the closures are actually different types so `f` + // cannot be passed as an argument to `enter`. + #[allow(clippy::redundant_closure)] + let (c, _) = self.enter(core, || f()); + core = c; + } - // Yield to the park, this drives the timer and pulls any pending - // I/O events. 
- scheduler.stats.submit(&scheduler.spawner.shared.stats); - scheduler - .park - .park_timeout(Duration::from_millis(0)) - .expect("failed to park"); - } - }) + core.driver = Some(driver); + core } -} -/// Enters the scheduler context. This sets the queue and other necessary -/// scheduler state in the thread-local. -fn enter(scheduler: &mut Inner

<P>, f: F) -> R -where - F: FnOnce(&mut Inner<P>
, &Context) -> R, - P: Park, -{ - // Ensures the run queue is placed back in the `BasicScheduler` instance - // once `block_on` returns.` - struct Guard<'a, P: Park> { - context: Option, - scheduler: &'a mut Inner
<P>
, - } + /// Checks the driver for new events without blocking the thread. + fn park_yield(&self, mut core: Box) -> Box { + let mut driver = core.driver.take().expect("driver missing"); - impl Drop for Guard<'_, P> { - fn drop(&mut self) { - let Context { tasks, .. } = self.context.take().expect("context missing"); - self.scheduler.tasks = Some(tasks.into_inner()); - } - } + core.stats.submit(&core.spawner.shared.stats); + let (mut core, _) = self.enter(core, || { + driver + .park_timeout(Duration::from_millis(0)) + .expect("failed to park"); + }); - // Remove `tasks` from `self` and place it in a `Context`. - let tasks = scheduler.tasks.take().expect("invalid state"); + core.driver = Some(driver); + core + } - let guard = Guard { - context: Some(Context { - shared: scheduler.spawner.shared.clone(), - tasks: RefCell::new(tasks), - }), - scheduler, - }; + fn enter(&self, core: Box, f: impl FnOnce() -> R) -> (Box, R) { + // Store the scheduler core in the thread-local context + // + // A drop-guard is employed at a higher level. + *self.core.borrow_mut() = Some(core); - let context = guard.context.as_ref().unwrap(); - let scheduler = &mut *guard.scheduler; + // Execute the closure while tracking the execution budget + let ret = f(); - CURRENT.set(context, || f(scheduler, context)) + // Take the scheduler core back + let core = self.core.borrow_mut().take().expect("core missing"); + (core, ret) + } } -impl Drop for BasicScheduler
<P>
{ +impl Drop for BasicScheduler { fn drop(&mut self) { // Avoid a double panic if we are currently panicking and // the lock may be poisoned. - let mut inner = match self.inner.lock().take() { - Some(inner) => inner, + let core = match self.take_core() { + Some(core) => core, None if std::thread::panicking() => return, - None => panic!("Oh no! We never placed the Inner state back, this is a bug!"), + None => panic!("Oh no! We never placed the Core back, this is a bug!"), }; - enter(&mut inner, |scheduler, context| { + core.enter(|mut core, context| { // Drain the OwnedTasks collection. This call also closes the // collection, ensuring that no tasks are ever pushed after this // call returns. - context.shared.owned.close_and_shutdown_all(); + context.spawner.shared.owned.close_and_shutdown_all(); // Drain local queue // We already shut down every task, so we just need to drop the task. - for task in context.tasks.borrow_mut().queue.drain(..) { + while let Some(task) = core.tasks.pop_front() { drop(task); } // Drain remote queue and set it to None - let remote_queue = scheduler.spawner.shared.queue.lock().take(); + let remote_queue = core.spawner.shared.queue.lock().take(); // Using `Option::take` to replace the shared queue with `None`. // We already shut down every task, so we just need to drop the task. @@ -387,12 +335,14 @@ impl Drop for BasicScheduler
<P>
{ } } - assert!(context.shared.owned.is_empty()); + assert!(context.spawner.shared.owned.is_empty()); + + (core, ()) }); } } -impl fmt::Debug for BasicScheduler
<P>
{ +impl fmt::Debug for BasicScheduler { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("BasicScheduler").finish() } @@ -455,8 +405,14 @@ impl Schedule for Arc { fn schedule(&self, task: task::Notified) { CURRENT.with(|maybe_cx| match maybe_cx { - Some(cx) if Arc::ptr_eq(self, &cx.shared) => { - cx.tasks.borrow_mut().queue.push_back(task); + Some(cx) if Arc::ptr_eq(self, &cx.spawner.shared) => { + let mut core = cx.core.borrow_mut(); + + // If `None`, the runtime is shutting down, so there is no need + // to schedule the task. + if let Some(core) = core.as_mut() { + core.tasks.push_back(task); + } } _ => { // If the queue is None, then the runtime has shut down. We @@ -484,35 +440,107 @@ impl Wake for Shared { } } -// ===== InnerGuard ===== +// ===== CoreGuard ===== -/// Used to ensure we always place the Inner value -/// back into its slot in `BasicScheduler`, even if the -/// future panics. -struct InnerGuard<'a, P: Park> { - inner: Option>, - basic_scheduler: &'a BasicScheduler
<P>
, +/// Used to ensure we always place the `Core` value back into its slot in +/// `BasicScheduler`, even if the future panics. +struct CoreGuard<'a> { + context: Context, + basic_scheduler: &'a BasicScheduler, } -impl InnerGuard<'_, P> { - fn block_on(&mut self, future: F) -> F::Output { - // The only time inner gets set to `None` is if we have dropped - // already so this unwrap is safe. - self.inner.as_mut().unwrap().block_on(future) +impl CoreGuard<'_> { + fn block_on(self, future: F) -> F::Output { + self.enter(|mut core, context| { + let _enter = crate::runtime::enter(false); + let waker = context.spawner.waker_ref(); + let mut cx = std::task::Context::from_waker(&waker); + + pin!(future); + + 'outer: loop { + if core.spawner.reset_woken() { + let (c, res) = context.run_task(core, || future.as_mut().poll(&mut cx)); + + core = c; + + if let Ready(v) = res { + return (core, v); + } + } + + for _ in 0..MAX_TASKS_PER_TICK { + // Get and increment the current tick + let tick = core.tick; + core.tick = core.tick.wrapping_add(1); + + let entry = if tick % REMOTE_FIRST_INTERVAL == 0 { + core.spawner + .pop() + .or_else(|| core.tasks.pop_front().map(RemoteMsg::Schedule)) + } else { + core.tasks + .pop_front() + .map(RemoteMsg::Schedule) + .or_else(|| core.spawner.pop()) + }; + + let entry = match entry { + Some(entry) => entry, + None => { + core = context.park(core); + + // Try polling the `block_on` future next + continue 'outer; + } + }; + + match entry { + RemoteMsg::Schedule(task) => { + let task = context.spawner.shared.owned.assert_owner(task); + + let (c, _) = context.run_task(core, || { + task.run(); + }); + + core = c; + } + } + } + + // Yield to the driver, this drives the timer and pulls any + // pending I/O events. + core = context.park_yield(core); + } + }) + } + + /// Enters the scheduler context. This sets the queue and other necessary + /// scheduler state in the thread-local. 
+ fn enter(self, f: F) -> R + where + F: FnOnce(Box, &Context) -> (Box, R), + { + // Remove `core` from `context` to pass into the closure. + let core = self.context.core.borrow_mut().take().expect("core missing"); + + // Call the closure and place `core` back + let (core, ret) = CURRENT.set(&self.context, || f(core, &self.context)); + + *self.context.core.borrow_mut() = Some(core); + + ret } } -impl Drop for InnerGuard<'_, P> { +impl Drop for CoreGuard<'_> { fn drop(&mut self) { - if let Some(scheduler) = self.inner.take() { - let mut lock = self.basic_scheduler.inner.lock(); - + if let Some(core) = self.context.core.borrow_mut().take() { // Replace old scheduler back into the state to allow // other threads to pick it up and drive it. - lock.replace(scheduler); + self.basic_scheduler.core.set(core); - // Wake up other possible threads that could steal - // the dedicated parker P. + // Wake up other possible threads that could steal the driver. self.basic_scheduler.notify.notify_one() } } diff --git a/tokio/src/runtime/mod.rs b/tokio/src/runtime/mod.rs index 847dd5972e1..e77c5e3a0f8 100644 --- a/tokio/src/runtime/mod.rs +++ b/tokio/src/runtime/mod.rs @@ -283,7 +283,7 @@ cfg_rt! { #[derive(Debug)] enum Kind { /// Execute all tasks on the current-thread. - CurrentThread(BasicScheduler), + CurrentThread(BasicScheduler), /// Execute tasks across multiple threads. #[cfg(feature = "rt-multi-thread")] diff --git a/tokio/src/runtime/tests/loom_basic_scheduler.rs b/tokio/src/runtime/tests/loom_basic_scheduler.rs index d2894b9b27e..a772603f711 100644 --- a/tokio/src/runtime/tests/loom_basic_scheduler.rs +++ b/tokio/src/runtime/tests/loom_basic_scheduler.rs @@ -34,20 +34,22 @@ fn assert_at_most_num_polls(rt: Arc, at_most_polls: usize) { #[test] fn block_on_num_polls() { loom::model(|| { - // we expect at most 3 number of polls because there are - // three points at which we poll the future. 
At any of these - // points it can be ready: + // we expect at most 4 number of polls because there are three points at + // which we poll the future and an opportunity for a false-positive.. At + // any of these points it can be ready: // - // - when we fail to steal the parker and we block on a - // notification that it is available. + // - when we fail to steal the parker and we block on a notification + // that it is available. // // - when we steal the parker and we schedule the future // - // - when the future is woken up and we have ran the max - // number of tasks for the current tick or there are no - // more tasks to run. + // - when the future is woken up and we have ran the max number of tasks + // for the current tick or there are no more tasks to run. // - let at_most = 3; + // - a thread is notified that the parker is available but a third + // thread acquires it before the notified thread can. + // + let at_most = 4; let rt1 = Arc::new(Builder::new_current_thread().build().unwrap()); let rt2 = rt1.clone(); diff --git a/tokio/src/runtime/thread_pool/mod.rs b/tokio/src/runtime/thread_pool/mod.rs index 82e34c78d28..3e1ce448215 100644 --- a/tokio/src/runtime/thread_pool/mod.rs +++ b/tokio/src/runtime/thread_pool/mod.rs @@ -1,8 +1,5 @@ //! 
Threadpool -mod atomic_cell; -use atomic_cell::AtomicCell; - mod idle; use self::idle::Idle; diff --git a/tokio/src/runtime/thread_pool/worker.rs b/tokio/src/runtime/thread_pool/worker.rs index ae8efe6724f..27d0d5e7d32 100644 --- a/tokio/src/runtime/thread_pool/worker.rs +++ b/tokio/src/runtime/thread_pool/worker.rs @@ -66,8 +66,9 @@ use crate::runtime::enter::EnterContext; use crate::runtime::park::{Parker, Unparker}; use crate::runtime::stats::{RuntimeStats, WorkerStatsBatcher}; use crate::runtime::task::{Inject, JoinHandle, OwnedTasks}; -use crate::runtime::thread_pool::{AtomicCell, Idle}; +use crate::runtime::thread_pool::Idle; use crate::runtime::{queue, task, Callback}; +use crate::util::atomic_cell::AtomicCell; use crate::util::FastRand; use std::cell::RefCell; diff --git a/tokio/src/runtime/thread_pool/atomic_cell.rs b/tokio/src/util/atomic_cell.rs similarity index 77% rename from tokio/src/runtime/thread_pool/atomic_cell.rs rename to tokio/src/util/atomic_cell.rs index 98847e6ffa1..07e37303a7b 100644 --- a/tokio/src/runtime/thread_pool/atomic_cell.rs +++ b/tokio/src/util/atomic_cell.rs @@ -3,7 +3,7 @@ use crate::loom::sync::atomic::AtomicPtr; use std::ptr; use std::sync::atomic::Ordering::AcqRel; -pub(super) struct AtomicCell { +pub(crate) struct AtomicCell { data: AtomicPtr, } @@ -11,22 +11,22 @@ unsafe impl Send for AtomicCell {} unsafe impl Sync for AtomicCell {} impl AtomicCell { - pub(super) fn new(data: Option>) -> AtomicCell { + pub(crate) fn new(data: Option>) -> AtomicCell { AtomicCell { data: AtomicPtr::new(to_raw(data)), } } - pub(super) fn swap(&self, val: Option>) -> Option> { + pub(crate) fn swap(&self, val: Option>) -> Option> { let old = self.data.swap(to_raw(val), AcqRel); from_raw(old) } - pub(super) fn set(&self, val: Box) { + pub(crate) fn set(&self, val: Box) { let _ = self.swap(Some(val)); } - pub(super) fn take(&self) -> Option> { + pub(crate) fn take(&self) -> Option> { self.swap(None) } } diff --git a/tokio/src/util/mod.rs 
b/tokio/src/util/mod.rs index df30f2b86a9..f0a79a7cca9 100644 --- a/tokio/src/util/mod.rs +++ b/tokio/src/util/mod.rs @@ -3,6 +3,9 @@ cfg_io_driver! { pub(crate) mod slab; } +#[cfg(feature = "rt")] +pub(crate) mod atomic_cell; + #[cfg(any( // io driver uses `WakeList` directly feature = "net", diff --git a/tokio/tests/rt_basic.rs b/tokio/tests/rt_basic.rs index 70056b16f01..149b3bfaad7 100644 --- a/tokio/tests/rt_basic.rs +++ b/tokio/tests/rt_basic.rs @@ -168,6 +168,35 @@ fn drop_tasks_in_context() { assert!(SUCCESS.load(Ordering::SeqCst)); } +#[test] +#[should_panic(expected = "boom")] +fn wake_in_drop_after_panic() { + let (tx, rx) = oneshot::channel::<()>(); + + struct WakeOnDrop(Option>); + + impl Drop for WakeOnDrop { + fn drop(&mut self) { + self.0.take().unwrap().send(()).unwrap(); + } + } + + let rt = rt(); + + rt.spawn(async move { + let _wake_on_drop = WakeOnDrop(Some(tx)); + // wait forever + futures::future::pending::<()>().await; + }); + + let _join = rt.spawn(async move { rx.await }); + + rt.block_on(async { + tokio::task::yield_now().await; + panic!("boom"); + }); +} + #[test] #[should_panic( expected = "A Tokio 1.x context was found, but timers are disabled. Call `enable_time` on the runtime builder to enable timers." From e255a265d3c0e92fd6c1b1dc58f17690621533f2 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Thu, 13 Jan 2022 00:27:05 +0900 Subject: [PATCH 56/59] ci: upgrade to new nightly (#4396) --- .cirrus.yml | 2 +- .github/workflows/ci.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.cirrus.yml b/.cirrus.yml index 73d77abfa1d..1f431b2d201 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -29,7 +29,7 @@ task: setup_script: - pkg install -y bash curl - curl https://sh.rustup.rs -sSf --output rustup.sh - - sh rustup.sh -y --profile minimal --default-toolchain nightly-2021-11-23 + - sh rustup.sh -y --profile minimal --default-toolchain nightly-2022-01-12 - . 
$HOME/.cargo/env - | echo "~~~~ rustc --version ~~~~" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c0945165ae0..f9eded120da 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -9,7 +9,7 @@ name: CI env: RUSTFLAGS: -Dwarnings RUST_BACKTRACE: 1 - nightly: nightly-2021-11-23 + nightly: nightly-2022-01-12 minrust: 1.46 defaults: From 089eeae24b660ba22245fd2ae7e55510a0cc34ba Mon Sep 17 00:00:00 2001 From: Matthew Pomes Date: Wed, 12 Jan 2022 12:49:57 -0600 Subject: [PATCH 57/59] runtime: add better error message when spawning blocking threads (#4398) --- tokio/src/runtime/blocking/pool.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio/src/runtime/blocking/pool.rs b/tokio/src/runtime/blocking/pool.rs index 77ab4958683..bb6c1ee6606 100644 --- a/tokio/src/runtime/blocking/pool.rs +++ b/tokio/src/runtime/blocking/pool.rs @@ -244,7 +244,7 @@ impl Spawner { rt.blocking_spawner.inner.run(id); drop(shutdown_tx); }) - .unwrap() + .expect("OS can't spawn a new worker thread") } } From 16a84049676fe168836a6e58d1526990e482bd51 Mon Sep 17 00:00:00 2001 From: Carl Lerche Date: Thu, 13 Jan 2022 11:49:13 -0800 Subject: [PATCH 58/59] chore: fix ci to track Rust 1.58 (#4401) --- tests-build/tests/fail/macros_type_mismatch.stderr | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/tests-build/tests/fail/macros_type_mismatch.stderr b/tests-build/tests/fail/macros_type_mismatch.stderr index a8fa99bc63b..f98031514ff 100644 --- a/tests-build/tests/fail/macros_type_mismatch.stderr +++ b/tests-build/tests/fail/macros_type_mismatch.stderr @@ -1,5 +1,5 @@ error[E0308]: mismatched types - --> $DIR/macros_type_mismatch.rs:5:5 + --> tests/fail/macros_type_mismatch.rs:5:5 | 5 | Ok(()) | ^^^^^^ expected `()`, found enum `Result` @@ -16,7 +16,7 @@ help: try adding a return type | ++++++++++++++++ error[E0308]: mismatched types - --> $DIR/macros_type_mismatch.rs:10:5 + --> 
tests/fail/macros_type_mismatch.rs:10:5 | 9 | async fn missing_return_type() { | - help: try adding a return type: `-> Result<(), _>` @@ -27,7 +27,7 @@ error[E0308]: mismatched types found enum `Result<(), _>` error[E0308]: mismatched types - --> $DIR/macros_type_mismatch.rs:23:5 + --> tests/fail/macros_type_mismatch.rs:23:5 | 14 | async fn extra_semicolon() -> Result<(), ()> { | -------------- expected `Result<(), ()>` because of return type @@ -37,9 +37,8 @@ error[E0308]: mismatched types | = note: expected enum `Result<(), ()>` found unit type `()` -help: try using a variant of the expected enum +help: try adding an expression at the end of the block | -23 | Ok(Ok(());) - | -23 | Err(Ok(());) +23 ~ Ok(());; +24 + Ok(()) | From 4eed411519783ef6f58cbf74f886f91142b5cfa6 Mon Sep 17 00:00:00 2001 From: Carl Lerche Date: Thu, 13 Jan 2022 15:18:32 -0800 Subject: [PATCH 59/59] rt: reduce no-op wakeups in the multi-threaded scheduler (#4383) This patch reduces the number of times worker threads wake up without having work to do in the multi-threaded scheduler. Unnecessary wake-ups are expensive and slow down the scheduler. I have observed this change reduce no-op wakes by up to 50%. The multi-threaded scheduler is work-stealing. When a worker has tasks to process, and other workers are idle (parked), these idle workers must be unparked so that they can steal work from the busy worker. However, unparking threads is expensive, so there is an optimization that avoids unparking a worker if there already exists workers in a "searching" state (the worker is unparked and looking for work). This works pretty well, but transitioning from 1 "searching" worker to 0 searching workers introduces a race condition where a thread unpark can be lost: * thread 1: last searching worker about to exit searching state * thread 2: needs to unpark a thread, but skip because there is a searching worker. * thread 1: exits searching state w/o seeing thread 2's work. 
Because this should be a rare condition, Tokio solves this by always unparking a new worker when the current worker: * is the last searching worker * is transitioning out of searching * has work to process. When the newly unparked worker wakes, if the race condition described above happened, "thread 2"'s work will be found. Otherwise, it will just go back to sleep. Now we come to the issue at hand. A bug incorrectly set a worker to "searching" when the I/O driver unparked the thread. In a situation where the scheduler was only partially under load and is able to operate with 1 active worker, the I/O driver would unpark the thread when new I/O events are received, incorrectly transition it to "searching", find new work generated by inbound I/O events, incorrectly transition itself from the last searcher -> no searchers, and unpark a new thread. This new thread would wake, find no work and go back to sleep. Note that, when the scheduler is fully saturated, this change will make no impact as most workers are always unparked and the optimization to avoid unparking threads described at the top apply. --- tokio/src/runtime/thread_pool/idle.rs | 16 ++++++++++------ tokio/src/runtime/thread_pool/worker.rs | 12 ++++++++---- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/tokio/src/runtime/thread_pool/idle.rs b/tokio/src/runtime/thread_pool/idle.rs index 6b7ee1289ce..a57bf6a0b13 100644 --- a/tokio/src/runtime/thread_pool/idle.rs +++ b/tokio/src/runtime/thread_pool/idle.rs @@ -64,7 +64,7 @@ impl Idle { // A worker should be woken up, atomically increment the number of // searching workers as well as the number of unparked workers. - State::unpark_one(&self.state); + State::unpark_one(&self.state, 1); // Get the worker to unpark let ret = sleepers.pop(); @@ -111,7 +111,9 @@ impl Idle { /// Unpark a specific worker. This happens if tasks are submitted from /// within the worker's park routine. 
- pub(super) fn unpark_worker_by_id(&self, worker_id: usize) { + /// + /// Returns `true` if the worker was parked before calling the method. + pub(super) fn unpark_worker_by_id(&self, worker_id: usize) -> bool { let mut sleepers = self.sleepers.lock(); for index in 0..sleepers.len() { @@ -119,11 +121,13 @@ impl Idle { sleepers.swap_remove(index); // Update the state accordingly while the lock is held. - State::unpark_one(&self.state); + State::unpark_one(&self.state, 0); - return; + return true; } } + + false } /// Returns `true` if `worker_id` is contained in the sleep set. @@ -151,8 +155,8 @@ impl State { State(cell.load(ordering)) } - fn unpark_one(cell: &AtomicUsize) { - cell.fetch_add(1 | (1 << UNPARK_SHIFT), SeqCst); + fn unpark_one(cell: &AtomicUsize, num_searching: usize) { + cell.fetch_add(num_searching | (1 << UNPARK_SHIFT), SeqCst); } fn inc_num_searching(cell: &AtomicUsize, ordering: Ordering) { diff --git a/tokio/src/runtime/thread_pool/worker.rs b/tokio/src/runtime/thread_pool/worker.rs index 27d0d5e7d32..f499e14b834 100644 --- a/tokio/src/runtime/thread_pool/worker.rs +++ b/tokio/src/runtime/thread_pool/worker.rs @@ -511,8 +511,9 @@ impl Context { // Place `park` back in `core` core.park = Some(park); - // If there are tasks available to steal, notify a worker - if core.run_queue.is_stealable() { + // If there are tasks available to steal, but this worker is not + // looking for tasks to steal, notify another worker. + if !core.is_searching && core.run_queue.is_stealable() { self.worker.shared.notify_parked(); } @@ -621,8 +622,11 @@ impl Core { // If a task is in the lifo slot, then we must unpark regardless of // being notified if self.lifo_slot.is_some() { - worker.shared.idle.unpark_worker_by_id(worker.index); - self.is_searching = true; + // When a worker wakes, it should only transition to the "searching" + // state when the wake originates from another worker *or* a new task + // is pushed. 
We do *not* want the worker to transition to "searching" + // when it wakes when the I/O driver receives new events. + self.is_searching = !worker.shared.idle.unpark_worker_by_id(worker.index); return true; }