From 952af0109a73901d38d8b60b3409d2a108d41d75 Mon Sep 17 00:00:00 2001 From: Milo <50248166+Milo123459@users.noreply.github.com> Date: Sat, 28 May 2022 09:08:18 +0100 Subject: [PATCH 01/44] bump scheduled-thread-pool --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index a6309279..cc5cc16c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -44,7 +44,7 @@ crossbeam-utils = "0.8" num_cpus = "1.13" once_cell = "1.7" parking_lot = "0.12" -scheduled-thread-pool = "0.2.5" +scheduled-thread-pool = "0.2.6" smallvec = "1.8" tagptr = "0.2" thiserror = "1.0" From cb9bc3cf94d095e2a25411ddee58a5b105838450 Mon Sep 17 00:00:00 2001 From: Milo <50248166+Milo123459@users.noreply.github.com> Date: Sat, 28 May 2022 09:13:04 +0100 Subject: [PATCH 02/44] apply visibility changes --- Cargo.toml | 10 ++++++---- src/lib.rs | 6 +++--- src/sync/builder.rs | 1 + src/sync/cache.rs | 1 + src/sync/segment.rs | 1 + src/sync/value_initializer.rs | 2 +- 6 files changed, 13 insertions(+), 8 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index cc5cc16c..b6e989a6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,9 @@ build = "build.rs" default = ["atomic64", "quanta"] # Enable this feature to use `moka::future::Cache`. -future = ["async-io", "async-lock", "futures-util"] +future = ["async-io", "async-lock", "futures-util", "crossbeam-epoch", "thiserror", "uuid"] + +sync = ["crossbeam-epoch", "thiserror", "uuid"] # Enable this feature to use **experimental** `moka::dash::Cache`. # Please note that the APIs for this feature will be frequently changed in next @@ -39,7 +41,7 @@ unstable-debug-counters = ["future"] [dependencies] crossbeam-channel = "0.5.4" -crossbeam-epoch = "0.8.2" +crossbeam-epoch = { version = "0.8.2", optional = true } crossbeam-utils = "0.8" num_cpus = "1.13" once_cell = "1.7" @@ -47,8 +49,8 @@ parking_lot = "0.12" scheduled-thread-pool = "0.2.6" smallvec = "1.8" tagptr = "0.2" -thiserror = "1.0" -uuid = { version = "0.8", features = ["v4"] } +thiserror = { version = "1.0", optional = true } +uuid = { version = "0.8", features = ["v4"], optional = true } # Opt-out serde and stable_deref_trait features # https://github.com/Manishearth/triomphe/pull/5 diff --git a/src/lib.rs b/src/lib.rs index 6227133c..2a3154b4 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -165,14 +165,14 @@ pub mod dash; #[cfg(feature = "future")] #[cfg_attr(docsrs, doc(cfg(feature = "future")))] pub mod future; - +#[cfg(any(feature="sync", feature="future"))] pub mod sync; pub mod unsync; - +#[cfg(any(feature="sync", feature="future"))] pub(crate) mod cht; pub(crate) mod common; pub(crate) mod policy; - +#[cfg(any(feature="sync", feature="future"))] pub use common::error::PredicateError; pub use policy::Policy; diff --git a/src/sync/builder.rs b/src/sync/builder.rs index d5753efd..dff1e3e1 100644 --- a/src/sync/builder.rs +++ b/src/sync/builder.rs @@ -41,6 +41,7 @@ use std::{ /// // after 30 minutes (TTL) from the insert(). 
/// ``` /// +#[cfg(feature="sync")] #[must_use] pub struct CacheBuilder { max_capacity: Option, diff --git a/src/sync/cache.rs b/src/sync/cache.rs index 87af4fcd..3493f201 100644 --- a/src/sync/cache.rs +++ b/src/sync/cache.rs @@ -227,6 +227,7 @@ use std::{ /// [build-with-hasher-method]: ./struct.CacheBuilder.html#method.build_with_hasher /// [ahash-crate]: https://crates.io/crates/ahash /// +#[cfg(feature="sync")] pub struct Cache { base: BaseCache, value_initializer: Arc>, diff --git a/src/sync/segment.rs b/src/sync/segment.rs index 326a8439..e0d01a8f 100644 --- a/src/sync/segment.rs +++ b/src/sync/segment.rs @@ -24,6 +24,7 @@ use std::{ /// /// [cache-struct]: ./struct.Cache.html /// +#[cfg(feature="sync")] pub struct SegmentedCache { inner: Arc>, } diff --git a/src/sync/value_initializer.rs b/src/sync/value_initializer.rs index 154f4af8..fe035794 100644 --- a/src/sync/value_initializer.rs +++ b/src/sync/value_initializer.rs @@ -17,7 +17,7 @@ pub(crate) enum InitResult { ReadExisting(V), InitErr(Arc), } - +#[cfg(feature="sync")] pub(crate) struct ValueInitializer { // TypeId is the type ID of the concrete error type of generic type E in // try_init_or_read(). We use the type ID as a part of the key to ensure that From 3410c0460e95b76e69c570788c96f27455eee4a4 Mon Sep 17 00:00:00 2001 From: Milo <50248166+Milo123459@users.noreply.github.com> Date: Sat, 28 May 2022 09:41:00 +0100 Subject: [PATCH 03/44] fixes? --- src/common/error.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/common/error.rs b/src/common/error.rs index 7fa5a3d6..2c6ef3f1 100644 --- a/src/common/error.rs +++ b/src/common/error.rs @@ -2,6 +2,7 @@ /// [`Cache#invalidate_entries_if`][invalidate-if] method. /// /// [invalidate-if]: ./sync/struct.Cache.html#method.invalidate_entries_if +#[cfg(any(feature="sync", feature="future"))] #[derive(thiserror::Error, Debug)] pub enum PredicateError { /// This cache does not have a necessary configuration enabled to support From 264f048b6374ec15f642706cae035a0e21ba6770 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Sun, 29 May 2022 13:18:16 +0800 Subject: [PATCH 04/44] Introduce a new crate feature called `sync` - The `sync` feature: - It controls the availability of the `sync` module. (e.g. `sync::Cache` and `sync::SegmentedCache`) - It is enabled by default. - `future::Cache`: - The iterator type for `future::Cache` has been changed from `sync::Iter` to `future::Iter`. - The return type of `future::Cache::invalidate_entries_if` method has been changed from `Result` to `Result`. - Update the CI configurations for the `sync` feature. 
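(Illustrative sketch, not part of the patch: with this change, iterating a `future::Cache` works the same way as before, but the iterator is now `moka::future::Iter` instead of `moka::sync::Iter`. The snippet assumes the `future` feature is enabled and a Tokio runtime is available; `main` and the concrete key/value types are arbitrary choices for illustration.)

```rust
use moka::future::Cache;

#[tokio::main]
async fn main() {
    let cache: Cache<String, u32> = Cache::new(100);
    cache.insert("a".to_string(), 1).await;
    cache.insert("b".to_string(), 2).await;

    // `iter()` remains a synchronous call even on the async cache; items are
    // still `(Arc<K>, V)` pairs, only the iterator's type path has moved.
    for (key, value) in cache.iter() {
        println!("{} => {}", key, value);
    }
}
```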
--- .circleci/config.yml | 2 +- .github/workflows/CI.yml | 22 +++++++++------ .github/workflows/CIQuantaDisabled.yml | 20 +++++++++----- .github/workflows/LinuxCrossCompileTest.yml | 4 +-- .github/workflows/Skeptic.yml | 10 +++---- Cargo.toml | 18 +++++++------ src/common.rs | 4 +-- src/common/concurrent.rs | 2 +- src/common/concurrent/thread_pool.rs | 8 +++--- src/common/deque.rs | 2 +- src/common/error.rs | 1 - src/common/time.rs | 2 +- src/common/time/clock_quanta.rs | 3 +-- src/future.rs | 30 +++++++++++++++++++++ src/future/cache.rs | 8 +++--- src/lib.rs | 9 ++++--- src/policy.rs | 4 +-- src/sync/builder.rs | 1 - src/sync/cache.rs | 1 - src/sync/segment.rs | 1 - src/sync/value_initializer.rs | 1 - src/sync_base/base_cache.rs | 2 +- src/sync_base/iter.rs | 2 +- 23 files changed, 98 insertions(+), 59 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 59cfc0e7..faa6b9b8 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -12,7 +12,7 @@ jobs: --env RUSTFLAGS='--cfg circleci' \ xd009642/tarpaulin \ cargo tarpaulin -v \ - --features 'future, dash' \ + --features 'sync, future, dash' \ --ciserver circle-ci \ --coveralls ${COVERALLS_TOKEN} \ --timeout 600 \ diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 2a07527e..8627782e 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -59,25 +59,31 @@ jobs: cargo update -p dashmap --precise 5.2.0 cargo update -p hashbrown --precise 0.11.2 - - name: Build (no features) + - name: Run tests (debug, sync feature) uses: actions-rs/cargo@v1 with: - command: build + command: test --features sync - - name: Run tests (release, no features) + - name: Run tests (release, sync feature) uses: actions-rs/cargo@v1 with: command: test - args: --release + args: --release --features sync - - name: Run tests (future feature) + - name: Run tests (future feature, but no sync feature) uses: actions-rs/cargo@v1 with: command: test - args: --features future + args: --no-default-features --features 'future, atomic64, quanta' - - name: Run tests (dash feature) + - name: Run tests (future and sync features) uses: actions-rs/cargo@v1 with: command: test - args: --features dash + args: --features 'future, sync' + + - name: Run tests (dash feature, but no sync feature) + uses: actions-rs/cargo@v1 + with: + command: test + args: --no-default-features --features 'dash, atomic64, quanta' diff --git a/.github/workflows/CIQuantaDisabled.yml b/.github/workflows/CIQuantaDisabled.yml index 78b7220f..21ed22fd 100644 --- a/.github/workflows/CIQuantaDisabled.yml +++ b/.github/workflows/CIQuantaDisabled.yml @@ -59,25 +59,31 @@ jobs: cargo update -p dashmap --precise 5.2.0 cargo update -p hashbrown --precise 0.11.2 - - name: Build (no quanta feature) + - name: Run tests (debug, but no quanta feature) uses: actions-rs/cargo@v1 with: - command: build - args: --no-default-features --features atomic64 + command: test + args: --no-default-features --features 'sync, atomic64' - - name: Run tests (release, no quanta feature) + - name: Run tests (release, but no quanta feature) uses: actions-rs/cargo@v1 with: command: test - args: --release --no-default-features --features atomic64 + args: --release --no-default-features --features 'sync, atomic64' - - name: Run tests (future feature, but no quanta feature) + - name: Run tests (future feature, but no quanta and sync features) uses: actions-rs/cargo@v1 with: command: test args: --no-default-features --features 'future, atomic64' - - name: Run tests (dash feature, but no 
quanta feature) + - name: Run tests (future feature, but no quanta feature) + uses: actions-rs/cargo@v1 + with: + command: test + args: --no-default-features --features 'sync, future, atomic64' + + - name: Run tests (dash feature, but no quanta and sync features) uses: actions-rs/cargo@v1 with: command: test diff --git a/.github/workflows/LinuxCrossCompileTest.yml b/.github/workflows/LinuxCrossCompileTest.yml index ebe90a96..64840b71 100644 --- a/.github/workflows/LinuxCrossCompileTest.yml +++ b/.github/workflows/LinuxCrossCompileTest.yml @@ -59,12 +59,12 @@ jobs: with: command: clean - - name: Run tests (no features) + - name: Run tests (sync feature) uses: actions-rs/cargo@v1 with: use-cross: true command: test - args: --release --target ${{ matrix.platform.target }} ${{ matrix.platform.cargo-opts }} + args: --release --features sync --target ${{ matrix.platform.target }} ${{ matrix.platform.cargo-opts }} - name: Run tests (future feature) uses: actions-rs/cargo@v1 diff --git a/.github/workflows/Skeptic.yml b/.github/workflows/Skeptic.yml index 7c021aca..686add75 100644 --- a/.github/workflows/Skeptic.yml +++ b/.github/workflows/Skeptic.yml @@ -50,23 +50,23 @@ jobs: uses: actions-rs/cargo@v1 with: command: test - args: --release --features 'future, dash' + args: --release --features 'sync, future, dash' env: RUSTFLAGS: '--cfg skeptic' - - name: Run tests (future and dash, without atomic64) + - name: Run tests (sync, future and dash, without atomic64 and quanta) uses: actions-rs/cargo@v1 with: command: test - args: --release --no-default-features --features 'future, dash' + args: --release --no-default-features --features 'sync, future, dash' env: RUSTFLAGS: '--cfg skeptic' - - name: Run compile error tests (future and dash features, trybuild) + - name: Run compile error tests (sync, future and dash features, trybuild) uses: actions-rs/cargo@v1 if: ${{ matrix.rust == 'stable' }} with: command: test - args: ui_trybuild --release --features 'future, dash' + args: ui_trybuild --release --features 'sync, future, dash' env: RUSTFLAGS: '--cfg trybuild' diff --git a/Cargo.toml b/Cargo.toml index 29c26770..71f625d4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,13 +16,15 @@ exclude = [".circleci", ".devcontainer", ".github", ".gitpod.yml", ".vscode"] build = "build.rs" [features] -default = ["atomic64", "quanta"] - -# Enable this feature to use `moka::future::Cache`. -future = ["async-io", "async-lock", "futures-util", "crossbeam-epoch", "thiserror", "uuid"] +default = ["sync", "atomic64", "quanta"] +# This feature is enabled by default. Disable it when you do not need +# `moka::sync::{Cache, SegmentedCache}` sync = ["crossbeam-epoch", "thiserror", "uuid"] +# Enable this feature to use `moka::future::Cache`. +future = ["crossbeam-epoch", "thiserror", "uuid", "async-io", "async-lock", "futures-util"] + # Enable this feature to use **experimental** `moka::dash::Cache`. # Please note that the APIs for this feature will be frequently changed in next # few releases. 
@@ -41,7 +43,6 @@ unstable-debug-counters = ["future"] [dependencies] crossbeam-channel = "0.5.4" -crossbeam-epoch = { version = "0.8.2", optional = true } crossbeam-utils = "0.8" num_cpus = "1.13" once_cell = "1.7" @@ -49,15 +50,16 @@ parking_lot = "0.12" scheduled-thread-pool = "0.2.6" smallvec = "1.8" tagptr = "0.2" -thiserror = { version = "1.0", optional = true } -uuid = { version = "0.8", features = ["v4"], optional = true } # Opt-out serde and stable_deref_trait features # https://github.com/Manishearth/triomphe/pull/5 triomphe = { version = "0.1", default-features = false } -# Optional dependencies (quanta, enabled by default) +# Optional dependencies (enabled by default) +crossbeam-epoch = { version = "0.8.2", optional = true } quanta = { version = "0.10.0", optional = true } +thiserror = { version = "1.0", optional = true } +uuid = { version = "0.8", features = ["v4"], optional = true } # Optional dependencies (dashmap) dashmap = { version = "5.2", optional = true } diff --git a/src/common.rs b/src/common.rs index 0e3fe59d..47d49927 100644 --- a/src/common.rs +++ b/src/common.rs @@ -1,9 +1,9 @@ use std::convert::TryInto; -// #[cfg(any(feature = "sync", feature = "future", feature = "dash"))] +#[cfg(any(feature = "sync", feature = "future", feature = "dash"))] pub(crate) mod concurrent; -// #[cfg(any(feature = "sync", feature = "future"))] +#[cfg(any(feature = "sync", feature = "future"))] pub(crate) mod error; pub(crate) mod builder_utils; diff --git a/src/common/concurrent.rs b/src/common/concurrent.rs index 2deefc8e..34668061 100644 --- a/src/common/concurrent.rs +++ b/src/common/concurrent.rs @@ -77,7 +77,7 @@ impl KeyDate { &self.key } - // #[cfg(any(feature = "sync", feature = "future"))] + #[cfg(any(feature = "sync", feature = "future"))] pub(crate) fn last_modified(&self) -> Option { self.entry_info.last_modified() } diff --git a/src/common/concurrent/thread_pool.rs b/src/common/concurrent/thread_pool.rs index 9baf7fe8..cb121c40 100644 --- a/src/common/concurrent/thread_pool.rs +++ b/src/common/concurrent/thread_pool.rs @@ -5,11 +5,11 @@ use std::{collections::HashMap, sync::Arc}; static REGISTRY: Lazy = Lazy::new(ThreadPoolRegistry::default); -#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] -// #[cfg_attr(any(feature = "sync", feature = "future"), derive(Debug))] +#[derive(Clone, Copy, Hash, PartialEq, Eq)] +#[cfg_attr(any(feature = "sync", feature = "future"), derive(Debug))] pub(crate) enum PoolName { Housekeeper, - // #[cfg(any(feature = "sync", feature = "future"))] + #[cfg(any(feature = "sync", feature = "future"))] Invalidator, } @@ -17,7 +17,7 @@ impl PoolName { fn thread_name_template(&self) -> &'static str { match self { PoolName::Housekeeper => "moka-housekeeper-{}", - // #[cfg(any(feature = "sync", feature = "future"))] + #[cfg(any(feature = "sync", feature = "future"))] PoolName::Invalidator => "moka-invalidator-{}", } } diff --git a/src/common/deque.rs b/src/common/deque.rs index 01e34687..c0c2115c 100644 --- a/src/common/deque.rs +++ b/src/common/deque.rs @@ -116,7 +116,7 @@ impl Deque { self.region } - // #[cfg(any(test, feature = "sync", feature = "future"))] + #[cfg(any(test, feature = "sync", feature = "future"))] pub(crate) fn len(&self) -> usize { self.len } diff --git a/src/common/error.rs b/src/common/error.rs index 2c6ef3f1..7fa5a3d6 100644 --- a/src/common/error.rs +++ b/src/common/error.rs @@ -2,7 +2,6 @@ /// [`Cache#invalidate_entries_if`][invalidate-if] method. 
/// /// [invalidate-if]: ./sync/struct.Cache.html#method.invalidate_entries_if -#[cfg(any(feature="sync", feature="future"))] #[derive(thiserror::Error, Debug)] pub enum PredicateError { /// This cache does not have a necessary configuration enabled to support diff --git a/src/common/time.rs b/src/common/time.rs index 4f51258b..163955d2 100644 --- a/src/common/time.rs +++ b/src/common/time.rs @@ -7,7 +7,7 @@ pub(crate) mod clock; pub(crate) use clock::Clock; #[cfg(test)] -// #[cfg(all(test, feature = "sync"))] +#[cfg(all(test, feature = "sync"))] pub(crate) use clock::Mock; /// a wrapper type over Instant to force checked additions and prevent diff --git a/src/common/time/clock_quanta.rs b/src/common/time/clock_quanta.rs index 6bb71ec9..b26f04df 100644 --- a/src/common/time/clock_quanta.rs +++ b/src/common/time/clock_quanta.rs @@ -1,6 +1,5 @@ pub(crate) type Clock = quanta::Clock; pub(crate) type Instant = quanta::Instant; -#[cfg(test)] -// #[cfg(all(test, feature = "sync"))] +#[cfg(all(test, feature = "sync"))] pub(crate) type Mock = quanta::Mock; diff --git a/src/future.rs b/src/future.rs index 3794cddc..04319ccf 100644 --- a/src/future.rs +++ b/src/future.rs @@ -3,6 +3,8 @@ //! //! To use this module, enable a crate feature called "future". +use std::{hash::Hash, sync::Arc}; + mod builder; mod cache; mod value_initializer; @@ -12,6 +14,34 @@ pub use { cache::{BlockingOp, Cache}, }; +/// The type of the unique ID to identify a predicate used by +/// [`Cache#invalidate_entries_if`][invalidate-if] method. +/// +/// A `PredicateId` is a `String` of UUID (version 4). +/// +/// [invalidate-if]: ./struct.Cache.html#method.invalidate_entries_if +pub type PredicateId = String; + +pub struct Iter<'i, K, V>(crate::sync_base::iter::Iter<'i, K, V>); + +impl<'i, K, V> Iter<'i, K, V> { + pub(crate) fn new(inner: crate::sync_base::iter::Iter<'i, K, V>) -> Self { + Self(inner) + } +} + +impl<'i, K, V> Iterator for Iter<'i, K, V> +where + K: Eq + Hash + Send + Sync + 'static, + V: Clone + Send + Sync + 'static, +{ + type Item = (Arc, V); + + fn next(&mut self) -> Option { + self.0.next() + } +} + /// Provides extra methods that will be useful for testing. pub trait ConcurrentCacheExt { /// Performs any pending maintenance operations needed by the cache. diff --git a/src/future/cache.rs b/src/future/cache.rs index 68d85cd4..1ebe432b 100644 --- a/src/future/cache.rs +++ b/src/future/cache.rs @@ -1,6 +1,6 @@ use super::{ value_initializer::{InitResult, ValueInitializer}, - CacheBuilder, ConcurrentCacheExt, + CacheBuilder, ConcurrentCacheExt, Iter, PredicateId, }; use crate::{ common::concurrent::{ @@ -8,7 +8,6 @@ use crate::{ housekeeper::InnerSync, Weigher, WriteOp, }, - sync::{Iter, PredicateId}, sync_base::base_cache::{BaseCache, HouseKeeperArc}, Policy, PredicateError, }; @@ -881,9 +880,10 @@ where /// ``` /// pub fn iter(&self) -> Iter<'_, K, V> { - use crate::sync_base::iter::ScanningGet; + use crate::sync_base::iter::{Iter as InnerIter, ScanningGet}; - Iter::with_single_cache_segment(&self.base, self.base.num_cht_segments()) + let inner = InnerIter::with_single_cache_segment(&self.base, self.base.num_cht_segments()); + Iter::new(inner) } /// Returns a `BlockingOp` for this cache. 
It provides blocking diff --git a/src/lib.rs b/src/lib.rs index 72e4a1b5..b379a61b 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -162,7 +162,7 @@ pub(crate) mod common; pub(crate) mod policy; pub mod unsync; -// #[cfg(any(feature = "sync", feature = "future"))] +#[cfg(any(feature = "sync", feature = "future"))] pub(crate) mod cht; #[cfg(feature = "dash")] @@ -173,13 +173,14 @@ pub mod dash; #[cfg_attr(docsrs, doc(cfg(feature = "future")))] pub mod future; -// #[cfg(feature = "sync")] +#[cfg(feature = "sync")] +#[cfg_attr(docsrs, doc(cfg(feature = "sync")))] pub mod sync; -// #[cfg(any(feature = "sync", feature = "future"))] +#[cfg(any(feature = "sync", feature = "future"))] pub(crate) mod sync_base; -// #[cfg(any(feature = "sync", feature = "future"))] +#[cfg(any(feature = "sync", feature = "future"))] pub use common::error::PredicateError; pub use policy::Policy; diff --git a/src/policy.rs b/src/policy.rs index 4a1d49e4..2c2212e2 100644 --- a/src/policy.rs +++ b/src/policy.rs @@ -29,7 +29,7 @@ impl Policy { self.max_capacity } - // #[cfg(feature = "sync")] + #[cfg(feature = "sync")] pub(crate) fn set_max_capacity(&mut self, capacity: Option) { self.max_capacity = capacity; } @@ -39,7 +39,7 @@ impl Policy { self.num_segments } - // #[cfg(feature = "sync")] + #[cfg(feature = "sync")] pub(crate) fn set_num_segments(&mut self, num: usize) { self.num_segments = num; } diff --git a/src/sync/builder.rs b/src/sync/builder.rs index 319493f6..8e284f28 100644 --- a/src/sync/builder.rs +++ b/src/sync/builder.rs @@ -41,7 +41,6 @@ use std::{ /// // after 30 minutes (TTL) from the insert(). /// ``` /// -#[cfg(feature="sync")] #[must_use] pub struct CacheBuilder { max_capacity: Option, diff --git a/src/sync/cache.rs b/src/sync/cache.rs index f57b9e31..df8a81c4 100644 --- a/src/sync/cache.rs +++ b/src/sync/cache.rs @@ -236,7 +236,6 @@ use std::{ /// [build-with-hasher-method]: ./struct.CacheBuilder.html#method.build_with_hasher /// [ahash-crate]: https://crates.io/crates/ahash /// -#[cfg(feature="sync")] pub struct Cache { base: BaseCache, value_initializer: Arc>, diff --git a/src/sync/segment.rs b/src/sync/segment.rs index 76eeeb69..c4417754 100644 --- a/src/sync/segment.rs +++ b/src/sync/segment.rs @@ -24,7 +24,6 @@ use std::{ /// /// [cache-struct]: ./struct.Cache.html /// -#[cfg(feature="sync")] pub struct SegmentedCache { inner: Arc>, } diff --git a/src/sync/value_initializer.rs b/src/sync/value_initializer.rs index fe035794..2685fd8a 100644 --- a/src/sync/value_initializer.rs +++ b/src/sync/value_initializer.rs @@ -17,7 +17,6 @@ pub(crate) enum InitResult { ReadExisting(V), InitErr(Arc), } -#[cfg(feature="sync")] pub(crate) struct ValueInitializer { // TypeId is the type ID of the concrete error type of generic type E in // try_init_or_read(). 
We use the type ID as a part of the key to ensure that diff --git a/src/sync_base/base_cache.rs b/src/sync_base/base_cache.rs index 1876fded..dd6e7e6a 100644 --- a/src/sync_base/base_cache.rs +++ b/src/sync_base/base_cache.rs @@ -1,6 +1,7 @@ use super::{ invalidator::{GetOrRemoveEntry, InvalidationResult, Invalidator, KeyDateLite, PredicateFun}, iter::ScanningGet, + PredicateId, }; use crate::{ @@ -23,7 +24,6 @@ use crate::{ time::{CheckedTimeOps, Clock, Instant}, CacheRegion, }, - sync::PredicateId, Policy, PredicateError, }; diff --git a/src/sync_base/iter.rs b/src/sync_base/iter.rs index 7110df46..c52d72d4 100644 --- a/src/sync_base/iter.rs +++ b/src/sync_base/iter.rs @@ -40,7 +40,7 @@ impl<'i, K, V> Iter<'i, K, V> { } } - // #[cfg(feature = "sync")] + #[cfg(feature = "sync")] pub(crate) fn with_multiple_cache_segments( cache_segments: Box<[&'i dyn ScanningGet]>, num_cht_segments: usize, From abdace208e51d81d87a77add0034edb3d2b7e33c Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Sun, 29 May 2022 13:29:04 +0800 Subject: [PATCH 05/44] Fix a CI job --- .github/workflows/CI.yml | 3 ++- src/sync/value_initializer.rs | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 8627782e..97f9d654 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -62,7 +62,8 @@ jobs: - name: Run tests (debug, sync feature) uses: actions-rs/cargo@v1 with: - command: test --features sync + command: test + args: --features sync - name: Run tests (release, sync feature) uses: actions-rs/cargo@v1 diff --git a/src/sync/value_initializer.rs b/src/sync/value_initializer.rs index 2685fd8a..154f4af8 100644 --- a/src/sync/value_initializer.rs +++ b/src/sync/value_initializer.rs @@ -17,6 +17,7 @@ pub(crate) enum InitResult { ReadExisting(V), InitErr(Arc), } + pub(crate) struct ValueInitializer { // TypeId is the type ID of the concrete error type of generic type E in // try_init_or_read(). We use the type ID as a part of the key to ensure that From 3f28855069b90317a89345970963baddd36ccbde Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Tue, 31 May 2022 00:09:15 +0800 Subject: [PATCH 06/44] Support notification on eviction (WIP) Add support for notification on eviction to the following caches: - `sync::Cache` - `sync::SegmentedCache` - `future::Cache` Details: - Add `eviction_listener` method to the cache builders. - Add `notification` module with `RemovalCause` enum. - Add internal `sync_base::removal_notifier` module. - Implement some of the notifications. - Add a unit test case to `sync::Cache` for the implemented notifications. 
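(Usage sketch, not part of the patch: the builder method added here accepts a closure of type `Fn(Arc<K>, V, RemovalCause) + Send + Sync + 'static`. The example below mirrors the unit test added in this commit; `main`, the concrete key/value types, and the one-second sleep are illustrative assumptions.)

```rust
use moka::{notification::RemovalCause, sync::Cache};
use std::sync::Arc;

fn main() {
    // The eviction listener receives the key, the removed value, and the cause.
    let listener = |key: Arc<char>, value: &'static str, cause: RemovalCause| {
        println!("removed {} -> {} ({:?})", key, value, cause);
    };

    let cache: Cache<char, &'static str> = Cache::builder()
        .max_capacity(3)
        .eviction_listener(listener)
        .build();

    cache.insert('a', "alice");
    // An explicit `invalidate` is reported with `RemovalCause::Explicit`.
    cache.invalidate(&'a');

    // Notifications are delivered from the background `moka-notifier` thread
    // pool, so give it a moment to run before the cache is dropped.
    std::thread::sleep(std::time::Duration::from_secs(1));
}
```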
--- src/common/concurrent/thread_pool.rs | 4 + src/future/builder.rs | 21 ++- src/future/cache.rs | 8 +- src/lib.rs | 4 + src/notification.rs | 38 +++++ src/sync/builder.rs | 25 +++- src/sync/cache.rs | 69 ++++++++- src/sync/segment.rs | 8 +- src/sync_base.rs | 1 + src/sync_base/base_cache.rs | 88 ++++++++++- src/sync_base/removal_notifier.rs | 209 +++++++++++++++++++++++++++ 11 files changed, 462 insertions(+), 13 deletions(-) create mode 100644 src/notification.rs create mode 100644 src/sync_base/removal_notifier.rs diff --git a/src/common/concurrent/thread_pool.rs b/src/common/concurrent/thread_pool.rs index cb121c40..f900a15f 100644 --- a/src/common/concurrent/thread_pool.rs +++ b/src/common/concurrent/thread_pool.rs @@ -11,6 +11,8 @@ pub(crate) enum PoolName { Housekeeper, #[cfg(any(feature = "sync", feature = "future"))] Invalidator, + #[cfg(any(feature = "sync", feature = "future"))] + RemovalNotifier, } impl PoolName { @@ -19,6 +21,8 @@ impl PoolName { PoolName::Housekeeper => "moka-housekeeper-{}", #[cfg(any(feature = "sync", feature = "future"))] PoolName::Invalidator => "moka-invalidator-{}", + #[cfg(any(feature = "sync", feature = "future"))] + PoolName::RemovalNotifier => "moka-notifier-{}", } } } diff --git a/src/future/builder.rs b/src/future/builder.rs index 5cb8b001..41507693 100644 --- a/src/future/builder.rs +++ b/src/future/builder.rs @@ -1,5 +1,8 @@ use super::Cache; -use crate::common::{builder_utils, concurrent::Weigher}; +use crate::{ + common::{builder_utils, concurrent::Weigher}, + notification::{EvictionListener, RemovalCause}, +}; use std::{ collections::hash_map::RandomState, @@ -8,6 +11,7 @@ use std::{ sync::Arc, time::Duration, }; +// use parking_lot::Mutex; /// Builds a [`Cache`][cache-struct] with various configuration knobs. /// @@ -54,6 +58,7 @@ pub struct CacheBuilder { max_capacity: Option, initial_capacity: Option, weigher: Option>, + eviction_listener: Option>, time_to_live: Option, time_to_idle: Option, invalidator_enabled: bool, @@ -70,6 +75,7 @@ where max_capacity: None, initial_capacity: None, weigher: None, + eviction_listener: None, time_to_live: None, time_to_idle: None, invalidator_enabled: false, @@ -107,6 +113,7 @@ where self.initial_capacity, build_hasher, self.weigher, + self.eviction_listener, self.time_to_live, self.time_to_idle, self.invalidator_enabled, @@ -130,6 +137,7 @@ where self.initial_capacity, hasher, self.weigher, + self.eviction_listener, self.time_to_live, self.time_to_idle, self.invalidator_enabled, @@ -165,6 +173,17 @@ impl CacheBuilder { } } + pub fn eviction_listener( + self, + listener: impl Fn(Arc, V, RemovalCause) + Send + Sync + 'static, + ) -> Self { + Self { + // eviction_listener: Some(Arc::new(Mutex::new(listener))), + eviction_listener: Some(Arc::new(listener)), + ..self + } + } + /// Sets the time to live of the cache. 
/// /// A cached entry will be expired after the specified duration past from diff --git a/src/future/cache.rs b/src/future/cache.rs index 1ebe432b..ba798f36 100644 --- a/src/future/cache.rs +++ b/src/future/cache.rs @@ -8,6 +8,7 @@ use crate::{ housekeeper::InnerSync, Weigher, WriteOp, }, + notification::EvictionListener, sync_base::base_cache::{BaseCache, HouseKeeperArc}, Policy, PredicateError, }; @@ -426,6 +427,7 @@ where None, None, None, + None, false, ) } @@ -445,11 +447,14 @@ where V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { + // https://rust-lang.github.io/rust-clippy/master/index.html#too_many_arguments + #[allow(clippy::too_many_arguments)] pub(crate) fn with_everything( max_capacity: Option, initial_capacity: Option, build_hasher: S, weigher: Option>, + eviction_listener: Option>, time_to_live: Option, time_to_idle: Option, invalidator_enabled: bool, @@ -460,6 +465,7 @@ where initial_capacity, build_hasher.clone(), weigher, + eviction_listener, time_to_live, time_to_idle, invalidator_enabled, @@ -912,7 +918,7 @@ where impl ConcurrentCacheExt for Cache where K: Hash + Eq + Send + Sync + 'static, - V: Send + Sync + 'static, + V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { fn sync(&self) { diff --git a/src/lib.rs b/src/lib.rs index b379a61b..502debd5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -173,6 +173,10 @@ pub mod dash; #[cfg_attr(docsrs, doc(cfg(feature = "future")))] pub mod future; +#[cfg(any(feature = "sync", feature = "future"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "sync", feature = "future"))))] +pub mod notification; + #[cfg(feature = "sync")] #[cfg_attr(docsrs, doc(cfg(feature = "sync")))] pub mod sync; diff --git a/src/notification.rs b/src/notification.rs new file mode 100644 index 00000000..d213ced8 --- /dev/null +++ b/src/notification.rs @@ -0,0 +1,38 @@ +use std::sync::Arc; + +// use parking_lot::Mutex; + +// TODO: Perhaps `Arc` is enough for the most use cases because +// Sync would require captured values to be interior mutable? +// pub(crate) type EvictionListener = +// Arc, V, RemovalCause) + Send + Sync + 'static>>; +pub(crate) type EvictionListener = + Arc, V, RemovalCause) + Send + Sync + 'static>; + +// pub(crate) type EvictionListenerRef<'a, K, V> = &'a mut dyn FnMut(Arc, V, RemovalCause); +pub(crate) type EvictionListenerRef<'a, K, V> = + &'a Arc, V, RemovalCause) + Send + Sync + 'static>; + +// NOTE: Currently, dropping the cache will drop all entries without sending +// notifications. Calling `invalidate_all` method of the cache will trigger +// the notifications, but currently there is no way to know when all entries +// have been invalidated and their notifications have been sent. + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum RemovalCause { + /// The entry's expiration timestamp has passed. + Expired, + /// The entry was manually removed by the user. + Explicit, + /// The entry itself was not actually removed, but its value was replaced by + /// the user. + Replaced, + /// The entry was evicted due to size constraints. 
+ Size, +} + +impl RemovalCause { + pub fn was_evicted(&self) -> bool { + matches!(self, Self::Expired | Self::Size) + } +} diff --git a/src/sync/builder.rs b/src/sync/builder.rs index 8e284f28..29263608 100644 --- a/src/sync/builder.rs +++ b/src/sync/builder.rs @@ -1,5 +1,8 @@ use super::{Cache, SegmentedCache}; -use crate::common::{builder_utils, concurrent::Weigher}; +use crate::{ + common::{builder_utils, concurrent::Weigher}, + notification::{EvictionListener, RemovalCause}, +}; use std::{ collections::hash_map::RandomState, @@ -8,6 +11,7 @@ use std::{ sync::Arc, time::Duration, }; +// use parking_lot::Mutex; /// Builds a [`Cache`][cache-struct] or [`SegmentedCache`][seg-cache-struct] /// with various configuration knobs. @@ -47,6 +51,7 @@ pub struct CacheBuilder { initial_capacity: Option, num_segments: Option, weigher: Option>, + eviction_listener: Option>, time_to_live: Option, time_to_idle: Option, invalidator_enabled: bool, @@ -64,6 +69,7 @@ where initial_capacity: None, num_segments: None, weigher: None, + eviction_listener: None, time_to_live: None, time_to_idle: None, invalidator_enabled: false, @@ -102,6 +108,7 @@ where initial_capacity: self.initial_capacity, num_segments: Some(num_segments), weigher: None, + eviction_listener: None, time_to_live: self.time_to_live, time_to_idle: self.time_to_idle, invalidator_enabled: self.invalidator_enabled, @@ -127,6 +134,7 @@ where self.initial_capacity, build_hasher, self.weigher, + self.eviction_listener, self.time_to_live, self.time_to_idle, self.invalidator_enabled, @@ -153,6 +161,7 @@ where self.initial_capacity, hasher, self.weigher, + self.eviction_listener, self.time_to_live, self.time_to_idle, self.invalidator_enabled, @@ -184,6 +193,7 @@ where self.num_segments.unwrap(), build_hasher, self.weigher, + self.eviction_listener, self.time_to_live, self.time_to_idle, self.invalidator_enabled, @@ -211,6 +221,7 @@ where self.num_segments.unwrap(), hasher, self.weigher, + self.eviction_listener, self.time_to_live, self.time_to_idle, self.invalidator_enabled, @@ -246,6 +257,18 @@ impl CacheBuilder { } } + pub fn eviction_listener( + self, + // listener: impl FnMut(Arc, V, RemovalCause) + Send + Sync + 'static, + listener: impl Fn(Arc, V, RemovalCause) + Send + Sync + 'static, + ) -> Self { + Self { + // eviction_listener: Some(Arc::new(Mutex::new(listener))), + eviction_listener: Some(Arc::new(listener)), + ..self + } + } + /// Sets the time to live of the cache. 
/// /// A cached entry will be expired after the specified duration past from diff --git a/src/sync/cache.rs b/src/sync/cache.rs index df8a81c4..c22f67c1 100644 --- a/src/sync/cache.rs +++ b/src/sync/cache.rs @@ -8,6 +8,7 @@ use crate::{ housekeeper::InnerSync, Weigher, WriteOp, }, + notification::{EvictionListener, RemovalCause}, sync::{Iter, PredicateId}, sync_base::{ base_cache::{BaseCache, HouseKeeperArc}, @@ -370,6 +371,7 @@ where None, None, None, + None, false, ) } @@ -389,11 +391,14 @@ where V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { + // https://rust-lang.github.io/rust-clippy/master/index.html#too_many_arguments + #[allow(clippy::too_many_arguments)] pub(crate) fn with_everything( max_capacity: Option, initial_capacity: Option, build_hasher: S, weigher: Option>, + eviction_listener: Option>, time_to_live: Option, time_to_idle: Option, invalidator_enabled: bool, @@ -404,6 +409,7 @@ where initial_capacity, build_hasher.clone(), weigher, + eviction_listener, time_to_live, time_to_idle, invalidator_enabled, @@ -762,6 +768,10 @@ where Q: Hash + Eq + ?Sized, { if let Some(kv) = self.base.remove_entry(key, hash) { + if self.base.is_removal_notifier_enabled() { + self.base + .notify_single_removal(&kv.key, &kv.entry, RemovalCause::Explicit); + } let op = WriteOp::Remove(kv); let hk = self.base.housekeeper.as_ref(); Self::schedule_write_op(&self.base.write_op_ch, op, hk).expect("Failed to remove"); @@ -894,7 +904,7 @@ where impl ConcurrentCacheExt for Cache where K: Hash + Eq + Send + Sync + 'static, - V: Send + Sync + 'static, + V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { fn sync(&self) { @@ -988,8 +998,10 @@ where // To see the debug prints, run test as `cargo test -- --nocapture` #[cfg(test)] mod tests { + use parking_lot::Mutex; + use super::{Cache, ConcurrentCacheExt}; - use crate::common::time::Clock; + use crate::{common::time::Clock, notification::RemovalCause}; use std::{convert::Infallible, sync::Arc, time::Duration}; @@ -1928,6 +1940,59 @@ mod tests { ); } + #[test] + fn test_removal_notifications() { + let notifications = Arc::new(Mutex::new(Vec::new())); + + let n1 = Arc::clone(¬ifications); + let listener = move |k, v, cause| n1.lock().push((k, v, cause)); + + let mut cache = Cache::builder() + .max_capacity(3) + .eviction_listener(listener) + .build(); + cache.reconfigure_for_testing(); + + // Make the cache exterior immutable. + let cache = cache; + + cache.insert('a', "alice"); + cache.invalidate(&'a'); // Notification 0 for 'a' (explicit) + cache.sync(); + assert_eq!(cache.entry_count(), 0); + + cache.insert('b', "bob"); + cache.insert('c', "cathy"); + cache.insert('d', "david"); + cache.sync(); + assert_eq!(cache.entry_count(), 3); + + // This will be rejected due to the size constraint. + cache.insert('e', "emily"); // Notification 1 for 'e' (size) + cache.sync(); + assert_eq!(cache.entry_count(), 3); + + // Raise the popularity of 'e' so it will not be rejected next time. + cache.get(&'e'); + cache.sync(); + + cache.insert('e', "eliza"); // Notification 2 for 'b' (size) + cache.sync(); + assert_eq!(cache.entry_count(), 3); + + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_secs(1)); + + let nx = notifications.lock(); + // dbg!(&*nx); + + // Verify the notifications. 
+ assert_eq!(nx.len(), 3); + assert_eq!(nx[0], (Arc::new('a'), "alice", RemovalCause::Explicit)); + assert_eq!(nx[1], (Arc::new('e'), "emily", RemovalCause::Size)); + assert_eq!(nx[2], (Arc::new('b'), "bob", RemovalCause::Size)); + } + #[test] fn test_debug_format() { let cache = Cache::new(10); diff --git a/src/sync/segment.rs b/src/sync/segment.rs index c4417754..ceedb175 100644 --- a/src/sync/segment.rs +++ b/src/sync/segment.rs @@ -1,6 +1,7 @@ use super::{cache::Cache, CacheBuilder, ConcurrentCacheExt}; use crate::{ common::concurrent::Weigher, + notification::EvictionListener, sync_base::iter::{Iter, ScanningGet}, Policy, PredicateError, }; @@ -102,6 +103,7 @@ where None, None, None, + None, false, ) } @@ -201,6 +203,7 @@ where num_segments: usize, build_hasher: S, weigher: Option>, + eviction_listener: Option>, time_to_live: Option, time_to_idle: Option, invalidator_enabled: bool, @@ -212,6 +215,7 @@ where num_segments, build_hasher, weigher, + eviction_listener, time_to_live, time_to_idle, invalidator_enabled, @@ -489,7 +493,7 @@ where impl ConcurrentCacheExt for SegmentedCache where K: Hash + Eq + Send + Sync + 'static, - V: Send + Sync + 'static, + V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { fn sync(&self) { @@ -576,6 +580,7 @@ where num_segments: usize, build_hasher: S, weigher: Option>, + eviction_listener: Option>, time_to_live: Option, time_to_idle: Option, invalidator_enabled: bool, @@ -596,6 +601,7 @@ where seg_init_capacity, build_hasher.clone(), weigher.as_ref().map(Arc::clone), + eviction_listener.as_ref().map(Arc::clone), time_to_live, time_to_idle, invalidator_enabled, diff --git a/src/sync_base.rs b/src/sync_base.rs index 24c022a3..1447d7e6 100644 --- a/src/sync_base.rs +++ b/src/sync_base.rs @@ -1,6 +1,7 @@ pub(crate) mod base_cache; mod invalidator; pub(crate) mod iter; +mod removal_notifier; /// The type of the unique ID to identify a predicate used by /// [`Cache#invalidate_entries_if`][invalidate-if] method. 
diff --git a/src/sync_base/base_cache.rs b/src/sync_base/base_cache.rs index dd6e7e6a..99e22b7e 100644 --- a/src/sync_base/base_cache.rs +++ b/src/sync_base/base_cache.rs @@ -1,6 +1,7 @@ use super::{ invalidator::{GetOrRemoveEntry, InvalidationResult, Invalidator, KeyDateLite, PredicateFun}, iter::ScanningGet, + removal_notifier::RemovalNotifier, PredicateId, }; @@ -24,6 +25,7 @@ use crate::{ time::{CheckedTimeOps, Clock, Instant}, CacheRegion, }, + notification::{EvictionListener, RemovalCause}, Policy, PredicateError, }; @@ -92,6 +94,23 @@ impl BaseCache { self.inner.weighted_size() } + #[inline] + pub(crate) fn is_removal_notifier_enabled(&self) -> bool { + self.inner.is_removal_notifier_enabled() + } + + pub(crate) fn notify_single_removal( + &self, + key: &Arc, + entry: &TrioArc>, + cause: RemovalCause, + ) where + K: Send + Sync + 'static, + V: Clone + Send + Sync + 'static, + { + self.inner.notify_single_removal(key, entry, cause) + } + #[cfg(feature = "unstable-debug-counters")] pub fn debug_stats(&self) -> CacheDebugStats { self.inner.debug_stats() @@ -104,11 +123,14 @@ where V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { + // https://rust-lang.github.io/rust-clippy/master/index.html#too_many_arguments + #[allow(clippy::too_many_arguments)] pub(crate) fn new( max_capacity: Option, initial_capacity: Option, build_hasher: S, weigher: Option>, + eviction_listener: Option>, time_to_live: Option, time_to_idle: Option, invalidator_enabled: bool, @@ -120,6 +142,7 @@ where initial_capacity, build_hasher, weigher, + eviction_listener, r_rcv, w_rcv, time_to_live, @@ -527,6 +550,7 @@ pub(crate) struct Inner { time_to_idle: Option, valid_after: AtomicInstant, weigher: Option>, + removal_notifier: Option>, invalidator_enabled: bool, invalidator: RwLock>>, has_expiration_clock: AtomicBool, @@ -545,10 +569,15 @@ impl Inner { } #[inline] - pub(crate) fn weighted_size(&self) -> u64 { + fn weighted_size(&self) -> u64 { self.weighted_size.load() } + #[inline] + fn is_removal_notifier_enabled(&self) -> bool { + self.removal_notifier.is_some() + } + #[cfg(feature = "unstable-debug-counters")] pub fn debug_stats(&self) -> CacheDebugStats { let ec = self.entry_count.load(); @@ -578,6 +607,7 @@ where initial_capacity: Option, build_hasher: S, weigher: Option>, + eviction_listener: Option>, read_op_ch: Receiver>, write_op_ch: Receiver>, time_to_live: Option, @@ -593,6 +623,7 @@ where initial_capacity, build_hasher.clone(), ); + let removal_notifier = eviction_listener.map(RemovalNotifier::new); Self { max_capacity: max_capacity.map(|n| n as u64), @@ -609,6 +640,7 @@ where time_to_idle, valid_after: Default::default(), weigher, + removal_notifier, invalidator_enabled, // When enabled, this field will be set later via the set_invalidator method. 
invalidator: RwLock::new(None), @@ -798,7 +830,7 @@ mod batch_size { impl InnerSync for Inner where K: Hash + Eq + Send + Sync + 'static, - V: Send + Sync + 'static, + V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { fn sync(&self, max_repeats: usize) -> Option { @@ -858,6 +890,10 @@ where ); } + if let Some(notifier) = &self.removal_notifier { + notifier.submit_task(); + } + debug_assert_eq!(self.entry_count.load(), current_ec); debug_assert_eq!(self.weighted_size.load(), current_ws); self.entry_count.store(counters.entry_count); @@ -950,7 +986,10 @@ where } } - fn apply_writes(&self, deqs: &mut Deques, count: usize, counters: &mut EvictionCounters) { + fn apply_writes(&self, deqs: &mut Deques, count: usize, counters: &mut EvictionCounters) + where + V: Clone, + { use WriteOp::*; let freq = self.frequency_sketch.read(); let ch = &self.write_op_ch; @@ -985,7 +1024,9 @@ where deqs: &mut Deques, freq: &FrequencySketch, counters: &mut EvictionCounters, - ) { + ) where + V: Clone, + { entry.set_last_accessed(timestamp); entry.set_last_modified(timestamp); @@ -1008,7 +1049,10 @@ where if let Some(max) = self.max_capacity { if new_weight as u64 > max { // The candidate is too big to fit in the cache. Reject it. - self.cache.remove(&Arc::clone(&kh.key), kh.hash); + let removed = self.cache.remove(&Arc::clone(&kh.key), kh.hash); + if let Some(entry) = removed { + self.notify_single_removal(&kh.key, &entry, RemovalCause::Size); + } return; } } @@ -1026,9 +1070,12 @@ where // Try to remove the victims from the cache (hash map). for victim in victim_nodes { let element = unsafe { &victim.as_ref().element }; - if let Some((_vic_key, vic_entry)) = + if let Some((vic_key, vic_entry)) = self.cache.remove_entry(element.key(), element.hash()) { + if self.removal_notifier.is_some() { + self.notify_single_removal(&vic_key, &vic_entry, RemovalCause::Size); + } // And then remove the victim from the deques. Self::handle_remove(deqs, vic_entry, counters); } else { @@ -1047,6 +1094,9 @@ where skipped_nodes = s; // Remove the candidate from the cache (hash map). self.cache.remove(&Arc::clone(&kh.key), kh.hash); + if self.removal_notifier.is_some() { + self.notify_single_removal(&kh.key, &entry, RemovalCause::Size); + } } }; @@ -1251,7 +1301,7 @@ where // Remove the key from the map only when the entry is really // expired. This check is needed because it is possible that the entry in // the map has been updated or deleted but its deque node we checked - // above have not been updated yet. + // above has not been updated yet. 
let maybe_entry = self .cache .remove_if(key, hash, |_, v| is_expired_entry_ao(tti, va, v, now)); @@ -1473,6 +1523,29 @@ where } } +impl Inner +where + K: Send + Sync + 'static, + V: Clone + Send + Sync + 'static, +{ + fn notify_single_removal( + &self, + key: &Arc, + entry: &TrioArc>, + cause: RemovalCause, + ) { + if let Some(notifier) = &self.removal_notifier { + notifier.add_single_notification(key, entry.value.clone(), cause) + } + } + + // fn notify_multiple_removals(&self, entries: Vec>) { + // if let Some(notifier) = &self.removal_notifier { + // notifier.add_multiple_notifications(entries) + // } + // } +} + // // for testing // @@ -1573,6 +1646,7 @@ mod tests { None, None, None, + None, false, ); cache.inner.enable_frequency_sketch_for_testing(); diff --git a/src/sync_base/removal_notifier.rs b/src/sync_base/removal_notifier.rs new file mode 100644 index 00000000..13c60ecd --- /dev/null +++ b/src/sync_base/removal_notifier.rs @@ -0,0 +1,209 @@ +use std::{ + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + time::Duration, +}; + +use crate::{ + common::concurrent::thread_pool::{PoolName, ThreadPool, ThreadPoolRegistry}, + notification::{EvictionListener, EvictionListenerRef, RemovalCause}, +}; + +use crossbeam_channel::{Receiver, Sender}; +use parking_lot::Mutex; + +const CHANNEL_CAPACITY: usize = 1_024; +const SUBMIT_TASK_THRESHOLD: usize = 100; +const MAX_NOTIFICATIONS_PER_TASK: u16 = 5_000; + +pub(crate) struct RemovalNotifier { + snd: Sender>, + state: Arc>, + thread_pool: Arc, +} + +impl Drop for RemovalNotifier { + fn drop(&mut self) { + let state = &self.state; + // Disallow to create and run a notification task by now. + state.shutdown(); + + // Wait for the notification task to finish. (busy loop) + while state.is_running() { + std::thread::sleep(Duration::from_millis(1)); + } + + ThreadPoolRegistry::release_pool(&self.thread_pool); + } +} + +impl RemovalNotifier { + pub(crate) fn new(listener: EvictionListener) -> Self { + let (snd, rcv) = crossbeam_channel::bounded(CHANNEL_CAPACITY); + let thread_pool = ThreadPoolRegistry::acquire_pool(PoolName::RemovalNotifier); + let state = NotifierState { + task_lock: Default::default(), + rcv, + listener, + is_running: Default::default(), + is_shutting_down: Default::default(), + }; + Self { + snd, + state: Arc::new(state), + thread_pool, + } + } +} + +impl RemovalNotifier +where + K: Send + Sync + 'static, + V: Send + Sync + 'static, +{ + pub(crate) fn add_single_notification(&self, key: &Arc, value: V, cause: RemovalCause) { + let entry = RemovedEntries::new_single(key, value, cause); + self.snd.send(entry).unwrap(); + self.submit_task_if_necessary(); + } + + // pub(crate) fn add_multiple_notifications(&self, entries: Vec>) { + // let entries = RemovedEntries::new_multi(entries); + // self.snd.send(entries).unwrap(); // TODO: Error handling? + // self.submit_task_if_necessary(); + // } + + pub(crate) fn submit_task(&self) { + // TODO: Use compare and exchange to ensure it was false. + + if self.state.is_running() { + return; + } + self.state.set_running(true); + + let task = NotificationTask::new(&self.state); + self.thread_pool.pool.execute(move || { + task.execute(); + }); + } + + fn submit_task_if_necessary(&self) { + if self.snd.len() >= SUBMIT_TASK_THRESHOLD && !self.state.is_running() { + self.submit_task(); // TODO: Error handling? 
+ } + } +} + +struct NotificationTask { + state: Arc>, +} + +impl NotificationTask { + fn new(state: &Arc>) -> Self { + Self { + state: Arc::clone(state), + } + } + + fn execute(&self) { + let task_lock = self.state.task_lock.lock(); + // let mut listener = self.state.listener.lock(); + let mut count = 0u16; + + while let Ok(entries) = self.state.rcv.try_recv() { + match entries { + RemovedEntries::Single(entry) => { + // self.notify(&mut *listener, entry); + self.notify(&self.state.listener, entry); + count += 1; + } // RemovedEntries::Multi(entries) => { + // for entry in entries { + // self.notify(&mut *listener, entry); + // count += 1; + + // if self.state.is_shutting_down() { + // break; + // } + // } + // } + } + + if count > MAX_NOTIFICATIONS_PER_TASK || self.state.is_shutting_down() { + break; + } + } + + // std::mem::drop(listener); + std::mem::drop(task_lock); + self.state.set_running(false); + } + + fn notify(&self, listener: EvictionListenerRef<'_, K, V>, entry: RemovedEntry) { + // use std::panic::{catch_unwind, AssertUnwindSafe}; + + let RemovedEntry { key, value, cause } = entry; + listener(key, value, cause); + + // let listener_clo = || listener(key, value, cause); + // match catch_unwind(AssertUnwindSafe(listener_clo)) { + // Ok(_) => todo!(), + // Err(_) => todo!(), + // } + } +} + +struct NotifierState { + task_lock: Mutex<()>, + rcv: Receiver>, + listener: EvictionListener, + is_running: AtomicBool, + is_shutting_down: AtomicBool, +} + +impl NotifierState { + fn is_running(&self) -> bool { + self.is_running.load(Ordering::Acquire) + } + + fn set_running(&self, value: bool) { + self.is_running.store(value, Ordering::Release); + } + + fn is_shutting_down(&self) -> bool { + self.is_shutting_down.load(Ordering::Acquire) + } + + fn shutdown(&self) { + self.is_shutting_down.store(true, Ordering::Release); + } +} + +pub(crate) struct RemovedEntry { + key: Arc, + value: V, + cause: RemovalCause, +} + +impl RemovedEntry { + pub(crate) fn new(key: Arc, value: V, cause: RemovalCause) -> Self { + Self { key, value, cause } + } +} + +enum RemovedEntries { + Single(RemovedEntry), + // Multi(Vec>), +} + +impl RemovedEntries { + fn new_single(key: &Arc, value: V, cause: RemovalCause) -> Self { + let key = Arc::clone(key); + Self::Single(RemovedEntry::new(key, value, cause)) + } + + // fn new_multi(entries: Vec>) -> Self { + // Self::Multi(entries) + // } +} From aecfef164559b7068a8236865e07e1a91855fa14 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Tue, 31 May 2022 07:46:39 +0800 Subject: [PATCH 07/44] Support notification on eviction Enable `RemovalCause::Replaced` events. 
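(Follow-up sketch, not part of the patch: with this change, overwriting an existing key reports the old value to the listener with `RemovalCause::Replaced`, as exercised by the updated unit test. As above, `main`, the types, and the sleep are illustrative assumptions.)

```rust
use moka::{notification::RemovalCause, sync::Cache};
use std::sync::Arc;

fn main() {
    let listener = |key: Arc<char>, old_value: &'static str, cause: RemovalCause| {
        // With this patch, the old value ("david") should arrive here together
        // with `RemovalCause::Replaced`.
        println!("{} was replaced (old value: {}, cause: {:?})", key, old_value, cause);
    };

    let cache: Cache<char, &'static str> = Cache::builder()
        .eviction_listener(listener)
        .build();

    cache.insert('d', "david");
    cache.insert('d', "dennis"); // should queue a ("d", "david", Replaced) notification

    // Wait for the housekeeper and notifier threads to process the event.
    std::thread::sleep(std::time::Duration::from_secs(1));
}
```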
--- src/future/cache.rs | 7 ++++- src/sync/cache.rs | 44 ++++++++++++++++++++----------- src/sync_base/base_cache.rs | 20 +++++++++----- src/sync_base/removal_notifier.rs | 5 ++-- 4 files changed, 51 insertions(+), 25 deletions(-) diff --git a/src/future/cache.rs b/src/future/cache.rs index ba798f36..88d4b6d1 100644 --- a/src/future/cache.rs +++ b/src/future/cache.rs @@ -8,7 +8,7 @@ use crate::{ housekeeper::InnerSync, Weigher, WriteOp, }, - notification::EvictionListener, + notification::{EvictionListener, RemovalCause}, sync_base::base_cache::{BaseCache, HouseKeeperArc}, Policy, PredicateError, }; @@ -762,6 +762,11 @@ where { let hash = self.base.hash(key); if let Some(kv) = self.base.remove_entry(key, hash) { + if self.base.is_removal_notifier_enabled() { + let key = Arc::clone(&kv.key); + self.base + .notify_single_removal(key, &kv.entry, RemovalCause::Explicit); + } let op = WriteOp::Remove(kv); let hk = self.base.housekeeper.as_ref(); Self::schedule_write_op(&self.base.write_op_ch, op, hk) diff --git a/src/sync/cache.rs b/src/sync/cache.rs index c22f67c1..55ce41f4 100644 --- a/src/sync/cache.rs +++ b/src/sync/cache.rs @@ -769,8 +769,9 @@ where { if let Some(kv) = self.base.remove_entry(key, hash) { if self.base.is_removal_notifier_enabled() { + let key = Arc::clone(&kv.key); self.base - .notify_single_removal(&kv.key, &kv.entry, RemovalCause::Explicit); + .notify_single_removal(key, &kv.entry, RemovalCause::Explicit); } let op = WriteOp::Remove(kv); let hk = self.base.housekeeper.as_ref(); @@ -1942,11 +1943,15 @@ mod tests { #[test] fn test_removal_notifications() { - let notifications = Arc::new(Mutex::new(Vec::new())); + // These `Vec`s will store actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); - let n1 = Arc::clone(¬ifications); + // Create an eviction listener. + let n1 = Arc::clone(&actual); let listener = move |k, v, cause| n1.lock().push((k, v, cause)); + // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(3) .eviction_listener(listener) @@ -1957,7 +1962,9 @@ mod tests { let cache = cache; cache.insert('a', "alice"); - cache.invalidate(&'a'); // Notification 0 for 'a' (explicit) + cache.invalidate(&'a'); + expected.push((Arc::new('a'), "alice", RemovalCause::Explicit)); + cache.sync(); assert_eq!(cache.entry_count(), 0); @@ -1968,29 +1975,36 @@ mod tests { assert_eq!(cache.entry_count(), 3); // This will be rejected due to the size constraint. - cache.insert('e', "emily"); // Notification 1 for 'e' (size) + cache.insert('e', "emily"); + expected.push((Arc::new('e'), "emily", RemovalCause::Size)); cache.sync(); assert_eq!(cache.entry_count(), 3); - // Raise the popularity of 'e' so it will not be rejected next time. + // Raise the popularity of 'e' so it will be accepted next time. cache.get(&'e'); cache.sync(); - cache.insert('e', "eliza"); // Notification 2 for 'b' (size) + // Retry. + cache.insert('e', "eliza"); + expected.push((Arc::new('b'), "bob", RemovalCause::Size)); + cache.sync(); + assert_eq!(cache.entry_count(), 3); + + // Replace an existing entry. + cache.insert('d', "dennis"); + expected.push((Arc::new('d'), "david", RemovalCause::Replaced)); cache.sync(); assert_eq!(cache.entry_count(), 3); // Ensure all scheduled notifications have been processed. std::thread::sleep(Duration::from_secs(1)); - let nx = notifications.lock(); - // dbg!(&*nx); - - // Verify the notifications. 
- assert_eq!(nx.len(), 3); - assert_eq!(nx[0], (Arc::new('a'), "alice", RemovalCause::Explicit)); - assert_eq!(nx[1], (Arc::new('e'), "emily", RemovalCause::Size)); - assert_eq!(nx[2], (Arc::new('b'), "bob", RemovalCause::Size)); + // Verify the events. + let actual_events = &*actual.lock(); + assert_eq!(actual_events.len(), expected.len()); + for (i, (actual, expected)) in actual_events.into_iter().zip(expected).enumerate() { + assert_eq!(actual, &expected, "expected[{}]", i); + } } #[test] diff --git a/src/sync_base/base_cache.rs b/src/sync_base/base_cache.rs index 99e22b7e..4f2fc32e 100644 --- a/src/sync_base/base_cache.rs +++ b/src/sync_base/base_cache.rs @@ -101,7 +101,7 @@ impl BaseCache { pub(crate) fn notify_single_removal( &self, - key: &Arc, + key: Arc, entry: &TrioArc>, cause: RemovalCause, ) where @@ -382,6 +382,9 @@ where (Some((_cnt, ins_op)), None) => ins_op, (None, Some((_cnt, old_entry, upd_op))) => { old_entry.unset_q_nodes(); + if self.is_removal_notifier_enabled() { + self.notify_single_removal(key, &old_entry, RemovalCause::Replaced); + } upd_op } (Some((cnt1, ins_op)), Some((cnt2, old_entry, upd_op))) => { @@ -389,6 +392,9 @@ where ins_op } else { old_entry.unset_q_nodes(); + if self.is_removal_notifier_enabled() { + self.notify_single_removal(key, &old_entry, RemovalCause::Replaced); + } upd_op } } @@ -1051,7 +1057,8 @@ where // The candidate is too big to fit in the cache. Reject it. let removed = self.cache.remove(&Arc::clone(&kh.key), kh.hash); if let Some(entry) = removed { - self.notify_single_removal(&kh.key, &entry, RemovalCause::Size); + let key = Arc::clone(&kh.key); + self.notify_single_removal(key, &entry, RemovalCause::Size); } return; } @@ -1074,7 +1081,7 @@ where self.cache.remove_entry(element.key(), element.hash()) { if self.removal_notifier.is_some() { - self.notify_single_removal(&vic_key, &vic_entry, RemovalCause::Size); + self.notify_single_removal(vic_key, &vic_entry, RemovalCause::Size); } // And then remove the victim from the deques. Self::handle_remove(deqs, vic_entry, counters); @@ -1093,9 +1100,10 @@ where AdmissionResult::Rejected { skipped_nodes: s } => { skipped_nodes = s; // Remove the candidate from the cache (hash map). 
- self.cache.remove(&Arc::clone(&kh.key), kh.hash); + let key = Arc::clone(&kh.key); + self.cache.remove(&key, kh.hash); if self.removal_notifier.is_some() { - self.notify_single_removal(&kh.key, &entry, RemovalCause::Size); + self.notify_single_removal(key, &entry, RemovalCause::Size); } } }; @@ -1530,7 +1538,7 @@ where { fn notify_single_removal( &self, - key: &Arc, + key: Arc, entry: &TrioArc>, cause: RemovalCause, ) { diff --git a/src/sync_base/removal_notifier.rs b/src/sync_base/removal_notifier.rs index 13c60ecd..c309fd2c 100644 --- a/src/sync_base/removal_notifier.rs +++ b/src/sync_base/removal_notifier.rs @@ -63,7 +63,7 @@ where K: Send + Sync + 'static, V: Send + Sync + 'static, { - pub(crate) fn add_single_notification(&self, key: &Arc, value: V, cause: RemovalCause) { + pub(crate) fn add_single_notification(&self, key: Arc, value: V, cause: RemovalCause) { let entry = RemovedEntries::new_single(key, value, cause); self.snd.send(entry).unwrap(); self.submit_task_if_necessary(); @@ -198,8 +198,7 @@ enum RemovedEntries { } impl RemovedEntries { - fn new_single(key: &Arc, value: V, cause: RemovalCause) -> Self { - let key = Arc::clone(key); + fn new_single(key: Arc, value: V, cause: RemovalCause) -> Self { Self::Single(RemovedEntry::new(key, value, cause)) } From b4e017184d27e718453b70fcfc48634a48e4f7cd Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Tue, 31 May 2022 08:00:25 +0800 Subject: [PATCH 08/44] Fix a Clippy warning --- src/sync/cache.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sync/cache.rs b/src/sync/cache.rs index 55ce41f4..efae04d9 100644 --- a/src/sync/cache.rs +++ b/src/sync/cache.rs @@ -2002,7 +2002,7 @@ mod tests { // Verify the events. let actual_events = &*actual.lock(); assert_eq!(actual_events.len(), expected.len()); - for (i, (actual, expected)) in actual_events.into_iter().zip(expected).enumerate() { + for (i, (actual, expected)) in actual_events.iter().zip(expected).enumerate() { assert_eq!(actual, &expected, "expected[{}]", i); } } From 1727b78e8913154430ad1df42732ddd08f5153d6 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Tue, 31 May 2022 08:07:19 +0800 Subject: [PATCH 09/44] Support notification on eviction Tweak an unit test. --- src/notification.rs | 2 +- src/sync/cache.rs | 11 ++++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/src/notification.rs b/src/notification.rs index d213ced8..aff2c22b 100644 --- a/src/notification.rs +++ b/src/notification.rs @@ -5,7 +5,7 @@ use std::sync::Arc; // TODO: Perhaps `Arc` is enough for the most use cases because // Sync would require captured values to be interior mutable? // pub(crate) type EvictionListener = -// Arc, V, RemovalCause) + Send + Sync + 'static>>; +// Arc, V, RemovalCause) + Send + Sync + 'static>>; pub(crate) type EvictionListener = Arc, V, RemovalCause) + Send + Sync + 'static>; diff --git a/src/sync/cache.rs b/src/sync/cache.rs index efae04d9..10bb5294 100644 --- a/src/sync/cache.rs +++ b/src/sync/cache.rs @@ -1943,7 +1943,7 @@ mod tests { #[test] fn test_removal_notifications() { - // These `Vec`s will store actual and expected notifications. + // These `Vec`s will hold actual and expected notifications. let actual = Arc::new(Mutex::new(Vec::new())); let mut expected = Vec::new(); @@ -1986,6 +1986,7 @@ mod tests { // Retry. cache.insert('e', "eliza"); + // and the LRU entry will be evicted. 
expected.push((Arc::new('b'), "bob", RemovalCause::Size)); cache.sync(); assert_eq!(cache.entry_count(), 3); @@ -1999,10 +2000,10 @@ mod tests { // Ensure all scheduled notifications have been processed. std::thread::sleep(Duration::from_secs(1)); - // Verify the events. - let actual_events = &*actual.lock(); - assert_eq!(actual_events.len(), expected.len()); - for (i, (actual, expected)) in actual_events.iter().zip(expected).enumerate() { + // Verify the notifications. + let actual = &*actual.lock(); + assert_eq!(actual.len(), expected.len()); + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { assert_eq!(actual, &expected, "expected[{}]", i); } } From 31d687456e9ce9c47c09cd2a48234741c9cfec58 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Tue, 31 May 2022 20:11:31 +0800 Subject: [PATCH 10/44] Support notification on eviction - Enable `RemovalCause::Expired` events. - Update existing unit test cases to verify the notifications after invalidating, replacing, evicting and expiring entries. - Update the README. --- README.md | 16 ++- src/future/builder.rs | 1 - src/notification.rs | 7 - src/sync/builder.rs | 2 - src/sync/cache.rs | 181 +++++++++++++++++++++-- src/sync_base/base_cache.rs | 231 ++++++++++++++++++++++-------- src/sync_base/removal_notifier.rs | 42 +++--- 7 files changed, 376 insertions(+), 104 deletions(-) diff --git a/README.md b/README.md index 05facfd2..c1bc3fee 100644 --- a/README.md +++ b/README.md @@ -53,6 +53,9 @@ algorithm to determine which entries to evict when the capacity is exceeded. - Supports expiration policies: - Time to live - Time to idle +- Supports eviction listener, a callback function that will be called when an entry + is removed from the cache. + [tiny-lfu]: https://github.com/moka-rs/moka/wiki#admission-and-eviction-policies @@ -528,20 +531,23 @@ $ cargo +nightly -Z unstable-options --config 'build.rustdocflags="--cfg docsrs" ## Road Map - [x] `async` optimized caches. (`v0.2.0`) -- [x] Size-aware eviction. (`v0.7.0` via - [#24](https://github.com/moka-rs/moka/pull/24)) -- [X] API stabilization. (Smaller core cache API, shorter names for frequently - used methods) (`v0.8.0` via [#105](https://github.com/moka-rs/moka/pull/105)) +- [x] Size-aware eviction. (`v0.7.0` via [#24][gh-pull-024]) +- [x] API stabilization. (Smaller core cache API, shorter names for frequently + used methods) (`v0.8.0` via [#105][gh-pull-105]) - e.g. - `get_or_insert_with(K, F)` → `get_with(K, F)` - `get_or_try_insert_with(K, F)` → `try_get_with(K, F)` - `blocking_insert(K, V)` → `blocking().insert(K, V)` - `time_to_live()` → `policy().time_to_live()` -- [ ] Notifications on eviction, etc. +- [x] Notifications on eviction. (`v0.9.0` via [#145][gh-pull-145]) - [ ] Cache statistics. (Hit rate, etc.) - [ ] Upgrade TinyLFU to Window-TinyLFU. ([details][tiny-lfu]) - [ ] The variable (per-entry) expiration, using a hierarchical timer wheel. 
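The notification-on-eviction support introduced in this series is the user-visible feature behind the new "eviction listener" bullet and road-map entry above. As a minimal sketch of that API (the key/value types, capacity and sleep below are illustrative only, and the exact delivery timing is an assumption), a listener registered on the builder receives the key, value and `RemovalCause` of every removed entry; the patches exercise four causes: `Explicit` (invalidation), `Size` (capacity eviction or admission rejection), `Replaced` (value overwritten by a later `insert`) and `Expired` (TTL/TTI).

```rust
use moka::{notification::RemovalCause, sync::Cache};
use std::sync::Arc;

fn main() {
    // Any `Fn(Arc<K>, V, RemovalCause) + Send + Sync + 'static` closure can
    // serve as a listener.
    let listener = |key: Arc<String>, value: String, cause: RemovalCause| {
        println!("removed {key:?} -> {value:?} ({cause:?})");
    };

    let cache: Cache<String, String> = Cache::builder()
        .max_capacity(100)
        .eviction_listener(listener)
        .build();

    cache.insert("a".to_string(), "alice".to_string());
    cache.invalidate(&"a".to_string()); // reported to the listener as Explicit

    // Notifications are delivered by a background task, not on the calling
    // thread, so give it a moment before the program exits.
    std::thread::sleep(std::time::Duration::from_millis(500));
}
```

Because the listener is an `Fn` rather than an `FnMut`, the unit tests in this series share mutable state with it through `Arc<Mutex<Vec<_>>>`, and they sleep briefly before asserting, since delivery happens off the calling thread.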
+[gh-pull-024]: https://github.com/moka-rs/moka/pull/24 +[gh-pull-105]: https://github.com/moka-rs/moka/pull/105 +[gh-pull-145]: https://github.com/moka-rs/moka/pull/145 + ## About the Name diff --git a/src/future/builder.rs b/src/future/builder.rs index 41507693..623c0b6c 100644 --- a/src/future/builder.rs +++ b/src/future/builder.rs @@ -178,7 +178,6 @@ impl CacheBuilder { listener: impl Fn(Arc, V, RemovalCause) + Send + Sync + 'static, ) -> Self { Self { - // eviction_listener: Some(Arc::new(Mutex::new(listener))), eviction_listener: Some(Arc::new(listener)), ..self } diff --git a/src/notification.rs b/src/notification.rs index aff2c22b..af131c5b 100644 --- a/src/notification.rs +++ b/src/notification.rs @@ -1,15 +1,8 @@ use std::sync::Arc; -// use parking_lot::Mutex; - -// TODO: Perhaps `Arc` is enough for the most use cases because -// Sync would require captured values to be interior mutable? -// pub(crate) type EvictionListener = -// Arc, V, RemovalCause) + Send + Sync + 'static>>; pub(crate) type EvictionListener = Arc, V, RemovalCause) + Send + Sync + 'static>; -// pub(crate) type EvictionListenerRef<'a, K, V> = &'a mut dyn FnMut(Arc, V, RemovalCause); pub(crate) type EvictionListenerRef<'a, K, V> = &'a Arc, V, RemovalCause) + Send + Sync + 'static>; diff --git a/src/sync/builder.rs b/src/sync/builder.rs index 29263608..2a573b21 100644 --- a/src/sync/builder.rs +++ b/src/sync/builder.rs @@ -259,11 +259,9 @@ impl CacheBuilder { pub fn eviction_listener( self, - // listener: impl FnMut(Arc, V, RemovalCause) + Send + Sync + 'static, listener: impl Fn(Arc, V, RemovalCause) + Send + Sync + 'static, ) -> Self { Self { - // eviction_listener: Some(Arc::new(Mutex::new(listener))), eviction_listener: Some(Arc::new(listener)), ..self } diff --git a/src/sync/cache.rs b/src/sync/cache.rs index 10bb5294..96364fdf 100644 --- a/src/sync/cache.rs +++ b/src/sync/cache.rs @@ -1008,7 +1008,19 @@ mod tests { #[test] fn basic_single_thread() { - let mut cache = Cache::new(3); + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + + // Create a cache with the eviction listener. + let mut cache = Cache::builder() + .max_capacity(3) + .eviction_listener(listener) + .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. @@ -1038,11 +1050,13 @@ mod tests { // "d" should not be admitted because its frequency is too low. cache.insert("d", "david"); // count: d -> 0 + expected.push((Arc::new("d"), "david", RemovalCause::Size)); cache.sync(); assert_eq!(cache.get(&"d"), None); // d -> 1 assert!(!cache.contains_key(&"d")); cache.insert("d", "david"); + expected.push((Arc::new("d"), "david", RemovalCause::Size)); cache.sync(); assert!(!cache.contains_key(&"d")); assert_eq!(cache.get(&"d"), None); // d -> 2 @@ -1050,6 +1064,7 @@ mod tests { // "d" should be admitted and "c" should be evicted // because d's frequency is higher than c's. 
cache.insert("d", "dennis"); + expected.push((Arc::new("c"), "cindy", RemovalCause::Size)); cache.sync(); assert_eq!(cache.get(&"a"), Some("alice")); assert_eq!(cache.get(&"b"), Some("bob")); @@ -1061,8 +1076,20 @@ mod tests { assert!(cache.contains_key(&"d")); cache.invalidate(&"b"); + expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); + cache.sync(); assert_eq!(cache.get(&"b"), None); assert!(!cache.contains_key(&"b")); + + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_secs(1)); + + // Verify the notifications. + let actual = &*actual.lock(); + assert_eq!(actual.len(), expected.len()); + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { + assert_eq!(actual, &expected, "expected[{}]", i); + } } #[test] @@ -1076,7 +1103,20 @@ mod tests { let david = ("david", 15); let dennis = ("dennis", 15); - let mut cache = Cache::builder().max_capacity(31).weigher(weigher).build(); + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + + // Create a cache with the eviction listener. + let mut cache = Cache::builder() + .max_capacity(31) + .weigher(weigher) + .eviction_listener(listener) + .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. @@ -1108,27 +1148,33 @@ mod tests { // "d" must have higher count than 3, which is the aggregated count // of "a" and "c". cache.insert("d", david); // count: d -> 0 + expected.push((Arc::new("d"), david, RemovalCause::Size)); cache.sync(); assert_eq!(cache.get(&"d"), None); // d -> 1 assert!(!cache.contains_key(&"d")); cache.insert("d", david); + expected.push((Arc::new("d"), david, RemovalCause::Size)); cache.sync(); assert!(!cache.contains_key(&"d")); assert_eq!(cache.get(&"d"), None); // d -> 2 cache.insert("d", david); + expected.push((Arc::new("d"), david, RemovalCause::Size)); cache.sync(); assert_eq!(cache.get(&"d"), None); // d -> 3 assert!(!cache.contains_key(&"d")); cache.insert("d", david); + expected.push((Arc::new("d"), david, RemovalCause::Size)); cache.sync(); assert!(!cache.contains_key(&"d")); assert_eq!(cache.get(&"d"), None); // d -> 4 // Finally "d" should be admitted by evicting "c" and "a". cache.insert("d", dennis); + expected.push((Arc::new("c"), cindy, RemovalCause::Size)); + expected.push((Arc::new("a"), alice, RemovalCause::Size)); cache.sync(); assert_eq!(cache.get(&"a"), None); assert_eq!(cache.get(&"b"), Some(bob)); @@ -1141,6 +1187,8 @@ mod tests { // Update "b" with "bill" (w: 15 -> 20). This should evict "d" (w: 15). cache.insert("b", bill); + expected.push((Arc::new("b"), bob, RemovalCause::Replaced)); + expected.push((Arc::new("d"), dennis, RemovalCause::Size)); cache.sync(); assert_eq!(cache.get(&"b"), Some(bill)); assert_eq!(cache.get(&"d"), None); @@ -1150,6 +1198,7 @@ mod tests { // Re-add "a" (w: 10) and update "b" with "bob" (w: 20 -> 15). cache.insert("a", alice); cache.insert("b", bob); + expected.push((Arc::new("b"), bill, RemovalCause::Replaced)); cache.sync(); assert_eq!(cache.get(&"a"), Some(alice)); assert_eq!(cache.get(&"b"), Some(bob)); @@ -1161,6 +1210,16 @@ mod tests { // Verify the sizes. assert_eq!(cache.entry_count(), 2); assert_eq!(cache.weighted_size(), 25); + + // Ensure all scheduled notifications have been processed. 
+ std::thread::sleep(Duration::from_secs(1)); + + // Verify the notifications. + let actual = &*actual.lock(); + assert_eq!(actual.len(), expected.len()); + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { + assert_eq!(actual, &expected, "expected[{}]", i); + } } #[test] @@ -1192,7 +1251,19 @@ mod tests { #[test] fn invalidate_all() { - let mut cache = Cache::new(100); + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + + // Create a cache with the eviction listener. + let mut cache = Cache::builder() + .max_capacity(100) + .eviction_listener(listener) + .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. @@ -1210,6 +1281,13 @@ mod tests { cache.sync(); cache.invalidate_all(); + // expected.push((Arc::new("a"), "alice", RemovalCause::Explicit)); + // expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); + // expected.push((Arc::new("c"), "cindy", RemovalCause::Explicit)); + expected.push((Arc::new("a"), "alice", RemovalCause::Expired)); + expected.push((Arc::new("b"), "bob", RemovalCause::Expired)); + expected.push((Arc::new("c"), "cindy", RemovalCause::Expired)); + cache.sync(); cache.insert("d", "david"); @@ -1223,15 +1301,35 @@ mod tests { assert!(!cache.contains_key(&"b")); assert!(!cache.contains_key(&"c")); assert!(cache.contains_key(&"d")); + + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_secs(1)); + + // Verify the notifications. + let actual = &*actual.lock(); + assert_eq!(actual.len(), expected.len()); + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { + assert_eq!(actual, &expected, "expected[{}]", i); + } } #[test] fn invalidate_entries_if() -> Result<(), Box> { use std::collections::HashSet; + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + + // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(100) .support_invalidation_closures() + .eviction_listener(listener) .build(); cache.reconfigure_for_testing(); @@ -1259,6 +1357,8 @@ mod tests { let names = ["alice", "alex"].iter().cloned().collect::>(); cache.invalidate_entries_if(move |_k, &v| names.contains(v))?; assert_eq!(cache.base.invalidation_predicate_count(), 1); + expected.push((Arc::new(0), "alice", RemovalCause::Explicit)); + expected.push((Arc::new(2), "alex", RemovalCause::Explicit)); mock.increment(Duration::from_secs(5)); // 10 secs from the start. @@ -1289,6 +1389,9 @@ mod tests { cache.invalidate_entries_if(|_k, &v| v == "alice")?; cache.invalidate_entries_if(|_k, &v| v == "bob")?; assert_eq!(cache.invalidation_predicate_count(), 2); + // key 1 was inserted before key 3. + expected.push((Arc::new(1), "bob", RemovalCause::Explicit)); + expected.push((Arc::new(3), "alice", RemovalCause::Explicit)); // Run the invalidation task and wait for it to finish. (TODO: Need a better way than sleeping) cache.sync(); // To submit the invalidation task. 
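In the test above, entries removed by `invalidate_entries_if` are expected to reach the listener with `RemovalCause::Explicit`. Outside the test harness (no `reconfigure_for_testing` or explicit `sync`), the same behaviour looks roughly like the sketch below; the key/value types, the predicate and the one-second wait are illustrative, and the invalidation scan runs in the background, so the notifications arrive shortly after the call returns.

```rust
use moka::{notification::RemovalCause, sync::Cache};
use std::sync::Arc;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let listener = |key: Arc<u32>, value: String, cause: RemovalCause| {
        // Predicate-based invalidation is reported as RemovalCause::Explicit.
        println!("invalidated {key}: {value} ({cause:?})");
    };

    // `support_invalidation_closures()` must be enabled before
    // `invalidate_entries_if` can be used.
    let cache: Cache<u32, String> = Cache::builder()
        .max_capacity(100)
        .support_invalidation_closures()
        .eviction_listener(listener)
        .build();

    cache.insert(0, "alice".to_string());
    cache.insert(1, "bob".to_string());

    // Queue an invalidation for every entry whose value is "alice". The scan
    // is processed by a background task, and the listener fires once it runs.
    cache.invalidate_entries_if(|_k, v| v == "alice")?;

    std::thread::sleep(std::time::Duration::from_secs(1));
    Ok(())
}
```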
@@ -1305,16 +1408,35 @@ mod tests { assert_eq!(cache.entry_count(), 0); assert_eq!(cache.invalidation_predicate_count(), 0); + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_secs(1)); + + // Verify the notifications. + let actual = &*actual.lock(); + assert_eq!(actual.len(), expected.len()); + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { + assert_eq!(actual, &expected, "expected[{}]", i); + } + Ok(()) } #[test] fn time_to_live() { + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + + // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(100) .time_to_live(Duration::from_secs(10)) + .eviction_listener(listener) .build(); - cache.reconfigure_for_testing(); let (clock, mock) = Clock::mock(); @@ -1333,6 +1455,7 @@ mod tests { assert!(cache.contains_key(&"a")); mock.increment(Duration::from_secs(5)); // 10 secs. + expected.push((Arc::new("a"), "alice", RemovalCause::Expired)); assert_eq!(cache.get(&"a"), None); assert!(!cache.contains_key(&"a")); @@ -1354,6 +1477,7 @@ mod tests { assert_eq!(cache.entry_count(), 1); cache.insert("b", "bill"); + expected.push((Arc::new("b"), "bob", RemovalCause::Replaced)); cache.sync(); mock.increment(Duration::from_secs(5)); // 20 secs @@ -1364,6 +1488,7 @@ mod tests { assert_eq!(cache.entry_count(), 1); mock.increment(Duration::from_secs(5)); // 25 secs + expected.push((Arc::new("b"), "bill", RemovalCause::Expired)); assert_eq!(cache.get(&"a"), None); assert_eq!(cache.get(&"b"), None); @@ -1374,15 +1499,34 @@ mod tests { cache.sync(); assert!(cache.is_table_empty()); + + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_secs(1)); + + // Verify the notifications. + let actual = &*actual.lock(); + assert_eq!(actual.len(), expected.len()); + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { + assert_eq!(actual, &expected, "expected[{}]", i); + } } #[test] fn time_to_idle() { + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + + // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(100) .time_to_idle(Duration::from_secs(10)) + .eviction_listener(listener) .build(); - cache.reconfigure_for_testing(); let (clock, mock) = Clock::mock(); @@ -1418,6 +1562,8 @@ mod tests { assert_eq!(cache.entry_count(), 2); mock.increment(Duration::from_secs(3)); // 15 secs. + expected.push((Arc::new("a"), "alice", RemovalCause::Expired)); + assert_eq!(cache.get(&"a"), None); assert_eq!(cache.get(&"b"), Some("bob")); assert!(!cache.contains_key(&"a")); @@ -1429,6 +1575,8 @@ mod tests { assert_eq!(cache.entry_count(), 1); mock.increment(Duration::from_secs(10)); // 25 secs + expected.push((Arc::new("b"), "bob", RemovalCause::Expired)); + assert_eq!(cache.get(&"a"), None); assert_eq!(cache.get(&"b"), None); assert!(!cache.contains_key(&"a")); @@ -1438,6 +1586,16 @@ mod tests { cache.sync(); assert!(cache.is_table_empty()); + + // Ensure all scheduled notifications have been processed. 
+ std::thread::sleep(Duration::from_secs(1)); + + // Verify the notifications. + let actual = &*actual.lock(); + assert_eq!(actual.len(), expected.len()); + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { + assert_eq!(actual, &expected, "expected[{}]", i); + } } #[test] @@ -1943,13 +2101,20 @@ mod tests { #[test] fn test_removal_notifications() { - // These `Vec`s will hold actual and expected notifications. + // NOTE: The following tests also check the notifications: + // - basic_single_thread + // - size_aware_eviction + // - invalidate_entries_if + // - time_to_live + // - time_to_idle + + // The following `Vec`s will hold actual and expected notifications. let actual = Arc::new(Mutex::new(Vec::new())); let mut expected = Vec::new(); // Create an eviction listener. - let n1 = Arc::clone(&actual); - let listener = move |k, v, cause| n1.lock().push((k, v, cause)); + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); // Create a cache with the eviction listener. let mut cache = Cache::builder() diff --git a/src/sync_base/base_cache.rs b/src/sync_base/base_cache.rs index 4f2fc32e..6bcb8923 100644 --- a/src/sync_base/base_cache.rs +++ b/src/sync_base/base_cache.rs @@ -1,7 +1,7 @@ use super::{ invalidator::{GetOrRemoveEntry, InvalidationResult, Invalidator, KeyDateLite, PredicateFun}, iter::ScanningGet, - removal_notifier::RemovalNotifier, + removal_notifier::{RemovalNotifier, RemovedEntry}, PredicateId, }; @@ -474,6 +474,53 @@ where } } +struct EvictionState { + counters: EvictionCounters, + removed_entries: Option>>, +} + +impl EvictionState { + fn new(entry_count: u64, weighted_size: u64, is_notifier_enabled: bool) -> Self { + let removed_entries = if is_notifier_enabled { + Some(Vec::new()) + } else { + None + }; + Self { + counters: EvictionCounters::new(entry_count, weighted_size), + removed_entries, + } + } + + fn is_notifier_enabled(&self) -> bool { + self.removed_entries.is_some() + } + + fn add_removed_entry( + &mut self, + key: Arc, + entry: &TrioArc>, + cause: RemovalCause, + ) where + K: Send + Sync + 'static, + V: Clone + Send + Sync + 'static, + { + if let Some(removed) = &mut self.removed_entries { + removed.push(RemovedEntry::new(key, entry.value.clone(), cause)); + } + } + + fn notify_multiple_removals(&mut self, notifier: &RemovalNotifier) + where + K: Send + Sync + 'static, + V: Send + Sync + 'static, + { + if let Some(removed) = self.removed_entries.take() { + notifier.add_multiple_notifications(removed) + } + } +} + struct EvictionCounters { entry_count: u64, weighted_size: u64, @@ -846,7 +893,8 @@ where let current_ec = self.entry_count.load(); let current_ws = self.weighted_size.load(); - let mut counters = EvictionCounters::new(current_ec, current_ws); + let mut eviction_state = + EvictionState::new(current_ec, current_ws, self.is_removal_notifier_enabled()); while should_sync && calls <= max_repeats { let r_len = self.read_op_ch.len(); @@ -856,11 +904,11 @@ where let w_len = self.write_op_ch.len(); if w_len > 0 { - self.apply_writes(&mut deqs, w_len, &mut counters); + self.apply_writes(&mut deqs, w_len, &mut eviction_state); } - if self.should_enable_frequency_sketch(&counters) { - self.enable_frequency_sketch(&counters); + if self.should_enable_frequency_sketch(&eviction_state.counters) { + self.enable_frequency_sketch(&eviction_state.counters); } calls += 1; @@ -869,7 +917,11 @@ where } if self.has_expiry() || self.has_valid_after() { - self.evict_expired(&mut deqs, 
batch_size::EVICTION_BATCH_SIZE, &mut counters); + self.evict_expired( + &mut deqs, + batch_size::EVICTION_BATCH_SIZE, + &mut eviction_state, + ); } if self.invalidator_enabled { @@ -879,31 +931,33 @@ where invalidator, &mut deqs, batch_size::INVALIDATION_BATCH_SIZE, - &mut counters, + &mut eviction_state, ); } } } // Evict if this cache has more entries than its capacity. - let weights_to_evict = self.weights_to_evict(&counters); + let weights_to_evict = self.weights_to_evict(&eviction_state.counters); if weights_to_evict > 0 { self.evict_lru_entries( &mut deqs, batch_size::EVICTION_BATCH_SIZE, weights_to_evict, - &mut counters, + &mut eviction_state, ); } if let Some(notifier) = &self.removal_notifier { + eviction_state.notify_multiple_removals(notifier); notifier.submit_task(); } debug_assert_eq!(self.entry_count.load(), current_ec); debug_assert_eq!(self.weighted_size.load(), current_ws); - self.entry_count.store(counters.entry_count); - self.weighted_size.store(counters.weighted_size); + self.entry_count.store(eviction_state.counters.entry_count); + self.weighted_size + .store(eviction_state.counters.weighted_size); if should_sync { Some(SyncPace::Fast) @@ -992,8 +1046,12 @@ where } } - fn apply_writes(&self, deqs: &mut Deques, count: usize, counters: &mut EvictionCounters) - where + fn apply_writes( + &self, + deqs: &mut Deques, + count: usize, + eviction_state: &mut EvictionState, + ) where V: Clone, { use WriteOp::*; @@ -1008,11 +1066,18 @@ where value_entry: entry, old_weight, new_weight, - }) => { - self.handle_upsert(kh, entry, old_weight, new_weight, ts, deqs, &freq, counters) - } + }) => self.handle_upsert( + kh, + entry, + old_weight, + new_weight, + ts, + deqs, + &freq, + eviction_state, + ), Ok(Remove(KvEntry { key: _key, entry })) => { - Self::handle_remove(deqs, entry, counters) + Self::handle_remove(deqs, entry, &mut eviction_state.counters) } Err(_) => break, }; @@ -1029,27 +1094,31 @@ where timestamp: Instant, deqs: &mut Deques, freq: &FrequencySketch, - counters: &mut EvictionCounters, + eviction_state: &mut EvictionState, ) where V: Clone, { entry.set_last_accessed(timestamp); entry.set_last_modified(timestamp); - if entry.is_admitted() { - // The entry has been already admitted, so treat this as an update. - counters.saturating_sub(0, old_weight); - counters.saturating_add(0, new_weight); - deqs.move_to_back_ao(&entry); - deqs.move_to_back_wo(&entry); - return; - } + { + let counters = &mut eviction_state.counters; + + if entry.is_admitted() { + // The entry has been already admitted, so treat this as an update. + counters.saturating_sub(0, old_weight); + counters.saturating_add(0, new_weight); + deqs.move_to_back_ao(&entry); + deqs.move_to_back_wo(&entry); + return; + } - if self.has_enough_capacity(new_weight, counters) { - // There are enough room in the cache (or the cache is unbounded). - // Add the candidate to the deques. - self.handle_admit(kh, &entry, new_weight, deqs, counters); - return; + if self.has_enough_capacity(new_weight, counters) { + // There are enough room in the cache (or the cache is unbounded). + // Add the candidate to the deques. + self.handle_admit(kh, &entry, new_weight, deqs, counters); + return; + } } if let Some(max) = self.max_capacity { @@ -1057,8 +1126,10 @@ where // The candidate is too big to fit in the cache. Reject it. 
let removed = self.cache.remove(&Arc::clone(&kh.key), kh.hash); if let Some(entry) = removed { - let key = Arc::clone(&kh.key); - self.notify_single_removal(key, &entry, RemovalCause::Size); + if eviction_state.is_notifier_enabled() { + let key = Arc::clone(&kh.key); + eviction_state.add_removed_entry(key, &entry, RemovalCause::Size); + } } return; } @@ -1080,11 +1151,15 @@ where if let Some((vic_key, vic_entry)) = self.cache.remove_entry(element.key(), element.hash()) { - if self.removal_notifier.is_some() { - self.notify_single_removal(vic_key, &vic_entry, RemovalCause::Size); + if eviction_state.is_notifier_enabled() { + eviction_state.add_removed_entry( + vic_key, + &vic_entry, + RemovalCause::Size, + ); } // And then remove the victim from the deques. - Self::handle_remove(deqs, vic_entry, counters); + Self::handle_remove(deqs, vic_entry, &mut eviction_state.counters); } else { // Could not remove the victim from the cache. Skip this // victim node as its ValueEntry might have been @@ -1095,15 +1170,15 @@ where skipped_nodes = skipped; // Add the candidate to the deques. - self.handle_admit(kh, &entry, new_weight, deqs, counters); + self.handle_admit(kh, &entry, new_weight, deqs, &mut eviction_state.counters); } AdmissionResult::Rejected { skipped_nodes: s } => { skipped_nodes = s; // Remove the candidate from the cache (hash map). let key = Arc::clone(&kh.key); self.cache.remove(&key, kh.hash); - if self.removal_notifier.is_some() { - self.notify_single_removal(key, &entry, RemovalCause::Size); + if eviction_state.is_notifier_enabled() { + eviction_state.add_removed_entry(key, &entry, RemovalCause::Size); } } }; @@ -1253,12 +1328,14 @@ where &self, deqs: &mut Deques, batch_size: usize, - counters: &mut EvictionCounters, - ) { + eviction_state: &mut EvictionState, + ) where + V: Clone, + { let now = self.current_time_from_expiration_clock(); if self.is_write_order_queue_enabled() { - self.remove_expired_wo(deqs, batch_size, now, counters); + self.remove_expired_wo(deqs, batch_size, now, eviction_state); } if self.time_to_idle.is_some() || self.has_valid_after() { @@ -1270,7 +1347,7 @@ where ); let mut rm_expired_ao = - |name, deq| self.remove_expired_ao(name, deq, wo, batch_size, now, counters); + |name, deq| self.remove_expired_ao(name, deq, wo, batch_size, now, eviction_state); rm_expired_ao("window", window); rm_expired_ao("probation", probation); @@ -1286,8 +1363,10 @@ where write_order_deq: &mut Deque>, batch_size: usize, now: Instant, - counters: &mut EvictionCounters, - ) { + eviction_state: &mut EvictionState, + ) where + V: Clone, + { let tti = &self.time_to_idle; let va = &self.valid_after(); for _ in 0..batch_size { @@ -1315,7 +1394,17 @@ where .remove_if(key, hash, |_, v| is_expired_entry_ao(tti, va, v, now)); if let Some(entry) = maybe_entry { - Self::handle_remove_with_deques(deq_name, deq, write_order_deq, entry, counters); + if eviction_state.is_notifier_enabled() { + let key = Arc::clone(key); + eviction_state.add_removed_entry(key, &entry, RemovalCause::Expired); + } + Self::handle_remove_with_deques( + deq_name, + deq, + write_order_deq, + entry, + &mut eviction_state.counters, + ); } else if !self.try_skip_updated_entry(key, hash, deq_name, deq, write_order_deq) { break; } @@ -1360,8 +1449,10 @@ where deqs: &mut Deques, batch_size: usize, now: Instant, - counters: &mut EvictionCounters, - ) { + eviction_state: &mut EvictionState, + ) where + V: Clone, + { let ttl = &self.time_to_live; let va = &self.valid_after(); for _ in 0..batch_size { @@ -1385,7 +1476,11 @@ 
where .remove_if(key, hash, |_, v| is_expired_entry_wo(ttl, va, v, now)); if let Some(entry) = maybe_entry { - Self::handle_remove(deqs, entry, counters); + if eviction_state.is_notifier_enabled() { + let key = Arc::clone(key); + eviction_state.add_removed_entry(key, &entry, RemovalCause::Expired); + } + Self::handle_remove(deqs, entry, &mut eviction_state.counters); } else if let Some(entry) = self.cache.get(key, hash) { if entry.last_modified().is_none() { deqs.move_to_back_ao(&entry); @@ -1412,9 +1507,11 @@ where invalidator: &Invalidator, deqs: &mut Deques, batch_size: usize, - counters: &mut EvictionCounters, - ) { - self.process_invalidation_result(invalidator, deqs, counters); + eviction_state: &mut EvictionState, + ) where + V: Clone, + { + self.process_invalidation_result(invalidator, deqs, eviction_state); self.submit_invalidation_task(invalidator, &mut deqs.write_order, batch_size); } @@ -1422,15 +1519,20 @@ where &self, invalidator: &Invalidator, deqs: &mut Deques, - counters: &mut EvictionCounters, - ) { + eviction_state: &mut EvictionState, + ) where + V: Clone, + { if let Some(InvalidationResult { invalidated, is_done, }) = invalidator.task_result() { - for KvEntry { key: _, entry } in invalidated { - Self::handle_remove(deqs, entry, counters); + for KvEntry { key, entry } in invalidated { + if eviction_state.is_notifier_enabled() { + eviction_state.add_removed_entry(key, &entry, RemovalCause::Explicit); + } + Self::handle_remove(deqs, entry, &mut eviction_state.counters); } if is_done { deqs.write_order.reset_cursor(); @@ -1481,8 +1583,10 @@ where deqs: &mut Deques, batch_size: usize, weights_to_evict: u64, - counters: &mut EvictionCounters, - ) { + eviction_state: &mut EvictionState, + ) where + V: Clone, + { const DEQ_NAME: &str = "probation"; let mut evicted = 0u64; let (deq, write_order_deq) = (&mut deqs.probation, &mut deqs.write_order); @@ -1521,8 +1625,17 @@ where }); if let Some(entry) = maybe_entry { + if eviction_state.is_notifier_enabled() { + eviction_state.add_removed_entry(key, &entry, RemovalCause::Size); + } let weight = entry.policy_weight(); - Self::handle_remove_with_deques(DEQ_NAME, deq, write_order_deq, entry, counters); + Self::handle_remove_with_deques( + DEQ_NAME, + deq, + write_order_deq, + entry, + &mut eviction_state.counters, + ); evicted = evicted.saturating_add(weight as u64); } else if !self.try_skip_updated_entry(&key, hash, DEQ_NAME, deq, write_order_deq) { break; diff --git a/src/sync_base/removal_notifier.rs b/src/sync_base/removal_notifier.rs index c309fd2c..103cf84e 100644 --- a/src/sync_base/removal_notifier.rs +++ b/src/sync_base/removal_notifier.rs @@ -69,11 +69,11 @@ where self.submit_task_if_necessary(); } - // pub(crate) fn add_multiple_notifications(&self, entries: Vec>) { - // let entries = RemovedEntries::new_multi(entries); - // self.snd.send(entries).unwrap(); // TODO: Error handling? - // self.submit_task_if_necessary(); - // } + pub(crate) fn add_multiple_notifications(&self, entries: Vec>) { + let entries = RemovedEntries::new_multi(entries); + self.snd.send(entries).unwrap(); // TODO: Error handling? + self.submit_task_if_necessary(); + } pub(crate) fn submit_task(&self) { // TODO: Use compare and exchange to ensure it was false. 
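The notifier above hands removed entries to a background task over a channel, either one at a time (`Single`) or as a batch collected during a maintenance pass (`Multi`). The standalone sketch below only illustrates that producer/worker pattern; it is not moka's implementation (the real notifier lives behind `RemovalNotifier`, its own channel and a thread pool), and every name in it is made up for the illustration.

```rust
use std::sync::mpsc;
use std::thread;

// Removed entries travel either one at a time (a single insert/invalidate)
// or as a whole batch collected during one maintenance pass.
enum Removed<K, V> {
    Single(K, V, &'static str),
    Multi(Vec<(K, V, &'static str)>),
}

fn main() {
    let (snd, rcv) = mpsc::channel::<Removed<String, String>>();

    // The "notification task": drain the channel and call the listener for
    // every entry, off the cache's hot path.
    let worker = thread::spawn(move || {
        let listener = |k: &str, v: &str, cause: &str| println!("{k} -> {v} ({cause})");
        while let Ok(msg) = rcv.recv() {
            match msg {
                Removed::Single(k, v, c) => listener(&k, &v, c),
                Removed::Multi(batch) => {
                    for (k, v, c) in batch {
                        listener(&k, &v, c);
                    }
                }
            }
        }
    });

    snd.send(Removed::Single("a".into(), "alice".into(), "Explicit"))
        .unwrap();
    snd.send(Removed::Multi(vec![
        ("b".into(), "bob".into(), "Size"),
        ("c".into(), "cindy".into(), "Expired"),
    ]))
    .unwrap();

    // Dropping the sender closes the channel and lets the worker finish.
    drop(snd);
    worker.join().unwrap();
}
```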
@@ -109,25 +109,24 @@ impl NotificationTask { fn execute(&self) { let task_lock = self.state.task_lock.lock(); - // let mut listener = self.state.listener.lock(); let mut count = 0u16; while let Ok(entries) = self.state.rcv.try_recv() { match entries { RemovedEntries::Single(entry) => { - // self.notify(&mut *listener, entry); self.notify(&self.state.listener, entry); count += 1; - } // RemovedEntries::Multi(entries) => { - // for entry in entries { - // self.notify(&mut *listener, entry); - // count += 1; - - // if self.state.is_shutting_down() { - // break; - // } - // } - // } + } + RemovedEntries::Multi(entries) => { + for entry in entries { + self.notify(&self.state.listener, entry); + count += 1; + + if self.state.is_shutting_down() { + break; + } + } + } } if count > MAX_NOTIFICATIONS_PER_TASK || self.state.is_shutting_down() { @@ -135,7 +134,6 @@ impl NotificationTask { } } - // std::mem::drop(listener); std::mem::drop(task_lock); self.state.set_running(false); } @@ -194,7 +192,7 @@ impl RemovedEntry { enum RemovedEntries { Single(RemovedEntry), - // Multi(Vec>), + Multi(Vec>), } impl RemovedEntries { @@ -202,7 +200,7 @@ impl RemovedEntries { Self::Single(RemovedEntry::new(key, value, cause)) } - // fn new_multi(entries: Vec>) -> Self { - // Self::Multi(entries) - // } + fn new_multi(entries: Vec>) -> Self { + Self::Multi(entries) + } } From 14ba86a0d8d97b77371ea877fb95ba52ce910ca9 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Tue, 31 May 2022 21:35:45 +0800 Subject: [PATCH 11/44] Support notification on eviction - Fix the `RemovalCause` for entries removed by `invalidate_all`. --- src/sync/cache.rs | 10 +-- src/sync_base/base_cache.rs | 152 +++++++++++++++++++++++++----------- 2 files changed, 110 insertions(+), 52 deletions(-) diff --git a/src/sync/cache.rs b/src/sync/cache.rs index 96364fdf..bbed854d 100644 --- a/src/sync/cache.rs +++ b/src/sync/cache.rs @@ -1281,13 +1281,9 @@ mod tests { cache.sync(); cache.invalidate_all(); - // expected.push((Arc::new("a"), "alice", RemovalCause::Explicit)); - // expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); - // expected.push((Arc::new("c"), "cindy", RemovalCause::Explicit)); - expected.push((Arc::new("a"), "alice", RemovalCause::Expired)); - expected.push((Arc::new("b"), "bob", RemovalCause::Expired)); - expected.push((Arc::new("c"), "cindy", RemovalCause::Expired)); - + expected.push((Arc::new("a"), "alice", RemovalCause::Explicit)); + expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); + expected.push((Arc::new("c"), "cindy", RemovalCause::Explicit)); cache.sync(); cache.insert("d", "david"); diff --git a/src/sync_base/base_cache.rs b/src/sync_base/base_cache.rs index 6bcb8923..f8c3d75c 100644 --- a/src/sync_base/base_cache.rs +++ b/src/sync_base/base_cache.rs @@ -1371,19 +1371,30 @@ where let va = &self.valid_after(); for _ in 0..batch_size { // Peek the front node of the deque and check if it is expired. 
- let key_hash = deq.peek_front().and_then(|node| { - if is_expired_entry_ao(tti, va, &*node, now) { - Some((Arc::clone(node.element.key()), node.element.hash())) - } else { - None + let key_hash_cause = deq.peek_front().and_then(|node| { + match is_entry_expired_ao_or_invalid(tti, va, &*node, now) { + (true, _) => Some(( + Arc::clone(node.element.key()), + node.element.hash(), + RemovalCause::Expired, + )), + (false, true) => Some(( + Arc::clone(node.element.key()), + node.element.hash(), + RemovalCause::Explicit, + )), + (false, false) => None, } }); - if key_hash.is_none() { + if key_hash_cause.is_none() { break; } - let (key, hash) = key_hash.as_ref().map(|(k, h)| (k, *h)).unwrap(); + let (key, hash, cause) = key_hash_cause + .as_ref() + .map(|(k, h, c)| (k, *h, *c)) + .unwrap(); // Remove the key from the map only when the entry is really // expired. This check is needed because it is possible that the entry in @@ -1396,7 +1407,7 @@ where if let Some(entry) = maybe_entry { if eviction_state.is_notifier_enabled() { let key = Arc::clone(key); - eviction_state.add_removed_entry(key, &entry, RemovalCause::Expired); + eviction_state.add_removed_entry(key, &entry, cause); } Self::handle_remove_with_deques( deq_name, @@ -1456,19 +1467,22 @@ where let ttl = &self.time_to_live; let va = &self.valid_after(); for _ in 0..batch_size { - let key = deqs.write_order.peek_front().and_then(|node| { - if is_expired_entry_wo(ttl, va, &*node, now) { - Some(Arc::clone(node.element.key())) - } else { - None - } - }); + let key_cause = + deqs.write_order.peek_front().and_then( + |node| match is_entry_expired_wo_or_invalid(ttl, va, &*node, now) { + (true, _) => Some((Arc::clone(node.element.key()), RemovalCause::Expired)), + (false, true) => { + Some((Arc::clone(node.element.key()), RemovalCause::Explicit)) + } + (false, false) => None, + }, + ); - if key.is_none() { + if key_cause.is_none() { break; } - let key = key.as_ref().unwrap(); + let (key, cause) = key_cause.as_ref().unwrap(); let hash = self.hash(key); let maybe_entry = self @@ -1478,7 +1492,7 @@ where if let Some(entry) = maybe_entry { if eviction_state.is_notifier_enabled() { let key = Arc::clone(key); - eviction_state.add_removed_entry(key, &entry, RemovalCause::Expired); + eviction_state.add_removed_entry(key, &entry, *cause); } Self::handle_remove(deqs, entry, &mut eviction_state.counters); } else if let Some(entry) = self.cache.get(key, hash) { @@ -1659,12 +1673,6 @@ where notifier.add_single_notification(key, entry.value.clone(), cause) } } - - // fn notify_multiple_removals(&self, entries: Vec>) { - // if let Some(notifier) = &self.removal_notifier { - // notifier.add_multiple_notifications(entries) - // } - // } } // @@ -1707,17 +1715,8 @@ fn is_expired_entry_ao( now: Instant, ) -> bool { if let Some(ts) = entry.last_accessed() { - if let Some(va) = valid_after { - if ts < *va { - return true; - } - } - if let Some(tti) = time_to_idle { - let checked_add = ts.checked_add(*tti); - if checked_add.is_none() { - panic!("ttl overflow") - } - return checked_add.unwrap() <= now; + if is_invalid_entry(valid_after, ts) || is_expired_by_tti(time_to_idle, ts, now) { + return true; } } false @@ -1731,18 +1730,81 @@ fn is_expired_entry_wo( now: Instant, ) -> bool { if let Some(ts) = entry.last_modified() { - if let Some(va) = valid_after { - if ts < *va { - return true; - } + if is_invalid_entry(valid_after, ts) || is_expired_by_ttl(time_to_live, ts, now) { + return true; } - if let Some(ttl) = time_to_live { - let checked_add = ts.checked_add(*ttl); - 
if checked_add.is_none() { - panic!("ttl overflow"); - } - return checked_add.unwrap() <= now; + } + false +} + +#[inline] +fn is_entry_expired_ao_or_invalid( + time_to_idle: &Option, + valid_after: &Option, + entry: &impl AccessTime, + now: Instant, +) -> (bool, bool) { + if let Some(ts) = entry.last_accessed() { + let expired = is_expired_by_tti(time_to_idle, ts, now); + let invalid = is_invalid_entry(valid_after, ts); + return (expired, invalid); + } + (false, false) +} + +#[inline] +fn is_entry_expired_wo_or_invalid( + time_to_live: &Option, + valid_after: &Option, + entry: &impl AccessTime, + now: Instant, +) -> (bool, bool) { + if let Some(ts) = entry.last_modified() { + let expired = is_expired_by_ttl(time_to_live, ts, now); + let invalid = is_invalid_entry(valid_after, ts); + return (expired, invalid); + } + (false, false) +} + +#[inline] +fn is_invalid_entry(valid_after: &Option, entry_ts: Instant) -> bool { + if let Some(va) = valid_after { + if entry_ts < *va { + return true; + } + } + false +} + +#[inline] +fn is_expired_by_tti( + time_to_idle: &Option, + entry_last_accessed: Instant, + now: Instant, +) -> bool { + if let Some(tti) = time_to_idle { + let checked_add = entry_last_accessed.checked_add(*tti); + if checked_add.is_none() { + panic!("tti overflow") + } + return checked_add.unwrap() <= now; + } + false +} + +#[inline] +fn is_expired_by_ttl( + time_to_live: &Option, + entry_last_modified: Instant, + now: Instant, +) -> bool { + if let Some(ttl) = time_to_live { + let checked_add = entry_last_modified.checked_add(*ttl); + if checked_add.is_none() { + panic!("ttl overflow"); } + return checked_add.unwrap() <= now; } false } From 7fa072b0fef2c4bb6b32fc028da47f383c4e9c17 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Tue, 31 May 2022 22:58:23 +0800 Subject: [PATCH 12/44] Support notification on eviction - Add unit tests to `sync::SegmentedCache`. --- src/sync/builder.rs | 1 - src/sync/cache.rs | 3 +- src/sync/segment.rs | 131 +++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 129 insertions(+), 6 deletions(-) diff --git a/src/sync/builder.rs b/src/sync/builder.rs index 2a573b21..46d379a7 100644 --- a/src/sync/builder.rs +++ b/src/sync/builder.rs @@ -11,7 +11,6 @@ use std::{ sync::Arc, time::Duration, }; -// use parking_lot::Mutex; /// Builds a [`Cache`][cache-struct] or [`SegmentedCache`][seg-cache-struct] /// with various configuration knobs. diff --git a/src/sync/cache.rs b/src/sync/cache.rs index bbed854d..eb974783 100644 --- a/src/sync/cache.rs +++ b/src/sync/cache.rs @@ -999,11 +999,10 @@ where // To see the debug prints, run test as `cargo test -- --nocapture` #[cfg(test)] mod tests { - use parking_lot::Mutex; - use super::{Cache, ConcurrentCacheExt}; use crate::{common::time::Clock, notification::RemovalCause}; + use parking_lot::Mutex; use std::{convert::Infallible, sync::Arc, time::Duration}; #[test] diff --git a/src/sync/segment.rs b/src/sync/segment.rs index ceedb175..14f28301 100644 --- a/src/sync/segment.rs +++ b/src/sync/segment.rs @@ -647,11 +647,25 @@ where #[cfg(test)] mod tests { use super::{ConcurrentCacheExt, SegmentedCache}; + use crate::notification::RemovalCause; + use parking_lot::Mutex; use std::{sync::Arc, time::Duration}; #[test] fn basic_single_thread() { - let mut cache = SegmentedCache::new(3, 1); + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener. 
+ let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + + // Create a cache with the eviction listener. + let mut cache = SegmentedCache::builder(1) + .max_capacity(3) + .eviction_listener(listener) + .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. @@ -681,11 +695,13 @@ mod tests { // "d" should not be admitted because its frequency is too low. cache.insert("d", "david"); // count: d -> 0 + expected.push((Arc::new("d"), "david", RemovalCause::Size)); cache.sync(); assert_eq!(cache.get(&"d"), None); // d -> 1 assert!(!cache.contains_key(&"d")); cache.insert("d", "david"); + expected.push((Arc::new("d"), "david", RemovalCause::Size)); cache.sync(); assert!(!cache.contains_key(&"d")); assert_eq!(cache.get(&"d"), None); // d -> 2 @@ -693,6 +709,7 @@ mod tests { // "d" should be admitted and "c" should be evicted // because d's frequency is higher than c's. cache.insert("d", "dennis"); + expected.push((Arc::new("c"), "cindy", RemovalCause::Size)); cache.sync(); assert_eq!(cache.get(&"a"), Some("alice")); assert_eq!(cache.get(&"b"), Some("bob")); @@ -704,8 +721,20 @@ mod tests { assert!(cache.contains_key(&"d")); cache.invalidate(&"b"); + expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); + cache.sync(); assert_eq!(cache.get(&"b"), None); assert!(!cache.contains_key(&"b")); + + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_secs(1)); + + // Verify the notifications. + let actual = &*actual.lock(); + assert_eq!(actual.len(), expected.len()); + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { + assert_eq!(actual, &expected, "expected[{}]", i); + } } #[test] @@ -738,9 +767,19 @@ mod tests { let david = ("david", 15); let dennis = ("dennis", 15); + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + + // Create a cache with the eviction listener. let mut cache = SegmentedCache::builder(1) .max_capacity(31) .weigher(weigher) + .eviction_listener(listener) .build(); cache.reconfigure_for_testing(); @@ -773,27 +812,33 @@ mod tests { // "d" must have higher count than 3, which is the aggregated count // of "a" and "c". cache.insert("d", david); // count: d -> 0 + expected.push((Arc::new("d"), david, RemovalCause::Size)); cache.sync(); assert_eq!(cache.get(&"d"), None); // d -> 1 assert!(!cache.contains_key(&"d")); cache.insert("d", david); + expected.push((Arc::new("d"), david, RemovalCause::Size)); cache.sync(); assert!(!cache.contains_key(&"d")); assert_eq!(cache.get(&"d"), None); // d -> 2 cache.insert("d", david); + expected.push((Arc::new("d"), david, RemovalCause::Size)); cache.sync(); assert_eq!(cache.get(&"d"), None); // d -> 3 assert!(!cache.contains_key(&"d")); cache.insert("d", david); + expected.push((Arc::new("d"), david, RemovalCause::Size)); cache.sync(); assert!(!cache.contains_key(&"d")); assert_eq!(cache.get(&"d"), None); // d -> 4 // Finally "d" should be admitted by evicting "c" and "a". cache.insert("d", dennis); + expected.push((Arc::new("c"), cindy, RemovalCause::Size)); + expected.push((Arc::new("a"), alice, RemovalCause::Size)); cache.sync(); assert_eq!(cache.get(&"a"), None); assert_eq!(cache.get(&"b"), Some(bob)); @@ -806,6 +851,8 @@ mod tests { // Update "b" with "bill" (w: 15 -> 20). 
This should evict "d" (w: 15). cache.insert("b", bill); + expected.push((Arc::new("b"), bob, RemovalCause::Replaced)); + expected.push((Arc::new("d"), dennis, RemovalCause::Size)); cache.sync(); assert_eq!(cache.get(&"b"), Some(bill)); assert_eq!(cache.get(&"d"), None); @@ -815,6 +862,7 @@ mod tests { // Re-add "a" (w: 10) and update "b" with "bob" (w: 20 -> 15). cache.insert("a", alice); cache.insert("b", bob); + expected.push((Arc::new("b"), bill, RemovalCause::Replaced)); cache.sync(); assert_eq!(cache.get(&"a"), Some(alice)); assert_eq!(cache.get(&"b"), Some(bob)); @@ -826,6 +874,16 @@ mod tests { // Verify the sizes. assert_eq!(cache.entry_count(), 2); assert_eq!(cache.weighted_size(), 25); + + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_secs(1)); + + // Verify the notifications. + let actual = &*actual.lock(); + assert_eq!(actual.len(), expected.len()); + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { + assert_eq!(actual, &expected, "expected[{}]", i); + } } #[test] @@ -865,7 +923,24 @@ mod tests { #[test] fn invalidate_all() { - let mut cache = SegmentedCache::new(100, 4); + use std::collections::HashMap; + + // The following `HashMap`s will hold actual and expected notifications. + // Note: We use `HashMap` here as the order of invalidations is non-deterministic. + let actual = Arc::new(Mutex::new(HashMap::new())); + let mut expected = HashMap::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| { + a1.lock().insert(k, (v, cause)); + }; + + // Create a cache with the eviction listener. + let mut cache = SegmentedCache::builder(4) + .max_capacity(100) + .eviction_listener(listener) + .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. @@ -883,6 +958,9 @@ mod tests { cache.sync(); cache.invalidate_all(); + expected.insert(Arc::new("a"), ("alice", RemovalCause::Explicit)); + expected.insert(Arc::new("b"), ("bob", RemovalCause::Explicit)); + expected.insert(Arc::new("c"), ("cindy", RemovalCause::Explicit)); cache.sync(); cache.insert("d", "david"); @@ -896,17 +974,45 @@ mod tests { assert!(!cache.contains_key(&"b")); assert!(!cache.contains_key(&"c")); assert!(cache.contains_key(&"d")); + + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_secs(1)); + + // Verify the notifications. + let actual = &*actual.lock(); + assert_eq!(actual.len(), expected.len()); + for actual_key in actual.keys() { + assert_eq!( + actual.get(actual_key), + expected.get(actual_key), + "expected[{}]", + actual_key + ); + } } #[test] fn invalidate_entries_if() -> Result<(), Box> { - use std::collections::HashSet; + use std::collections::{HashMap, HashSet}; const SEGMENTS: usize = 4; + // The following `HashMap`s will hold actual and expected notifications. + // Note: We use `HashMap` here as the order of invalidations is non-deterministic. + let actual = Arc::new(Mutex::new(HashMap::new())); + let mut expected = HashMap::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| { + a1.lock().insert(k, (v, cause)); + }; + + // Create a cache with the eviction listener. 
let mut cache = SegmentedCache::builder(SEGMENTS) .max_capacity(100) .support_invalidation_closures() + .eviction_listener(listener) .build(); cache.reconfigure_for_testing(); @@ -932,6 +1038,8 @@ mod tests { let names = ["alice", "alex"].iter().cloned().collect::>(); cache.invalidate_entries_if(move |_k, &v| names.contains(v))?; assert_eq!(cache.invalidation_predicate_count(), SEGMENTS); + expected.insert(Arc::new(0), ("alice", RemovalCause::Explicit)); + expected.insert(Arc::new(2), ("alex", RemovalCause::Explicit)); mock.increment(Duration::from_secs(5)); // 10 secs from the start. @@ -962,6 +1070,8 @@ mod tests { cache.invalidate_entries_if(|_k, &v| v == "alice")?; cache.invalidate_entries_if(|_k, &v| v == "bob")?; assert_eq!(cache.invalidation_predicate_count(), SEGMENTS * 2); + expected.insert(Arc::new(1), ("bob", RemovalCause::Explicit)); + expected.insert(Arc::new(3), ("alice", RemovalCause::Explicit)); // Run the invalidation task and wait for it to finish. (TODO: Need a better way than sleeping) cache.sync(); // To submit the invalidation task. @@ -978,6 +1088,21 @@ mod tests { assert_eq!(cache.entry_count(), 0); assert_eq!(cache.invalidation_predicate_count(), 0); + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_secs(1)); + + // Verify the notifications. + let actual = &*actual.lock(); + assert_eq!(actual.len(), expected.len()); + for actual_key in actual.keys() { + assert_eq!( + actual.get(actual_key), + expected.get(actual_key), + "expected[{}]", + actual_key + ); + } + Ok(()) } From fa991fa8917b807d55afebe3e12d472acdae0671 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Wed, 1 Jun 2022 07:05:04 +0800 Subject: [PATCH 13/44] Support notification on eviction - Add unit tests to `future::Cache`. --- src/future/cache.rs | 167 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 161 insertions(+), 6 deletions(-) diff --git a/src/future/cache.rs b/src/future/cache.rs index 88d4b6d1..5048a2e4 100644 --- a/src/future/cache.rs +++ b/src/future/cache.rs @@ -1122,14 +1122,27 @@ where #[cfg(test)] mod tests { use super::{Cache, ConcurrentCacheExt}; - use crate::common::time::Clock; + use crate::{common::time::Clock, notification::RemovalCause}; use async_io::Timer; + use parking_lot::Mutex; use std::{convert::Infallible, sync::Arc, time::Duration}; #[tokio::test] async fn basic_single_async_task() { - let mut cache = Cache::new(3); + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + + // Create a cache with the eviction listener. + let mut cache = Cache::builder() + .max_capacity(3) + .eviction_listener(listener) + .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. @@ -1159,11 +1172,13 @@ mod tests { // "d" should not be admitted because its frequency is too low. 
cache.insert("d", "david").await; // count: d -> 0 + expected.push((Arc::new("d"), "david", RemovalCause::Size)); cache.sync(); assert_eq!(cache.get(&"d"), None); // d -> 1 assert!(!cache.contains_key(&"d")); cache.insert("d", "david").await; + expected.push((Arc::new("d"), "david", RemovalCause::Size)); cache.sync(); assert!(!cache.contains_key(&"d")); assert_eq!(cache.get(&"d"), None); // d -> 2 @@ -1171,6 +1186,7 @@ mod tests { // "d" should be admitted and "c" should be evicted // because d's frequency is higher than c's. cache.insert("d", "dennis").await; + expected.push((Arc::new("c"), "cindy", RemovalCause::Size)); cache.sync(); assert_eq!(cache.get(&"a"), Some("alice")); assert_eq!(cache.get(&"b"), Some("bob")); @@ -1182,8 +1198,20 @@ mod tests { assert!(cache.contains_key(&"d")); cache.invalidate(&"b").await; + expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); + cache.sync(); assert_eq!(cache.get(&"b"), None); assert!(!cache.contains_key(&"b")); + + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_secs(1)); + + // Verify the notifications. + let actual = &*actual.lock(); + assert_eq!(actual.len(), expected.len()); + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { + assert_eq!(actual, &expected, "expected[{}]", i); + } } #[test] @@ -1244,7 +1272,20 @@ mod tests { let david = ("david", 15); let dennis = ("dennis", 15); - let mut cache = Cache::builder().max_capacity(31).weigher(weigher).build(); + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + + // Create a cache with the eviction listener. + let mut cache = Cache::builder() + .max_capacity(31) + .weigher(weigher) + .eviction_listener(listener) + .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. @@ -1276,27 +1317,33 @@ mod tests { // "d" must have higher count than 3, which is the aggregated count // of "a" and "c". cache.insert("d", david).await; // count: d -> 0 + expected.push((Arc::new("d"), david, RemovalCause::Size)); cache.sync(); assert_eq!(cache.get(&"d"), None); // d -> 1 assert!(!cache.contains_key(&"d")); cache.insert("d", david).await; + expected.push((Arc::new("d"), david, RemovalCause::Size)); cache.sync(); assert!(!cache.contains_key(&"d")); assert_eq!(cache.get(&"d"), None); // d -> 2 cache.insert("d", david).await; + expected.push((Arc::new("d"), david, RemovalCause::Size)); cache.sync(); assert_eq!(cache.get(&"d"), None); // d -> 3 assert!(!cache.contains_key(&"d")); cache.insert("d", david).await; + expected.push((Arc::new("d"), david, RemovalCause::Size)); cache.sync(); assert!(!cache.contains_key(&"d")); assert_eq!(cache.get(&"d"), None); // d -> 4 // Finally "d" should be admitted by evicting "c" and "a". cache.insert("d", dennis).await; + expected.push((Arc::new("c"), cindy, RemovalCause::Size)); + expected.push((Arc::new("a"), alice, RemovalCause::Size)); cache.sync(); assert_eq!(cache.get(&"a"), None); assert_eq!(cache.get(&"b"), Some(bob)); @@ -1309,6 +1356,8 @@ mod tests { // Update "b" with "bill" (w: 15 -> 20). This should evict "d" (w: 15). 
cache.insert("b", bill).await; + expected.push((Arc::new("b"), bob, RemovalCause::Replaced)); + expected.push((Arc::new("d"), dennis, RemovalCause::Size)); cache.sync(); assert_eq!(cache.get(&"b"), Some(bill)); assert_eq!(cache.get(&"d"), None); @@ -1318,6 +1367,7 @@ mod tests { // Re-add "a" (w: 10) and update "b" with "bob" (w: 20 -> 15). cache.insert("a", alice).await; cache.insert("b", bob).await; + expected.push((Arc::new("b"), bill, RemovalCause::Replaced)); cache.sync(); assert_eq!(cache.get(&"a"), Some(alice)); assert_eq!(cache.get(&"b"), Some(bob)); @@ -1329,6 +1379,16 @@ mod tests { // Verify the sizes. assert_eq!(cache.entry_count(), 2); assert_eq!(cache.weighted_size(), 25); + + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_secs(1)); + + // Verify the notifications. + let actual = &*actual.lock(); + assert_eq!(actual.len(), expected.len()); + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { + assert_eq!(actual, &expected, "expected[{}]", i); + } } #[tokio::test] @@ -1367,7 +1427,19 @@ mod tests { #[tokio::test] async fn invalidate_all() { - let mut cache = Cache::new(100); + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + + // Create a cache with the eviction listener. + let mut cache = Cache::builder() + .max_capacity(100) + .eviction_listener(listener) + .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. @@ -1385,6 +1457,9 @@ mod tests { cache.sync(); cache.invalidate_all(); + expected.push((Arc::new("a"), "alice", RemovalCause::Explicit)); + expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); + expected.push((Arc::new("c"), "cindy", RemovalCause::Explicit)); cache.sync(); cache.insert("d", "david").await; @@ -1398,15 +1473,35 @@ mod tests { assert!(!cache.contains_key(&"b")); assert!(!cache.contains_key(&"c")); assert!(cache.contains_key(&"d")); + + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_secs(1)); + + // Verify the notifications. + let actual = &*actual.lock(); + assert_eq!(actual.len(), expected.len()); + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { + assert_eq!(actual, &expected, "expected[{}]", i); + } } #[tokio::test] async fn invalidate_entries_if() -> Result<(), Box> { use std::collections::HashSet; + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + + // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(100) .support_invalidation_closures() + .eviction_listener(listener) .build(); cache.reconfigure_for_testing(); @@ -1434,6 +1529,8 @@ mod tests { let names = ["alice", "alex"].iter().cloned().collect::>(); cache.invalidate_entries_if(move |_k, &v| names.contains(v))?; assert_eq!(cache.invalidation_predicate_count(), 1); + expected.push((Arc::new(0), "alice", RemovalCause::Explicit)); + expected.push((Arc::new(2), "alex", RemovalCause::Explicit)); mock.increment(Duration::from_secs(5)); // 10 secs from the start. 
@@ -1464,6 +1561,9 @@ mod tests { cache.invalidate_entries_if(|_k, &v| v == "alice")?; cache.invalidate_entries_if(|_k, &v| v == "bob")?; assert_eq!(cache.invalidation_predicate_count(), 2); + // key 1 was inserted before key 3. + expected.push((Arc::new(1), "bob", RemovalCause::Explicit)); + expected.push((Arc::new(3), "alice", RemovalCause::Explicit)); // Run the invalidation task and wait for it to finish. (TODO: Need a better way than sleeping) cache.sync(); // To submit the invalidation task. @@ -1480,16 +1580,35 @@ mod tests { assert_eq!(cache.entry_count(), 0); assert_eq!(cache.invalidation_predicate_count(), 0); + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_secs(1)); + + // Verify the notifications. + let actual = &*actual.lock(); + assert_eq!(actual.len(), expected.len()); + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { + assert_eq!(actual, &expected, "expected[{}]", i); + } + Ok(()) } #[tokio::test] async fn time_to_live() { + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + + // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(100) .time_to_live(Duration::from_secs(10)) + .eviction_listener(listener) .build(); - cache.reconfigure_for_testing(); let (clock, mock) = Clock::mock(); @@ -1508,6 +1627,7 @@ mod tests { assert!(cache.contains_key(&"a")); mock.increment(Duration::from_secs(5)); // 10 secs. + expected.push((Arc::new("a"), "alice", RemovalCause::Expired)); assert_eq!(cache.get(&"a"), None); assert!(!cache.contains_key(&"a")); @@ -1529,6 +1649,7 @@ mod tests { assert_eq!(cache.entry_count(), 1); cache.insert("b", "bill").await; + expected.push((Arc::new("b"), "bob", RemovalCause::Replaced)); cache.sync(); mock.increment(Duration::from_secs(5)); // 20 secs @@ -1539,6 +1660,7 @@ mod tests { assert_eq!(cache.entry_count(), 1); mock.increment(Duration::from_secs(5)); // 25 secs + expected.push((Arc::new("b"), "bill", RemovalCause::Expired)); assert_eq!(cache.get(&"a"), None); assert_eq!(cache.get(&"b"), None); @@ -1549,15 +1671,34 @@ mod tests { cache.sync(); assert!(cache.is_table_empty()); + + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_secs(1)); + + // Verify the notifications. + let actual = &*actual.lock(); + assert_eq!(actual.len(), expected.len()); + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { + assert_eq!(actual, &expected, "expected[{}]", i); + } } #[tokio::test] async fn time_to_idle() { + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + + // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(100) .time_to_idle(Duration::from_secs(10)) + .eviction_listener(listener) .build(); - cache.reconfigure_for_testing(); let (clock, mock) = Clock::mock(); @@ -1593,6 +1734,8 @@ mod tests { assert_eq!(cache.entry_count(), 2); mock.increment(Duration::from_secs(3)); // 15 secs. 
+ expected.push((Arc::new("a"), "alice", RemovalCause::Expired)); + assert_eq!(cache.get(&"a"), None); assert_eq!(cache.get(&"b"), Some("bob")); assert!(!cache.contains_key(&"a")); @@ -1604,6 +1747,8 @@ mod tests { assert_eq!(cache.entry_count(), 1); mock.increment(Duration::from_secs(10)); // 25 secs + expected.push((Arc::new("b"), "bob", RemovalCause::Expired)); + assert_eq!(cache.get(&"a"), None); assert_eq!(cache.get(&"b"), None); assert!(!cache.contains_key(&"a")); @@ -1613,6 +1758,16 @@ mod tests { cache.sync(); assert!(cache.is_table_empty()); + + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_secs(1)); + + // Verify the notifications. + let actual = &*actual.lock(); + assert_eq!(actual.len(), expected.len()); + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { + assert_eq!(actual, &expected, "expected[{}]", i); + } } #[tokio::test] From ea67182a3fe4e4ec3780cc2980f056731c36b88c Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Wed, 1 Jun 2022 22:51:51 +0800 Subject: [PATCH 14/44] Support notification on eviction - Docs: Write code examples for eviction listener. - Update the dev dependencies: - Tokio v1.16 -> v1.18. - Add anyhow v1.0. --- Cargo.toml | 3 +- src/future/builder.rs | 164 +++++++++++++++++++++++++++++++++++++++++- src/lib.rs | 2 + src/sync/builder.rs | 163 ++++++++++++++++++++++++++++++++++++++++- 4 files changed, 329 insertions(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 961447ba..a6465066 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -75,11 +75,12 @@ futures-util = { version = "0.3", optional = true } [dev-dependencies] actix-rt = { version = "2", default-features = false } +anyhow = "1.0" async-std = { version = "1", default-features = false, features = ["attributes"] } getrandom = "0.2" reqwest = "0.11" skeptic = "0.13" -tokio = { version = "1.16", features = ["rt-multi-thread", "macros", "sync", "time" ] } +tokio = { version = "1.18", features = ["fs", "macros", "rt-multi-thread", "sync", "time" ] } [target.'cfg(trybuild)'.dev-dependencies] trybuild = "1.0" diff --git a/src/future/builder.rs b/src/future/builder.rs index 623c0b6c..4449e5f9 100644 --- a/src/future/builder.rs +++ b/src/future/builder.rs @@ -17,7 +17,7 @@ use std::{ /// /// [cache-struct]: ./struct.Cache.html /// -/// # Examples +/// # Example: Expirations /// /// ```rust /// // Cargo.toml @@ -53,6 +53,168 @@ use std::{ /// } /// ``` /// +/// # Example: Eviction Listener +/// +/// A `Cache` can be configured with an `eviction_listener`, a closure that is called +/// every time there is a cache eviction. The closure takes the key, value and +/// [`RemovalCause`](../notification/enum.RemovalCause.html) as parameters. It can be +/// used to keep other data structures in sync with the cache. +/// +/// The following example demonstrates how to use a cache with an `eviction_listener` +/// and `time_to_live` to manage the lifecycle of temporary files on a filesystem. +/// The cache stores the paths of the files, and when one of them has +/// expired, the eviction lister will be called with the path, so it can remove the +/// file from the filesystem. 
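Before the full file-manager example, a minimal sketch of the listener wiring may help. It assumes the single-argument `eviction_listener(listener)` form added in this patch (a later patch in this series extends it with a notification mode), and the key/value types are purely illustrative:

```rust
use moka::future::Cache;
use std::{sync::Arc, time::Duration};

#[tokio::main]
async fn main() {
    // The listener is an ordinary closure. It receives the key (wrapped in an
    // Arc), the evicted value, and the RemovalCause describing why the entry
    // left the cache (Expired, Explicit, Replaced, or Size).
    let listener = |key: Arc<String>, value: String, cause| {
        println!("evicted key={key}, value={value}, cause={cause:?}");
    };

    let cache: Cache<String, String> = Cache::builder()
        .max_capacity(100)
        .time_to_live(Duration::from_secs(2))
        .eviction_listener(listener)
        .build();

    cache.insert("user1".to_string(), "data".to_string()).await;

    // After the TTL elapses the entry is expired; the listener is then called
    // with RemovalCause::Expired during the cache's housekeeping.
    tokio::time::sleep(Duration::from_secs(3)).await;
    assert!(cache.get(&"user1".to_string()).is_none());
}
```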
+/// +/// ```rust +/// // Cargo.toml +/// // +/// // [dependencies] +/// // anyhow = "1.0" +/// // uuid = { version = "1.1", features = ["v4"] } +/// // tokio = { version = "1.18", features = ["fs", "macros", "rt-multi-thread", "sync", "time"] } +/// +/// use moka::future::Cache; +/// +/// use anyhow::{anyhow, Context}; +/// use std::{ +/// io, +/// path::{Path, PathBuf}, +/// sync::Arc, +/// time::Duration, +/// }; +/// use tokio::{fs, sync::RwLock}; +/// use uuid::Uuid; +/// +/// /// The DataFileManager writes, reads and removes data files. +/// struct DataFileManager { +/// base_dir: PathBuf, +/// file_count: usize, +/// } +/// +/// impl DataFileManager { +/// fn new(base_dir: PathBuf) -> Self { +/// Self { +/// base_dir, +/// file_count: 0, +/// } +/// } +/// +/// async fn write_data_file(&mut self, contents: String) -> io::Result { +/// loop { +/// // Generate a unique file path. +/// let mut path = self.base_dir.to_path_buf(); +/// path.push(Uuid::new_v4().as_hyphenated().to_string()); +/// +/// if path.exists() { +/// continue; // This path is already taken by others. Retry. +/// } +/// +/// // We have got a unique file path, so create the file at +/// // the path and write the contents to the file. +/// fs::write(&path, contents).await?; +/// self.file_count += 1; +/// println!( +/// "Created a data file at {:?} (file count: {})", +/// path, self.file_count +/// ); +/// +/// // Return the path. +/// return Ok(path); +/// } +/// } +/// +/// async fn read_data_file(&self, path: impl AsRef) -> io::Result { +/// // Reads the contents of the file at the path, and return the contents. +/// fs::read_to_string(path).await +/// } +/// +/// async fn remove_data_file(&mut self, path: impl AsRef) -> io::Result<()> { +/// // Remove the file at the path. +/// fs::remove_file(path.as_ref()).await?; +/// self.file_count -= 1; +/// println!( +/// "Removed a data file at {:?} (file count: {})", +/// path.as_ref(), +/// self.file_count +/// ); +/// +/// Ok(()) +/// } +/// } +/// +/// #[tokio::main] +/// async fn main() -> anyhow::Result<()> { +/// // Create an instance of the DataFileManager and wrap it with +/// // Arc> so it can be shared across threads. +/// let file_mgr = DataFileManager::new(std::env::temp_dir()); +/// let file_mgr = Arc::new(RwLock::new(file_mgr)); +/// +/// let file_mgr1 = Arc::clone(&file_mgr); +/// let rt = tokio::runtime::Handle::current(); +/// +/// // Create an eviction lister closure. +/// let listener = move |k, v: PathBuf, cause| { +/// // Try to remove the data file at the path `v`. +/// println!( +/// "\n== An entry has been evicted. k: {:?}, v: {:?}, cause: {:?}", +/// k, v, cause +/// ); +/// rt.block_on(async { +/// // Acquire the write lock of the DataFileManager. +/// let mut mgr = file_mgr1.write().await; +/// // Remove the data file. We must handle error cases here to +/// // prevent the listener from panicking. +/// if let Err(_e) = mgr.remove_data_file(v.as_path()).await { +/// eprintln!("Failed to remove a data file at {:?}", v); +/// } +/// }); +/// }; +/// +/// // Create the cache. Set time to live for two seconds and set the +/// // eviction listener. +/// let cache = Cache::builder() +/// .max_capacity(100) +/// .time_to_live(Duration::from_secs(2)) +/// .eviction_listener(listener) +/// .build(); +/// +/// // Insert an entry to the cache. +/// // This will create and write a data file for the key "user1", store the +/// // path of the file to the cache, and return it. 
+/// println!("== try_get_with()"); +/// let path = cache +/// .try_get_with("user1", async { +/// let mut mgr = file_mgr.write().await; +/// let path = mgr +/// .write_data_file("user data".into()) +/// .await +/// .with_context(|| format!("Failed to create a data file"))?; +/// Ok(path) as anyhow::Result<_> +/// }) +/// .await +/// .map_err(|e| anyhow!("{}", e))?; +/// +/// // Read the data file at the path and print the contents. +/// println!("\n== read_data_file()"); +/// { +/// let mgr = file_mgr.read().await; +/// let contents = mgr +/// .read_data_file(path.as_path()) +/// .await +/// .with_context(|| format!("Failed to read data from {:?}", path))?; +/// println!("contents: {}", contents); +/// } +/// +/// // Sleep for five seconds. While sleeping, the cache entry for key "user1" +/// // will be expired and evicted, so the eviction lister will be called to +/// // remove the file. +/// tokio::time::sleep(Duration::from_secs(5)).await; +/// +/// Ok(()) +/// } +/// ``` +/// #[must_use] pub struct CacheBuilder { max_capacity: Option, diff --git a/src/lib.rs b/src/lib.rs index 502debd5..a75165de 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -37,6 +37,8 @@ //! - Supports expiration policies: //! - Time to live //! - Time to idle +//! - Supports eviction listener, a callback function that will be called when an entry +//! is removed from the cache. //! //! # Examples //! diff --git a/src/sync/builder.rs b/src/sync/builder.rs index 46d379a7..5f3866e7 100644 --- a/src/sync/builder.rs +++ b/src/sync/builder.rs @@ -18,7 +18,7 @@ use std::{ /// [cache-struct]: ./struct.Cache.html /// [seg-cache-struct]: ./struct.SegmentedCache.html /// -/// # Examples +/// # Example: Expirations /// /// ```rust /// use moka::sync::Cache; @@ -44,6 +44,167 @@ use std::{ /// // after 30 minutes (TTL) from the insert(). /// ``` /// +/// # Example: Eviction Listener +/// +/// A `Cache` can be configured with an `eviction_listener`, a closure that is called +/// every time there is a cache eviction. The closure takes the key, value and +/// [`RemovalCause`](../notification/enum.RemovalCause.html) as parameters. It can be +/// used to keep other data structures in sync with the cache. +/// +/// The following example demonstrates how to use a cache with an `eviction_listener` +/// and `time_to_live` to manage the lifecycle of temporary files on a filesystem. +/// The cache stores the paths of the files, and when one of them has expired, the +/// eviction lister will be called with the path, so it can remove the file from the +/// filesystem. +/// +/// ```rust +/// // Cargo.toml +/// // +/// // [dependencies] +/// // anyhow = "1.0" +/// // uuid = { version = "1.1", features = ["v4"] } +/// +/// use moka::sync::Cache; +/// +/// use anyhow::{anyhow, Context}; +/// use std::{ +/// fs, io, +/// path::{Path, PathBuf}, +/// sync::{Arc, RwLock}, +/// time::Duration, +/// }; +/// use uuid::Uuid; +/// +/// /// The DataFileManager writes, reads and removes data files. +/// struct DataFileManager { +/// base_dir: PathBuf, +/// file_count: usize, +/// } +/// +/// impl DataFileManager { +/// fn new(base_dir: PathBuf) -> Self { +/// Self { +/// base_dir, +/// file_count: 0, +/// } +/// } +/// +/// fn write_data_file(&mut self, contents: String) -> io::Result { +/// loop { +/// // Generate a unique file path. +/// let mut path = self.base_dir.to_path_buf(); +/// path.push(Uuid::new_v4().as_hyphenated().to_string()); +/// +/// if path.exists() { +/// continue; // This path is already taken by others. Retry. 
+/// } +/// +/// // We have got a unique file path, so create the file at +/// // the path and write the contents to the file. +/// fs::write(&path, contents)?; +/// self.file_count += 1; +/// println!("Created a data file at {:?} (file count: {})", path, self.file_count); +/// +/// // Return the path. +/// return Ok(path); +/// } +/// } +/// +/// fn read_data_file(&self, path: impl AsRef) -> io::Result { +/// // Reads the contents of the file at the path, and return the contents. +/// fs::read_to_string(path) +/// } +/// +/// fn remove_data_file(&mut self, path: impl AsRef) -> io::Result<()> { +/// // Remove the file at the path. +/// fs::remove_file(path.as_ref())?; +/// self.file_count -= 1; +/// println!( +/// "Removed a data file at {:?} (file count: {})", +/// path.as_ref(), +/// self.file_count +/// ); +/// +/// Ok(()) +/// } +/// } +/// +/// fn main() -> anyhow::Result<()> { +/// // Create an instance of the DataFileManager and wrap it with +/// // Arc> so it can be shared across threads. +/// let file_mgr = DataFileManager::new(std::env::temp_dir()); +/// let file_mgr = Arc::new(RwLock::new(file_mgr)); +/// +/// let file_mgr1 = Arc::clone(&file_mgr); +/// +/// // Create an eviction lister closure. +/// let listener = move |k, v: PathBuf, cause| { +/// // Try to remove the data file at the path `v`. +/// println!( +/// "\n== An entry has been evicted. k: {:?}, v: {:?}, cause: {:?}", +/// k, v, cause +/// ); +/// +/// // Acquire the write lock of the DataFileManager. We must handle +/// // error cases here to prevent the listener from panicking. +/// match file_mgr1.write() { +/// Err(_e) => { +/// eprintln!("The lock has been poisoned"); +/// } +/// Ok(mut mgr) => { +/// // Remove the data file using the DataFileManager. +/// if let Err(_e) = mgr.remove_data_file(v.as_path()) { +/// eprintln!("Failed to remove a data file at {:?}", v); +/// } +/// } +/// } +/// }; +/// +/// // Create the cache. Set time to live for two seconds and set the +/// // eviction listener. +/// let cache = Cache::builder() +/// .max_capacity(100) +/// .time_to_live(Duration::from_secs(2)) +/// .eviction_listener(listener) +/// .build(); +/// +/// // Insert an entry to the cache. +/// // This will create and write a data file for the key "user1", store the +/// // path of the file to the cache, and return it. +/// println!("== try_get_with()"); +/// let path = cache +/// .try_get_with("user1", || -> anyhow::Result<_, anyhow::Error> { +/// let mut mgr = file_mgr +/// .write() +/// .map_err(|_e| anyhow::anyhow!("The lock has been poisoned"))?; +/// let path = mgr +/// .write_data_file("user data".into()) +/// .with_context(|| format!("Failed to create a data file"))?; +/// Ok(path) +/// }) +/// .map_err(|e| anyhow!("{}", e))?; +/// +/// // Read the data file at the path and print the contents. +/// println!("\n== read_data_file()"); +/// { +/// let mgr = file_mgr +/// .read() +/// .map_err(|_e| anyhow::anyhow!("The lock has been poisoned"))?; +/// let contents = mgr +/// .read_data_file(path.as_path()) +/// .with_context(|| format!("Failed to read data from {:?}", path))?; +/// println!("contents: {}", contents); +/// } +/// +/// // Sleep for five seconds. While sleeping, the cache entry for key "user1" +/// // will be expired and evicted, so the eviction lister will be called to +/// // remove the file. 
+/// std::thread::sleep(Duration::from_secs(5)); +/// +/// Ok(()) +/// } +/// ``` +/// #[must_use] pub struct CacheBuilder { max_capacity: Option, From 5b3aa781b87cafa4290b1dfe30040d40f8daf0f6 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Wed, 1 Jun 2022 23:00:35 +0800 Subject: [PATCH 15/44] Support notification on eviction Fix a doc test (compile error). --- src/sync/builder.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sync/builder.rs b/src/sync/builder.rs index 5f3866e7..918fd362 100644 --- a/src/sync/builder.rs +++ b/src/sync/builder.rs @@ -173,7 +173,7 @@ use std::{ /// // path of the file to the cache, and return it. /// println!("== try_get_with()"); /// let path = cache -/// .try_get_with("user1", || -> anyhow::Result<_, anyhow::Error> { +/// .try_get_with("user1", || -> anyhow::Result<_> { /// let mut mgr = file_mgr /// .write() /// .map_err(|_e| anyhow::anyhow!("The lock has been poisoned"))?; From d8567c9ee24ad070a0ade1f395c83a97eb86b419 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Sun, 12 Jun 2022 17:57:36 +0800 Subject: [PATCH 16/44] Support notification on eviction Fix a bug with timing issues that causes to report wrong `RemovalCause` `Replaced` or `Explicit` when `Expired` is appropriate. --- src/cht/segment.rs | 8 +- src/future/cache.rs | 180 +++++++++++++++++++++++++++++++++- src/sync/cache.rs | 106 +++++++++++++++++++- src/sync_base/base_cache.rs | 187 +++++++++++++++++++++++------------- 4 files changed, 404 insertions(+), 77 deletions(-) diff --git a/src/cht/segment.rs b/src/cht/segment.rs index 888e0a58..551aec2d 100644 --- a/src/cht/segment.rs +++ b/src/cht/segment.rs @@ -193,6 +193,10 @@ impl HashMap { } } + pub(crate) fn actual_num_segments(&self) -> usize { + self.segments.len() + } + /// Returns the number of elements in the map. /// /// # Safety @@ -560,10 +564,6 @@ impl HashMap { { bucket::hash(&self.build_hasher, key) } - - pub(crate) fn actual_num_segments(&self) -> usize { - self.segments.len() - } } impl Drop for HashMap { diff --git a/src/future/cache.rs b/src/future/cache.rs index 5048a2e4..7e6e6926 100644 --- a/src/future/cache.rs +++ b/src/future/cache.rs @@ -8,7 +8,7 @@ use crate::{ housekeeper::InnerSync, Weigher, WriteOp, }, - notification::{EvictionListener, RemovalCause}, + notification::EvictionListener, sync_base::base_cache::{BaseCache, HouseKeeperArc}, Policy, PredicateError, }; @@ -763,9 +763,7 @@ where let hash = self.base.hash(key); if let Some(kv) = self.base.remove_entry(key, hash) { if self.base.is_removal_notifier_enabled() { - let key = Arc::clone(&kv.key); - self.base - .notify_single_removal(key, &kv.entry, RemovalCause::Explicit); + self.base.notify_invalidate(&kv.key, &kv.entry) } let op = WriteOp::Remove(kv); let hk = self.base.housekeeper.as_ref(); @@ -2324,6 +2322,180 @@ mod tests { ); } + #[tokio::test] + async fn test_removal_notifications() { + // NOTE: The following tests also check the notifications: + // - basic_single_thread + // - size_aware_eviction + // - invalidate_entries_if + // - time_to_live + // - time_to_idle + + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + + // Create a cache with the eviction listener. 
+ let mut cache = Cache::builder() + .max_capacity(3) + .eviction_listener(listener) + .build(); + cache.reconfigure_for_testing(); + + // Make the cache exterior immutable. + let cache = cache; + + cache.insert('a', "alice").await; + cache.invalidate(&'a').await; + expected.push((Arc::new('a'), "alice", RemovalCause::Explicit)); + + cache.sync(); + assert_eq!(cache.entry_count(), 0); + + cache.insert('b', "bob").await; + cache.insert('c', "cathy").await; + cache.insert('d', "david").await; + cache.sync(); + assert_eq!(cache.entry_count(), 3); + + // This will be rejected due to the size constraint. + cache.insert('e', "emily").await; + expected.push((Arc::new('e'), "emily", RemovalCause::Size)); + cache.sync(); + assert_eq!(cache.entry_count(), 3); + + // Raise the popularity of 'e' so it will be accepted next time. + cache.get(&'e'); + cache.sync(); + + // Retry. + cache.insert('e', "eliza").await; + // and the LRU entry will be evicted. + expected.push((Arc::new('b'), "bob", RemovalCause::Size)); + cache.sync(); + assert_eq!(cache.entry_count(), 3); + + // Replace an existing entry. + cache.insert('d', "dennis").await; + expected.push((Arc::new('d'), "david", RemovalCause::Replaced)); + cache.sync(); + assert_eq!(cache.entry_count(), 3); + + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_secs(1)); + + // Verify the notifications. + let actual = &*actual.lock(); + assert_eq!(actual.len(), expected.len()); + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { + assert_eq!(actual, &expected, "expected[{}]", i); + } + } + + #[tokio::test] + async fn test_removal_notifications_with_updates() { + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + + // Create a cache with the eviction listener and also TTL and TTI. + let mut cache = Cache::builder() + .eviction_listener(listener) + .time_to_live(Duration::from_secs(7)) + .time_to_idle(Duration::from_secs(5)) + .build(); + cache.reconfigure_for_testing(); + + let (clock, mock) = Clock::mock(); + cache.set_expiration_clock(Some(clock)); + + // Make the cache exterior immutable. + let cache = cache; + + cache.insert("alice", "a0").await; + cache.sync(); + + // Now alice (a0) has been expired by the idle timeout (TTI). + mock.increment(Duration::from_secs(6)); + expected.push((Arc::new("alice"), "a0", RemovalCause::Expired)); + assert_eq!(cache.get(&"alice"), None); + + // We have not ran sync after the expiration of alice (a0), so it is + // still in the cache. + assert_eq!(cache.entry_count(), 1); + + // Re-insert alice with a different value. Since alice (a0) is still + // in the cache, this is actually a replace operation rather than an + // insert operation. We want to verify that the RemovalCause of a0 is + // Expired, not Replaced. + cache.insert("alice", "a1").await; + cache.sync(); + + mock.increment(Duration::from_secs(4)); + assert_eq!(cache.get(&"alice"), Some("a1")); + cache.sync(); + + // Now alice has been expired by time-to-live (TTL). + mock.increment(Duration::from_secs(4)); + expected.push((Arc::new("alice"), "a1", RemovalCause::Expired)); + assert_eq!(cache.get(&"alice"), None); + + // But, again, it is still in the cache. 
+ assert_eq!(cache.entry_count(), 1); + + // Re-insert alice with a different value and verify that the + // RemovalCause of a1 is Expired (not Replaced). + cache.insert("alice", "a2").await; + cache.sync(); + + assert_eq!(cache.entry_count(), 1); + + // Now alice (a2) has been expired by the idle timeout. + mock.increment(Duration::from_secs(6)); + expected.push((Arc::new("alice"), "a2", RemovalCause::Expired)); + assert_eq!(cache.get(&"alice"), None); + assert_eq!(cache.entry_count(), 1); + + // This invalidate will internally remove alice (a2). + cache.invalidate(&"alice").await; + cache.sync(); + assert_eq!(cache.entry_count(), 0); + + // Re-insert, and this time, make it expired by the TTL. + cache.insert("alice", "a3").await; + cache.sync(); + mock.increment(Duration::from_secs(4)); + assert_eq!(cache.get(&"alice"), Some("a3")); + cache.sync(); + mock.increment(Duration::from_secs(4)); + expected.push((Arc::new("alice"), "a3", RemovalCause::Expired)); + assert_eq!(cache.get(&"alice"), None); + assert_eq!(cache.entry_count(), 1); + + // This invalidate will internally remove alice (a2). + cache.invalidate(&"alice").await; + cache.sync(); + assert_eq!(cache.entry_count(), 0); + + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_secs(1)); + + // Verify the notifications. + let actual = &*actual.lock(); + assert_eq!(actual.len(), expected.len()); + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { + assert_eq!(actual, &expected, "expected[{}]", i); + } + } + #[tokio::test] async fn test_debug_format() { let cache = Cache::new(10); diff --git a/src/sync/cache.rs b/src/sync/cache.rs index eb974783..40e8a7c0 100644 --- a/src/sync/cache.rs +++ b/src/sync/cache.rs @@ -8,7 +8,7 @@ use crate::{ housekeeper::InnerSync, Weigher, WriteOp, }, - notification::{EvictionListener, RemovalCause}, + notification::EvictionListener, sync::{Iter, PredicateId}, sync_base::{ base_cache::{BaseCache, HouseKeeperArc}, @@ -769,9 +769,7 @@ where { if let Some(kv) = self.base.remove_entry(key, hash) { if self.base.is_removal_notifier_enabled() { - let key = Arc::clone(&kv.key); - self.base - .notify_single_removal(key, &kv.entry, RemovalCause::Explicit); + self.base.notify_invalidate(&kv.key, &kv.entry) } let op = WriteOp::Remove(kv); let hk = self.base.housekeeper.as_ref(); @@ -2168,6 +2166,106 @@ mod tests { } } + #[test] + fn test_removal_notifications_with_updates() { + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + + // Create a cache with the eviction listener and also TTL and TTI. + let mut cache = Cache::builder() + .eviction_listener(listener) + .time_to_live(Duration::from_secs(7)) + .time_to_idle(Duration::from_secs(5)) + .build(); + cache.reconfigure_for_testing(); + + let (clock, mock) = Clock::mock(); + cache.set_expiration_clock(Some(clock)); + + // Make the cache exterior immutable. + let cache = cache; + + cache.insert("alice", "a0"); + cache.sync(); + + // Now alice (a0) has been expired by the idle timeout (TTI). + mock.increment(Duration::from_secs(6)); + expected.push((Arc::new("alice"), "a0", RemovalCause::Expired)); + assert_eq!(cache.get(&"alice"), None); + + // We have not ran sync after the expiration of alice (a0), so it is + // still in the cache. 
+ assert_eq!(cache.entry_count(), 1); + + // Re-insert alice with a different value. Since alice (a0) is still + // in the cache, this is actually a replace operation rather than an + // insert operation. We want to verify that the RemovalCause of a0 is + // Expired, not Replaced. + cache.insert("alice", "a1"); + cache.sync(); + + mock.increment(Duration::from_secs(4)); + assert_eq!(cache.get(&"alice"), Some("a1")); + cache.sync(); + + // Now alice has been expired by time-to-live (TTL). + mock.increment(Duration::from_secs(4)); + expected.push((Arc::new("alice"), "a1", RemovalCause::Expired)); + assert_eq!(cache.get(&"alice"), None); + + // But, again, it is still in the cache. + assert_eq!(cache.entry_count(), 1); + + // Re-insert alice with a different value and verify that the + // RemovalCause of a1 is Expired (not Replaced). + cache.insert("alice", "a2"); + cache.sync(); + + assert_eq!(cache.entry_count(), 1); + + // Now alice (a2) has been expired by the idle timeout. + mock.increment(Duration::from_secs(6)); + expected.push((Arc::new("alice"), "a2", RemovalCause::Expired)); + assert_eq!(cache.get(&"alice"), None); + assert_eq!(cache.entry_count(), 1); + + // This invalidate will internally remove alice (a2). + cache.invalidate(&"alice"); + cache.sync(); + assert_eq!(cache.entry_count(), 0); + + // Re-insert, and this time, make it expired by the TTL. + cache.insert("alice", "a3"); + cache.sync(); + mock.increment(Duration::from_secs(4)); + assert_eq!(cache.get(&"alice"), Some("a3")); + cache.sync(); + mock.increment(Duration::from_secs(4)); + expected.push((Arc::new("alice"), "a3", RemovalCause::Expired)); + assert_eq!(cache.get(&"alice"), None); + assert_eq!(cache.entry_count(), 1); + + // This invalidate will internally remove alice (a2). + cache.invalidate(&"alice"); + cache.sync(); + assert_eq!(cache.entry_count(), 0); + + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_secs(1)); + + // Verify the notifications. + let actual = &*actual.lock(); + assert_eq!(actual.len(), expected.len()); + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { + assert_eq!(actual, &expected, "expected[{}]", i); + } + } + #[test] fn test_debug_format() { let cache = Cache::new(10); diff --git a/src/sync_base/base_cache.rs b/src/sync_base/base_cache.rs index f8c3d75c..3dc38780 100644 --- a/src/sync_base/base_cache.rs +++ b/src/sync_base/base_cache.rs @@ -99,16 +99,12 @@ impl BaseCache { self.inner.is_removal_notifier_enabled() } - pub(crate) fn notify_single_removal( - &self, - key: Arc, - entry: &TrioArc>, - cause: RemovalCause, - ) where + pub(crate) fn notify_invalidate(&self, key: &Arc, entry: &TrioArc>) + where K: Send + Sync + 'static, V: Clone + Send + Sync + 'static, { - self.inner.notify_single_removal(key, entry, cause) + self.inner.notify_invalidate(key, entry); } #[cfg(feature = "unstable-debug-counters")] @@ -362,11 +358,13 @@ where // prevent this new ValueEntry from being evicted by an expiration policy. // 3. This method will update the policy_weight with the new weight. 
let old_weight = old_entry.policy_weight(); + let old_timestamps = (old_entry.last_accessed(), old_entry.last_modified()); let entry = self.new_value_entry_from(value.clone(), weight, old_entry); let cnt = op_cnt2.fetch_add(1, Ordering::Relaxed); op2 = Some(( cnt, TrioArc::clone(old_entry), + old_timestamps, WriteOp::Upsert { key_hash: KeyHash::new(Arc::clone(&key), hash), value_entry: TrioArc::clone(&entry), @@ -380,20 +378,29 @@ where match (op1, op2) { (Some((_cnt, ins_op)), None) => ins_op, - (None, Some((_cnt, old_entry, upd_op))) => { + (None, Some((_cnt, old_entry, (old_last_accessed, old_last_modified), upd_op))) => { old_entry.unset_q_nodes(); if self.is_removal_notifier_enabled() { - self.notify_single_removal(key, &old_entry, RemovalCause::Replaced); + self.inner + .notify_upsert(key, &old_entry, old_last_accessed, old_last_modified); } upd_op } - (Some((cnt1, ins_op)), Some((cnt2, old_entry, upd_op))) => { + ( + Some((cnt1, ins_op)), + Some((cnt2, old_entry, (old_last_accessed, old_last_modified), upd_op)), + ) => { if cnt1 > cnt2 { ins_op } else { old_entry.unset_q_nodes(); if self.is_removal_notifier_enabled() { - self.notify_single_removal(key, &old_entry, RemovalCause::Replaced); + self.inner.notify_upsert( + key, + &old_entry, + old_last_accessed, + old_last_modified, + ); } upd_op } @@ -643,6 +650,60 @@ impl Inner { self.frequency_sketch.read().table_size(), ) } + + #[inline] + fn current_time_from_expiration_clock(&self) -> Instant { + if self.has_expiration_clock.load(Ordering::Relaxed) { + Instant::new( + self.expiration_clock + .read() + .as_ref() + .expect("Cannot get the expiration clock") + .now(), + ) + } else { + Instant::now() + } + } + + fn num_cht_segments(&self) -> usize { + self.cache.actual_num_segments() + } + + #[inline] + fn time_to_live(&self) -> Option { + self.time_to_live + } + + #[inline] + fn time_to_idle(&self) -> Option { + self.time_to_idle + } + + #[inline] + fn has_expiry(&self) -> bool { + self.time_to_live.is_some() || self.time_to_idle.is_some() + } + + #[inline] + fn is_write_order_queue_enabled(&self) -> bool { + self.time_to_live.is_some() || self.invalidator_enabled + } + + #[inline] + fn valid_after(&self) -> Option { + self.valid_after.instant() + } + + #[inline] + fn set_valid_after(&self, timestamp: Instant) { + self.valid_after.set_instant(timestamp); + } + + #[inline] + fn has_valid_after(&self) -> bool { + self.valid_after.is_set() + } } // functions/methods used by BaseCache @@ -757,45 +818,6 @@ where self.cache.keys(cht_segment, Arc::clone) } - fn num_cht_segments(&self) -> usize { - self.cache.actual_num_segments() - } - - #[inline] - fn time_to_live(&self) -> Option { - self.time_to_live - } - - #[inline] - fn time_to_idle(&self) -> Option { - self.time_to_idle - } - - #[inline] - fn has_expiry(&self) -> bool { - self.time_to_live.is_some() || self.time_to_idle.is_some() - } - - #[inline] - fn is_write_order_queue_enabled(&self) -> bool { - self.time_to_live.is_some() || self.invalidator_enabled - } - - #[inline] - fn valid_after(&self) -> Option { - self.valid_after.instant() - } - - #[inline] - fn set_valid_after(&self, timestamp: Instant) { - self.valid_after.set_instant(timestamp); - } - - #[inline] - fn has_valid_after(&self) -> bool { - self.valid_after.is_set() - } - #[inline] fn register_invalidation_predicate( &self, @@ -823,21 +845,6 @@ where fn weigh(&self, key: &K, value: &V) -> u32 { self.weigher.as_ref().map(|w| w(key, value)).unwrap_or(1) } - - #[inline] - fn current_time_from_expiration_clock(&self) -> Instant 
{ - if self.has_expiration_clock.load(Ordering::Relaxed) { - Instant::new( - self.expiration_clock - .read() - .as_ref() - .expect("Cannot get the expiration clock") - .now(), - ) - } else { - Instant::now() - } - } } impl GetOrRemoveEntry for Arc> @@ -1673,6 +1680,56 @@ where notifier.add_single_notification(key, entry.value.clone(), cause) } } + + #[inline] + fn notify_upsert( + &self, + key: Arc, + entry: &TrioArc>, + last_accessed: Option, + last_modified: Option, + ) { + let now = self.current_time_from_expiration_clock(); + + let mut cause = RemovalCause::Replaced; + + if let Some(last_accessed) = last_accessed { + if is_expired_by_tti(&self.time_to_idle, last_accessed, now) { + cause = RemovalCause::Expired; + } + } + + if let Some(last_modified) = last_modified { + if is_expired_by_ttl(&self.time_to_live, last_modified, now) { + cause = RemovalCause::Expired; + } else if is_invalid_entry(&self.valid_after(), last_modified) { + cause = RemovalCause::Explicit; + } + } + + self.notify_single_removal(key, entry, cause); + } + + #[inline] + fn notify_invalidate(&self, key: &Arc, entry: &TrioArc>) { + let now = self.current_time_from_expiration_clock(); + + let mut cause = RemovalCause::Explicit; + + if let Some(last_accessed) = entry.last_accessed() { + if is_expired_by_tti(&self.time_to_idle, last_accessed, now) { + cause = RemovalCause::Expired; + } + } + + if let Some(last_modified) = entry.last_modified() { + if is_expired_by_ttl(&self.time_to_live, last_modified, now) { + cause = RemovalCause::Expired; + } + } + + self.notify_single_removal(Arc::clone(key), entry, cause); + } } // From 8adca0ffac50f3844b213392c263fbc35b9fdd8c Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Sun, 12 Jun 2022 23:15:42 +0800 Subject: [PATCH 17/44] Support notification on eviction Implement basic part of blocking notifications. --- src/future/builder.rs | 14 +++- src/future/cache.rs | 38 +++++++--- src/notification.rs | 12 ++++ src/sync/builder.rs | 17 ++++- src/sync/cache.rs | 29 +++++--- src/sync/segment.rs | 17 +++-- src/sync_base/base_cache.rs | 112 ++++++++++++++++++++++-------- src/sync_base/removal_notifier.rs | 109 ++++++++++++++++++++++++++--- 8 files changed, 278 insertions(+), 70 deletions(-) diff --git a/src/future/builder.rs b/src/future/builder.rs index 4449e5f9..3c41f1c3 100644 --- a/src/future/builder.rs +++ b/src/future/builder.rs @@ -1,7 +1,7 @@ use super::Cache; use crate::{ common::{builder_utils, concurrent::Weigher}, - notification::{EvictionListener, RemovalCause}, + notification::{EvictionListener, EvictionNotificationMode, RemovalCause}, }; use std::{ @@ -74,7 +74,7 @@ use std::{ /// // uuid = { version = "1.1", features = ["v4"] } /// // tokio = { version = "1.18", features = ["fs", "macros", "rt-multi-thread", "sync", "time"] } /// -/// use moka::future::Cache; +/// use moka::{future::Cache, notification::EvictionNotificationMode}; /// /// use anyhow::{anyhow, Context}; /// use std::{ @@ -176,7 +176,7 @@ use std::{ /// let cache = Cache::builder() /// .max_capacity(100) /// .time_to_live(Duration::from_secs(2)) -/// .eviction_listener(listener) +/// .eviction_listener(listener, EvictionNotificationMode::NonBlocking) /// .build(); /// /// // Insert an entry to the cache. 
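The `notify_upsert` and `notify_invalidate` methods added to `base_cache.rs` above decide the reported `RemovalCause` from the old entry's timestamps rather than always assuming `Replaced` or `Explicit`. A standalone sketch of that decision rule for upserts follows; the names are illustrative only, not moka's internal API, and the `Explicit`-after-`invalidate_all` case handled by the real code is omitted:

```rust
use std::time::{Duration, Instant};

#[derive(Debug, PartialEq)]
enum Cause {
    Replaced,
    Expired,
}

// Mirrors the rule applied with the *old* entry's timestamps, captured before
// the new value is written: if the old entry had already expired by TTI or TTL
// at the time of the upsert, report `Expired`; otherwise report `Replaced`.
fn upsert_cause(
    now: Instant,
    time_to_idle: Option<Duration>,
    time_to_live: Option<Duration>,
    old_last_accessed: Option<Instant>,
    old_last_modified: Option<Instant>,
) -> Cause {
    let expired_by_tti = matches!(
        (time_to_idle, old_last_accessed),
        (Some(tti), Some(accessed)) if now.duration_since(accessed) >= tti
    );
    let expired_by_ttl = matches!(
        (time_to_live, old_last_modified),
        (Some(ttl), Some(modified)) if now.duration_since(modified) >= ttl
    );
    if expired_by_tti || expired_by_ttl {
        Cause::Expired
    } else {
        Cause::Replaced
    }
}

fn main() {
    let now = Instant::now();
    let six_secs_ago = now - Duration::from_secs(6);
    // TTI is 5s and the old entry was last accessed 6s ago, so even though the
    // caller is overwriting a value that is still physically in the map, the
    // notification should say `Expired`, not `Replaced`.
    assert_eq!(
        upsert_cause(now, Some(Duration::from_secs(5)), None, Some(six_secs_ago), None),
        Cause::Expired
    );
    // A fresh entry being overwritten is reported as `Replaced`.
    assert_eq!(
        upsert_cause(now, Some(Duration::from_secs(5)), None, Some(now), None),
        Cause::Replaced
    );
}
```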
@@ -221,6 +221,7 @@ pub struct CacheBuilder { initial_capacity: Option, weigher: Option>, eviction_listener: Option>, + eviction_notification_mode: Option, time_to_live: Option, time_to_idle: Option, invalidator_enabled: bool, @@ -238,6 +239,7 @@ where initial_capacity: None, weigher: None, eviction_listener: None, + eviction_notification_mode: None, time_to_live: None, time_to_idle: None, invalidator_enabled: false, @@ -276,6 +278,7 @@ where build_hasher, self.weigher, self.eviction_listener, + self.eviction_notification_mode, self.time_to_live, self.time_to_idle, self.invalidator_enabled, @@ -300,6 +303,7 @@ where hasher, self.weigher, self.eviction_listener, + self.eviction_notification_mode, self.time_to_live, self.time_to_idle, self.invalidator_enabled, @@ -335,12 +339,16 @@ impl CacheBuilder { } } + // TODO: Need to come up with a better interface than always specifying the mode. + pub fn eviction_listener( self, listener: impl Fn(Arc, V, RemovalCause) + Send + Sync + 'static, + mode: EvictionNotificationMode, ) -> Self { Self { eviction_listener: Some(Arc::new(listener)), + eviction_notification_mode: Some(mode), ..self } } diff --git a/src/future/cache.rs b/src/future/cache.rs index 7e6e6926..74b8e9a0 100644 --- a/src/future/cache.rs +++ b/src/future/cache.rs @@ -8,7 +8,7 @@ use crate::{ housekeeper::InnerSync, Weigher, WriteOp, }, - notification::EvictionListener, + notification::{EvictionListener, EvictionNotificationMode}, sync_base::base_cache::{BaseCache, HouseKeeperArc}, Policy, PredicateError, }; @@ -428,6 +428,7 @@ where None, None, None, + None, false, ) } @@ -455,6 +456,7 @@ where build_hasher: S, weigher: Option>, eviction_listener: Option>, + eviction_notification_mode: Option, time_to_live: Option, time_to_idle: Option, invalidator_enabled: bool, @@ -466,6 +468,7 @@ where build_hasher.clone(), weigher, eviction_listener, + eviction_notification_mode, time_to_live, time_to_idle, invalidator_enabled, @@ -761,6 +764,10 @@ where Q: Hash + Eq + ?Sized, { let hash = self.base.hash(key); + + // TODO: If blocking removal notification is enabled, lock the key until + // notification is processed by the listener. + if let Some(kv) = self.base.remove_entry(key, hash) { if self.base.is_removal_notifier_enabled() { self.base.notify_invalidate(&kv.key, &kv.entry) @@ -1120,7 +1127,10 @@ where #[cfg(test)] mod tests { use super::{Cache, ConcurrentCacheExt}; - use crate::{common::time::Clock, notification::RemovalCause}; + use crate::{ + common::time::Clock, + notification::{EvictionNotificationMode, RemovalCause}, + }; use async_io::Timer; use parking_lot::Mutex; @@ -1139,7 +1149,8 @@ mod tests { // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(3) - .eviction_listener(listener) + .eviction_listener(listener, EvictionNotificationMode::Blocking) + // .eviction_listener(listener, EvictionNotificationMode::NonBlocking) .build(); cache.reconfigure_for_testing(); @@ -1282,7 +1293,8 @@ mod tests { let mut cache = Cache::builder() .max_capacity(31) .weigher(weigher) - .eviction_listener(listener) + .eviction_listener(listener, EvictionNotificationMode::Blocking) + // .eviction_listener(listener, EvictionNotificationMode::NonBlocking) .build(); cache.reconfigure_for_testing(); @@ -1436,7 +1448,8 @@ mod tests { // Create a cache with the eviction listener. 
let mut cache = Cache::builder() .max_capacity(100) - .eviction_listener(listener) + .eviction_listener(listener, EvictionNotificationMode::Blocking) + // .eviction_listener(listener, EvictionNotificationMode::NonBlocking) .build(); cache.reconfigure_for_testing(); @@ -1499,7 +1512,8 @@ mod tests { let mut cache = Cache::builder() .max_capacity(100) .support_invalidation_closures() - .eviction_listener(listener) + .eviction_listener(listener, EvictionNotificationMode::Blocking) + // .eviction_listener(listener, EvictionNotificationMode::NonBlocking) .build(); cache.reconfigure_for_testing(); @@ -1605,7 +1619,8 @@ mod tests { let mut cache = Cache::builder() .max_capacity(100) .time_to_live(Duration::from_secs(10)) - .eviction_listener(listener) + .eviction_listener(listener, EvictionNotificationMode::Blocking) + // .eviction_listener(listener, EvictionNotificationMode::NonBlocking) .build(); cache.reconfigure_for_testing(); @@ -1695,7 +1710,8 @@ mod tests { let mut cache = Cache::builder() .max_capacity(100) .time_to_idle(Duration::from_secs(10)) - .eviction_listener(listener) + .eviction_listener(listener, EvictionNotificationMode::Blocking) + // .eviction_listener(listener, EvictionNotificationMode::NonBlocking) .build(); cache.reconfigure_for_testing(); @@ -2342,7 +2358,8 @@ mod tests { // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(3) - .eviction_listener(listener) + .eviction_listener(listener, EvictionNotificationMode::Blocking) + // .eviction_listener(listener, EvictionNotificationMode::NonBlocking) .build(); cache.reconfigure_for_testing(); @@ -2408,7 +2425,8 @@ mod tests { // Create a cache with the eviction listener and also TTL and TTI. let mut cache = Cache::builder() - .eviction_listener(listener) + .eviction_listener(listener, EvictionNotificationMode::Blocking) + // .eviction_listener(listener, EvictionNotificationMode::NonBlocking) .time_to_live(Duration::from_secs(7)) .time_to_idle(Duration::from_secs(5)) .build(); diff --git a/src/notification.rs b/src/notification.rs index af131c5b..d8501766 100644 --- a/src/notification.rs +++ b/src/notification.rs @@ -11,6 +11,18 @@ pub(crate) type EvictionListenerRef<'a, K, V> = // the notifications, but currently there is no way to know when all entries // have been invalidated and their notifications have been sent. +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum EvictionNotificationMode { + Blocking, + NonBlocking, +} + +impl Default for EvictionNotificationMode { + fn default() -> Self { + Self::Blocking + } +} + #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum RemovalCause { /// The entry's expiration timestamp has passed. 
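The new `EvictionNotificationMode` above selects how notifications are delivered: `Blocking` (the `Default`) runs the listener synchronously on the thread doing the cache write or housekeeping work, while `NonBlocking` hands notifications to the thread-pool notifier introduced later in this patch. A minimal sketch of choosing the mode through the provisional two-argument `eviction_listener(listener, mode)` builder method (the in-code TODO notes this interface may still change):

```rust
use moka::{notification::EvictionNotificationMode, sync::Cache};
use std::{sync::Arc, time::Duration};

fn main() {
    let listener = |key: Arc<String>, value: String, cause| {
        eprintln!("evicted {key:?} -> {value:?} ({cause:?})");
    };

    // `NonBlocking` queues notifications to a background thread pool so the
    // writer thread is not held up by a slow listener; `Blocking` (the default
    // mode) invokes the listener inline.
    let cache: Cache<String, String> = Cache::builder()
        .max_capacity(1_000)
        .time_to_live(Duration::from_secs(30))
        .eviction_listener(listener, EvictionNotificationMode::NonBlocking)
        .build();

    cache.insert("a".to_string(), "alice".to_string());
    // The listener should eventually see RemovalCause::Explicit for this entry.
    cache.invalidate(&"a".to_string());
}
```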
diff --git a/src/sync/builder.rs b/src/sync/builder.rs index 918fd362..ee7fa40b 100644 --- a/src/sync/builder.rs +++ b/src/sync/builder.rs @@ -1,7 +1,7 @@ use super::{Cache, SegmentedCache}; use crate::{ common::{builder_utils, concurrent::Weigher}, - notification::{EvictionListener, RemovalCause}, + notification::{EvictionListener, EvictionNotificationMode, RemovalCause}, }; use std::{ @@ -64,7 +64,7 @@ use std::{ /// // anyhow = "1.0" /// // uuid = { version = "1.1", features = ["v4"] } /// -/// use moka::sync::Cache; +/// use moka::{sync::Cache, notification::EvictionNotificationMode}; /// /// use anyhow::{anyhow, Context}; /// use std::{ @@ -165,7 +165,7 @@ use std::{ /// let cache = Cache::builder() /// .max_capacity(100) /// .time_to_live(Duration::from_secs(2)) -/// .eviction_listener(listener) +/// .eviction_listener(listener, EvictionNotificationMode::NonBlocking) /// .build(); /// /// // Insert an entry to the cache. @@ -212,6 +212,7 @@ pub struct CacheBuilder { num_segments: Option, weigher: Option>, eviction_listener: Option>, + eviction_notification_mode: Option, time_to_live: Option, time_to_idle: Option, invalidator_enabled: bool, @@ -230,6 +231,7 @@ where num_segments: None, weigher: None, eviction_listener: None, + eviction_notification_mode: None, time_to_live: None, time_to_idle: None, invalidator_enabled: false, @@ -269,6 +271,7 @@ where num_segments: Some(num_segments), weigher: None, eviction_listener: None, + eviction_notification_mode: None, time_to_live: self.time_to_live, time_to_idle: self.time_to_idle, invalidator_enabled: self.invalidator_enabled, @@ -295,6 +298,7 @@ where build_hasher, self.weigher, self.eviction_listener, + self.eviction_notification_mode, self.time_to_live, self.time_to_idle, self.invalidator_enabled, @@ -322,6 +326,7 @@ where hasher, self.weigher, self.eviction_listener, + self.eviction_notification_mode, self.time_to_live, self.time_to_idle, self.invalidator_enabled, @@ -354,6 +359,7 @@ where build_hasher, self.weigher, self.eviction_listener, + self.eviction_notification_mode, self.time_to_live, self.time_to_idle, self.invalidator_enabled, @@ -382,6 +388,7 @@ where hasher, self.weigher, self.eviction_listener, + self.eviction_notification_mode, self.time_to_live, self.time_to_idle, self.invalidator_enabled, @@ -417,12 +424,16 @@ impl CacheBuilder { } } + // TODO: Need to come up with a better interface than always specifying the mode. 
+ pub fn eviction_listener( self, listener: impl Fn(Arc, V, RemovalCause) + Send + Sync + 'static, + mode: EvictionNotificationMode, ) -> Self { Self { eviction_listener: Some(Arc::new(listener)), + eviction_notification_mode: Some(mode), ..self } } diff --git a/src/sync/cache.rs b/src/sync/cache.rs index 40e8a7c0..ab534b71 100644 --- a/src/sync/cache.rs +++ b/src/sync/cache.rs @@ -8,7 +8,7 @@ use crate::{ housekeeper::InnerSync, Weigher, WriteOp, }, - notification::EvictionListener, + notification::{EvictionListener, EvictionNotificationMode}, sync::{Iter, PredicateId}, sync_base::{ base_cache::{BaseCache, HouseKeeperArc}, @@ -372,6 +372,7 @@ where None, None, None, + None, false, ) } @@ -399,6 +400,7 @@ where build_hasher: S, weigher: Option>, eviction_listener: Option>, + eviction_notification_mode: Option, time_to_live: Option, time_to_idle: Option, invalidator_enabled: bool, @@ -410,6 +412,7 @@ where build_hasher.clone(), weigher, eviction_listener, + eviction_notification_mode, time_to_live, time_to_idle, invalidator_enabled, @@ -767,6 +770,9 @@ where Arc: Borrow, Q: Hash + Eq + ?Sized, { + // TODO: If blocking removal notification is enabled, lock the key until + // notification is processed by the listener. + if let Some(kv) = self.base.remove_entry(key, hash) { if self.base.is_removal_notifier_enabled() { self.base.notify_invalidate(&kv.key, &kv.entry) @@ -998,7 +1004,10 @@ where #[cfg(test)] mod tests { use super::{Cache, ConcurrentCacheExt}; - use crate::{common::time::Clock, notification::RemovalCause}; + use crate::{ + common::time::Clock, + notification::{EvictionNotificationMode, RemovalCause}, + }; use parking_lot::Mutex; use std::{convert::Infallible, sync::Arc, time::Duration}; @@ -1016,7 +1025,7 @@ mod tests { // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(3) - .eviction_listener(listener) + .eviction_listener(listener, EvictionNotificationMode::NonBlocking) .build(); cache.reconfigure_for_testing(); @@ -1112,7 +1121,7 @@ mod tests { let mut cache = Cache::builder() .max_capacity(31) .weigher(weigher) - .eviction_listener(listener) + .eviction_listener(listener, EvictionNotificationMode::NonBlocking) .build(); cache.reconfigure_for_testing(); @@ -1259,7 +1268,7 @@ mod tests { // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(100) - .eviction_listener(listener) + .eviction_listener(listener, EvictionNotificationMode::NonBlocking) .build(); cache.reconfigure_for_testing(); @@ -1322,7 +1331,7 @@ mod tests { let mut cache = Cache::builder() .max_capacity(100) .support_invalidation_closures() - .eviction_listener(listener) + .eviction_listener(listener, EvictionNotificationMode::NonBlocking) .build(); cache.reconfigure_for_testing(); @@ -1428,7 +1437,7 @@ mod tests { let mut cache = Cache::builder() .max_capacity(100) .time_to_live(Duration::from_secs(10)) - .eviction_listener(listener) + .eviction_listener(listener, EvictionNotificationMode::NonBlocking) .build(); cache.reconfigure_for_testing(); @@ -1518,7 +1527,7 @@ mod tests { let mut cache = Cache::builder() .max_capacity(100) .time_to_idle(Duration::from_secs(10)) - .eviction_listener(listener) + .eviction_listener(listener, EvictionNotificationMode::NonBlocking) .build(); cache.reconfigure_for_testing(); @@ -2112,7 +2121,7 @@ mod tests { // Create a cache with the eviction listener. 
let mut cache = Cache::builder() .max_capacity(3) - .eviction_listener(listener) + .eviction_listener(listener, EvictionNotificationMode::NonBlocking) .build(); cache.reconfigure_for_testing(); @@ -2178,7 +2187,7 @@ mod tests { // Create a cache with the eviction listener and also TTL and TTI. let mut cache = Cache::builder() - .eviction_listener(listener) + .eviction_listener(listener, EvictionNotificationMode::NonBlocking) .time_to_live(Duration::from_secs(7)) .time_to_idle(Duration::from_secs(5)) .build(); diff --git a/src/sync/segment.rs b/src/sync/segment.rs index 14f28301..ebb4cd41 100644 --- a/src/sync/segment.rs +++ b/src/sync/segment.rs @@ -1,7 +1,7 @@ use super::{cache::Cache, CacheBuilder, ConcurrentCacheExt}; use crate::{ common::concurrent::Weigher, - notification::EvictionListener, + notification::{EvictionListener, EvictionNotificationMode}, sync_base::iter::{Iter, ScanningGet}, Policy, PredicateError, }; @@ -104,6 +104,7 @@ where None, None, None, + None, false, ) } @@ -204,6 +205,7 @@ where build_hasher: S, weigher: Option>, eviction_listener: Option>, + eviction_notification_mode: Option, time_to_live: Option, time_to_idle: Option, invalidator_enabled: bool, @@ -216,6 +218,7 @@ where build_hasher, weigher, eviction_listener, + eviction_notification_mode, time_to_live, time_to_idle, invalidator_enabled, @@ -581,6 +584,7 @@ where build_hasher: S, weigher: Option>, eviction_listener: Option>, + eviction_notification_mode: Option, time_to_live: Option, time_to_idle: Option, invalidator_enabled: bool, @@ -602,6 +606,7 @@ where build_hasher.clone(), weigher.as_ref().map(Arc::clone), eviction_listener.as_ref().map(Arc::clone), + eviction_notification_mode.clone(), time_to_live, time_to_idle, invalidator_enabled, @@ -647,7 +652,7 @@ where #[cfg(test)] mod tests { use super::{ConcurrentCacheExt, SegmentedCache}; - use crate::notification::RemovalCause; + use crate::notification::{EvictionNotificationMode, RemovalCause}; use parking_lot::Mutex; use std::{sync::Arc, time::Duration}; @@ -664,7 +669,7 @@ mod tests { // Create a cache with the eviction listener. let mut cache = SegmentedCache::builder(1) .max_capacity(3) - .eviction_listener(listener) + .eviction_listener(listener, EvictionNotificationMode::NonBlocking) .build(); cache.reconfigure_for_testing(); @@ -779,7 +784,7 @@ mod tests { let mut cache = SegmentedCache::builder(1) .max_capacity(31) .weigher(weigher) - .eviction_listener(listener) + .eviction_listener(listener, EvictionNotificationMode::NonBlocking) .build(); cache.reconfigure_for_testing(); @@ -939,7 +944,7 @@ mod tests { // Create a cache with the eviction listener. 
let mut cache = SegmentedCache::builder(4) .max_capacity(100) - .eviction_listener(listener) + .eviction_listener(listener, EvictionNotificationMode::NonBlocking) .build(); cache.reconfigure_for_testing(); @@ -1012,7 +1017,7 @@ mod tests { let mut cache = SegmentedCache::builder(SEGMENTS) .max_capacity(100) .support_invalidation_closures() - .eviction_listener(listener) + .eviction_listener(listener, EvictionNotificationMode::NonBlocking) .build(); cache.reconfigure_for_testing(); diff --git a/src/sync_base/base_cache.rs b/src/sync_base/base_cache.rs index 3dc38780..9f9fc317 100644 --- a/src/sync_base/base_cache.rs +++ b/src/sync_base/base_cache.rs @@ -1,7 +1,7 @@ use super::{ invalidator::{GetOrRemoveEntry, InvalidationResult, Invalidator, KeyDateLite, PredicateFun}, iter::ScanningGet, - removal_notifier::{RemovalNotifier, RemovedEntry}, + removal_notifier::RemovedEntry, PredicateId, }; @@ -25,7 +25,8 @@ use crate::{ time::{CheckedTimeOps, Clock, Instant}, CacheRegion, }, - notification::{EvictionListener, RemovalCause}, + notification::{EvictionListener, EvictionNotificationMode, RemovalCause}, + sync_base::removal_notifier::RemovalNotifier, Policy, PredicateError, }; @@ -127,6 +128,7 @@ where build_hasher: S, weigher: Option>, eviction_listener: Option>, + eviction_notification_mode: Option, time_to_live: Option, time_to_idle: Option, invalidator_enabled: bool, @@ -139,6 +141,7 @@ where build_hasher, weigher, eviction_listener, + eviction_notification_mode, r_rcv, w_rcv, time_to_live, @@ -324,6 +327,10 @@ where let mut op1 = None; let mut op2 = None; + // TODO: If blocking removal notification is enabled, lock the key until + // notification is processed by the listener. (How can we handle async + // lock here for `future::Cache`?) + // Since the cache (cht::SegmentedHashMap) employs optimistic locking // strategy, insert_with_or_modify() may get an insert/modify operation // conflicted with other concurrent hash table operations. 
In that case, it @@ -481,28 +488,41 @@ where } } -struct EvictionState { +struct EvictionState<'a, K, V> { counters: EvictionCounters, + notifier: Option<&'a RemovalNotifier>, removed_entries: Option>>, } -impl EvictionState { - fn new(entry_count: u64, weighted_size: u64, is_notifier_enabled: bool) -> Self { - let removed_entries = if is_notifier_enabled { - Some(Vec::new()) - } else { - None - }; +impl<'a, K, V> EvictionState<'a, K, V> { + fn new( + entry_count: u64, + weighted_size: u64, + notifier: Option<&'a RemovalNotifier>, + ) -> Self { + let removed_entries = notifier.and_then(|n| { + if n.is_batching_supported() { + Some(Vec::new()) + } else { + None + } + }); + Self { counters: EvictionCounters::new(entry_count, weighted_size), + notifier, removed_entries, } } fn is_notifier_enabled(&self) -> bool { - self.removed_entries.is_some() + self.notifier.is_some() } + // fn is_batch_notification_supported(&self) -> bool { + // self.removed_entries.is_some() + // } + fn add_removed_entry( &mut self, key: Arc, @@ -512,18 +532,23 @@ impl EvictionState { K: Send + Sync + 'static, V: Clone + Send + Sync + 'static, { + debug_assert!(self.is_notifier_enabled()); + if let Some(removed) = &mut self.removed_entries { removed.push(RemovedEntry::new(key, entry.value.clone(), cause)); + } else if let Some(notifier) = self.notifier { + notifier.notify(key, entry.value.clone(), cause); } } - fn notify_multiple_removals(&mut self, notifier: &RemovalNotifier) + fn notify_multiple_removals(&mut self) where K: Send + Sync + 'static, V: Send + Sync + 'static, { - if let Some(removed) = self.removed_entries.take() { - notifier.add_multiple_notifications(removed) + if let (Some(notifier), Some(removed)) = (self.notifier, self.removed_entries.take()) { + notifier.batch_notify(removed); + notifier.sync(); } } } @@ -722,6 +747,7 @@ where build_hasher: S, weigher: Option>, eviction_listener: Option>, + eviction_notification_mode: Option, read_op_ch: Receiver>, write_op_ch: Receiver>, time_to_live: Option, @@ -737,7 +763,9 @@ where initial_capacity, build_hasher.clone(), ); - let removal_notifier = eviction_listener.map(RemovalNotifier::new); + let removal_notifier = eviction_listener.map(|listener| { + RemovalNotifier::new(listener, eviction_notification_mode.unwrap_or_default()) + }); Self { max_capacity: max_capacity.map(|n| n as u64), @@ -865,6 +893,10 @@ where where F: FnMut(&Arc, &TrioArc>) -> bool, { + // TODO: If blocking removal notification is enabled, lock the key until + // notification is processed b y the listener. (How can we handle async + // lock here for `future::Cache`?) 
+ self.cache.remove_if(key, hash, condition) } } @@ -901,7 +933,7 @@ where let current_ec = self.entry_count.load(); let current_ws = self.weighted_size.load(); let mut eviction_state = - EvictionState::new(current_ec, current_ws, self.is_removal_notifier_enabled()); + EvictionState::new(current_ec, current_ws, self.removal_notifier.as_ref()); while should_sync && calls <= max_repeats { let r_len = self.read_op_ch.len(); @@ -955,10 +987,7 @@ where ); } - if let Some(notifier) = &self.removal_notifier { - eviction_state.notify_multiple_removals(notifier); - notifier.submit_task(); - } + eviction_state.notify_multiple_removals(); debug_assert_eq!(self.entry_count.load(), current_ec); debug_assert_eq!(self.weighted_size.load(), current_ws); @@ -1057,7 +1086,7 @@ where &self, deqs: &mut Deques, count: usize, - eviction_state: &mut EvictionState, + eviction_state: &mut EvictionState<'_, K, V>, ) where V: Clone, { @@ -1101,7 +1130,7 @@ where timestamp: Instant, deqs: &mut Deques, freq: &FrequencySketch, - eviction_state: &mut EvictionState, + eviction_state: &mut EvictionState<'_, K, V>, ) where V: Clone, { @@ -1152,6 +1181,10 @@ where victim_nodes, skipped_nodes: mut skipped, } => { + // TODO: If blocking removal notification is enabled, lock the key until + // notification is processed by the listener. (How can we handle async + // lock here for `future::Cache`?) + // Try to remove the victims from the cache (hash map). for victim in victim_nodes { let element = unsafe { &victim.as_ref().element }; @@ -1181,6 +1214,11 @@ where } AdmissionResult::Rejected { skipped_nodes: s } => { skipped_nodes = s; + + // TODO: If blocking removal notification is enabled, lock the key until + // notification is processed by the listener. (How can we handle async + // lock here for `future::Cache`?) + // Remove the candidate from the cache (hash map). let key = Arc::clone(&kh.key); self.cache.remove(&key, kh.hash); @@ -1335,7 +1373,7 @@ where &self, deqs: &mut Deques, batch_size: usize, - eviction_state: &mut EvictionState, + eviction_state: &mut EvictionState<'_, K, V>, ) where V: Clone, { @@ -1370,7 +1408,7 @@ where write_order_deq: &mut Deque>, batch_size: usize, now: Instant, - eviction_state: &mut EvictionState, + eviction_state: &mut EvictionState<'_, K, V>, ) where V: Clone, { @@ -1403,6 +1441,10 @@ where .map(|(k, h, c)| (k, *h, *c)) .unwrap(); + // TODO: If blocking removal notification is enabled, lock the key until + // notification is processed b y the listener. (How can we handle async + // lock here for `future::Cache`?) + // Remove the key from the map only when the entry is really // expired. This check is needed because it is possible that the entry in // the map has been updated or deleted but its deque node we checked @@ -1467,7 +1509,7 @@ where deqs: &mut Deques, batch_size: usize, now: Instant, - eviction_state: &mut EvictionState, + eviction_state: &mut EvictionState<'_, K, V>, ) where V: Clone, { @@ -1492,6 +1534,10 @@ where let (key, cause) = key_cause.as_ref().unwrap(); let hash = self.hash(key); + // TODO: If blocking removal notification is enabled, lock the key until + // notification is processed by the listener. (How can we handle async + // lock here for `future::Cache`?) 
+ let maybe_entry = self .cache .remove_if(key, hash, |_, v| is_expired_entry_wo(ttl, va, v, now)); @@ -1528,7 +1574,7 @@ where invalidator: &Invalidator, deqs: &mut Deques, batch_size: usize, - eviction_state: &mut EvictionState, + eviction_state: &mut EvictionState<'_, K, V>, ) where V: Clone, { @@ -1540,7 +1586,7 @@ where &self, invalidator: &Invalidator, deqs: &mut Deques, - eviction_state: &mut EvictionState, + eviction_state: &mut EvictionState<'_, K, V>, ) where V: Clone, { @@ -1550,6 +1596,9 @@ where }) = invalidator.task_result() { for KvEntry { key, entry } in invalidated { + // TODO: If blocking removal notification is enabled, process + // notification when they were actually removed. + if eviction_state.is_notifier_enabled() { eviction_state.add_removed_entry(key, &entry, RemovalCause::Explicit); } @@ -1604,7 +1653,7 @@ where deqs: &mut Deques, batch_size: usize, weights_to_evict: u64, - eviction_state: &mut EvictionState, + eviction_state: &mut EvictionState<'_, K, V>, ) where V: Clone, { @@ -1645,6 +1694,10 @@ where } }); + // TODO: If blocking removal notification is enabled, lock the key until + // notification is processed by the listener. (How can we handle async + // lock here for `future::Cache`?) + if let Some(entry) = maybe_entry { if eviction_state.is_notifier_enabled() { eviction_state.add_removed_entry(key, &entry, RemovalCause::Size); @@ -1677,7 +1730,7 @@ where cause: RemovalCause, ) { if let Some(notifier) = &self.removal_notifier { - notifier.add_single_notification(key, entry.value.clone(), cause) + notifier.notify(key, entry.value.clone(), cause) } } @@ -1887,6 +1940,7 @@ mod tests { None, None, None, + None, false, ); cache.inner.enable_frequency_sketch_for_testing(); diff --git a/src/sync_base/removal_notifier.rs b/src/sync_base/removal_notifier.rs index 103cf84e..9aa4d1a4 100644 --- a/src/sync_base/removal_notifier.rs +++ b/src/sync_base/removal_notifier.rs @@ -8,7 +8,7 @@ use std::{ use crate::{ common::concurrent::thread_pool::{PoolName, ThreadPool, ThreadPoolRegistry}, - notification::{EvictionListener, EvictionListenerRef, RemovalCause}, + notification::{EvictionListener, EvictionListenerRef, EvictionNotificationMode, RemovalCause}, }; use crossbeam_channel::{Receiver, Sender}; @@ -18,13 +18,104 @@ const CHANNEL_CAPACITY: usize = 1_024; const SUBMIT_TASK_THRESHOLD: usize = 100; const MAX_NOTIFICATIONS_PER_TASK: u16 = 5_000; -pub(crate) struct RemovalNotifier { +pub(crate) enum RemovalNotifier { + Blocking(BlockingRemovalNotifier), + // NonBlocking(NonBlockingRemovalNotifier), + ThreadPool(ThreadPoolRemovalNotifier), +} + +impl RemovalNotifier { + pub(crate) fn new(listener: EvictionListener, mode: EvictionNotificationMode) -> Self { + match mode { + EvictionNotificationMode::Blocking => { + Self::Blocking(BlockingRemovalNotifier::new(listener)) + } + EvictionNotificationMode::NonBlocking => { + Self::ThreadPool(ThreadPoolRemovalNotifier::new(listener)) + } + } + } + + pub(crate) fn is_batching_supported(&self) -> bool { + matches!( + self, + // RemovalNotifier::NonBlocking(_) | RemovalNotifier::ThreadPool(_) + RemovalNotifier::ThreadPool(_) + ) + } + + pub(crate) fn notify(&self, key: Arc, value: V, cause: RemovalCause) + where + K: Send + Sync + 'static, + V: Send + Sync + 'static, + { + match self { + RemovalNotifier::Blocking(notifier) => notifier.notify(key, value, cause), + // RemovalNotifier::NonBlocking(_) => todo!(), + RemovalNotifier::ThreadPool(notifier) => { + notifier.add_single_notification(key, value, cause) + } + } + } + + pub(crate) fn 
batch_notify(&self, entries: Vec>) + where + K: Send + Sync + 'static, + V: Send + Sync + 'static, + { + match self { + RemovalNotifier::Blocking(_) => unreachable!(), + // RemovalNotifier::NonBlocking(_) => todo!(), + RemovalNotifier::ThreadPool(notifier) => notifier.add_multiple_notifications(entries), + } + } + + pub(crate) fn sync(&self) + where + K: Send + Sync + 'static, + V: Send + Sync + 'static, + { + match self { + RemovalNotifier::Blocking(_) => unreachable!(), + // RemovalNotifier::NonBlocking(_) => todo!(), + RemovalNotifier::ThreadPool(notifier) => notifier.submit_task(), + } + } +} + +pub(crate) struct BlockingRemovalNotifier { + listener: EvictionListener, +} + +impl BlockingRemovalNotifier { + fn new(listener: EvictionListener) -> Self { + Self { listener } + } + + fn notify(&self, key: Arc, value: V, cause: RemovalCause) { + // use std::panic::{catch_unwind, AssertUnwindSafe}; + + (self.listener)(key, value, cause); + + // let listener_clo = || listener(key, value, cause); + // match catch_unwind(AssertUnwindSafe(listener_clo)) { + // Ok(_) => todo!(), + // Err(_) => todo!(), + // } + } +} + +// pub(crate) struct NonBlockingRemovalNotifier { +// _phantom: std::marker::PhantomData<(K, V)>, +// } + +pub(crate) struct ThreadPoolRemovalNotifier { snd: Sender>, state: Arc>, thread_pool: Arc, } -impl Drop for RemovalNotifier { +impl Drop for ThreadPoolRemovalNotifier { fn drop(&mut self) { let state = &self.state; // Disallow to create and run a notification task by now. @@ -39,8 +130,8 @@ impl Drop for RemovalNotifier { } } -impl RemovalNotifier { - pub(crate) fn new(listener: EvictionListener) -> Self { +impl ThreadPoolRemovalNotifier { + fn new(listener: EvictionListener) -> Self { let (snd, rcv) = crossbeam_channel::bounded(CHANNEL_CAPACITY); let thread_pool = ThreadPoolRegistry::acquire_pool(PoolName::RemovalNotifier); let state = NotifierState { @@ -58,24 +149,24 @@ impl RemovalNotifier { } } -impl RemovalNotifier +impl ThreadPoolRemovalNotifier where K: Send + Sync + 'static, V: Send + Sync + 'static, { - pub(crate) fn add_single_notification(&self, key: Arc, value: V, cause: RemovalCause) { + fn add_single_notification(&self, key: Arc, value: V, cause: RemovalCause) { let entry = RemovedEntries::new_single(key, value, cause); self.snd.send(entry).unwrap(); self.submit_task_if_necessary(); } - pub(crate) fn add_multiple_notifications(&self, entries: Vec>) { + fn add_multiple_notifications(&self, entries: Vec>) { let entries = RemovedEntries::new_multi(entries); self.snd.send(entries).unwrap(); // TODO: Error handling? self.submit_task_if_necessary(); } - pub(crate) fn submit_task(&self) { + fn submit_task(&self) { // TODO: Use compare and exchange to ensure it was false. if self.state.is_running() { From b39d6695011cdcf38c7c5c4dd520ff2676f47f53 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Tue, 14 Jun 2022 07:53:48 +0800 Subject: [PATCH 18/44] Support notification on eviction Implement the rest of the part of blocking notifications but only to `sync::{Cache, SegmentedCache}`. (Will remove blocking mode from `future::Cache`) - Add `KeyLockMap` for locking a key during updating/removing its entry and processing a blocking notification. - Update the unit tests in `sync::Cache` to test blocking notifications. 
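
The key piece of this patch is `KeyLockMap`: when blocking notification delivery is enabled, an entry is removed and its eviction listener is invoked while a mutex scoped to that single key is held, so a concurrent writer of the same key cannot interleave with the notification. The self-contained sketch below illustrates that pattern only; the type and helper names (`KeyLockedCache`, `key_lock`, `invalidate`) are simplified stand-ins, not the code in this diff, which uses `cht::SegmentedHashMap` plus `triomphe::Arc` and removes a lock entry again once its last `KeyLock` is dropped.

    use std::collections::HashMap;
    use std::sync::{Arc, Mutex};

    struct KeyLockedCache {
        // The cached entries.
        entries: Mutex<HashMap<String, String>>,
        // One mutex per key. Entries are created lazily and, unlike the
        // real `KeyLockMap`, never cleaned up in this simplified sketch.
        key_locks: Mutex<HashMap<String, Arc<Mutex<()>>>>,
    }

    impl KeyLockedCache {
        fn new() -> Self {
            Self {
                entries: Mutex::new(HashMap::new()),
                key_locks: Mutex::new(HashMap::new()),
            }
        }

        // Return the lock dedicated to `key`, creating it on first use.
        fn key_lock(&self, key: &str) -> Arc<Mutex<()>> {
            let mut locks = self.key_locks.lock().unwrap();
            locks
                .entry(key.to_string())
                .or_insert_with(|| Arc::new(Mutex::new(())))
                .clone()
        }

        // Insert while holding the per-key lock.
        fn insert(&self, key: &str, value: &str) {
            let kl = self.key_lock(key);
            let _guard = kl.lock().unwrap();
            self.entries
                .lock()
                .unwrap()
                .insert(key.to_string(), value.to_string());
        }

        // Remove `key` and deliver the notification while still holding the
        // per-key lock, mirroring the blocking delivery path in this patch.
        fn invalidate<F>(&self, key: &str, listener: F)
        where
            F: Fn(&str, &str),
        {
            let kl = self.key_lock(key);
            let _guard = kl.lock().unwrap();
            if let Some(value) = self.entries.lock().unwrap().remove(key) {
                listener(key, &value);
            }
            // `_guard` drops here; other threads may now touch `key` again.
        }
    }

    fn main() {
        let cache = KeyLockedCache::new();
        cache.insert("a", "alice");
        cache.invalidate("a", |k, v| println!("evicted {} => {}", k, v));
    }

Note that, as the diff below points out, the real code drops the key lock before scheduling the follow-up write op: the write channel can spin while full and is drained by the housekeeping thread, which may try to take the same key lock.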
--- src/future/cache.rs | 24 ++---- src/sync/cache.rs | 44 +++++++--- src/sync_base.rs | 1 + src/sync_base/base_cache.rs | 136 ++++++++++++++++++++++-------- src/sync_base/invalidator.rs | 14 ++- src/sync_base/key_lock.rs | 85 +++++++++++++++++++ src/sync_base/removal_notifier.rs | 4 + 7 files changed, 244 insertions(+), 64 deletions(-) create mode 100644 src/sync_base/key_lock.rs diff --git a/src/future/cache.rs b/src/future/cache.rs index 74b8e9a0..9e0d82d8 100644 --- a/src/future/cache.rs +++ b/src/future/cache.rs @@ -1149,8 +1149,7 @@ mod tests { // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(3) - .eviction_listener(listener, EvictionNotificationMode::Blocking) - // .eviction_listener(listener, EvictionNotificationMode::NonBlocking) + .eviction_listener(listener, EvictionNotificationMode::NonBlocking) .build(); cache.reconfigure_for_testing(); @@ -1293,8 +1292,7 @@ mod tests { let mut cache = Cache::builder() .max_capacity(31) .weigher(weigher) - .eviction_listener(listener, EvictionNotificationMode::Blocking) - // .eviction_listener(listener, EvictionNotificationMode::NonBlocking) + .eviction_listener(listener, EvictionNotificationMode::NonBlocking) .build(); cache.reconfigure_for_testing(); @@ -1448,8 +1446,7 @@ mod tests { // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(100) - .eviction_listener(listener, EvictionNotificationMode::Blocking) - // .eviction_listener(listener, EvictionNotificationMode::NonBlocking) + .eviction_listener(listener, EvictionNotificationMode::NonBlocking) .build(); cache.reconfigure_for_testing(); @@ -1512,8 +1509,7 @@ mod tests { let mut cache = Cache::builder() .max_capacity(100) .support_invalidation_closures() - .eviction_listener(listener, EvictionNotificationMode::Blocking) - // .eviction_listener(listener, EvictionNotificationMode::NonBlocking) + .eviction_listener(listener, EvictionNotificationMode::NonBlocking) .build(); cache.reconfigure_for_testing(); @@ -1619,8 +1615,7 @@ mod tests { let mut cache = Cache::builder() .max_capacity(100) .time_to_live(Duration::from_secs(10)) - .eviction_listener(listener, EvictionNotificationMode::Blocking) - // .eviction_listener(listener, EvictionNotificationMode::NonBlocking) + .eviction_listener(listener, EvictionNotificationMode::NonBlocking) .build(); cache.reconfigure_for_testing(); @@ -1710,8 +1705,7 @@ mod tests { let mut cache = Cache::builder() .max_capacity(100) .time_to_idle(Duration::from_secs(10)) - .eviction_listener(listener, EvictionNotificationMode::Blocking) - // .eviction_listener(listener, EvictionNotificationMode::NonBlocking) + .eviction_listener(listener, EvictionNotificationMode::NonBlocking) .build(); cache.reconfigure_for_testing(); @@ -2358,8 +2352,7 @@ mod tests { // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(3) - .eviction_listener(listener, EvictionNotificationMode::Blocking) - // .eviction_listener(listener, EvictionNotificationMode::NonBlocking) + .eviction_listener(listener, EvictionNotificationMode::NonBlocking) .build(); cache.reconfigure_for_testing(); @@ -2425,8 +2418,7 @@ mod tests { // Create a cache with the eviction listener and also TTL and TTI. 
let mut cache = Cache::builder() - .eviction_listener(listener, EvictionNotificationMode::Blocking) - // .eviction_listener(listener, EvictionNotificationMode::NonBlocking) + .eviction_listener(listener, EvictionNotificationMode::NonBlocking) .time_to_live(Duration::from_secs(7)) .time_to_idle(Duration::from_secs(5)) .build(); diff --git a/src/sync/cache.rs b/src/sync/cache.rs index ab534b71..f74f808e 100644 --- a/src/sync/cache.rs +++ b/src/sync/cache.rs @@ -770,13 +770,36 @@ where Arc: Borrow, Q: Hash + Eq + ?Sized, { - // TODO: If blocking removal notification is enabled, lock the key until - // notification is processed by the listener. + // Lock the key for removal if blocking removal notification is enabled. + let mut kl = None; + let mut klg = None; + if self.base.is_removal_notifier_enabled() && self.base.is_blocking_removal_notification() { + // To lock the key, we have to get Arc for key (&Q). + // + // TODO: Enhance this if possible. This is rather hack now because + // it cannot prevent race conditions like this: + // + // 1. We miss the key because it does not exist. So we do not lock + // the key. + // 2. Somebody else (other thread) inserts the key. + // 3. We remove the entry for the key, but without the key lock! + // + if let Some(arc_key) = self.base.get_key_with_hash(key, hash) { + kl = self.base.maybe_key_lock(&arc_key); + klg = kl.as_ref().map(|kl| kl.lock()); + } + } if let Some(kv) = self.base.remove_entry(key, hash) { if self.base.is_removal_notifier_enabled() { self.base.notify_invalidate(&kv.key, &kv.entry) } + // Drop the locks before scheduling write op to avoid a potential dead lock. + // (Scheduling write can do spin lock when the queue is full, and queue will + // be drained by the housekeeping thread that can lock the same key) + std::mem::drop(klg); + std::mem::drop(kl); + let op = WriteOp::Remove(kv); let hk = self.base.housekeeper.as_ref(); Self::schedule_write_op(&self.base.write_op_ch, op, hk).expect("Failed to remove"); @@ -1025,7 +1048,7 @@ mod tests { // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(3) - .eviction_listener(listener, EvictionNotificationMode::NonBlocking) + .eviction_listener(listener, EvictionNotificationMode::Blocking) .build(); cache.reconfigure_for_testing(); @@ -1121,7 +1144,7 @@ mod tests { let mut cache = Cache::builder() .max_capacity(31) .weigher(weigher) - .eviction_listener(listener, EvictionNotificationMode::NonBlocking) + .eviction_listener(listener, EvictionNotificationMode::Blocking) .build(); cache.reconfigure_for_testing(); @@ -1268,7 +1291,7 @@ mod tests { // Create a cache with the eviction listener. 
let mut cache = Cache::builder() .max_capacity(100) - .eviction_listener(listener, EvictionNotificationMode::NonBlocking) + .eviction_listener(listener, EvictionNotificationMode::Blocking) .build(); cache.reconfigure_for_testing(); @@ -1331,7 +1354,7 @@ mod tests { let mut cache = Cache::builder() .max_capacity(100) .support_invalidation_closures() - .eviction_listener(listener, EvictionNotificationMode::NonBlocking) + .eviction_listener(listener, EvictionNotificationMode::Blocking) .build(); cache.reconfigure_for_testing(); @@ -1437,7 +1460,7 @@ mod tests { let mut cache = Cache::builder() .max_capacity(100) .time_to_live(Duration::from_secs(10)) - .eviction_listener(listener, EvictionNotificationMode::NonBlocking) + .eviction_listener(listener, EvictionNotificationMode::Blocking) .build(); cache.reconfigure_for_testing(); @@ -1527,7 +1550,7 @@ mod tests { let mut cache = Cache::builder() .max_capacity(100) .time_to_idle(Duration::from_secs(10)) - .eviction_listener(listener, EvictionNotificationMode::NonBlocking) + .eviction_listener(listener, EvictionNotificationMode::Blocking) .build(); cache.reconfigure_for_testing(); @@ -2101,6 +2124,7 @@ mod tests { ); } + // TODO: In general, test both blocking and non-blocking notifications. #[test] fn test_removal_notifications() { // NOTE: The following tests also check the notifications: @@ -2121,7 +2145,7 @@ mod tests { // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(3) - .eviction_listener(listener, EvictionNotificationMode::NonBlocking) + .eviction_listener(listener, EvictionNotificationMode::Blocking) .build(); cache.reconfigure_for_testing(); @@ -2187,7 +2211,7 @@ mod tests { // Create a cache with the eviction listener and also TTL and TTI. let mut cache = Cache::builder() - .eviction_listener(listener, EvictionNotificationMode::NonBlocking) + .eviction_listener(listener, EvictionNotificationMode::Blocking) .time_to_live(Duration::from_secs(7)) .time_to_idle(Duration::from_secs(5)) .build(); diff --git a/src/sync_base.rs b/src/sync_base.rs index 1447d7e6..f41aa74e 100644 --- a/src/sync_base.rs +++ b/src/sync_base.rs @@ -1,6 +1,7 @@ pub(crate) mod base_cache; mod invalidator; pub(crate) mod iter; +mod key_lock; mod removal_notifier; /// The type of the unique ID to identify a predicate used by diff --git a/src/sync_base/base_cache.rs b/src/sync_base/base_cache.rs index 9f9fc317..01b8e6ba 100644 --- a/src/sync_base/base_cache.rs +++ b/src/sync_base/base_cache.rs @@ -1,6 +1,7 @@ use super::{ invalidator::{GetOrRemoveEntry, InvalidationResult, Invalidator, KeyDateLite, PredicateFun}, iter::ScanningGet, + key_lock::{KeyLock, KeyLockMap}, removal_notifier::RemovedEntry, PredicateId, }; @@ -100,6 +101,11 @@ impl BaseCache { self.inner.is_removal_notifier_enabled() } + #[inline] + pub(crate) fn is_blocking_removal_notification(&self) -> bool { + self.inner.is_blocking_removal_notification() + } + pub(crate) fn notify_invalidate(&self, key: &Arc, entry: &TrioArc>) where K: Send + Sync + 'static, @@ -114,6 +120,16 @@ impl BaseCache { } } +impl BaseCache +where + K: Hash + Eq, + S: BuildHasher, +{ + pub(crate) fn maybe_key_lock(&self, key: &Arc) -> Option> { + self.inner.maybe_key_lock(key) + } +} + impl BaseCache where K: Hash + Eq + Send + Sync + 'static, @@ -224,6 +240,15 @@ where } } + pub(crate) fn get_key_with_hash(&self, key: &Q, hash: u64) -> Option> + where + Arc: Borrow, + Q: Hash + Eq + ?Sized, + { + self.inner + .get_key_value_and(key, hash, |k, _entry| Arc::clone(k)) + } + #[inline] 
pub(crate) fn remove_entry(&self, key: &Q, hash: u64) -> Option> where @@ -327,9 +352,9 @@ where let mut op1 = None; let mut op2 = None; - // TODO: If blocking removal notification is enabled, lock the key until - // notification is processed by the listener. (How can we handle async - // lock here for `future::Cache`?) + // Lock the key for update if blocking removal notification is enabled. + let kl = self.maybe_key_lock(&key); + let _klg = &kl.as_ref().map(|kl| kl.lock()); // Since the cache (cht::SegmentedHashMap) employs optimistic locking // strategy, insert_with_or_modify() may get an insert/modify operation @@ -636,6 +661,7 @@ pub(crate) struct Inner { valid_after: AtomicInstant, weigher: Option>, removal_notifier: Option>, + key_locks: Option>, invalidator_enabled: bool, invalidator: RwLock>>, has_expiration_clock: AtomicBool, @@ -663,6 +689,14 @@ impl Inner { self.removal_notifier.is_some() } + #[inline] + pub(crate) fn is_blocking_removal_notification(&self) -> bool { + self.removal_notifier + .as_ref() + .map(|rn| rn.is_blocking()) + .unwrap_or_default() + } + #[cfg(feature = "unstable-debug-counters")] pub fn debug_stats(&self) -> CacheDebugStats { let ec = self.entry_count.load(); @@ -731,6 +765,17 @@ impl Inner { } } +// functions/methods used by BaseCache +impl Inner +where + K: Hash + Eq, + S: BuildHasher, +{ + fn maybe_key_lock(&self, key: &Arc) -> Option> { + self.key_locks.as_ref().map(|kls| kls.key_lock(key)) + } +} + // functions/methods used by BaseCache impl Inner where @@ -763,9 +808,17 @@ where initial_capacity, build_hasher.clone(), ); - let removal_notifier = eviction_listener.map(|listener| { - RemovalNotifier::new(listener, eviction_notification_mode.unwrap_or_default()) - }); + let (removal_notifier, key_locks) = if let Some(listener) = eviction_listener { + let rn = RemovalNotifier::new(listener, eviction_notification_mode.unwrap_or_default()); + if rn.is_blocking() { + let kl = KeyLockMap::with_hasher(build_hasher.clone()); + (Some(rn), Some(kl)) + } else { + (Some(rn), None) + } + } else { + (None, None) + }; Self { max_capacity: max_capacity.map(|n| n as u64), @@ -783,6 +836,7 @@ where valid_after: Default::default(), weigher, removal_notifier, + key_locks, invalidator_enabled, // When enabled, this field will be set later via the set_invalidator method. invalidator: RwLock::new(None), @@ -891,13 +945,21 @@ where condition: F, ) -> Option>> where + K: Send + Sync + 'static, + V: Clone + Send + Sync + 'static, F: FnMut(&Arc, &TrioArc>) -> bool, { - // TODO: If blocking removal notification is enabled, lock the key until - // notification is processed b y the listener. (How can we handle async - // lock here for `future::Cache`?) - - self.cache.remove_if(key, hash, condition) + // Lock the key for removal if blocking removal notification is enabled. + let kl = self.maybe_key_lock(key); + let _klg = &kl.as_ref().map(|kl| kl.lock()); + + let maybe_entry = self.cache.remove_if(key, hash, condition); + if let Some(entry) = &maybe_entry { + if self.is_removal_notifier_enabled() { + self.notify_single_removal(Arc::clone(key), entry, RemovalCause::Explicit); + } + } + maybe_entry } } @@ -1160,6 +1222,11 @@ where if let Some(max) = self.max_capacity { if new_weight as u64 > max { // The candidate is too big to fit in the cache. Reject it. + + // Lock the key for removal if blocking removal notification is enabled. 
+ let kl = self.maybe_key_lock(&kh.key); + let _klg = &kl.as_ref().map(|kl| kl.lock()); + let removed = self.cache.remove(&Arc::clone(&kh.key), kh.hash); if let Some(entry) = removed { if eviction_state.is_notifier_enabled() { @@ -1181,13 +1248,14 @@ where victim_nodes, skipped_nodes: mut skipped, } => { - // TODO: If blocking removal notification is enabled, lock the key until - // notification is processed by the listener. (How can we handle async - // lock here for `future::Cache`?) - // Try to remove the victims from the cache (hash map). for victim in victim_nodes { let element = unsafe { &victim.as_ref().element }; + + // Lock the key for removal if blocking removal notification is enabled. + let kl = self.maybe_key_lock(element.key()); + let _klg = &kl.as_ref().map(|kl| kl.lock()); + if let Some((vic_key, vic_entry)) = self.cache.remove_entry(element.key(), element.hash()) { @@ -1215,9 +1283,9 @@ where AdmissionResult::Rejected { skipped_nodes: s } => { skipped_nodes = s; - // TODO: If blocking removal notification is enabled, lock the key until - // notification is processed by the listener. (How can we handle async - // lock here for `future::Cache`?) + // Lock the key for removal if blocking removal notification is enabled. + let kl = self.maybe_key_lock(&kh.key); + let _klg = &kl.as_ref().map(|kl| kl.lock()); // Remove the candidate from the cache (hash map). let key = Arc::clone(&kh.key); @@ -1441,9 +1509,9 @@ where .map(|(k, h, c)| (k, *h, *c)) .unwrap(); - // TODO: If blocking removal notification is enabled, lock the key until - // notification is processed b y the listener. (How can we handle async - // lock here for `future::Cache`?) + // Lock the key for removal if blocking removal notification is enabled. + let kl = self.maybe_key_lock(key); + let _klg = &kl.as_ref().map(|kl| kl.lock()); // Remove the key from the map only when the entry is really // expired. This check is needed because it is possible that the entry in @@ -1534,9 +1602,9 @@ where let (key, cause) = key_cause.as_ref().unwrap(); let hash = self.hash(key); - // TODO: If blocking removal notification is enabled, lock the key until - // notification is processed by the listener. (How can we handle async - // lock here for `future::Cache`?) + // Lock the key for removal if blocking removal notification is enabled. + let kl = self.maybe_key_lock(key); + let _klg = &kl.as_ref().map(|kl| kl.lock()); let maybe_entry = self .cache @@ -1595,13 +1663,7 @@ where is_done, }) = invalidator.task_result() { - for KvEntry { key, entry } in invalidated { - // TODO: If blocking removal notification is enabled, process - // notification when they were actually removed. - - if eviction_state.is_notifier_enabled() { - eviction_state.add_removed_entry(key, &entry, RemovalCause::Explicit); - } + for KvEntry { key: _key, entry } in invalidated { Self::handle_remove(deqs, entry, &mut eviction_state.counters); } if is_done { @@ -1615,7 +1677,9 @@ where invalidator: &Invalidator, write_order: &mut Deque>, batch_size: usize, - ) { + ) where + V: Clone, + { let now = self.current_time_from_expiration_clock(); // If the write order queue is empty, we are done and can remove the predicates @@ -1686,6 +1750,10 @@ where None => break, }; + // Lock the key for removal if blocking removal notification is enabled. 
+ let kl = self.maybe_key_lock(&key); + let _klg = &kl.as_ref().map(|kl| kl.lock()); + let maybe_entry = self.cache.remove_if(&key, hash, |_, v| { if let Some(lm) = v.last_modified() { lm == ts @@ -1694,10 +1762,6 @@ where } }); - // TODO: If blocking removal notification is enabled, lock the key until - // notification is processed by the listener. (How can we handle async - // lock here for `future::Cache`?) - if let Some(entry) = maybe_entry { if eviction_state.is_notifier_enabled() { eviction_state.add_removed_entry(key, &entry, RemovalCause::Size); diff --git a/src/sync_base/invalidator.rs b/src/sync_base/invalidator.rs index 42597de7..ba120b0e 100644 --- a/src/sync_base/invalidator.rs +++ b/src/sync_base/invalidator.rs @@ -39,6 +39,8 @@ pub(crate) trait GetOrRemoveEntry { condition: F, ) -> Option>> where + K: Send + Sync + 'static, + V: Clone + Send + Sync + 'static, F: FnMut(&Arc, &TrioArc>) -> bool; } @@ -189,7 +191,7 @@ impl Invalidator { pub(crate) fn submit_task(&self, candidates: Vec>, is_truncated: bool) where K: Hash + Eq + Send + Sync + 'static, - V: Send + Sync + 'static, + V: Clone + Send + Sync + 'static, S: BuildHasher + Send + Sync + 'static, { let ctx = &self.scan_context; @@ -372,7 +374,11 @@ where } } - fn execute(&self) { + fn execute(&self) + where + K: Send + Sync + 'static, + V: Clone + Send + Sync + 'static, + { let cache_lock = self.scan_context.cache.lock(); // Restore the Weak pointer to Inner. @@ -399,6 +405,8 @@ where fn do_execute(&self, cache: &Arc) -> ScanResult where Arc: GetOrRemoveEntry, + K: Send + Sync + 'static, + V: Clone + Send + Sync + 'static, { let predicates = self.scan_context.predicates.lock(); let mut invalidated = Vec::default(); @@ -460,6 +468,8 @@ where ) -> Option>> where Arc: GetOrRemoveEntry, + K: Send + Sync + 'static, + V: Clone + Send + Sync + 'static, { cache.remove_key_value_if(key, hash, |_, v| { if let Some(lm) = v.last_modified() { diff --git a/src/sync_base/key_lock.rs b/src/sync_base/key_lock.rs new file mode 100644 index 00000000..03dacfdf --- /dev/null +++ b/src/sync_base/key_lock.rs @@ -0,0 +1,85 @@ +use std::{ + hash::{BuildHasher, Hash}, + sync::Arc, +}; + +use crate::cht::SegmentedHashMap; + +use parking_lot::{Mutex, MutexGuard}; +use triomphe::Arc as TrioArc; + +const LOCK_MAP_NUM_SEGMENTS: usize = 64; + +// We need the `where` clause here because of the Drop impl. 
+pub(crate) struct KeyLock<'a, K, S> +where + Arc: Eq + Hash, + S: BuildHasher, +{ + map: &'a SegmentedHashMap, TrioArc>, S>, + key: Arc, + hash: u64, + lock: TrioArc>, +} + +impl<'a, K, S> Drop for KeyLock<'a, K, S> +where + Arc: Eq + Hash, + S: BuildHasher, +{ + fn drop(&mut self) { + if TrioArc::count(&self.lock) <= 1 { + self.map + .remove_if(&self.key, self.hash, |_k, v| TrioArc::count(v) <= 1); + } + } +} + +impl<'a, K, S> KeyLock<'a, K, S> +where + Arc: Eq + Hash, + S: BuildHasher, +{ + fn new(map: &'a LockMap, key: &Arc, hash: u64, lock: TrioArc>) -> Self { + Self { + map, + key: Arc::clone(key), + hash, + lock, + } + } + + pub(crate) fn lock(&self) -> MutexGuard<'_, ()> { + self.lock.lock() + } +} + +type LockMap = SegmentedHashMap, TrioArc>, S>; + +pub(crate) struct KeyLockMap { + locks: LockMap, +} + +impl KeyLockMap +where + Arc: Eq + Hash, + S: BuildHasher, +{ + pub(crate) fn with_hasher(hasher: S) -> Self { + Self { + locks: SegmentedHashMap::with_num_segments_and_hasher(LOCK_MAP_NUM_SEGMENTS, hasher), + } + } + + pub(crate) fn key_lock(&self, key: &Arc) -> KeyLock<'_, K, S> { + let hash = self.locks.hash(key); + let kl = TrioArc::new(Mutex::new(())); + match self + .locks + .insert_if_not_present(Arc::clone(key), hash, kl.clone()) + { + None => KeyLock::new(&self.locks, key, hash, kl), + Some(existing_kl) => KeyLock::new(&self.locks, key, hash, existing_kl), + } + } +} diff --git a/src/sync_base/removal_notifier.rs b/src/sync_base/removal_notifier.rs index 9aa4d1a4..07ba443f 100644 --- a/src/sync_base/removal_notifier.rs +++ b/src/sync_base/removal_notifier.rs @@ -36,6 +36,10 @@ impl RemovalNotifier { } } + pub(crate) fn is_blocking(&self) -> bool { + matches!(self, RemovalNotifier::Blocking(_)) + } + pub(crate) fn is_batching_supported(&self) -> bool { matches!( self, From 59ca134f44778996aebe9707ca75d6149810fa0d Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Tue, 14 Jun 2022 08:54:06 +0800 Subject: [PATCH 19/44] Raise the minimal version of Triomphe crate from v0.1 to v0.1.3 to ensure `triomphe::Arc::count` function exists --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index a6465066..608c5475 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -53,7 +53,7 @@ tagptr = "0.2" # Opt-out serde and stable_deref_trait features # https://github.com/Manishearth/triomphe/pull/5 -triomphe = { version = "0.1", default-features = false } +triomphe = { version = "0.1.3", default-features = false } # Optional dependencies (enabled by default) quanta = { version = "0.10.0", optional = true } From 090844e152910815a0f63886900e8dcb0b0a0a9d Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Tue, 14 Jun 2022 10:16:48 +0800 Subject: [PATCH 20/44] Support notification on eviction Remove the second parameter `mode` from `future::CacheBuilder::eviction_listener` method as `future::Cache` will not support blocking notification mode in the coming release. --- src/future/builder.rs | 10 +++------- src/future/cache.rs | 25 +++++++++---------------- 2 files changed, 12 insertions(+), 23 deletions(-) diff --git a/src/future/builder.rs b/src/future/builder.rs index 3c41f1c3..f79785a3 100644 --- a/src/future/builder.rs +++ b/src/future/builder.rs @@ -11,7 +11,6 @@ use std::{ sync::Arc, time::Duration, }; -// use parking_lot::Mutex; /// Builds a [`Cache`][cache-struct] with various configuration knobs. 
/// @@ -74,7 +73,7 @@ use std::{ /// // uuid = { version = "1.1", features = ["v4"] } /// // tokio = { version = "1.18", features = ["fs", "macros", "rt-multi-thread", "sync", "time"] } /// -/// use moka::{future::Cache, notification::EvictionNotificationMode}; +/// use moka::future::Cache; /// /// use anyhow::{anyhow, Context}; /// use std::{ @@ -176,7 +175,7 @@ use std::{ /// let cache = Cache::builder() /// .max_capacity(100) /// .time_to_live(Duration::from_secs(2)) -/// .eviction_listener(listener, EvictionNotificationMode::NonBlocking) +/// .eviction_listener(listener) /// .build(); /// /// // Insert an entry to the cache. @@ -339,16 +338,13 @@ impl CacheBuilder { } } - // TODO: Need to come up with a better interface than always specifying the mode. - pub fn eviction_listener( self, listener: impl Fn(Arc, V, RemovalCause) + Send + Sync + 'static, - mode: EvictionNotificationMode, ) -> Self { Self { eviction_listener: Some(Arc::new(listener)), - eviction_notification_mode: Some(mode), + eviction_notification_mode: Some(EvictionNotificationMode::NonBlocking), ..self } } diff --git a/src/future/cache.rs b/src/future/cache.rs index 9e0d82d8..3356738d 100644 --- a/src/future/cache.rs +++ b/src/future/cache.rs @@ -764,10 +764,6 @@ where Q: Hash + Eq + ?Sized, { let hash = self.base.hash(key); - - // TODO: If blocking removal notification is enabled, lock the key until - // notification is processed by the listener. - if let Some(kv) = self.base.remove_entry(key, hash) { if self.base.is_removal_notifier_enabled() { self.base.notify_invalidate(&kv.key, &kv.entry) @@ -1127,10 +1123,7 @@ where #[cfg(test)] mod tests { use super::{Cache, ConcurrentCacheExt}; - use crate::{ - common::time::Clock, - notification::{EvictionNotificationMode, RemovalCause}, - }; + use crate::{common::time::Clock, notification::RemovalCause}; use async_io::Timer; use parking_lot::Mutex; @@ -1149,7 +1142,7 @@ mod tests { // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(3) - .eviction_listener(listener, EvictionNotificationMode::NonBlocking) + .eviction_listener(listener) .build(); cache.reconfigure_for_testing(); @@ -1292,7 +1285,7 @@ mod tests { let mut cache = Cache::builder() .max_capacity(31) .weigher(weigher) - .eviction_listener(listener, EvictionNotificationMode::NonBlocking) + .eviction_listener(listener) .build(); cache.reconfigure_for_testing(); @@ -1446,7 +1439,7 @@ mod tests { // Create a cache with the eviction listener. 
let mut cache = Cache::builder() .max_capacity(100) - .eviction_listener(listener, EvictionNotificationMode::NonBlocking) + .eviction_listener(listener) .build(); cache.reconfigure_for_testing(); @@ -1509,7 +1502,7 @@ mod tests { let mut cache = Cache::builder() .max_capacity(100) .support_invalidation_closures() - .eviction_listener(listener, EvictionNotificationMode::NonBlocking) + .eviction_listener(listener) .build(); cache.reconfigure_for_testing(); @@ -1615,7 +1608,7 @@ mod tests { let mut cache = Cache::builder() .max_capacity(100) .time_to_live(Duration::from_secs(10)) - .eviction_listener(listener, EvictionNotificationMode::NonBlocking) + .eviction_listener(listener) .build(); cache.reconfigure_for_testing(); @@ -1705,7 +1698,7 @@ mod tests { let mut cache = Cache::builder() .max_capacity(100) .time_to_idle(Duration::from_secs(10)) - .eviction_listener(listener, EvictionNotificationMode::NonBlocking) + .eviction_listener(listener) .build(); cache.reconfigure_for_testing(); @@ -2352,7 +2345,7 @@ mod tests { // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(3) - .eviction_listener(listener, EvictionNotificationMode::NonBlocking) + .eviction_listener(listener) .build(); cache.reconfigure_for_testing(); @@ -2418,7 +2411,7 @@ mod tests { // Create a cache with the eviction listener and also TTL and TTI. let mut cache = Cache::builder() - .eviction_listener(listener, EvictionNotificationMode::NonBlocking) + .eviction_listener(listener) .time_to_live(Duration::from_secs(7)) .time_to_idle(Duration::from_secs(5)) .build(); From 14627007776ee46e54462e968b21a18340c71bf9 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Tue, 21 Jun 2022 07:20:21 +0800 Subject: [PATCH 21/44] Support notification on eviction - Add `notification::Configuration` struct. - Add `eviction_listener_with_conf` method to `sync::Builder`. - Rename `notification::EvictionNotificationMode` enum to `notification::DeliveryMode`. - Update unit tests for `sync` caches to test both two `DeliveryMode` variants. 
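
For reference, the new builder surface added in this patch composes as follows. This is a condensed sketch of the doc example that this same patch adds to `sync/builder.rs` further below; it assumes the `sync` feature is enabled and that the listener only logs the removal.

    use moka::{notification, sync::Cache};
    use std::time::Duration;

    fn build_cache() -> Cache<String, String> {
        // The listener receives the key (as Arc<K>), the value, and the
        // RemovalCause of the eviction.
        let listener = |key, value, cause| {
            eprintln!("evicted {:?} => {:?} (cause: {:?})", key, value, cause);
        };

        // Ask for the queued (formerly "non-blocking") delivery mode.
        let listener_conf = notification::Configuration::builder()
            .delivery_mode(notification::DeliveryMode::Queued)
            .build();

        Cache::builder()
            .max_capacity(100)
            .time_to_live(Duration::from_secs(2))
            .eviction_listener_with_conf(listener, listener_conf)
            .build()
    }

The plain `eviction_listener` method remains available and simply passes the default `Configuration`, whose delivery mode defaults to `DeliveryMode::Direct`.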
--- src/future/builder.rs | 15 +- src/future/cache.rs | 6 +- src/notification.rs | 65 +- src/sync/builder.rs | 41 +- src/sync/cache.rs | 1388 ++++++++++++++++------------- src/sync/segment.rs | 808 +++++++++-------- src/sync_base/base_cache.rs | 10 +- src/sync_base/removal_notifier.rs | 14 +- 8 files changed, 1285 insertions(+), 1062 deletions(-) diff --git a/src/future/builder.rs b/src/future/builder.rs index f79785a3..26855681 100644 --- a/src/future/builder.rs +++ b/src/future/builder.rs @@ -1,7 +1,7 @@ use super::Cache; use crate::{ common::{builder_utils, concurrent::Weigher}, - notification::{EvictionListener, EvictionNotificationMode, RemovalCause}, + notification::{self, DeliveryMode, EvictionListener, RemovalCause}, }; use std::{ @@ -220,7 +220,7 @@ pub struct CacheBuilder { initial_capacity: Option, weigher: Option>, eviction_listener: Option>, - eviction_notification_mode: Option, + eviction_listener_conf: Option, time_to_live: Option, time_to_idle: Option, invalidator_enabled: bool, @@ -238,7 +238,7 @@ where initial_capacity: None, weigher: None, eviction_listener: None, - eviction_notification_mode: None, + eviction_listener_conf: None, time_to_live: None, time_to_idle: None, invalidator_enabled: false, @@ -277,7 +277,7 @@ where build_hasher, self.weigher, self.eviction_listener, - self.eviction_notification_mode, + self.eviction_listener_conf, self.time_to_live, self.time_to_idle, self.invalidator_enabled, @@ -302,7 +302,7 @@ where hasher, self.weigher, self.eviction_listener, - self.eviction_notification_mode, + self.eviction_listener_conf, self.time_to_live, self.time_to_idle, self.invalidator_enabled, @@ -342,9 +342,12 @@ impl CacheBuilder { self, listener: impl Fn(Arc, V, RemovalCause) + Send + Sync + 'static, ) -> Self { + let conf = notification::Configuration::builder() + .delivery_mode(DeliveryMode::Queued) + .build(); Self { eviction_listener: Some(Arc::new(listener)), - eviction_notification_mode: Some(EvictionNotificationMode::NonBlocking), + eviction_listener_conf: Some(conf), ..self } } diff --git a/src/future/cache.rs b/src/future/cache.rs index 57fbf39c..13c271f9 100644 --- a/src/future/cache.rs +++ b/src/future/cache.rs @@ -8,7 +8,7 @@ use crate::{ housekeeper::InnerSync, Weigher, WriteOp, }, - notification::{EvictionListener, EvictionNotificationMode}, + notification::{self, EvictionListener}, sync_base::base_cache::{BaseCache, HouseKeeperArc}, Policy, PredicateError, }; @@ -456,7 +456,7 @@ where build_hasher: S, weigher: Option>, eviction_listener: Option>, - eviction_notification_mode: Option, + eviction_listener_conf: Option, time_to_live: Option, time_to_idle: Option, invalidator_enabled: bool, @@ -468,7 +468,7 @@ where build_hasher.clone(), weigher, eviction_listener, - eviction_notification_mode, + eviction_listener_conf, time_to_live, time_to_idle, invalidator_enabled, diff --git a/src/notification.rs b/src/notification.rs index d8501766..78d59860 100644 --- a/src/notification.rs +++ b/src/notification.rs @@ -11,15 +11,46 @@ pub(crate) type EvictionListenerRef<'a, K, V> = // the notifications, but currently there is no way to know when all entries // have been invalidated and their notifications have been sent. 
-#[derive(Clone, Debug, PartialEq, Eq)] -pub enum EvictionNotificationMode { - Blocking, - NonBlocking, +#[derive(Clone, Debug, Default)] +pub struct Configuration { + mode: DeliveryMode, } -impl Default for EvictionNotificationMode { +impl Configuration { + pub fn builder() -> ConfigurationBuilder { + ConfigurationBuilder::default() + } + + pub fn delivery_mode(&self) -> DeliveryMode { + self.mode + } +} + +#[derive(Default)] +pub struct ConfigurationBuilder { + mode: DeliveryMode, +} + +impl ConfigurationBuilder { + pub fn build(self) -> Configuration { + Configuration { mode: self.mode } + } + + pub fn delivery_mode(self, mode: DeliveryMode) -> Self { + // Self { mode, ..self } + Self { mode } + } +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum DeliveryMode { + Direct, + Queued, +} + +impl Default for DeliveryMode { fn default() -> Self { - Self::Blocking + Self::Direct } } @@ -41,3 +72,25 @@ impl RemovalCause { matches!(self, Self::Expired | Self::Size) } } + +#[cfg(test)] +pub(crate) mod macros { + + macro_rules! assert_with_mode { + ($cond:expr, $delivery_mode:ident) => { + assert!( + $cond, + "assertion failed. (delivery mode: {:?})", + $delivery_mode + ) + }; + } + + macro_rules! assert_eq_with_mode { + ($left:expr, $right:expr, $delivery_mode:ident) => { + assert_eq!($left, $right, "(delivery mode: {:?})", $delivery_mode) + }; + } + + pub(crate) use {assert_eq_with_mode, assert_with_mode}; +} diff --git a/src/sync/builder.rs b/src/sync/builder.rs index ee7fa40b..824f2c6e 100644 --- a/src/sync/builder.rs +++ b/src/sync/builder.rs @@ -1,7 +1,7 @@ use super::{Cache, SegmentedCache}; use crate::{ common::{builder_utils, concurrent::Weigher}, - notification::{EvictionListener, EvictionNotificationMode, RemovalCause}, + notification::{self, EvictionListener, RemovalCause}, }; use std::{ @@ -64,7 +64,7 @@ use std::{ /// // anyhow = "1.0" /// // uuid = { version = "1.1", features = ["v4"] } /// -/// use moka::{sync::Cache, notification::EvictionNotificationMode}; +/// use moka::{sync::Cache, notification}; /// /// use anyhow::{anyhow, Context}; /// use std::{ @@ -160,12 +160,16 @@ use std::{ /// } /// }; /// +/// let listener_conf = notification::Configuration::builder() +/// .delivery_mode(notification::DeliveryMode::Queued) +/// .build(); +/// /// // Create the cache. Set time to live for two seconds and set the /// // eviction listener. /// let cache = Cache::builder() /// .max_capacity(100) /// .time_to_live(Duration::from_secs(2)) -/// .eviction_listener(listener, EvictionNotificationMode::NonBlocking) +/// .eviction_listener_with_conf(listener, listener_conf) /// .build(); /// /// // Insert an entry to the cache. 
@@ -212,7 +216,7 @@ pub struct CacheBuilder { num_segments: Option, weigher: Option>, eviction_listener: Option>, - eviction_notification_mode: Option, + eviction_listener_conf: Option, time_to_live: Option, time_to_idle: Option, invalidator_enabled: bool, @@ -231,7 +235,7 @@ where num_segments: None, weigher: None, eviction_listener: None, - eviction_notification_mode: None, + eviction_listener_conf: None, time_to_live: None, time_to_idle: None, invalidator_enabled: false, @@ -271,7 +275,7 @@ where num_segments: Some(num_segments), weigher: None, eviction_listener: None, - eviction_notification_mode: None, + eviction_listener_conf: None, time_to_live: self.time_to_live, time_to_idle: self.time_to_idle, invalidator_enabled: self.invalidator_enabled, @@ -298,7 +302,7 @@ where build_hasher, self.weigher, self.eviction_listener, - self.eviction_notification_mode, + self.eviction_listener_conf, self.time_to_live, self.time_to_idle, self.invalidator_enabled, @@ -326,7 +330,7 @@ where hasher, self.weigher, self.eviction_listener, - self.eviction_notification_mode, + self.eviction_listener_conf, self.time_to_live, self.time_to_idle, self.invalidator_enabled, @@ -359,7 +363,7 @@ where build_hasher, self.weigher, self.eviction_listener, - self.eviction_notification_mode, + self.eviction_listener_conf, self.time_to_live, self.time_to_idle, self.invalidator_enabled, @@ -388,7 +392,7 @@ where hasher, self.weigher, self.eviction_listener, - self.eviction_notification_mode, + self.eviction_listener_conf, self.time_to_live, self.time_to_idle, self.invalidator_enabled, @@ -424,16 +428,25 @@ impl CacheBuilder { } } - // TODO: Need to come up with a better interface than always specifying the mode. - pub fn eviction_listener( self, listener: impl Fn(Arc, V, RemovalCause) + Send + Sync + 'static, - mode: EvictionNotificationMode, ) -> Self { Self { eviction_listener: Some(Arc::new(listener)), - eviction_notification_mode: Some(mode), + eviction_listener_conf: Some(Default::default()), + ..self + } + } + + pub fn eviction_listener_with_conf( + self, + listener: impl Fn(Arc, V, RemovalCause) + Send + Sync + 'static, + conf: notification::Configuration, + ) -> Self { + Self { + eviction_listener: Some(Arc::new(listener)), + eviction_listener_conf: Some(conf), ..self } } diff --git a/src/sync/cache.rs b/src/sync/cache.rs index f74f808e..badca9a1 100644 --- a/src/sync/cache.rs +++ b/src/sync/cache.rs @@ -8,7 +8,7 @@ use crate::{ housekeeper::InnerSync, Weigher, WriteOp, }, - notification::{EvictionListener, EvictionNotificationMode}, + notification::{self, EvictionListener}, sync::{Iter, PredicateId}, sync_base::{ base_cache::{BaseCache, HouseKeeperArc}, @@ -400,7 +400,7 @@ where build_hasher: S, weigher: Option>, eviction_listener: Option>, - eviction_notification_mode: Option, + eviction_listener_conf: Option, time_to_live: Option, time_to_idle: Option, invalidator_enabled: bool, @@ -412,7 +412,7 @@ where build_hasher.clone(), weigher, eviction_listener, - eviction_notification_mode, + eviction_listener_conf, time_to_live, time_to_idle, invalidator_enabled, @@ -1029,7 +1029,11 @@ mod tests { use super::{Cache, ConcurrentCacheExt}; use crate::{ common::time::Clock, - notification::{EvictionNotificationMode, RemovalCause}, + notification::{ + self, + macros::{assert_eq_with_mode, assert_with_mode}, + DeliveryMode, RemovalCause, + }, }; use parking_lot::Mutex; @@ -1037,217 +1041,241 @@ mod tests { #[test] fn basic_single_thread() { - // The following `Vec`s will hold actual and expected notifications. 
- let actual = Arc::new(Mutex::new(Vec::new())); - let mut expected = Vec::new(); - - // Create an eviction listener. - let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); - - // Create a cache with the eviction listener. - let mut cache = Cache::builder() - .max_capacity(3) - .eviction_listener(listener, EvictionNotificationMode::Blocking) - .build(); - cache.reconfigure_for_testing(); - - // Make the cache exterior immutable. - let cache = cache; - - cache.insert("a", "alice"); - cache.insert("b", "bob"); - assert_eq!(cache.get(&"a"), Some("alice")); - assert!(cache.contains_key(&"a")); - assert!(cache.contains_key(&"b")); - assert_eq!(cache.get(&"b"), Some("bob")); - cache.sync(); - // counts: a -> 1, b -> 1 - - cache.insert("c", "cindy"); - assert_eq!(cache.get(&"c"), Some("cindy")); - assert!(cache.contains_key(&"c")); - // counts: a -> 1, b -> 1, c -> 1 - cache.sync(); - - assert!(cache.contains_key(&"a")); - assert_eq!(cache.get(&"a"), Some("alice")); - assert_eq!(cache.get(&"b"), Some("bob")); - assert!(cache.contains_key(&"b")); - cache.sync(); - // counts: a -> 2, b -> 2, c -> 1 - - // "d" should not be admitted because its frequency is too low. - cache.insert("d", "david"); // count: d -> 0 - expected.push((Arc::new("d"), "david", RemovalCause::Size)); - cache.sync(); - assert_eq!(cache.get(&"d"), None); // d -> 1 - assert!(!cache.contains_key(&"d")); - - cache.insert("d", "david"); - expected.push((Arc::new("d"), "david", RemovalCause::Size)); - cache.sync(); - assert!(!cache.contains_key(&"d")); - assert_eq!(cache.get(&"d"), None); // d -> 2 - - // "d" should be admitted and "c" should be evicted - // because d's frequency is higher than c's. - cache.insert("d", "dennis"); - expected.push((Arc::new("c"), "cindy", RemovalCause::Size)); - cache.sync(); - assert_eq!(cache.get(&"a"), Some("alice")); - assert_eq!(cache.get(&"b"), Some("bob")); - assert_eq!(cache.get(&"c"), None); - assert_eq!(cache.get(&"d"), Some("dennis")); - assert!(cache.contains_key(&"a")); - assert!(cache.contains_key(&"b")); - assert!(!cache.contains_key(&"c")); - assert!(cache.contains_key(&"d")); - - cache.invalidate(&"b"); - expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); - cache.sync(); - assert_eq!(cache.get(&"b"), None); - assert!(!cache.contains_key(&"b")); - - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); - - // Verify the notifications. - let actual = &*actual.lock(); - assert_eq!(actual.len(), expected.len()); - for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { - assert_eq!(actual, &expected, "expected[{}]", i); + run_test(DeliveryMode::Direct); + run_test(DeliveryMode::Queued); + + fn run_test(delivery_mode: DeliveryMode) { + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + let listener_conf = notification::Configuration::builder() + .delivery_mode(delivery_mode) + .build(); + + // Create a cache with the eviction listener. + let mut cache = Cache::builder() + .max_capacity(3) + .eviction_listener_with_conf(listener, listener_conf) + .build(); + cache.reconfigure_for_testing(); + + // Make the cache exterior immutable. 
+ let cache = cache; + + cache.insert("a", "alice"); + cache.insert("b", "bob"); + assert_eq_with_mode!(cache.get(&"a"), Some("alice"), delivery_mode); + assert_with_mode!(cache.contains_key(&"a"), delivery_mode); + assert_with_mode!(cache.contains_key(&"b"), delivery_mode); + assert_eq_with_mode!(cache.get(&"b"), Some("bob"), delivery_mode); + cache.sync(); + // counts: a -> 1, b -> 1 + + cache.insert("c", "cindy"); + assert_eq_with_mode!(cache.get(&"c"), Some("cindy"), delivery_mode); + assert_with_mode!(cache.contains_key(&"c"), delivery_mode); + // counts: a -> 1, b -> 1, c -> 1 + cache.sync(); + + assert_with_mode!(cache.contains_key(&"a"), delivery_mode); + assert_eq_with_mode!(cache.get(&"a"), Some("alice"), delivery_mode); + assert_eq_with_mode!(cache.get(&"b"), Some("bob"), delivery_mode); + assert_with_mode!(cache.contains_key(&"b"), delivery_mode); + cache.sync(); + // counts: a -> 2, b -> 2, c -> 1 + + // "d" should not be admitted because its frequency is too low. + cache.insert("d", "david"); // count: d -> 0 + expected.push((Arc::new("d"), "david", RemovalCause::Size)); + cache.sync(); + assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); // d -> 1 + assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); + + cache.insert("d", "david"); + expected.push((Arc::new("d"), "david", RemovalCause::Size)); + cache.sync(); + assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); + assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); // d -> 2 + + // "d" should be admitted and "c" should be evicted + // because d's frequency is higher than c's. + cache.insert("d", "dennis"); + expected.push((Arc::new("c"), "cindy", RemovalCause::Size)); + cache.sync(); + assert_eq_with_mode!(cache.get(&"a"), Some("alice"), delivery_mode); + assert_eq_with_mode!(cache.get(&"b"), Some("bob"), delivery_mode); + assert_eq_with_mode!(cache.get(&"c"), None, delivery_mode); + assert_eq_with_mode!(cache.get(&"d"), Some("dennis"), delivery_mode); + assert_with_mode!(cache.contains_key(&"a"), delivery_mode); + assert_with_mode!(cache.contains_key(&"b"), delivery_mode); + assert_with_mode!(!cache.contains_key(&"c"), delivery_mode); + assert_with_mode!(cache.contains_key(&"d"), delivery_mode); + + cache.invalidate(&"b"); + expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); + cache.sync(); + assert_eq_with_mode!(cache.get(&"b"), None, delivery_mode); + assert_with_mode!(!cache.contains_key(&"b"), delivery_mode); + + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_secs(1)); + + // Verify the notifications. + let actual = &*actual.lock(); + assert_eq_with_mode!(actual.len(), expected.len(), delivery_mode); + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { + assert_eq!( + actual, &expected, + "expected[{}] (delivery mode: {:?})", + i, delivery_mode + ); + } } } #[test] fn size_aware_eviction() { - let weigher = |_k: &&str, v: &(&str, u32)| v.1; - - let alice = ("alice", 10); - let bob = ("bob", 15); - let bill = ("bill", 20); - let cindy = ("cindy", 5); - let david = ("david", 15); - let dennis = ("dennis", 15); - - // The following `Vec`s will hold actual and expected notifications. - let actual = Arc::new(Mutex::new(Vec::new())); - let mut expected = Vec::new(); - - // Create an eviction listener. - let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); - - // Create a cache with the eviction listener. 
- let mut cache = Cache::builder() - .max_capacity(31) - .weigher(weigher) - .eviction_listener(listener, EvictionNotificationMode::Blocking) - .build(); - cache.reconfigure_for_testing(); - - // Make the cache exterior immutable. - let cache = cache; - - cache.insert("a", alice); - cache.insert("b", bob); - assert_eq!(cache.get(&"a"), Some(alice)); - assert!(cache.contains_key(&"a")); - assert!(cache.contains_key(&"b")); - assert_eq!(cache.get(&"b"), Some(bob)); - cache.sync(); - // order (LRU -> MRU) and counts: a -> 1, b -> 1 - - cache.insert("c", cindy); - assert_eq!(cache.get(&"c"), Some(cindy)); - assert!(cache.contains_key(&"c")); - // order and counts: a -> 1, b -> 1, c -> 1 - cache.sync(); - - assert!(cache.contains_key(&"a")); - assert_eq!(cache.get(&"a"), Some(alice)); - assert_eq!(cache.get(&"b"), Some(bob)); - assert!(cache.contains_key(&"b")); - cache.sync(); - // order and counts: c -> 1, a -> 2, b -> 2 - - // To enter "d" (weight: 15), it needs to evict "c" (w: 5) and "a" (w: 10). - // "d" must have higher count than 3, which is the aggregated count - // of "a" and "c". - cache.insert("d", david); // count: d -> 0 - expected.push((Arc::new("d"), david, RemovalCause::Size)); - cache.sync(); - assert_eq!(cache.get(&"d"), None); // d -> 1 - assert!(!cache.contains_key(&"d")); - - cache.insert("d", david); - expected.push((Arc::new("d"), david, RemovalCause::Size)); - cache.sync(); - assert!(!cache.contains_key(&"d")); - assert_eq!(cache.get(&"d"), None); // d -> 2 - - cache.insert("d", david); - expected.push((Arc::new("d"), david, RemovalCause::Size)); - cache.sync(); - assert_eq!(cache.get(&"d"), None); // d -> 3 - assert!(!cache.contains_key(&"d")); - - cache.insert("d", david); - expected.push((Arc::new("d"), david, RemovalCause::Size)); - cache.sync(); - assert!(!cache.contains_key(&"d")); - assert_eq!(cache.get(&"d"), None); // d -> 4 - - // Finally "d" should be admitted by evicting "c" and "a". - cache.insert("d", dennis); - expected.push((Arc::new("c"), cindy, RemovalCause::Size)); - expected.push((Arc::new("a"), alice, RemovalCause::Size)); - cache.sync(); - assert_eq!(cache.get(&"a"), None); - assert_eq!(cache.get(&"b"), Some(bob)); - assert_eq!(cache.get(&"c"), None); - assert_eq!(cache.get(&"d"), Some(dennis)); - assert!(!cache.contains_key(&"a")); - assert!(cache.contains_key(&"b")); - assert!(!cache.contains_key(&"c")); - assert!(cache.contains_key(&"d")); - - // Update "b" with "bill" (w: 15 -> 20). This should evict "d" (w: 15). - cache.insert("b", bill); - expected.push((Arc::new("b"), bob, RemovalCause::Replaced)); - expected.push((Arc::new("d"), dennis, RemovalCause::Size)); - cache.sync(); - assert_eq!(cache.get(&"b"), Some(bill)); - assert_eq!(cache.get(&"d"), None); - assert!(cache.contains_key(&"b")); - assert!(!cache.contains_key(&"d")); - - // Re-add "a" (w: 10) and update "b" with "bob" (w: 20 -> 15). - cache.insert("a", alice); - cache.insert("b", bob); - expected.push((Arc::new("b"), bill, RemovalCause::Replaced)); - cache.sync(); - assert_eq!(cache.get(&"a"), Some(alice)); - assert_eq!(cache.get(&"b"), Some(bob)); - assert_eq!(cache.get(&"d"), None); - assert!(cache.contains_key(&"a")); - assert!(cache.contains_key(&"b")); - assert!(!cache.contains_key(&"d")); - - // Verify the sizes. - assert_eq!(cache.entry_count(), 2); - assert_eq!(cache.weighted_size(), 25); - - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); - - // Verify the notifications. 
- let actual = &*actual.lock(); - assert_eq!(actual.len(), expected.len()); - for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { - assert_eq!(actual, &expected, "expected[{}]", i); + run_test(DeliveryMode::Direct); + run_test(DeliveryMode::Queued); + + fn run_test(delivery_mode: DeliveryMode) { + let weigher = |_k: &&str, v: &(&str, u32)| v.1; + + let alice = ("alice", 10); + let bob = ("bob", 15); + let bill = ("bill", 20); + let cindy = ("cindy", 5); + let david = ("david", 15); + let dennis = ("dennis", 15); + + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + let listener_conf = notification::Configuration::builder() + .delivery_mode(delivery_mode) + .build(); + + // Create a cache with the eviction listener. + let mut cache = Cache::builder() + .max_capacity(31) + .weigher(weigher) + .eviction_listener_with_conf(listener, listener_conf) + .build(); + cache.reconfigure_for_testing(); + + // Make the cache exterior immutable. + let cache = cache; + + cache.insert("a", alice); + cache.insert("b", bob); + assert_eq_with_mode!(cache.get(&"a"), Some(alice), delivery_mode); + assert_with_mode!(cache.contains_key(&"a"), delivery_mode); + assert_with_mode!(cache.contains_key(&"b"), delivery_mode); + assert_eq_with_mode!(cache.get(&"b"), Some(bob), delivery_mode); + cache.sync(); + // order (LRU -> MRU) and counts: a -> 1, b -> 1 + + cache.insert("c", cindy); + assert_eq_with_mode!(cache.get(&"c"), Some(cindy), delivery_mode); + assert_with_mode!(cache.contains_key(&"c"), delivery_mode); + // order and counts: a -> 1, b -> 1, c -> 1 + cache.sync(); + + assert_with_mode!(cache.contains_key(&"a"), delivery_mode); + assert_eq_with_mode!(cache.get(&"a"), Some(alice), delivery_mode); + assert_eq_with_mode!(cache.get(&"b"), Some(bob), delivery_mode); + assert_with_mode!(cache.contains_key(&"b"), delivery_mode); + cache.sync(); + // order and counts: c -> 1, a -> 2, b -> 2 + + // To enter "d" (weight: 15), it needs to evict "c" (w: 5) and "a" (w: 10). + // "d" must have higher count than 3, which is the aggregated count + // of "a" and "c". + cache.insert("d", david); // count: d -> 0 + expected.push((Arc::new("d"), david, RemovalCause::Size)); + cache.sync(); + assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); // d -> 1 + assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); + + cache.insert("d", david); + expected.push((Arc::new("d"), david, RemovalCause::Size)); + cache.sync(); + assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); + assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); // d -> 2 + + cache.insert("d", david); + expected.push((Arc::new("d"), david, RemovalCause::Size)); + cache.sync(); + assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); // d -> 3 + assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); + + cache.insert("d", david); + expected.push((Arc::new("d"), david, RemovalCause::Size)); + cache.sync(); + assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); + assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); // d -> 4 + + // Finally "d" should be admitted by evicting "c" and "a". 
+ cache.insert("d", dennis); + expected.push((Arc::new("c"), cindy, RemovalCause::Size)); + expected.push((Arc::new("a"), alice, RemovalCause::Size)); + cache.sync(); + assert_eq_with_mode!(cache.get(&"a"), None, delivery_mode); + assert_eq_with_mode!(cache.get(&"b"), Some(bob), delivery_mode); + assert_eq_with_mode!(cache.get(&"c"), None, delivery_mode); + assert_eq_with_mode!(cache.get(&"d"), Some(dennis), delivery_mode); + assert_with_mode!(!cache.contains_key(&"a"), delivery_mode); + assert_with_mode!(cache.contains_key(&"b"), delivery_mode); + assert_with_mode!(!cache.contains_key(&"c"), delivery_mode); + assert_with_mode!(cache.contains_key(&"d"), delivery_mode); + + // Update "b" with "bill" (w: 15 -> 20). This should evict "d" (w: 15). + cache.insert("b", bill); + expected.push((Arc::new("b"), bob, RemovalCause::Replaced)); + expected.push((Arc::new("d"), dennis, RemovalCause::Size)); + cache.sync(); + assert_eq_with_mode!(cache.get(&"b"), Some(bill), delivery_mode); + assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); + assert_with_mode!(cache.contains_key(&"b"), delivery_mode); + assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); + + // Re-add "a" (w: 10) and update "b" with "bob" (w: 20 -> 15). + cache.insert("a", alice); + cache.insert("b", bob); + expected.push((Arc::new("b"), bill, RemovalCause::Replaced)); + cache.sync(); + assert_eq_with_mode!(cache.get(&"a"), Some(alice), delivery_mode); + assert_eq_with_mode!(cache.get(&"b"), Some(bob), delivery_mode); + assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); + assert_with_mode!(cache.contains_key(&"a"), delivery_mode); + assert_with_mode!(cache.contains_key(&"b"), delivery_mode); + assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); + + // Verify the sizes. + assert_eq_with_mode!(cache.entry_count(), 2, delivery_mode); + assert_eq_with_mode!(cache.weighted_size(), 25, delivery_mode); + + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_secs(1)); + + // Verify the notifications. + let actual = &*actual.lock(); + assert_eq_with_mode!(actual.len(), expected.len(), delivery_mode); + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { + assert_eq!( + actual, &expected, + "expected[{}] (delivery mode: {:?})", + i, delivery_mode + ); + } } } @@ -1280,167 +1308,193 @@ mod tests { #[test] fn invalidate_all() { - // The following `Vec`s will hold actual and expected notifications. - let actual = Arc::new(Mutex::new(Vec::new())); - let mut expected = Vec::new(); - - // Create an eviction listener. - let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); - - // Create a cache with the eviction listener. - let mut cache = Cache::builder() - .max_capacity(100) - .eviction_listener(listener, EvictionNotificationMode::Blocking) - .build(); - cache.reconfigure_for_testing(); - - // Make the cache exterior immutable. 
- let cache = cache; - - cache.insert("a", "alice"); - cache.insert("b", "bob"); - cache.insert("c", "cindy"); - assert_eq!(cache.get(&"a"), Some("alice")); - assert_eq!(cache.get(&"b"), Some("bob")); - assert_eq!(cache.get(&"c"), Some("cindy")); - assert!(cache.contains_key(&"a")); - assert!(cache.contains_key(&"b")); - assert!(cache.contains_key(&"c")); - cache.sync(); - - cache.invalidate_all(); - expected.push((Arc::new("a"), "alice", RemovalCause::Explicit)); - expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); - expected.push((Arc::new("c"), "cindy", RemovalCause::Explicit)); - cache.sync(); - - cache.insert("d", "david"); - cache.sync(); - - assert!(cache.get(&"a").is_none()); - assert!(cache.get(&"b").is_none()); - assert!(cache.get(&"c").is_none()); - assert_eq!(cache.get(&"d"), Some("david")); - assert!(!cache.contains_key(&"a")); - assert!(!cache.contains_key(&"b")); - assert!(!cache.contains_key(&"c")); - assert!(cache.contains_key(&"d")); - - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); - - // Verify the notifications. - let actual = &*actual.lock(); - assert_eq!(actual.len(), expected.len()); - for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { - assert_eq!(actual, &expected, "expected[{}]", i); + run_test(DeliveryMode::Direct); + run_test(DeliveryMode::Queued); + + fn run_test(delivery_mode: DeliveryMode) { + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + let listener_conf = notification::Configuration::builder() + .delivery_mode(delivery_mode) + .build(); + + // Create a cache with the eviction listener. + let mut cache = Cache::builder() + .max_capacity(100) + .eviction_listener_with_conf(listener, listener_conf) + .build(); + cache.reconfigure_for_testing(); + + // Make the cache exterior immutable. + let cache = cache; + + cache.insert("a", "alice"); + cache.insert("b", "bob"); + cache.insert("c", "cindy"); + assert_eq_with_mode!(cache.get(&"a"), Some("alice"), delivery_mode); + assert_eq_with_mode!(cache.get(&"b"), Some("bob"), delivery_mode); + assert_eq_with_mode!(cache.get(&"c"), Some("cindy"), delivery_mode); + assert_with_mode!(cache.contains_key(&"a"), delivery_mode); + assert_with_mode!(cache.contains_key(&"b"), delivery_mode); + assert_with_mode!(cache.contains_key(&"c"), delivery_mode); + cache.sync(); + + cache.invalidate_all(); + expected.push((Arc::new("a"), "alice", RemovalCause::Explicit)); + expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); + expected.push((Arc::new("c"), "cindy", RemovalCause::Explicit)); + cache.sync(); + + cache.insert("d", "david"); + cache.sync(); + + assert_with_mode!(cache.get(&"a").is_none(), delivery_mode); + assert_with_mode!(cache.get(&"b").is_none(), delivery_mode); + assert_with_mode!(cache.get(&"c").is_none(), delivery_mode); + assert_eq_with_mode!(cache.get(&"d"), Some("david"), delivery_mode); + assert_with_mode!(!cache.contains_key(&"a"), delivery_mode); + assert_with_mode!(!cache.contains_key(&"b"), delivery_mode); + assert_with_mode!(!cache.contains_key(&"c"), delivery_mode); + assert_with_mode!(cache.contains_key(&"d"), delivery_mode); + + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_secs(1)); + + // Verify the notifications. 
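Stepping back, every test converted in these hunks follows the same shape: the original body moves into a nested `run_test` function that is run once for `DeliveryMode::Direct` and once for `DeliveryMode::Queued`, the listener is wired up through a `notification::Configuration`, and the plain `assert!`/`assert_eq!` calls become `assert_with_mode!`/`assert_eq_with_mode!` so a failure reports which delivery mode was being exercised. A minimal sketch of that skeleton follows; the test name is made up for illustration, and the imports, the `reconfigure_for_testing()`/`sync()` helpers, and the macros are assumed to be the same crate-internal ones used by the surrounding tests.

```rust
#[test]
fn listener_smoke_test() {
    run_test(DeliveryMode::Direct);
    run_test(DeliveryMode::Queued);

    fn run_test(delivery_mode: DeliveryMode) {
        // Collect the notifications delivered by the eviction listener.
        let actual = Arc::new(Mutex::new(Vec::new()));
        let a1 = Arc::clone(&actual);
        let listener = move |k, v, cause| a1.lock().push((k, v, cause));
        let listener_conf = notification::Configuration::builder()
            .delivery_mode(delivery_mode)
            .build();

        // Build the cache with the listener and the chosen delivery mode.
        let mut cache = Cache::builder()
            .max_capacity(100)
            .eviction_listener_with_conf(listener, listener_conf)
            .build();
        cache.reconfigure_for_testing();
        let cache = cache;

        cache.insert("a", "alice");
        cache.invalidate(&"a"); // reported to the listener as RemovalCause::Explicit
        cache.sync();

        // The `_with_mode` macros carry the delivery mode into the failure message.
        assert_eq_with_mode!(cache.entry_count(), 0, delivery_mode);

        // Give queued notifications time to be delivered, as the tests above do.
        std::thread::sleep(Duration::from_secs(1));
        assert_eq_with_mode!(actual.lock().len(), 1, delivery_mode);
    }
}
```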
+ let actual = &*actual.lock(); + assert_eq_with_mode!(actual.len(), expected.len(), delivery_mode); + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { + assert_eq!( + actual, &expected, + "expected[{}] (delivery mode: {:?})", + i, delivery_mode + ); + } } } #[test] fn invalidate_entries_if() -> Result<(), Box> { - use std::collections::HashSet; - - // The following `Vec`s will hold actual and expected notifications. - let actual = Arc::new(Mutex::new(Vec::new())); - let mut expected = Vec::new(); - - // Create an eviction listener. - let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + run_test(DeliveryMode::Direct)?; + run_test(DeliveryMode::Queued)?; + + fn run_test(delivery_mode: DeliveryMode) -> Result<(), Box> { + use std::collections::HashSet; + + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + let listener_conf = notification::Configuration::builder() + .delivery_mode(delivery_mode) + .build(); + + // Create a cache with the eviction listener. + let mut cache = Cache::builder() + .max_capacity(100) + .support_invalidation_closures() + .eviction_listener_with_conf(listener, listener_conf) + .build(); + cache.reconfigure_for_testing(); + + let (clock, mock) = Clock::mock(); + cache.set_expiration_clock(Some(clock)); + + // Make the cache exterior immutable. + let cache = cache; + + cache.insert(0, "alice"); + cache.insert(1, "bob"); + cache.insert(2, "alex"); + cache.sync(); + + mock.increment(Duration::from_secs(5)); // 5 secs from the start. + cache.sync(); + + assert_eq_with_mode!(cache.get(&0), Some("alice"), delivery_mode); + assert_eq_with_mode!(cache.get(&1), Some("bob"), delivery_mode); + assert_eq_with_mode!(cache.get(&2), Some("alex"), delivery_mode); + assert_with_mode!(cache.contains_key(&0), delivery_mode); + assert_with_mode!(cache.contains_key(&1), delivery_mode); + assert_with_mode!(cache.contains_key(&2), delivery_mode); + + let names = ["alice", "alex"].iter().cloned().collect::>(); + cache.invalidate_entries_if(move |_k, &v| names.contains(v))?; + assert_eq_with_mode!(cache.base.invalidation_predicate_count(), 1, delivery_mode); + expected.push((Arc::new(0), "alice", RemovalCause::Explicit)); + expected.push((Arc::new(2), "alex", RemovalCause::Explicit)); + + mock.increment(Duration::from_secs(5)); // 10 secs from the start. + + cache.insert(3, "alice"); + + // Run the invalidation task and wait for it to finish. (TODO: Need a better way than sleeping) + cache.sync(); // To submit the invalidation task. + std::thread::sleep(Duration::from_millis(200)); + cache.sync(); // To process the task result. + std::thread::sleep(Duration::from_millis(200)); + + assert_with_mode!(cache.get(&0).is_none(), delivery_mode); + assert_with_mode!(cache.get(&2).is_none(), delivery_mode); + assert_eq_with_mode!(cache.get(&1), Some("bob"), delivery_mode); + // This should survive as it was inserted after calling invalidate_entries_if. 
+ assert_eq_with_mode!(cache.get(&3), Some("alice"), delivery_mode); + + assert_with_mode!(!cache.contains_key(&0), delivery_mode); + assert_with_mode!(cache.contains_key(&1), delivery_mode); + assert_with_mode!(!cache.contains_key(&2), delivery_mode); + assert_with_mode!(cache.contains_key(&3), delivery_mode); + + assert_eq_with_mode!(cache.entry_count(), 2, delivery_mode); + assert_eq_with_mode!(cache.invalidation_predicate_count(), 0, delivery_mode); + + mock.increment(Duration::from_secs(5)); // 15 secs from the start. + + cache.invalidate_entries_if(|_k, &v| v == "alice")?; + cache.invalidate_entries_if(|_k, &v| v == "bob")?; + assert_eq_with_mode!(cache.invalidation_predicate_count(), 2, delivery_mode); + // key 1 was inserted before key 3. + expected.push((Arc::new(1), "bob", RemovalCause::Explicit)); + expected.push((Arc::new(3), "alice", RemovalCause::Explicit)); + + // Run the invalidation task and wait for it to finish. (TODO: Need a better way than sleeping) + cache.sync(); // To submit the invalidation task. + std::thread::sleep(Duration::from_millis(200)); + cache.sync(); // To process the task result. + std::thread::sleep(Duration::from_millis(200)); + + assert_with_mode!(cache.get(&1).is_none(), delivery_mode); + assert_with_mode!(cache.get(&3).is_none(), delivery_mode); + + assert_with_mode!(!cache.contains_key(&1), delivery_mode); + assert_with_mode!(!cache.contains_key(&3), delivery_mode); + + assert_eq_with_mode!(cache.entry_count(), 0, delivery_mode); + assert_eq_with_mode!(cache.invalidation_predicate_count(), 0, delivery_mode); + + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_secs(1)); + + // Verify the notifications. + let actual = &*actual.lock(); + assert_eq_with_mode!(actual.len(), expected.len(), delivery_mode); + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { + assert_eq!( + actual, &expected, + "expected[{}] (delivery mode: {:?})", + i, delivery_mode + ); + } - // Create a cache with the eviction listener. - let mut cache = Cache::builder() - .max_capacity(100) - .support_invalidation_closures() - .eviction_listener(listener, EvictionNotificationMode::Blocking) - .build(); - cache.reconfigure_for_testing(); - - let (clock, mock) = Clock::mock(); - cache.set_expiration_clock(Some(clock)); - - // Make the cache exterior immutable. - let cache = cache; - - cache.insert(0, "alice"); - cache.insert(1, "bob"); - cache.insert(2, "alex"); - cache.sync(); - - mock.increment(Duration::from_secs(5)); // 5 secs from the start. - cache.sync(); - - assert_eq!(cache.get(&0), Some("alice")); - assert_eq!(cache.get(&1), Some("bob")); - assert_eq!(cache.get(&2), Some("alex")); - assert!(cache.contains_key(&0)); - assert!(cache.contains_key(&1)); - assert!(cache.contains_key(&2)); - - let names = ["alice", "alex"].iter().cloned().collect::>(); - cache.invalidate_entries_if(move |_k, &v| names.contains(v))?; - assert_eq!(cache.base.invalidation_predicate_count(), 1); - expected.push((Arc::new(0), "alice", RemovalCause::Explicit)); - expected.push((Arc::new(2), "alex", RemovalCause::Explicit)); - - mock.increment(Duration::from_secs(5)); // 10 secs from the start. - - cache.insert(3, "alice"); - - // Run the invalidation task and wait for it to finish. (TODO: Need a better way than sleeping) - cache.sync(); // To submit the invalidation task. - std::thread::sleep(Duration::from_millis(200)); - cache.sync(); // To process the task result. 
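The `invalidate_entries_if` tests, here and in the segmented variant further down, all follow the same submit-then-process rhythm: registering a predicate bumps `invalidation_predicate_count()`, one `sync()` submits the invalidation task, and a second `sync()` after a short sleep applies the result and drops the count back to zero, with the matched entries reported as `RemovalCause::Explicit`. Condensed below, keeping the tests' own caveat that sleeping is only a stopgap for waiting on the background task, and assuming an enclosing test that returns `Result<(), Box<dyn std::error::Error>>` like the one above:

```rust
// Entries matching the predicate will be reported as Explicit removals.
cache.invalidate_entries_if(|_k, &v| v == "alice")?;
assert_eq_with_mode!(cache.invalidation_predicate_count(), 1, delivery_mode);

cache.sync(); // submit the invalidation task
std::thread::sleep(Duration::from_millis(200));
cache.sync(); // process the task result
std::thread::sleep(Duration::from_millis(200));

// The predicate has been consumed and the matching entries are gone.
assert_eq_with_mode!(cache.invalidation_predicate_count(), 0, delivery_mode);
```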
- std::thread::sleep(Duration::from_millis(200)); - - assert!(cache.get(&0).is_none()); - assert!(cache.get(&2).is_none()); - assert_eq!(cache.get(&1), Some("bob")); - // This should survive as it was inserted after calling invalidate_entries_if. - assert_eq!(cache.get(&3), Some("alice")); - - assert!(!cache.contains_key(&0)); - assert!(cache.contains_key(&1)); - assert!(!cache.contains_key(&2)); - assert!(cache.contains_key(&3)); - - assert_eq!(cache.entry_count(), 2); - assert_eq!(cache.invalidation_predicate_count(), 0); - - mock.increment(Duration::from_secs(5)); // 15 secs from the start. - - cache.invalidate_entries_if(|_k, &v| v == "alice")?; - cache.invalidate_entries_if(|_k, &v| v == "bob")?; - assert_eq!(cache.invalidation_predicate_count(), 2); - // key 1 was inserted before key 3. - expected.push((Arc::new(1), "bob", RemovalCause::Explicit)); - expected.push((Arc::new(3), "alice", RemovalCause::Explicit)); - - // Run the invalidation task and wait for it to finish. (TODO: Need a better way than sleeping) - cache.sync(); // To submit the invalidation task. - std::thread::sleep(Duration::from_millis(200)); - cache.sync(); // To process the task result. - std::thread::sleep(Duration::from_millis(200)); - - assert!(cache.get(&1).is_none()); - assert!(cache.get(&3).is_none()); - - assert!(!cache.contains_key(&1)); - assert!(!cache.contains_key(&3)); - - assert_eq!(cache.entry_count(), 0); - assert_eq!(cache.invalidation_predicate_count(), 0); - - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); - - // Verify the notifications. - let actual = &*actual.lock(); - assert_eq!(actual.len(), expected.len()); - for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { - assert_eq!(actual, &expected, "expected[{}]", i); + Ok(()) } Ok(()) @@ -1448,178 +1502,202 @@ mod tests { #[test] fn time_to_live() { - // The following `Vec`s will hold actual and expected notifications. - let actual = Arc::new(Mutex::new(Vec::new())); - let mut expected = Vec::new(); + run_test(DeliveryMode::Direct); + run_test(DeliveryMode::Queued); - // Create an eviction listener. - let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + fn run_test(delivery_mode: DeliveryMode) { + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); - // Create a cache with the eviction listener. - let mut cache = Cache::builder() - .max_capacity(100) - .time_to_live(Duration::from_secs(10)) - .eviction_listener(listener, EvictionNotificationMode::Blocking) - .build(); - cache.reconfigure_for_testing(); + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + let listener_conf = notification::Configuration::builder() + .delivery_mode(delivery_mode) + .build(); + + // Create a cache with the eviction listener. + let mut cache = Cache::builder() + .max_capacity(100) + .time_to_live(Duration::from_secs(10)) + .eviction_listener_with_conf(listener, listener_conf) + .build(); + cache.reconfigure_for_testing(); - let (clock, mock) = Clock::mock(); - cache.set_expiration_clock(Some(clock)); + let (clock, mock) = Clock::mock(); + cache.set_expiration_clock(Some(clock)); - // Make the cache exterior immutable. - let cache = cache; + // Make the cache exterior immutable. 
+ let cache = cache; - cache.insert("a", "alice"); - cache.sync(); + cache.insert("a", "alice"); + cache.sync(); - mock.increment(Duration::from_secs(5)); // 5 secs from the start. - cache.sync(); + mock.increment(Duration::from_secs(5)); // 5 secs from the start. + cache.sync(); - assert_eq!(cache.get(&"a"), Some("alice")); - assert!(cache.contains_key(&"a")); + assert_eq_with_mode!(cache.get(&"a"), Some("alice"), delivery_mode); + assert_with_mode!(cache.contains_key(&"a"), delivery_mode); - mock.increment(Duration::from_secs(5)); // 10 secs. - expected.push((Arc::new("a"), "alice", RemovalCause::Expired)); - assert_eq!(cache.get(&"a"), None); - assert!(!cache.contains_key(&"a")); + mock.increment(Duration::from_secs(5)); // 10 secs. + expected.push((Arc::new("a"), "alice", RemovalCause::Expired)); + assert_eq_with_mode!(cache.get(&"a"), None, delivery_mode); + assert_with_mode!(!cache.contains_key(&"a"), delivery_mode); - assert_eq!(cache.iter().count(), 0); + assert_eq_with_mode!(cache.iter().count(), 0, delivery_mode); - cache.sync(); - assert!(cache.is_table_empty()); + cache.sync(); + assert_with_mode!(cache.is_table_empty(), delivery_mode); - cache.insert("b", "bob"); - cache.sync(); + cache.insert("b", "bob"); + cache.sync(); - assert_eq!(cache.entry_count(), 1); + assert_eq_with_mode!(cache.entry_count(), 1, delivery_mode); - mock.increment(Duration::from_secs(5)); // 15 secs. - cache.sync(); + mock.increment(Duration::from_secs(5)); // 15 secs. + cache.sync(); - assert_eq!(cache.get(&"b"), Some("bob")); - assert!(cache.contains_key(&"b")); - assert_eq!(cache.entry_count(), 1); + assert_eq_with_mode!(cache.get(&"b"), Some("bob"), delivery_mode); + assert_with_mode!(cache.contains_key(&"b"), delivery_mode); + assert_eq_with_mode!(cache.entry_count(), 1, delivery_mode); - cache.insert("b", "bill"); - expected.push((Arc::new("b"), "bob", RemovalCause::Replaced)); - cache.sync(); + cache.insert("b", "bill"); + expected.push((Arc::new("b"), "bob", RemovalCause::Replaced)); + cache.sync(); - mock.increment(Duration::from_secs(5)); // 20 secs - cache.sync(); + mock.increment(Duration::from_secs(5)); // 20 secs + cache.sync(); - assert_eq!(cache.get(&"b"), Some("bill")); - assert!(cache.contains_key(&"b")); - assert_eq!(cache.entry_count(), 1); + assert_eq_with_mode!(cache.get(&"b"), Some("bill"), delivery_mode); + assert_with_mode!(cache.contains_key(&"b"), delivery_mode); + assert_eq_with_mode!(cache.entry_count(), 1, delivery_mode); - mock.increment(Duration::from_secs(5)); // 25 secs - expected.push((Arc::new("b"), "bill", RemovalCause::Expired)); + mock.increment(Duration::from_secs(5)); // 25 secs + expected.push((Arc::new("b"), "bill", RemovalCause::Expired)); - assert_eq!(cache.get(&"a"), None); - assert_eq!(cache.get(&"b"), None); - assert!(!cache.contains_key(&"a")); - assert!(!cache.contains_key(&"b")); + assert_eq_with_mode!(cache.get(&"a"), None, delivery_mode); + assert_eq_with_mode!(cache.get(&"b"), None, delivery_mode); + assert_with_mode!(!cache.contains_key(&"a"), delivery_mode); + assert_with_mode!(!cache.contains_key(&"b"), delivery_mode); - assert_eq!(cache.iter().count(), 0); + assert_eq_with_mode!(cache.iter().count(), 0, delivery_mode); - cache.sync(); - assert!(cache.is_table_empty()); + cache.sync(); + assert_with_mode!(cache.is_table_empty(), delivery_mode); - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); + // Ensure all scheduled notifications have been processed. 
+ std::thread::sleep(Duration::from_secs(1)); - // Verify the notifications. - let actual = &*actual.lock(); - assert_eq!(actual.len(), expected.len()); - for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { - assert_eq!(actual, &expected, "expected[{}]", i); + // Verify the notifications. + let actual = &*actual.lock(); + assert_eq_with_mode!(actual.len(), expected.len(), delivery_mode); + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { + assert_eq!( + actual, &expected, + "expected[{}] (delivery mode: {:?})", + i, delivery_mode + ); + } } } #[test] fn time_to_idle() { - // The following `Vec`s will hold actual and expected notifications. - let actual = Arc::new(Mutex::new(Vec::new())); - let mut expected = Vec::new(); + run_test(DeliveryMode::Direct); + run_test(DeliveryMode::Queued); - // Create an eviction listener. - let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + fn run_test(delivery_mode: DeliveryMode) { + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); - // Create a cache with the eviction listener. - let mut cache = Cache::builder() - .max_capacity(100) - .time_to_idle(Duration::from_secs(10)) - .eviction_listener(listener, EvictionNotificationMode::Blocking) - .build(); - cache.reconfigure_for_testing(); + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + let listener_conf = notification::Configuration::builder() + .delivery_mode(delivery_mode) + .build(); + + // Create a cache with the eviction listener. + let mut cache = Cache::builder() + .max_capacity(100) + .time_to_idle(Duration::from_secs(10)) + .eviction_listener_with_conf(listener, listener_conf) + .build(); + cache.reconfigure_for_testing(); - let (clock, mock) = Clock::mock(); - cache.set_expiration_clock(Some(clock)); + let (clock, mock) = Clock::mock(); + cache.set_expiration_clock(Some(clock)); - // Make the cache exterior immutable. - let cache = cache; + // Make the cache exterior immutable. + let cache = cache; - cache.insert("a", "alice"); - cache.sync(); + cache.insert("a", "alice"); + cache.sync(); - mock.increment(Duration::from_secs(5)); // 5 secs from the start. - cache.sync(); + mock.increment(Duration::from_secs(5)); // 5 secs from the start. + cache.sync(); - assert_eq!(cache.get(&"a"), Some("alice")); + assert_eq_with_mode!(cache.get(&"a"), Some("alice"), delivery_mode); - mock.increment(Duration::from_secs(5)); // 10 secs. - cache.sync(); + mock.increment(Duration::from_secs(5)); // 10 secs. + cache.sync(); - cache.insert("b", "bob"); - cache.sync(); + cache.insert("b", "bob"); + cache.sync(); - assert_eq!(cache.entry_count(), 2); + assert_eq_with_mode!(cache.entry_count(), 2, delivery_mode); - mock.increment(Duration::from_secs(2)); // 12 secs. - cache.sync(); + mock.increment(Duration::from_secs(2)); // 12 secs. + cache.sync(); - // contains_key does not reset the idle timer for the key. - assert!(cache.contains_key(&"a")); - assert!(cache.contains_key(&"b")); - cache.sync(); + // contains_key does not reset the idle timer for the key. 
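Both expiration tests drive time with a mocked clock instead of real waiting: `Clock::mock()` returns the clock to install via `set_expiration_clock` plus a handle whose `increment` advances it, and once an entry is past its TTL or TTI deadline the next read reports it as gone (recording a `RemovalCause::Expired` notification for the listener) even though `entry_count()` still includes it until a `sync()` sweeps it out. A minimal sketch of that timeline for the 10-second TTL case, reusing only helpers shown in these hunks:

```rust
let (clock, mock) = Clock::mock();
cache.set_expiration_clock(Some(clock));

cache.insert("a", "alice");
cache.sync();

mock.increment(Duration::from_secs(5)); // 5 secs: still within the TTL
assert_eq_with_mode!(cache.get(&"a"), Some("alice"), delivery_mode);

mock.increment(Duration::from_secs(5)); // 10 secs: the TTL has elapsed
assert_eq_with_mode!(cache.get(&"a"), None, delivery_mode);

cache.sync(); // the expired entry is actually removed from the table here
assert_with_mode!(cache.is_table_empty(), delivery_mode);
```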
+ assert_with_mode!(cache.contains_key(&"a"), delivery_mode); + assert_with_mode!(cache.contains_key(&"b"), delivery_mode); + cache.sync(); - assert_eq!(cache.entry_count(), 2); + assert_eq_with_mode!(cache.entry_count(), 2, delivery_mode); - mock.increment(Duration::from_secs(3)); // 15 secs. - expected.push((Arc::new("a"), "alice", RemovalCause::Expired)); + mock.increment(Duration::from_secs(3)); // 15 secs. + expected.push((Arc::new("a"), "alice", RemovalCause::Expired)); - assert_eq!(cache.get(&"a"), None); - assert_eq!(cache.get(&"b"), Some("bob")); - assert!(!cache.contains_key(&"a")); - assert!(cache.contains_key(&"b")); + assert_eq_with_mode!(cache.get(&"a"), None, delivery_mode); + assert_eq_with_mode!(cache.get(&"b"), Some("bob"), delivery_mode); + assert_with_mode!(!cache.contains_key(&"a"), delivery_mode); + assert_with_mode!(cache.contains_key(&"b"), delivery_mode); - assert_eq!(cache.iter().count(), 1); + assert_eq_with_mode!(cache.iter().count(), 1, delivery_mode); - cache.sync(); - assert_eq!(cache.entry_count(), 1); + cache.sync(); + assert_eq_with_mode!(cache.entry_count(), 1, delivery_mode); - mock.increment(Duration::from_secs(10)); // 25 secs - expected.push((Arc::new("b"), "bob", RemovalCause::Expired)); + mock.increment(Duration::from_secs(10)); // 25 secs + expected.push((Arc::new("b"), "bob", RemovalCause::Expired)); - assert_eq!(cache.get(&"a"), None); - assert_eq!(cache.get(&"b"), None); - assert!(!cache.contains_key(&"a")); - assert!(!cache.contains_key(&"b")); + assert_eq_with_mode!(cache.get(&"a"), None, delivery_mode); + assert_eq_with_mode!(cache.get(&"b"), None, delivery_mode); + assert_with_mode!(!cache.contains_key(&"a"), delivery_mode); + assert_with_mode!(!cache.contains_key(&"b"), delivery_mode); - assert_eq!(cache.iter().count(), 0); + assert_eq_with_mode!(cache.iter().count(), 0, delivery_mode); - cache.sync(); - assert!(cache.is_table_empty()); + cache.sync(); + assert_with_mode!(cache.is_table_empty(), delivery_mode); - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_secs(1)); - // Verify the notifications. - let actual = &*actual.lock(); - assert_eq!(actual.len(), expected.len()); - for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { - assert_eq!(actual, &expected, "expected[{}]", i); + // Verify the notifications. + let actual = &*actual.lock(); + assert_eq_with_mode!(actual.len(), expected.len(), delivery_mode); + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { + assert_eq!( + actual, &expected, + "expected[{}] (delivery mode: {:?})", + i, delivery_mode + ); + } } } @@ -2134,168 +2212,192 @@ mod tests { // - time_to_live // - time_to_idle - // The following `Vec`s will hold actual and expected notifications. - let actual = Arc::new(Mutex::new(Vec::new())); - let mut expected = Vec::new(); - - // Create an eviction listener. - let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); - - // Create a cache with the eviction listener. - let mut cache = Cache::builder() - .max_capacity(3) - .eviction_listener(listener, EvictionNotificationMode::Blocking) - .build(); - cache.reconfigure_for_testing(); - - // Make the cache exterior immutable. 
- let cache = cache; - - cache.insert('a', "alice"); - cache.invalidate(&'a'); - expected.push((Arc::new('a'), "alice", RemovalCause::Explicit)); - - cache.sync(); - assert_eq!(cache.entry_count(), 0); - - cache.insert('b', "bob"); - cache.insert('c', "cathy"); - cache.insert('d', "david"); - cache.sync(); - assert_eq!(cache.entry_count(), 3); - - // This will be rejected due to the size constraint. - cache.insert('e', "emily"); - expected.push((Arc::new('e'), "emily", RemovalCause::Size)); - cache.sync(); - assert_eq!(cache.entry_count(), 3); - - // Raise the popularity of 'e' so it will be accepted next time. - cache.get(&'e'); - cache.sync(); - - // Retry. - cache.insert('e', "eliza"); - // and the LRU entry will be evicted. - expected.push((Arc::new('b'), "bob", RemovalCause::Size)); - cache.sync(); - assert_eq!(cache.entry_count(), 3); - - // Replace an existing entry. - cache.insert('d', "dennis"); - expected.push((Arc::new('d'), "david", RemovalCause::Replaced)); - cache.sync(); - assert_eq!(cache.entry_count(), 3); - - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); - - // Verify the notifications. - let actual = &*actual.lock(); - assert_eq!(actual.len(), expected.len()); - for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { - assert_eq!(actual, &expected, "expected[{}]", i); + run_test(DeliveryMode::Direct); + run_test(DeliveryMode::Queued); + + fn run_test(delivery_mode: DeliveryMode) { + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + let listener_conf = notification::Configuration::builder() + .delivery_mode(delivery_mode) + .build(); + + // Create a cache with the eviction listener. + let mut cache = Cache::builder() + .max_capacity(3) + .eviction_listener_with_conf(listener, listener_conf) + .build(); + cache.reconfigure_for_testing(); + + // Make the cache exterior immutable. + let cache = cache; + + cache.insert('a', "alice"); + cache.invalidate(&'a'); + expected.push((Arc::new('a'), "alice", RemovalCause::Explicit)); + + cache.sync(); + assert_eq_with_mode!(cache.entry_count(), 0, delivery_mode); + + cache.insert('b', "bob"); + cache.insert('c', "cathy"); + cache.insert('d', "david"); + cache.sync(); + assert_eq_with_mode!(cache.entry_count(), 3, delivery_mode); + + // This will be rejected due to the size constraint. + cache.insert('e', "emily"); + expected.push((Arc::new('e'), "emily", RemovalCause::Size)); + cache.sync(); + assert_eq_with_mode!(cache.entry_count(), 3, delivery_mode); + + // Raise the popularity of 'e' so it will be accepted next time. + cache.get(&'e'); + cache.sync(); + + // Retry. + cache.insert('e', "eliza"); + // and the LRU entry will be evicted. + expected.push((Arc::new('b'), "bob", RemovalCause::Size)); + cache.sync(); + assert_eq_with_mode!(cache.entry_count(), 3, delivery_mode); + + // Replace an existing entry. + cache.insert('d', "dennis"); + expected.push((Arc::new('d'), "david", RemovalCause::Replaced)); + cache.sync(); + assert_eq_with_mode!(cache.entry_count(), 3, delivery_mode); + + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_secs(1)); + + // Verify the notifications. 
+ let actual = &*actual.lock(); + assert_eq_with_mode!(actual.len(), expected.len(), delivery_mode); + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { + assert_eq!( + actual, &expected, + "expected[{}] (delivery mode: {:?})", + i, delivery_mode + ); + } } } #[test] fn test_removal_notifications_with_updates() { - // The following `Vec`s will hold actual and expected notifications. - let actual = Arc::new(Mutex::new(Vec::new())); - let mut expected = Vec::new(); - - // Create an eviction listener. - let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); - - // Create a cache with the eviction listener and also TTL and TTI. - let mut cache = Cache::builder() - .eviction_listener(listener, EvictionNotificationMode::Blocking) - .time_to_live(Duration::from_secs(7)) - .time_to_idle(Duration::from_secs(5)) - .build(); - cache.reconfigure_for_testing(); - - let (clock, mock) = Clock::mock(); - cache.set_expiration_clock(Some(clock)); - - // Make the cache exterior immutable. - let cache = cache; - - cache.insert("alice", "a0"); - cache.sync(); - - // Now alice (a0) has been expired by the idle timeout (TTI). - mock.increment(Duration::from_secs(6)); - expected.push((Arc::new("alice"), "a0", RemovalCause::Expired)); - assert_eq!(cache.get(&"alice"), None); - - // We have not ran sync after the expiration of alice (a0), so it is - // still in the cache. - assert_eq!(cache.entry_count(), 1); - - // Re-insert alice with a different value. Since alice (a0) is still - // in the cache, this is actually a replace operation rather than an - // insert operation. We want to verify that the RemovalCause of a0 is - // Expired, not Replaced. - cache.insert("alice", "a1"); - cache.sync(); - - mock.increment(Duration::from_secs(4)); - assert_eq!(cache.get(&"alice"), Some("a1")); - cache.sync(); - - // Now alice has been expired by time-to-live (TTL). - mock.increment(Duration::from_secs(4)); - expected.push((Arc::new("alice"), "a1", RemovalCause::Expired)); - assert_eq!(cache.get(&"alice"), None); - - // But, again, it is still in the cache. - assert_eq!(cache.entry_count(), 1); - - // Re-insert alice with a different value and verify that the - // RemovalCause of a1 is Expired (not Replaced). - cache.insert("alice", "a2"); - cache.sync(); - - assert_eq!(cache.entry_count(), 1); - - // Now alice (a2) has been expired by the idle timeout. - mock.increment(Duration::from_secs(6)); - expected.push((Arc::new("alice"), "a2", RemovalCause::Expired)); - assert_eq!(cache.get(&"alice"), None); - assert_eq!(cache.entry_count(), 1); - - // This invalidate will internally remove alice (a2). - cache.invalidate(&"alice"); - cache.sync(); - assert_eq!(cache.entry_count(), 0); - - // Re-insert, and this time, make it expired by the TTL. - cache.insert("alice", "a3"); - cache.sync(); - mock.increment(Duration::from_secs(4)); - assert_eq!(cache.get(&"alice"), Some("a3")); - cache.sync(); - mock.increment(Duration::from_secs(4)); - expected.push((Arc::new("alice"), "a3", RemovalCause::Expired)); - assert_eq!(cache.get(&"alice"), None); - assert_eq!(cache.entry_count(), 1); - - // This invalidate will internally remove alice (a2). - cache.invalidate(&"alice"); - cache.sync(); - assert_eq!(cache.entry_count(), 0); - - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); - - // Verify the notifications. 
- let actual = &*actual.lock(); - assert_eq!(actual.len(), expected.len()); - for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { - assert_eq!(actual, &expected, "expected[{}]", i); + run_test(DeliveryMode::Direct); + run_test(DeliveryMode::Queued); + + fn run_test(delivery_mode: DeliveryMode) { + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + let listener_conf = notification::Configuration::builder() + .delivery_mode(delivery_mode) + .build(); + + // Create a cache with the eviction listener and also TTL and TTI. + let mut cache = Cache::builder() + .eviction_listener_with_conf(listener, listener_conf) + .time_to_live(Duration::from_secs(7)) + .time_to_idle(Duration::from_secs(5)) + .build(); + cache.reconfigure_for_testing(); + + let (clock, mock) = Clock::mock(); + cache.set_expiration_clock(Some(clock)); + + // Make the cache exterior immutable. + let cache = cache; + + cache.insert("alice", "a0"); + cache.sync(); + + // Now alice (a0) has been expired by the idle timeout (TTI). + mock.increment(Duration::from_secs(6)); + expected.push((Arc::new("alice"), "a0", RemovalCause::Expired)); + assert_eq_with_mode!(cache.get(&"alice"), None, delivery_mode); + + // We have not ran sync after the expiration of alice (a0), so it is + // still in the cache. + assert_eq_with_mode!(cache.entry_count(), 1, delivery_mode); + + // Re-insert alice with a different value. Since alice (a0) is still + // in the cache, this is actually a replace operation rather than an + // insert operation. We want to verify that the RemovalCause of a0 is + // Expired, not Replaced. + cache.insert("alice", "a1"); + cache.sync(); + + mock.increment(Duration::from_secs(4)); + assert_eq_with_mode!(cache.get(&"alice"), Some("a1"), delivery_mode); + cache.sync(); + + // Now alice has been expired by time-to-live (TTL). + mock.increment(Duration::from_secs(4)); + expected.push((Arc::new("alice"), "a1", RemovalCause::Expired)); + assert_eq_with_mode!(cache.get(&"alice"), None, delivery_mode); + + // But, again, it is still in the cache. + assert_eq_with_mode!(cache.entry_count(), 1, delivery_mode); + + // Re-insert alice with a different value and verify that the + // RemovalCause of a1 is Expired (not Replaced). + cache.insert("alice", "a2"); + cache.sync(); + + assert_eq_with_mode!(cache.entry_count(), 1, delivery_mode); + + // Now alice (a2) has been expired by the idle timeout. + mock.increment(Duration::from_secs(6)); + expected.push((Arc::new("alice"), "a2", RemovalCause::Expired)); + assert_eq_with_mode!(cache.get(&"alice"), None, delivery_mode); + assert_eq_with_mode!(cache.entry_count(), 1, delivery_mode); + + // This invalidate will internally remove alice (a2). + cache.invalidate(&"alice"); + cache.sync(); + assert_eq_with_mode!(cache.entry_count(), 0, delivery_mode); + + // Re-insert, and this time, make it expired by the TTL. 
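The point this updates test pins down: when an entry has already passed its TTL or TTI deadline but has not yet been swept, re-inserting the same key must report the old value with `RemovalCause::Expired`, not `Replaced`; `Replaced` is reserved for overwriting a still-live entry (the `insert("b", bill)` case earlier). Compressed to the first round, with the same 5-second TTI and mock clock as above:

```rust
cache.insert("alice", "a0");
cache.sync();

mock.increment(Duration::from_secs(6)); // past the 5-second TTI
assert_eq_with_mode!(cache.get(&"alice"), None, delivery_mode);
assert_eq_with_mode!(cache.entry_count(), 1, delivery_mode); // expired but not yet swept

// Overwriting the expired-but-resident entry reports "a0" as Expired, not Replaced.
cache.insert("alice", "a1");
cache.sync();
```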
+ cache.insert("alice", "a3"); + cache.sync(); + mock.increment(Duration::from_secs(4)); + assert_eq_with_mode!(cache.get(&"alice"), Some("a3"), delivery_mode); + cache.sync(); + mock.increment(Duration::from_secs(4)); + expected.push((Arc::new("alice"), "a3", RemovalCause::Expired)); + assert_eq_with_mode!(cache.get(&"alice"), None, delivery_mode); + assert_eq_with_mode!(cache.entry_count(), 1, delivery_mode); + + // This invalidate will internally remove alice (a2). + cache.invalidate(&"alice"); + cache.sync(); + assert_eq_with_mode!(cache.entry_count(), 0, delivery_mode); + + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_secs(1)); + + // Verify the notifications. + let actual = &*actual.lock(); + assert_eq_with_mode!(actual.len(), expected.len(), delivery_mode); + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { + assert_eq!( + actual, &expected, + "expected[{}] (delivery mode: {:?})", + i, delivery_mode + ); + } } } diff --git a/src/sync/segment.rs b/src/sync/segment.rs index ebb4cd41..5fcbc2e2 100644 --- a/src/sync/segment.rs +++ b/src/sync/segment.rs @@ -1,7 +1,7 @@ use super::{cache::Cache, CacheBuilder, ConcurrentCacheExt}; use crate::{ common::concurrent::Weigher, - notification::{EvictionListener, EvictionNotificationMode}, + notification::{self, EvictionListener}, sync_base::iter::{Iter, ScanningGet}, Policy, PredicateError, }; @@ -205,7 +205,7 @@ where build_hasher: S, weigher: Option>, eviction_listener: Option>, - eviction_notification_mode: Option, + eviction_listener_conf: Option, time_to_live: Option, time_to_idle: Option, invalidator_enabled: bool, @@ -218,7 +218,7 @@ where build_hasher, weigher, eviction_listener, - eviction_notification_mode, + eviction_listener_conf, time_to_live, time_to_idle, invalidator_enabled, @@ -584,7 +584,7 @@ where build_hasher: S, weigher: Option>, eviction_listener: Option>, - eviction_notification_mode: Option, + eviction_listener_conf: Option, time_to_live: Option, time_to_idle: Option, invalidator_enabled: bool, @@ -606,7 +606,7 @@ where build_hasher.clone(), weigher.as_ref().map(Arc::clone), eviction_listener.as_ref().map(Arc::clone), - eviction_notification_mode.clone(), + eviction_listener_conf.clone(), time_to_live, time_to_idle, invalidator_enabled, @@ -652,93 +652,109 @@ where #[cfg(test)] mod tests { use super::{ConcurrentCacheExt, SegmentedCache}; - use crate::notification::{EvictionNotificationMode, RemovalCause}; + use crate::notification::{ + self, + macros::{assert_eq_with_mode, assert_with_mode}, + DeliveryMode, RemovalCause, + }; use parking_lot::Mutex; use std::{sync::Arc, time::Duration}; #[test] fn basic_single_thread() { - // The following `Vec`s will hold actual and expected notifications. - let actual = Arc::new(Mutex::new(Vec::new())); - let mut expected = Vec::new(); - - // Create an eviction listener. - let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); - - // Create a cache with the eviction listener. - let mut cache = SegmentedCache::builder(1) - .max_capacity(3) - .eviction_listener(listener, EvictionNotificationMode::NonBlocking) - .build(); - cache.reconfigure_for_testing(); - - // Make the cache exterior immutable. 
- let cache = cache; - - cache.insert("a", "alice"); - cache.insert("b", "bob"); - assert_eq!(cache.get(&"a"), Some("alice")); - assert!(cache.contains_key(&"a")); - assert!(cache.contains_key(&"b")); - assert_eq!(cache.get(&"b"), Some("bob")); - cache.sync(); - // counts: a -> 1, b -> 1 - - cache.insert("c", "cindy"); - assert_eq!(cache.get(&"c"), Some("cindy")); - assert!(cache.contains_key(&"c")); - // counts: a -> 1, b -> 1, c -> 1 - cache.sync(); - - assert!(cache.contains_key(&"a")); - assert_eq!(cache.get(&"a"), Some("alice")); - assert_eq!(cache.get(&"b"), Some("bob")); - assert!(cache.contains_key(&"b")); - cache.sync(); - // counts: a -> 2, b -> 2, c -> 1 - - // "d" should not be admitted because its frequency is too low. - cache.insert("d", "david"); // count: d -> 0 - expected.push((Arc::new("d"), "david", RemovalCause::Size)); - cache.sync(); - assert_eq!(cache.get(&"d"), None); // d -> 1 - assert!(!cache.contains_key(&"d")); - - cache.insert("d", "david"); - expected.push((Arc::new("d"), "david", RemovalCause::Size)); - cache.sync(); - assert!(!cache.contains_key(&"d")); - assert_eq!(cache.get(&"d"), None); // d -> 2 - - // "d" should be admitted and "c" should be evicted - // because d's frequency is higher than c's. - cache.insert("d", "dennis"); - expected.push((Arc::new("c"), "cindy", RemovalCause::Size)); - cache.sync(); - assert_eq!(cache.get(&"a"), Some("alice")); - assert_eq!(cache.get(&"b"), Some("bob")); - assert_eq!(cache.get(&"c"), None); - assert_eq!(cache.get(&"d"), Some("dennis")); - assert!(cache.contains_key(&"a")); - assert!(cache.contains_key(&"b")); - assert!(!cache.contains_key(&"c")); - assert!(cache.contains_key(&"d")); - - cache.invalidate(&"b"); - expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); - cache.sync(); - assert_eq!(cache.get(&"b"), None); - assert!(!cache.contains_key(&"b")); - - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); - - // Verify the notifications. - let actual = &*actual.lock(); - assert_eq!(actual.len(), expected.len()); - for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { - assert_eq!(actual, &expected, "expected[{}]", i); + run_test(DeliveryMode::Direct); + run_test(DeliveryMode::Queued); + + fn run_test(delivery_mode: DeliveryMode) { + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + let listener_conf = notification::Configuration::builder() + .delivery_mode(delivery_mode) + .build(); + + // Create a cache with the eviction listener. + let mut cache = SegmentedCache::builder(1) + .max_capacity(3) + .eviction_listener_with_conf(listener, listener_conf) + .build(); + cache.reconfigure_for_testing(); + + // Make the cache exterior immutable. 
+ let cache = cache; + + cache.insert("a", "alice"); + cache.insert("b", "bob"); + assert_eq_with_mode!(cache.get(&"a"), Some("alice"), delivery_mode); + assert_with_mode!(cache.contains_key(&"a"), delivery_mode); + assert_with_mode!(cache.contains_key(&"b"), delivery_mode); + assert_eq_with_mode!(cache.get(&"b"), Some("bob"), delivery_mode); + cache.sync(); + // counts: a -> 1, b -> 1 + + cache.insert("c", "cindy"); + assert_eq_with_mode!(cache.get(&"c"), Some("cindy"), delivery_mode); + assert_with_mode!(cache.contains_key(&"c"), delivery_mode); + // counts: a -> 1, b -> 1, c -> 1 + cache.sync(); + + assert_with_mode!(cache.contains_key(&"a"), delivery_mode); + assert_eq_with_mode!(cache.get(&"a"), Some("alice"), delivery_mode); + assert_eq_with_mode!(cache.get(&"b"), Some("bob"), delivery_mode); + assert_with_mode!(cache.contains_key(&"b"), delivery_mode); + cache.sync(); + // counts: a -> 2, b -> 2, c -> 1 + + // "d" should not be admitted because its frequency is too low. + cache.insert("d", "david"); // count: d -> 0 + expected.push((Arc::new("d"), "david", RemovalCause::Size)); + cache.sync(); + assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); // d -> 1 + assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); + + cache.insert("d", "david"); + expected.push((Arc::new("d"), "david", RemovalCause::Size)); + cache.sync(); + assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); + assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); // d -> 2 + + // "d" should be admitted and "c" should be evicted + // because d's frequency is higher than c's. + cache.insert("d", "dennis"); + expected.push((Arc::new("c"), "cindy", RemovalCause::Size)); + cache.sync(); + assert_eq_with_mode!(cache.get(&"a"), Some("alice"), delivery_mode); + assert_eq_with_mode!(cache.get(&"b"), Some("bob"), delivery_mode); + assert_eq_with_mode!(cache.get(&"c"), None, delivery_mode); + assert_eq_with_mode!(cache.get(&"d"), Some("dennis"), delivery_mode); + assert_with_mode!(cache.contains_key(&"a"), delivery_mode); + assert_with_mode!(cache.contains_key(&"b"), delivery_mode); + assert_with_mode!(!cache.contains_key(&"c"), delivery_mode); + assert_with_mode!(cache.contains_key(&"d"), delivery_mode); + + cache.invalidate(&"b"); + expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); + cache.sync(); + assert_eq_with_mode!(cache.get(&"b"), None, delivery_mode); + assert_with_mode!(!cache.contains_key(&"b"), delivery_mode); + + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_secs(1)); + + // Verify the notifications. + let actual = &*actual.lock(); + assert_eq_with_mode!(actual.len(), expected.len(), delivery_mode); + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { + assert_eq!( + actual, &expected, + "expected[{}] (delivery mode: {:?})", + i, delivery_mode + ); + } } } @@ -763,131 +779,143 @@ mod tests { #[test] fn size_aware_eviction() { - let weigher = |_k: &&str, v: &(&str, u32)| v.1; - - let alice = ("alice", 10); - let bob = ("bob", 15); - let bill = ("bill", 20); - let cindy = ("cindy", 5); - let david = ("david", 15); - let dennis = ("dennis", 15); - - // The following `Vec`s will hold actual and expected notifications. - let actual = Arc::new(Mutex::new(Vec::new())); - let mut expected = Vec::new(); - - // Create an eviction listener. - let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); - - // Create a cache with the eviction listener. 
- let mut cache = SegmentedCache::builder(1) - .max_capacity(31) - .weigher(weigher) - .eviction_listener(listener, EvictionNotificationMode::NonBlocking) - .build(); - cache.reconfigure_for_testing(); - - // Make the cache exterior immutable. - let cache = cache; - - cache.insert("a", alice); - cache.insert("b", bob); - assert_eq!(cache.get(&"a"), Some(alice)); - assert!(cache.contains_key(&"a")); - assert!(cache.contains_key(&"b")); - assert_eq!(cache.get(&"b"), Some(bob)); - cache.sync(); - // order (LRU -> MRU) and counts: a -> 1, b -> 1 - - cache.insert("c", cindy); - assert_eq!(cache.get(&"c"), Some(cindy)); - assert!(cache.contains_key(&"c")); - // order and counts: a -> 1, b -> 1, c -> 1 - cache.sync(); - - assert!(cache.contains_key(&"a")); - assert_eq!(cache.get(&"a"), Some(alice)); - assert_eq!(cache.get(&"b"), Some(bob)); - assert!(cache.contains_key(&"b")); - cache.sync(); - // order and counts: c -> 1, a -> 2, b -> 2 - - // To enter "d" (weight: 15), it needs to evict "c" (w: 5) and "a" (w: 10). - // "d" must have higher count than 3, which is the aggregated count - // of "a" and "c". - cache.insert("d", david); // count: d -> 0 - expected.push((Arc::new("d"), david, RemovalCause::Size)); - cache.sync(); - assert_eq!(cache.get(&"d"), None); // d -> 1 - assert!(!cache.contains_key(&"d")); - - cache.insert("d", david); - expected.push((Arc::new("d"), david, RemovalCause::Size)); - cache.sync(); - assert!(!cache.contains_key(&"d")); - assert_eq!(cache.get(&"d"), None); // d -> 2 - - cache.insert("d", david); - expected.push((Arc::new("d"), david, RemovalCause::Size)); - cache.sync(); - assert_eq!(cache.get(&"d"), None); // d -> 3 - assert!(!cache.contains_key(&"d")); - - cache.insert("d", david); - expected.push((Arc::new("d"), david, RemovalCause::Size)); - cache.sync(); - assert!(!cache.contains_key(&"d")); - assert_eq!(cache.get(&"d"), None); // d -> 4 - - // Finally "d" should be admitted by evicting "c" and "a". - cache.insert("d", dennis); - expected.push((Arc::new("c"), cindy, RemovalCause::Size)); - expected.push((Arc::new("a"), alice, RemovalCause::Size)); - cache.sync(); - assert_eq!(cache.get(&"a"), None); - assert_eq!(cache.get(&"b"), Some(bob)); - assert_eq!(cache.get(&"c"), None); - assert_eq!(cache.get(&"d"), Some(dennis)); - assert!(!cache.contains_key(&"a")); - assert!(cache.contains_key(&"b")); - assert!(!cache.contains_key(&"c")); - assert!(cache.contains_key(&"d")); - - // Update "b" with "bill" (w: 15 -> 20). This should evict "d" (w: 15). - cache.insert("b", bill); - expected.push((Arc::new("b"), bob, RemovalCause::Replaced)); - expected.push((Arc::new("d"), dennis, RemovalCause::Size)); - cache.sync(); - assert_eq!(cache.get(&"b"), Some(bill)); - assert_eq!(cache.get(&"d"), None); - assert!(cache.contains_key(&"b")); - assert!(!cache.contains_key(&"d")); - - // Re-add "a" (w: 10) and update "b" with "bob" (w: 20 -> 15). - cache.insert("a", alice); - cache.insert("b", bob); - expected.push((Arc::new("b"), bill, RemovalCause::Replaced)); - cache.sync(); - assert_eq!(cache.get(&"a"), Some(alice)); - assert_eq!(cache.get(&"b"), Some(bob)); - assert_eq!(cache.get(&"d"), None); - assert!(cache.contains_key(&"a")); - assert!(cache.contains_key(&"b")); - assert!(!cache.contains_key(&"d")); - - // Verify the sizes. - assert_eq!(cache.entry_count(), 2); - assert_eq!(cache.weighted_size(), 25); - - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); - - // Verify the notifications. 
- let actual = &*actual.lock(); - assert_eq!(actual.len(), expected.len()); - for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { - assert_eq!(actual, &expected, "expected[{}]", i); + run_test(DeliveryMode::Direct); + run_test(DeliveryMode::Queued); + + fn run_test(delivery_mode: DeliveryMode) { + let weigher = |_k: &&str, v: &(&str, u32)| v.1; + + let alice = ("alice", 10); + let bob = ("bob", 15); + let bill = ("bill", 20); + let cindy = ("cindy", 5); + let david = ("david", 15); + let dennis = ("dennis", 15); + + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + let listener_conf = notification::Configuration::builder() + .delivery_mode(delivery_mode) + .build(); + + // Create a cache with the eviction listener. + let mut cache = SegmentedCache::builder(1) + .max_capacity(31) + .weigher(weigher) + .eviction_listener_with_conf(listener, listener_conf) + .build(); + cache.reconfigure_for_testing(); + + // Make the cache exterior immutable. + let cache = cache; + + cache.insert("a", alice); + cache.insert("b", bob); + assert_eq_with_mode!(cache.get(&"a"), Some(alice), delivery_mode); + assert_with_mode!(cache.contains_key(&"a"), delivery_mode); + assert_with_mode!(cache.contains_key(&"b"), delivery_mode); + assert_eq_with_mode!(cache.get(&"b"), Some(bob), delivery_mode); + cache.sync(); + // order (LRU -> MRU) and counts: a -> 1, b -> 1 + + cache.insert("c", cindy); + assert_eq_with_mode!(cache.get(&"c"), Some(cindy), delivery_mode); + assert_with_mode!(cache.contains_key(&"c"), delivery_mode); + // order and counts: a -> 1, b -> 1, c -> 1 + cache.sync(); + + assert_with_mode!(cache.contains_key(&"a"), delivery_mode); + assert_eq_with_mode!(cache.get(&"a"), Some(alice), delivery_mode); + assert_eq_with_mode!(cache.get(&"b"), Some(bob), delivery_mode); + assert_with_mode!(cache.contains_key(&"b"), delivery_mode); + cache.sync(); + // order and counts: c -> 1, a -> 2, b -> 2 + + // To enter "d" (weight: 15), it needs to evict "c" (w: 5) and "a" (w: 10). + // "d" must have higher count than 3, which is the aggregated count + // of "a" and "c". + cache.insert("d", david); // count: d -> 0 + expected.push((Arc::new("d"), david, RemovalCause::Size)); + cache.sync(); + assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); // d -> 1 + assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); + + cache.insert("d", david); + expected.push((Arc::new("d"), david, RemovalCause::Size)); + cache.sync(); + assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); + assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); // d -> 2 + + cache.insert("d", david); + expected.push((Arc::new("d"), david, RemovalCause::Size)); + cache.sync(); + assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); // d -> 3 + assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); + + cache.insert("d", david); + expected.push((Arc::new("d"), david, RemovalCause::Size)); + cache.sync(); + assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); + assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); // d -> 4 + + // Finally "d" should be admitted by evicting "c" and "a". 
+ cache.insert("d", dennis); + expected.push((Arc::new("c"), cindy, RemovalCause::Size)); + expected.push((Arc::new("a"), alice, RemovalCause::Size)); + cache.sync(); + assert_eq_with_mode!(cache.get(&"a"), None, delivery_mode); + assert_eq_with_mode!(cache.get(&"b"), Some(bob), delivery_mode); + assert_eq_with_mode!(cache.get(&"c"), None, delivery_mode); + assert_eq_with_mode!(cache.get(&"d"), Some(dennis), delivery_mode); + assert_with_mode!(!cache.contains_key(&"a"), delivery_mode); + assert_with_mode!(cache.contains_key(&"b"), delivery_mode); + assert_with_mode!(!cache.contains_key(&"c"), delivery_mode); + assert_with_mode!(cache.contains_key(&"d"), delivery_mode); + + // Update "b" with "bill" (w: 15 -> 20). This should evict "d" (w: 15). + cache.insert("b", bill); + expected.push((Arc::new("b"), bob, RemovalCause::Replaced)); + expected.push((Arc::new("d"), dennis, RemovalCause::Size)); + cache.sync(); + assert_eq_with_mode!(cache.get(&"b"), Some(bill), delivery_mode); + assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); + assert_with_mode!(cache.contains_key(&"b"), delivery_mode); + assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); + + // Re-add "a" (w: 10) and update "b" with "bob" (w: 20 -> 15). + cache.insert("a", alice); + cache.insert("b", bob); + expected.push((Arc::new("b"), bill, RemovalCause::Replaced)); + cache.sync(); + assert_eq_with_mode!(cache.get(&"a"), Some(alice), delivery_mode); + assert_eq_with_mode!(cache.get(&"b"), Some(bob), delivery_mode); + assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); + assert_with_mode!(cache.contains_key(&"a"), delivery_mode); + assert_with_mode!(cache.contains_key(&"b"), delivery_mode); + assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); + + // Verify the sizes. + assert_eq_with_mode!(cache.entry_count(), 2, delivery_mode); + assert_eq_with_mode!(cache.weighted_size(), 25, delivery_mode); + + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_secs(1)); + + // Verify the notifications. + let actual = &*actual.lock(); + assert_eq_with_mode!(actual.len(), expected.len(), delivery_mode); + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { + assert_eq!( + actual, &expected, + "expected[{}] (delivery mode: {:?})", + i, delivery_mode + ); + } } } @@ -928,184 +956,212 @@ mod tests { #[test] fn invalidate_all() { - use std::collections::HashMap; - - // The following `HashMap`s will hold actual and expected notifications. - // Note: We use `HashMap` here as the order of invalidations is non-deterministic. - let actual = Arc::new(Mutex::new(HashMap::new())); - let mut expected = HashMap::new(); - - // Create an eviction listener. - let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| { - a1.lock().insert(k, (v, cause)); - }; - - // Create a cache with the eviction listener. - let mut cache = SegmentedCache::builder(4) - .max_capacity(100) - .eviction_listener(listener, EvictionNotificationMode::NonBlocking) - .build(); - cache.reconfigure_for_testing(); - - // Make the cache exterior immutable. 
- let cache = cache; - - cache.insert("a", "alice"); - cache.insert("b", "bob"); - cache.insert("c", "cindy"); - assert_eq!(cache.get(&"a"), Some("alice")); - assert_eq!(cache.get(&"b"), Some("bob")); - assert_eq!(cache.get(&"c"), Some("cindy")); - assert!(cache.contains_key(&"a")); - assert!(cache.contains_key(&"b")); - assert!(cache.contains_key(&"c")); - cache.sync(); - - cache.invalidate_all(); - expected.insert(Arc::new("a"), ("alice", RemovalCause::Explicit)); - expected.insert(Arc::new("b"), ("bob", RemovalCause::Explicit)); - expected.insert(Arc::new("c"), ("cindy", RemovalCause::Explicit)); - cache.sync(); - - cache.insert("d", "david"); - cache.sync(); - - assert!(cache.get(&"a").is_none()); - assert!(cache.get(&"b").is_none()); - assert!(cache.get(&"c").is_none()); - assert_eq!(cache.get(&"d"), Some("david")); - assert!(!cache.contains_key(&"a")); - assert!(!cache.contains_key(&"b")); - assert!(!cache.contains_key(&"c")); - assert!(cache.contains_key(&"d")); - - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); - - // Verify the notifications. - let actual = &*actual.lock(); - assert_eq!(actual.len(), expected.len()); - for actual_key in actual.keys() { - assert_eq!( - actual.get(actual_key), - expected.get(actual_key), - "expected[{}]", - actual_key - ); + run_test(DeliveryMode::Direct); + run_test(DeliveryMode::Queued); + + fn run_test(delivery_mode: DeliveryMode) { + use std::collections::HashMap; + + // The following `HashMap`s will hold actual and expected notifications. + // Note: We use `HashMap` here as the order of invalidations is non-deterministic. + let actual = Arc::new(Mutex::new(HashMap::new())); + let mut expected = HashMap::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| { + a1.lock().insert(k, (v, cause)); + }; + let listener_conf = notification::Configuration::builder() + .delivery_mode(delivery_mode) + .build(); + + // Create a cache with the eviction listener. + let mut cache = SegmentedCache::builder(4) + .max_capacity(100) + .eviction_listener_with_conf(listener, listener_conf) + .build(); + cache.reconfigure_for_testing(); + + // Make the cache exterior immutable. 
+ let cache = cache; + + cache.insert("a", "alice"); + cache.insert("b", "bob"); + cache.insert("c", "cindy"); + assert_eq_with_mode!(cache.get(&"a"), Some("alice"), delivery_mode); + assert_eq_with_mode!(cache.get(&"b"), Some("bob"), delivery_mode); + assert_eq_with_mode!(cache.get(&"c"), Some("cindy"), delivery_mode); + assert_with_mode!(cache.contains_key(&"a"), delivery_mode); + assert_with_mode!(cache.contains_key(&"b"), delivery_mode); + assert_with_mode!(cache.contains_key(&"c"), delivery_mode); + cache.sync(); + + cache.invalidate_all(); + expected.insert(Arc::new("a"), ("alice", RemovalCause::Explicit)); + expected.insert(Arc::new("b"), ("bob", RemovalCause::Explicit)); + expected.insert(Arc::new("c"), ("cindy", RemovalCause::Explicit)); + cache.sync(); + + cache.insert("d", "david"); + cache.sync(); + + assert_with_mode!(cache.get(&"a").is_none(), delivery_mode); + assert_with_mode!(cache.get(&"b").is_none(), delivery_mode); + assert_with_mode!(cache.get(&"c").is_none(), delivery_mode); + assert_eq_with_mode!(cache.get(&"d"), Some("david"), delivery_mode); + assert_with_mode!(!cache.contains_key(&"a"), delivery_mode); + assert_with_mode!(!cache.contains_key(&"b"), delivery_mode); + assert_with_mode!(!cache.contains_key(&"c"), delivery_mode); + assert_with_mode!(cache.contains_key(&"d"), delivery_mode); + + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_secs(1)); + + // Verify the notifications. + let actual = &*actual.lock(); + assert_eq_with_mode!(actual.len(), expected.len(), delivery_mode); + for actual_key in actual.keys() { + assert_eq!( + actual.get(actual_key), + expected.get(actual_key), + "expected[{}] (delivery mode: {:?})", + actual_key, + delivery_mode + ); + } } } #[test] fn invalidate_entries_if() -> Result<(), Box> { - use std::collections::{HashMap, HashSet}; + run_test(DeliveryMode::Direct)?; + run_test(DeliveryMode::Queued)?; + + fn run_test(delivery_mode: DeliveryMode) -> Result<(), Box> { + use std::collections::{HashMap, HashSet}; + + const SEGMENTS: usize = 4; + + // The following `HashMap`s will hold actual and expected notifications. + // Note: We use `HashMap` here as the order of invalidations is non-deterministic. + let actual = Arc::new(Mutex::new(HashMap::new())); + let mut expected = HashMap::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| { + a1.lock().insert(k, (v, cause)); + }; + let listener_conf = notification::Configuration::builder() + .delivery_mode(delivery_mode) + .build(); + + // Create a cache with the eviction listener. + let mut cache = SegmentedCache::builder(SEGMENTS) + .max_capacity(100) + .support_invalidation_closures() + .eviction_listener_with_conf(listener, listener_conf) + .build(); + cache.reconfigure_for_testing(); + + let mut mock = cache.create_mock_expiration_clock(); + + // Make the cache exterior immutable. + let cache = cache; + + cache.insert(0, "alice"); + cache.insert(1, "bob"); + cache.insert(2, "alex"); + cache.sync(); + mock.increment(Duration::from_secs(5)); // 5 secs from the start. 
+ cache.sync(); + + assert_eq_with_mode!(cache.get(&0), Some("alice"), delivery_mode); + assert_eq_with_mode!(cache.get(&1), Some("bob"), delivery_mode); + assert_eq_with_mode!(cache.get(&2), Some("alex"), delivery_mode); + assert_with_mode!(cache.contains_key(&0), delivery_mode); + assert_with_mode!(cache.contains_key(&1), delivery_mode); + assert_with_mode!(cache.contains_key(&2), delivery_mode); + + let names = ["alice", "alex"].iter().cloned().collect::>(); + cache.invalidate_entries_if(move |_k, &v| names.contains(v))?; + assert_eq_with_mode!( + cache.invalidation_predicate_count(), + SEGMENTS, + delivery_mode + ); + expected.insert(Arc::new(0), ("alice", RemovalCause::Explicit)); + expected.insert(Arc::new(2), ("alex", RemovalCause::Explicit)); - const SEGMENTS: usize = 4; + mock.increment(Duration::from_secs(5)); // 10 secs from the start. - // The following `HashMap`s will hold actual and expected notifications. - // Note: We use `HashMap` here as the order of invalidations is non-deterministic. - let actual = Arc::new(Mutex::new(HashMap::new())); - let mut expected = HashMap::new(); + cache.insert(3, "alice"); - // Create an eviction listener. - let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| { - a1.lock().insert(k, (v, cause)); - }; + // Run the invalidation task and wait for it to finish. (TODO: Need a better way than sleeping) + cache.sync(); // To submit the invalidation task. + std::thread::sleep(Duration::from_millis(200)); + cache.sync(); // To process the task result. + std::thread::sleep(Duration::from_millis(200)); - // Create a cache with the eviction listener. - let mut cache = SegmentedCache::builder(SEGMENTS) - .max_capacity(100) - .support_invalidation_closures() - .eviction_listener(listener, EvictionNotificationMode::NonBlocking) - .build(); - cache.reconfigure_for_testing(); + assert_with_mode!(cache.get(&0).is_none(), delivery_mode); + assert_with_mode!(cache.get(&2).is_none(), delivery_mode); + assert_eq_with_mode!(cache.get(&1), Some("bob"), delivery_mode); + // This should survive as it was inserted after calling invalidate_entries_if. + assert_eq_with_mode!(cache.get(&3), Some("alice"), delivery_mode); - let mut mock = cache.create_mock_expiration_clock(); + assert_with_mode!(!cache.contains_key(&0), delivery_mode); + assert_with_mode!(cache.contains_key(&1), delivery_mode); + assert_with_mode!(!cache.contains_key(&2), delivery_mode); + assert_with_mode!(cache.contains_key(&3), delivery_mode); - // Make the cache exterior immutable. - let cache = cache; + assert_eq_with_mode!(cache.entry_count(), 2, delivery_mode); + assert_eq_with_mode!(cache.invalidation_predicate_count(), 0, delivery_mode); - cache.insert(0, "alice"); - cache.insert(1, "bob"); - cache.insert(2, "alex"); - cache.sync(); - mock.increment(Duration::from_secs(5)); // 5 secs from the start. - cache.sync(); + mock.increment(Duration::from_secs(5)); // 15 secs from the start. 
- assert_eq!(cache.get(&0), Some("alice")); - assert_eq!(cache.get(&1), Some("bob")); - assert_eq!(cache.get(&2), Some("alex")); - assert!(cache.contains_key(&0)); - assert!(cache.contains_key(&1)); - assert!(cache.contains_key(&2)); - - let names = ["alice", "alex"].iter().cloned().collect::>(); - cache.invalidate_entries_if(move |_k, &v| names.contains(v))?; - assert_eq!(cache.invalidation_predicate_count(), SEGMENTS); - expected.insert(Arc::new(0), ("alice", RemovalCause::Explicit)); - expected.insert(Arc::new(2), ("alex", RemovalCause::Explicit)); - - mock.increment(Duration::from_secs(5)); // 10 secs from the start. - - cache.insert(3, "alice"); - - // Run the invalidation task and wait for it to finish. (TODO: Need a better way than sleeping) - cache.sync(); // To submit the invalidation task. - std::thread::sleep(Duration::from_millis(200)); - cache.sync(); // To process the task result. - std::thread::sleep(Duration::from_millis(200)); - - assert!(cache.get(&0).is_none()); - assert!(cache.get(&2).is_none()); - assert_eq!(cache.get(&1), Some("bob")); - // This should survive as it was inserted after calling invalidate_entries_if. - assert_eq!(cache.get(&3), Some("alice")); - - assert!(!cache.contains_key(&0)); - assert!(cache.contains_key(&1)); - assert!(!cache.contains_key(&2)); - assert!(cache.contains_key(&3)); - - assert_eq!(cache.entry_count(), 2); - assert_eq!(cache.invalidation_predicate_count(), 0); - - mock.increment(Duration::from_secs(5)); // 15 secs from the start. - - cache.invalidate_entries_if(|_k, &v| v == "alice")?; - cache.invalidate_entries_if(|_k, &v| v == "bob")?; - assert_eq!(cache.invalidation_predicate_count(), SEGMENTS * 2); - expected.insert(Arc::new(1), ("bob", RemovalCause::Explicit)); - expected.insert(Arc::new(3), ("alice", RemovalCause::Explicit)); - - // Run the invalidation task and wait for it to finish. (TODO: Need a better way than sleeping) - cache.sync(); // To submit the invalidation task. - std::thread::sleep(Duration::from_millis(200)); - cache.sync(); // To process the task result. - std::thread::sleep(Duration::from_millis(200)); - - assert!(cache.get(&1).is_none()); - assert!(cache.get(&3).is_none()); - - assert!(!cache.contains_key(&1)); - assert!(!cache.contains_key(&3)); - - assert_eq!(cache.entry_count(), 0); - assert_eq!(cache.invalidation_predicate_count(), 0); - - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); - - // Verify the notifications. - let actual = &*actual.lock(); - assert_eq!(actual.len(), expected.len()); - for actual_key in actual.keys() { - assert_eq!( - actual.get(actual_key), - expected.get(actual_key), - "expected[{}]", - actual_key + cache.invalidate_entries_if(|_k, &v| v == "alice")?; + cache.invalidate_entries_if(|_k, &v| v == "bob")?; + assert_eq_with_mode!( + cache.invalidation_predicate_count(), + SEGMENTS * 2, + delivery_mode ); + expected.insert(Arc::new(1), ("bob", RemovalCause::Explicit)); + expected.insert(Arc::new(3), ("alice", RemovalCause::Explicit)); + + // Run the invalidation task and wait for it to finish. (TODO: Need a better way than sleeping) + cache.sync(); // To submit the invalidation task. + std::thread::sleep(Duration::from_millis(200)); + cache.sync(); // To process the task result. 
+ std::thread::sleep(Duration::from_millis(200)); + + assert_with_mode!(cache.get(&1).is_none(), delivery_mode); + assert_with_mode!(cache.get(&3).is_none(), delivery_mode); + + assert_with_mode!(!cache.contains_key(&1), delivery_mode); + assert_with_mode!(!cache.contains_key(&3), delivery_mode); + + assert_eq_with_mode!(cache.entry_count(), 0, delivery_mode); + assert_eq_with_mode!(cache.invalidation_predicate_count(), 0, delivery_mode); + + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_secs(1)); + + // Verify the notifications. + let actual = &*actual.lock(); + assert_eq_with_mode!(actual.len(), expected.len(), delivery_mode); + for actual_key in actual.keys() { + assert_eq!( + actual.get(actual_key), + expected.get(actual_key), + "expected[{}] (delivery mode: {:?})", + actual_key, + delivery_mode + ); + } + + Ok(()) } Ok(()) diff --git a/src/sync_base/base_cache.rs b/src/sync_base/base_cache.rs index 01b8e6ba..8d1f310a 100644 --- a/src/sync_base/base_cache.rs +++ b/src/sync_base/base_cache.rs @@ -26,7 +26,7 @@ use crate::{ time::{CheckedTimeOps, Clock, Instant}, CacheRegion, }, - notification::{EvictionListener, EvictionNotificationMode, RemovalCause}, + notification::{self, EvictionListener, RemovalCause}, sync_base::removal_notifier::RemovalNotifier, Policy, PredicateError, }; @@ -144,7 +144,7 @@ where build_hasher: S, weigher: Option>, eviction_listener: Option>, - eviction_notification_mode: Option, + eviction_listener_conf: Option, time_to_live: Option, time_to_idle: Option, invalidator_enabled: bool, @@ -157,7 +157,7 @@ where build_hasher, weigher, eviction_listener, - eviction_notification_mode, + eviction_listener_conf, r_rcv, w_rcv, time_to_live, @@ -792,7 +792,7 @@ where build_hasher: S, weigher: Option>, eviction_listener: Option>, - eviction_notification_mode: Option, + eviction_listener_conf: Option, read_op_ch: Receiver>, write_op_ch: Receiver>, time_to_live: Option, @@ -809,7 +809,7 @@ where build_hasher.clone(), ); let (removal_notifier, key_locks) = if let Some(listener) = eviction_listener { - let rn = RemovalNotifier::new(listener, eviction_notification_mode.unwrap_or_default()); + let rn = RemovalNotifier::new(listener, eviction_listener_conf.unwrap_or_default()); if rn.is_blocking() { let kl = KeyLockMap::with_hasher(build_hasher.clone()); (Some(rn), Some(kl)) diff --git a/src/sync_base/removal_notifier.rs b/src/sync_base/removal_notifier.rs index 07ba443f..0e36f50a 100644 --- a/src/sync_base/removal_notifier.rs +++ b/src/sync_base/removal_notifier.rs @@ -8,7 +8,7 @@ use std::{ use crate::{ common::concurrent::thread_pool::{PoolName, ThreadPool, ThreadPoolRegistry}, - notification::{EvictionListener, EvictionListenerRef, EvictionNotificationMode, RemovalCause}, + notification::{self, DeliveryMode, EvictionListener, EvictionListenerRef, RemovalCause}, }; use crossbeam_channel::{Receiver, Sender}; @@ -25,14 +25,10 @@ pub(crate) enum RemovalNotifier { } impl RemovalNotifier { - pub(crate) fn new(listener: EvictionListener, mode: EvictionNotificationMode) -> Self { - match mode { - EvictionNotificationMode::Blocking => { - Self::Blocking(BlockingRemovalNotifier::new(listener)) - } - EvictionNotificationMode::NonBlocking => { - Self::ThreadPool(ThreadPoolRemovalNotifier::new(listener)) - } + pub(crate) fn new(listener: EvictionListener, conf: notification::Configuration) -> Self { + match conf.delivery_mode() { + DeliveryMode::Direct => Self::Blocking(BlockingRemovalNotifier::new(listener)), + DeliveryMode::Queued => 
Self::ThreadPool(ThreadPoolRemovalNotifier::new(listener)), } } From fac2a00798ea8d9ced9725f34bed793fde7e2205 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Tue, 21 Jun 2022 07:54:56 +0800 Subject: [PATCH 22/44] Support notification on eviction Rename `DeliveryMode::Direct` to `DeliveryMode::Immediate`. --- src/notification.rs | 6 +++--- src/sync/cache.rs | 16 ++++++++-------- src/sync/segment.rs | 8 ++++---- src/sync_base/base_cache.rs | 5 ++++- src/sync_base/removal_notifier.rs | 2 +- 5 files changed, 20 insertions(+), 17 deletions(-) diff --git a/src/notification.rs b/src/notification.rs index 78d59860..c632fd77 100644 --- a/src/notification.rs +++ b/src/notification.rs @@ -44,13 +44,13 @@ impl ConfigurationBuilder { #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum DeliveryMode { - Direct, + Immediate, Queued, } impl Default for DeliveryMode { fn default() -> Self { - Self::Direct + Self::Immediate } } @@ -73,7 +73,7 @@ impl RemovalCause { } } -#[cfg(test)] +#[cfg(all(test, feature = "sync"))] pub(crate) mod macros { macro_rules! assert_with_mode { diff --git a/src/sync/cache.rs b/src/sync/cache.rs index badca9a1..2e5e476c 100644 --- a/src/sync/cache.rs +++ b/src/sync/cache.rs @@ -1041,7 +1041,7 @@ mod tests { #[test] fn basic_single_thread() { - run_test(DeliveryMode::Direct); + run_test(DeliveryMode::Immediate); run_test(DeliveryMode::Queued); fn run_test(delivery_mode: DeliveryMode) { @@ -1139,7 +1139,7 @@ mod tests { #[test] fn size_aware_eviction() { - run_test(DeliveryMode::Direct); + run_test(DeliveryMode::Immediate); run_test(DeliveryMode::Queued); fn run_test(delivery_mode: DeliveryMode) { @@ -1308,7 +1308,7 @@ mod tests { #[test] fn invalidate_all() { - run_test(DeliveryMode::Direct); + run_test(DeliveryMode::Immediate); run_test(DeliveryMode::Queued); fn run_test(delivery_mode: DeliveryMode) { @@ -1380,7 +1380,7 @@ mod tests { #[test] fn invalidate_entries_if() -> Result<(), Box> { - run_test(DeliveryMode::Direct)?; + run_test(DeliveryMode::Immediate)?; run_test(DeliveryMode::Queued)?; fn run_test(delivery_mode: DeliveryMode) -> Result<(), Box> { @@ -1502,7 +1502,7 @@ mod tests { #[test] fn time_to_live() { - run_test(DeliveryMode::Direct); + run_test(DeliveryMode::Immediate); run_test(DeliveryMode::Queued); fn run_test(delivery_mode: DeliveryMode) { @@ -1604,7 +1604,7 @@ mod tests { #[test] fn time_to_idle() { - run_test(DeliveryMode::Direct); + run_test(DeliveryMode::Immediate); run_test(DeliveryMode::Queued); fn run_test(delivery_mode: DeliveryMode) { @@ -2212,7 +2212,7 @@ mod tests { // - time_to_live // - time_to_idle - run_test(DeliveryMode::Direct); + run_test(DeliveryMode::Immediate); run_test(DeliveryMode::Queued); fn run_test(delivery_mode: DeliveryMode) { @@ -2291,7 +2291,7 @@ mod tests { #[test] fn test_removal_notifications_with_updates() { - run_test(DeliveryMode::Direct); + run_test(DeliveryMode::Immediate); run_test(DeliveryMode::Queued); fn run_test(delivery_mode: DeliveryMode) { diff --git a/src/sync/segment.rs b/src/sync/segment.rs index 5fcbc2e2..9c910416 100644 --- a/src/sync/segment.rs +++ b/src/sync/segment.rs @@ -662,7 +662,7 @@ mod tests { #[test] fn basic_single_thread() { - run_test(DeliveryMode::Direct); + run_test(DeliveryMode::Immediate); run_test(DeliveryMode::Queued); fn run_test(delivery_mode: DeliveryMode) { @@ -779,7 +779,7 @@ mod tests { #[test] fn size_aware_eviction() { - run_test(DeliveryMode::Direct); + run_test(DeliveryMode::Immediate); run_test(DeliveryMode::Queued); fn run_test(delivery_mode: DeliveryMode) { @@ -956,7 
+956,7 @@ mod tests { #[test] fn invalidate_all() { - run_test(DeliveryMode::Direct); + run_test(DeliveryMode::Immediate); run_test(DeliveryMode::Queued); fn run_test(delivery_mode: DeliveryMode) { @@ -1035,7 +1035,7 @@ mod tests { #[test] fn invalidate_entries_if() -> Result<(), Box> { - run_test(DeliveryMode::Direct)?; + run_test(DeliveryMode::Immediate)?; run_test(DeliveryMode::Queued)?; fn run_test(delivery_mode: DeliveryMode) -> Result<(), Box> { diff --git a/src/sync_base/base_cache.rs b/src/sync_base/base_cache.rs index 8d1f310a..cdb73246 100644 --- a/src/sync_base/base_cache.rs +++ b/src/sync_base/base_cache.rs @@ -102,6 +102,7 @@ impl BaseCache { } #[inline] + #[cfg(feature = "sync")] pub(crate) fn is_blocking_removal_notification(&self) -> bool { self.inner.is_blocking_removal_notification() } @@ -240,6 +241,7 @@ where } } + #[cfg(feature = "sync")] pub(crate) fn get_key_with_hash(&self, key: &Q, hash: u64) -> Option> where Arc: Borrow, @@ -690,7 +692,8 @@ impl Inner { } #[inline] - pub(crate) fn is_blocking_removal_notification(&self) -> bool { + #[cfg(feature = "sync")] + fn is_blocking_removal_notification(&self) -> bool { self.removal_notifier .as_ref() .map(|rn| rn.is_blocking()) diff --git a/src/sync_base/removal_notifier.rs b/src/sync_base/removal_notifier.rs index 0e36f50a..3ed85181 100644 --- a/src/sync_base/removal_notifier.rs +++ b/src/sync_base/removal_notifier.rs @@ -27,7 +27,7 @@ pub(crate) enum RemovalNotifier { impl RemovalNotifier { pub(crate) fn new(listener: EvictionListener, conf: notification::Configuration) -> Self { match conf.delivery_mode() { - DeliveryMode::Direct => Self::Blocking(BlockingRemovalNotifier::new(listener)), + DeliveryMode::Immediate => Self::Blocking(BlockingRemovalNotifier::new(listener)), DeliveryMode::Queued => Self::ThreadPool(ThreadPoolRemovalNotifier::new(listener)), } } From e8f75ac334948cc8ea19cf82475e60037b02b000 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Tue, 21 Jun 2022 09:30:07 +0800 Subject: [PATCH 23/44] Support notification on eviction Try to stabilize flaky tests caused by slow QEMU VMs. --- src/future/cache.rs | 112 ++++++--------- src/sync/cache.rs | 323 ++++++++++++++++++++------------------------ src/sync/segment.rs | 149 ++++++++++++-------- 3 files changed, 279 insertions(+), 305 deletions(-) diff --git a/src/future/cache.rs b/src/future/cache.rs index 13c271f9..1d571d62 100644 --- a/src/future/cache.rs +++ b/src/future/cache.rs @@ -1140,6 +1140,8 @@ mod tests { // Create an eviction listener. let a1 = Arc::clone(&actual); + // We use non-async mutex in the eviction listener (because the listener + // is a regular closure). let listener = move |k, v, cause| a1.lock().push((k, v, cause)); // Create a cache with the eviction listener. @@ -1207,15 +1209,7 @@ mod tests { assert_eq!(cache.get(&"b"), None); assert!(!cache.contains_key(&"b")); - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); - - // Verify the notifications. - let actual = &*actual.lock(); - assert_eq!(actual.len(), expected.len()); - for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { - assert_eq!(actual, &expected, "expected[{}]", i); - } + verify_notification_vec(actual, &expected); } #[test] @@ -1384,15 +1378,7 @@ mod tests { assert_eq!(cache.entry_count(), 2); assert_eq!(cache.weighted_size(), 25); - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); - - // Verify the notifications. 
- let actual = &*actual.lock(); - assert_eq!(actual.len(), expected.len()); - for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { - assert_eq!(actual, &expected, "expected[{}]", i); - } + verify_notification_vec(actual, &expected); } #[tokio::test] @@ -1478,15 +1464,7 @@ mod tests { assert!(!cache.contains_key(&"c")); assert!(cache.contains_key(&"d")); - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); - - // Verify the notifications. - let actual = &*actual.lock(); - assert_eq!(actual.len(), expected.len()); - for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { - assert_eq!(actual, &expected, "expected[{}]", i); - } + verify_notification_vec(actual, &expected); } #[tokio::test] @@ -1584,15 +1562,7 @@ mod tests { assert_eq!(cache.entry_count(), 0); assert_eq!(cache.invalidation_predicate_count(), 0); - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); - - // Verify the notifications. - let actual = &*actual.lock(); - assert_eq!(actual.len(), expected.len()); - for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { - assert_eq!(actual, &expected, "expected[{}]", i); - } + verify_notification_vec(actual, &expected); Ok(()) } @@ -1676,15 +1646,7 @@ mod tests { cache.sync(); assert!(cache.is_table_empty()); - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); - - // Verify the notifications. - let actual = &*actual.lock(); - assert_eq!(actual.len(), expected.len()); - for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { - assert_eq!(actual, &expected, "expected[{}]", i); - } + verify_notification_vec(actual, &expected); } #[tokio::test] @@ -1763,15 +1725,7 @@ mod tests { cache.sync(); assert!(cache.is_table_empty()); - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); - - // Verify the notifications. - let actual = &*actual.lock(); - assert_eq!(actual.len(), expected.len()); - for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { - assert_eq!(actual, &expected, "expected[{}]", i); - } + verify_notification_vec(actual, &expected); } #[tokio::test] @@ -2391,15 +2345,7 @@ mod tests { cache.sync(); assert_eq!(cache.entry_count(), 3); - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); - - // Verify the notifications. - let actual = &*actual.lock(); - assert_eq!(actual.len(), expected.len()); - for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { - assert_eq!(actual, &expected, "expected[{}]", i); - } + verify_notification_vec(actual, &expected); } #[tokio::test] @@ -2491,14 +2437,40 @@ mod tests { cache.sync(); assert_eq!(cache.entry_count(), 0); - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); + verify_notification_vec(actual, &expected); + } + + type NotificationTuple = (Arc, V, RemovalCause); + + fn verify_notification_vec( + actual: Arc>>>, + expected: &[NotificationTuple], + ) where + K: Eq + std::fmt::Debug, + V: Eq + std::fmt::Debug, + { + // Retries will be needed when testing in a QEMU VM. + const MAX_RETRIES: usize = 5; + let mut retries = 0; + loop { + // Ensure all scheduled notifications have been processed. 
+ std::thread::sleep(Duration::from_millis(500)); + + let actual = &*actual.lock(); + if actual.len() != expected.len() { + if retries <= MAX_RETRIES { + retries += 1; + continue; + } else { + assert_eq!(actual.len(), expected.len(), "Retries exhausted"); + } + } + + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { + assert_eq!(actual, expected, "expected[{}]", i); + } - // Verify the notifications. - let actual = &*actual.lock(); - assert_eq!(actual.len(), expected.len()); - for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { - assert_eq!(actual, &expected, "expected[{}]", i); + break; } } diff --git a/src/sync/cache.rs b/src/sync/cache.rs index 2e5e476c..c6a092ac 100644 --- a/src/sync/cache.rs +++ b/src/sync/cache.rs @@ -1121,19 +1121,7 @@ mod tests { assert_eq_with_mode!(cache.get(&"b"), None, delivery_mode); assert_with_mode!(!cache.contains_key(&"b"), delivery_mode); - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); - - // Verify the notifications. - let actual = &*actual.lock(); - assert_eq_with_mode!(actual.len(), expected.len(), delivery_mode); - for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { - assert_eq!( - actual, &expected, - "expected[{}] (delivery mode: {:?})", - i, delivery_mode - ); - } + verify_notification_vec(actual, &expected, delivery_mode); } } @@ -1263,19 +1251,7 @@ mod tests { assert_eq_with_mode!(cache.entry_count(), 2, delivery_mode); assert_eq_with_mode!(cache.weighted_size(), 25, delivery_mode); - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); - - // Verify the notifications. - let actual = &*actual.lock(); - assert_eq_with_mode!(actual.len(), expected.len(), delivery_mode); - for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { - assert_eq!( - actual, &expected, - "expected[{}] (delivery mode: {:?})", - i, delivery_mode - ); - } + verify_notification_vec(actual, &expected, delivery_mode); } } @@ -1362,19 +1338,7 @@ mod tests { assert_with_mode!(!cache.contains_key(&"c"), delivery_mode); assert_with_mode!(cache.contains_key(&"d"), delivery_mode); - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); - - // Verify the notifications. - let actual = &*actual.lock(); - assert_eq_with_mode!(actual.len(), expected.len(), delivery_mode); - for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { - assert_eq!( - actual, &expected, - "expected[{}] (delivery mode: {:?})", - i, delivery_mode - ); - } + verify_notification_vec(actual, &expected, delivery_mode); } } @@ -1480,19 +1444,7 @@ mod tests { assert_eq_with_mode!(cache.entry_count(), 0, delivery_mode); assert_eq_with_mode!(cache.invalidation_predicate_count(), 0, delivery_mode); - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); - - // Verify the notifications. - let actual = &*actual.lock(); - assert_eq_with_mode!(actual.len(), expected.len(), delivery_mode); - for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { - assert_eq!( - actual, &expected, - "expected[{}] (delivery mode: {:?})", - i, delivery_mode - ); - } + verify_notification_vec(actual, &expected, delivery_mode); Ok(()) } @@ -1586,19 +1538,7 @@ mod tests { cache.sync(); assert_with_mode!(cache.is_table_empty(), delivery_mode); - // Ensure all scheduled notifications have been processed. 
- std::thread::sleep(Duration::from_secs(1)); - - // Verify the notifications. - let actual = &*actual.lock(); - assert_eq_with_mode!(actual.len(), expected.len(), delivery_mode); - for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { - assert_eq!( - actual, &expected, - "expected[{}] (delivery mode: {:?})", - i, delivery_mode - ); - } + verify_notification_vec(actual, &expected, delivery_mode); } } @@ -1685,19 +1625,7 @@ mod tests { cache.sync(); assert_with_mode!(cache.is_table_empty(), delivery_mode); - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); - - // Verify the notifications. - let actual = &*actual.lock(); - assert_eq_with_mode!(actual.len(), expected.len(), delivery_mode); - for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { - assert_eq!( - actual, &expected, - "expected[{}] (delivery mode: {:?})", - i, delivery_mode - ); - } + verify_notification_vec(actual, &expected, delivery_mode); } } @@ -2273,131 +2201,124 @@ mod tests { cache.sync(); assert_eq_with_mode!(cache.entry_count(), 3, delivery_mode); - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); - - // Verify the notifications. - let actual = &*actual.lock(); - assert_eq_with_mode!(actual.len(), expected.len(), delivery_mode); - for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { - assert_eq!( - actual, &expected, - "expected[{}] (delivery mode: {:?})", - i, delivery_mode - ); - } + verify_notification_vec(actual, &expected, delivery_mode); } } #[test] - fn test_removal_notifications_with_updates() { - run_test(DeliveryMode::Immediate); - run_test(DeliveryMode::Queued); + fn test_immediate_removal_notifications_with_updates() { + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + let listener_conf = notification::Configuration::builder() + .delivery_mode(DeliveryMode::Immediate) + .build(); - fn run_test(delivery_mode: DeliveryMode) { - // The following `Vec`s will hold actual and expected notifications. - let actual = Arc::new(Mutex::new(Vec::new())); - let mut expected = Vec::new(); + // Create a cache with the eviction listener and also TTL and TTI. + let mut cache = Cache::builder() + .eviction_listener_with_conf(listener, listener_conf) + .time_to_live(Duration::from_secs(7)) + .time_to_idle(Duration::from_secs(5)) + .build(); + cache.reconfigure_for_testing(); - // Create an eviction listener. - let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); - let listener_conf = notification::Configuration::builder() - .delivery_mode(delivery_mode) - .build(); + let (clock, mock) = Clock::mock(); + cache.set_expiration_clock(Some(clock)); - // Create a cache with the eviction listener and also TTL and TTI. - let mut cache = Cache::builder() - .eviction_listener_with_conf(listener, listener_conf) - .time_to_live(Duration::from_secs(7)) - .time_to_idle(Duration::from_secs(5)) - .build(); - cache.reconfigure_for_testing(); + // Make the cache exterior immutable. + let cache = cache; - let (clock, mock) = Clock::mock(); - cache.set_expiration_clock(Some(clock)); + cache.insert("alice", "a0"); + cache.sync(); - // Make the cache exterior immutable. 
- let cache = cache; + // Now alice (a0) has been expired by the idle timeout (TTI). + mock.increment(Duration::from_secs(6)); + assert_eq!(cache.get(&"alice"), None); - cache.insert("alice", "a0"); - cache.sync(); + // We have not ran sync after the expiration of alice (a0), so it is + // still in the cache. + assert_eq!(cache.entry_count(), 1); - // Now alice (a0) has been expired by the idle timeout (TTI). - mock.increment(Duration::from_secs(6)); - expected.push((Arc::new("alice"), "a0", RemovalCause::Expired)); - assert_eq_with_mode!(cache.get(&"alice"), None, delivery_mode); + // Re-insert alice with a different value. Since alice (a0) is still + // in the cache, this is actually a replace operation rather than an + // insert operation. We want to verify that the RemovalCause of a0 is + // Expired, not Replaced. + cache.insert("alice", "a1"); + { + let mut a = actual.lock(); + assert_eq!(a.len(), 1); + assert_eq!(a[0], (Arc::new("alice"), "a0", RemovalCause::Expired)); + a.clear(); + } - // We have not ran sync after the expiration of alice (a0), so it is - // still in the cache. - assert_eq_with_mode!(cache.entry_count(), 1, delivery_mode); + cache.sync(); - // Re-insert alice with a different value. Since alice (a0) is still - // in the cache, this is actually a replace operation rather than an - // insert operation. We want to verify that the RemovalCause of a0 is - // Expired, not Replaced. - cache.insert("alice", "a1"); - cache.sync(); + mock.increment(Duration::from_secs(4)); + assert_eq!(cache.get(&"alice"), Some("a1")); + cache.sync(); - mock.increment(Duration::from_secs(4)); - assert_eq_with_mode!(cache.get(&"alice"), Some("a1"), delivery_mode); - cache.sync(); + // Now alice has been expired by time-to-live (TTL). + mock.increment(Duration::from_secs(4)); + assert_eq!(cache.get(&"alice"), None); - // Now alice has been expired by time-to-live (TTL). - mock.increment(Duration::from_secs(4)); - expected.push((Arc::new("alice"), "a1", RemovalCause::Expired)); - assert_eq_with_mode!(cache.get(&"alice"), None, delivery_mode); + // But, again, it is still in the cache. + assert_eq!(cache.entry_count(), 1); - // But, again, it is still in the cache. - assert_eq_with_mode!(cache.entry_count(), 1, delivery_mode); + // Re-insert alice with a different value and verify that the + // RemovalCause of a1 is Expired (not Replaced). + cache.insert("alice", "a2"); + { + let mut a = actual.lock(); + assert_eq!(a.len(), 1); + assert_eq!(a[0], (Arc::new("alice"), "a1", RemovalCause::Expired)); + a.clear(); + } - // Re-insert alice with a different value and verify that the - // RemovalCause of a1 is Expired (not Replaced). - cache.insert("alice", "a2"); - cache.sync(); + cache.sync(); - assert_eq_with_mode!(cache.entry_count(), 1, delivery_mode); + assert_eq!(cache.entry_count(), 1); - // Now alice (a2) has been expired by the idle timeout. - mock.increment(Duration::from_secs(6)); - expected.push((Arc::new("alice"), "a2", RemovalCause::Expired)); - assert_eq_with_mode!(cache.get(&"alice"), None, delivery_mode); - assert_eq_with_mode!(cache.entry_count(), 1, delivery_mode); + // Now alice (a2) has been expired by the idle timeout. + mock.increment(Duration::from_secs(6)); + assert_eq!(cache.get(&"alice"), None); + assert_eq!(cache.entry_count(), 1); - // This invalidate will internally remove alice (a2). - cache.invalidate(&"alice"); - cache.sync(); - assert_eq_with_mode!(cache.entry_count(), 0, delivery_mode); + // This invalidate will internally remove alice (a2). 
+ cache.invalidate(&"alice"); + cache.sync(); + assert_eq!(cache.entry_count(), 0); - // Re-insert, and this time, make it expired by the TTL. - cache.insert("alice", "a3"); - cache.sync(); - mock.increment(Duration::from_secs(4)); - assert_eq_with_mode!(cache.get(&"alice"), Some("a3"), delivery_mode); - cache.sync(); - mock.increment(Duration::from_secs(4)); - expected.push((Arc::new("alice"), "a3", RemovalCause::Expired)); - assert_eq_with_mode!(cache.get(&"alice"), None, delivery_mode); - assert_eq_with_mode!(cache.entry_count(), 1, delivery_mode); + { + let mut a = actual.lock(); + assert_eq!(a.len(), 1); + assert_eq!(a[0], (Arc::new("alice"), "a2", RemovalCause::Expired)); + a.clear(); + } - // This invalidate will internally remove alice (a2). - cache.invalidate(&"alice"); - cache.sync(); - assert_eq_with_mode!(cache.entry_count(), 0, delivery_mode); + // Re-insert, and this time, make it expired by the TTL. + cache.insert("alice", "a3"); + cache.sync(); + mock.increment(Duration::from_secs(4)); + assert_eq!(cache.get(&"alice"), Some("a3")); + cache.sync(); + mock.increment(Duration::from_secs(4)); + assert_eq!(cache.get(&"alice"), None); + assert_eq!(cache.entry_count(), 1); - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); + // This invalidate will internally remove alice (a2). + cache.invalidate(&"alice"); + cache.sync(); - // Verify the notifications. - let actual = &*actual.lock(); - assert_eq_with_mode!(actual.len(), expected.len(), delivery_mode); - for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { - assert_eq!( - actual, &expected, - "expected[{}] (delivery mode: {:?})", - i, delivery_mode - ); - } + assert_eq!(cache.entry_count(), 0); + + { + let mut a = actual.lock(); + assert_eq!(a.len(), 1); + assert_eq!(a[0], (Arc::new("alice"), "a3", RemovalCause::Expired)); + a.clear(); } } @@ -2415,4 +2336,48 @@ mod tests { assert!(debug_str.contains(r#"'c': "cindy""#)); assert!(debug_str.ends_with('}')); } + + type NotificationTuple = (Arc, V, RemovalCause); + + fn verify_notification_vec( + actual: Arc>>>, + expected: &[NotificationTuple], + delivery_mode: DeliveryMode, + ) where + K: Eq + std::fmt::Debug, + V: Eq + std::fmt::Debug, + { + // Retries will be needed when testing in a QEMU VM. + const MAX_RETRIES: usize = 5; + let mut retries = 0; + loop { + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_millis(500)); + + let actual = &*actual.lock(); + if actual.len() != expected.len() { + if retries <= MAX_RETRIES { + retries += 1; + continue; + } else { + assert_eq!( + actual.len(), + expected.len(), + "Retries exhausted (delivery mode: {:?})", + delivery_mode + ); + } + } + + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { + assert_eq!( + actual, expected, + "expected[{}] (delivery mode: {:?})", + i, delivery_mode + ); + } + + break; + } + } } diff --git a/src/sync/segment.rs b/src/sync/segment.rs index 9c910416..571e4f67 100644 --- a/src/sync/segment.rs +++ b/src/sync/segment.rs @@ -742,19 +742,7 @@ mod tests { assert_eq_with_mode!(cache.get(&"b"), None, delivery_mode); assert_with_mode!(!cache.contains_key(&"b"), delivery_mode); - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); - - // Verify the notifications. 
- let actual = &*actual.lock(); - assert_eq_with_mode!(actual.len(), expected.len(), delivery_mode); - for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { - assert_eq!( - actual, &expected, - "expected[{}] (delivery mode: {:?})", - i, delivery_mode - ); - } + verify_notification_vec(actual, &expected, delivery_mode); } } @@ -903,19 +891,7 @@ mod tests { assert_eq_with_mode!(cache.entry_count(), 2, delivery_mode); assert_eq_with_mode!(cache.weighted_size(), 25, delivery_mode); - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); - - // Verify the notifications. - let actual = &*actual.lock(); - assert_eq_with_mode!(actual.len(), expected.len(), delivery_mode); - for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { - assert_eq!( - actual, &expected, - "expected[{}] (delivery mode: {:?})", - i, delivery_mode - ); - } + verify_notification_vec(actual, &expected, delivery_mode); } } @@ -1015,21 +991,7 @@ mod tests { assert_with_mode!(!cache.contains_key(&"c"), delivery_mode); assert_with_mode!(cache.contains_key(&"d"), delivery_mode); - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); - - // Verify the notifications. - let actual = &*actual.lock(); - assert_eq_with_mode!(actual.len(), expected.len(), delivery_mode); - for actual_key in actual.keys() { - assert_eq!( - actual.get(actual_key), - expected.get(actual_key), - "expected[{}] (delivery mode: {:?})", - actual_key, - delivery_mode - ); - } + verify_notification_map(actual, &expected, delivery_mode); } } @@ -1145,21 +1107,7 @@ mod tests { assert_eq_with_mode!(cache.entry_count(), 0, delivery_mode); assert_eq_with_mode!(cache.invalidation_predicate_count(), 0, delivery_mode); - // Ensure all scheduled notifications have been processed. - std::thread::sleep(Duration::from_secs(1)); - - // Verify the notifications. - let actual = &*actual.lock(); - assert_eq_with_mode!(actual.len(), expected.len(), delivery_mode); - for actual_key in actual.keys() { - assert_eq!( - actual.get(actual_key), - expected.get(actual_key), - "expected[{}] (delivery mode: {:?})", - actual_key, - delivery_mode - ); - } + verify_notification_map(actual, &expected, delivery_mode); Ok(()) } @@ -1633,4 +1581,93 @@ mod tests { assert!(debug_str.contains(r#"'c': "cindy""#)); assert!(debug_str.ends_with('}')); } + + type NotificationPair = (V, RemovalCause); + type NotificationTriple = (Arc, V, RemovalCause); + + fn verify_notification_vec( + actual: Arc>>>, + expected: &[NotificationTriple], + delivery_mode: DeliveryMode, + ) where + K: Eq + std::fmt::Debug, + V: Eq + std::fmt::Debug, + { + // Retries will be needed when testing in a QEMU VM. + const MAX_RETRIES: usize = 5; + let mut retries = 0; + loop { + // Ensure all scheduled notifications have been processed. 
+ std::thread::sleep(Duration::from_millis(500)); + + let actual = &*actual.lock(); + if actual.len() != expected.len() { + if retries <= MAX_RETRIES { + retries += 1; + continue; + } else { + assert_eq!( + actual.len(), + expected.len(), + "Retries exhausted (delivery mode: {:?})", + delivery_mode + ); + } + } + + for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { + assert_eq!( + actual, expected, + "expected[{}] (delivery mode: {:?})", + i, delivery_mode + ); + } + + break; + } + } + + fn verify_notification_map( + actual: Arc, NotificationPair>>>, + expected: &std::collections::HashMap, NotificationPair>, + delivery_mode: DeliveryMode, + ) where + K: Eq + std::hash::Hash + std::fmt::Display, + V: Eq + std::fmt::Debug, + { + // Retries will be needed when testing in a QEMU VM. + const MAX_RETRIES: usize = 5; + let mut retries = 0; + loop { + // Ensure all scheduled notifications have been processed. + std::thread::sleep(Duration::from_millis(500)); + + let actual = &*actual.lock(); + if actual.len() != expected.len() { + if retries <= MAX_RETRIES { + retries += 1; + continue; + } else { + assert_eq!( + actual.len(), + expected.len(), + "Retries exhausted (delivery mode: {:?})", + delivery_mode + ); + } + } + + for actual_key in actual.keys() { + assert_eq!( + actual.get(actual_key), + expected.get(actual_key), + "expected[{}] (delivery mode: {:?})", + actual_key, + delivery_mode + ); + } + + break; + } + } } From 6cf464ac64e713e7c3f8e2518f7c8e07b4df2c4d Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Tue, 21 Jun 2022 09:47:28 +0800 Subject: [PATCH 24/44] Support notification on eviction Try to stabilize flaky tests caused by slow QEMU VMs. --- src/future/cache.rs | 25 ++++++++++++++----------- src/sync/cache.rs | 23 +++++++++++++---------- src/sync/segment.rs | 26 ++++++++++++++++---------- 3 files changed, 43 insertions(+), 31 deletions(-) diff --git a/src/future/cache.rs b/src/future/cache.rs index 1d571d62..235da758 100644 --- a/src/future/cache.rs +++ b/src/future/cache.rs @@ -1209,7 +1209,7 @@ mod tests { assert_eq!(cache.get(&"b"), None); assert!(!cache.contains_key(&"b")); - verify_notification_vec(actual, &expected); + verify_notification_vec(&cache, actual, &expected); } #[test] @@ -1378,7 +1378,7 @@ mod tests { assert_eq!(cache.entry_count(), 2); assert_eq!(cache.weighted_size(), 25); - verify_notification_vec(actual, &expected); + verify_notification_vec(&cache, actual, &expected); } #[tokio::test] @@ -1464,7 +1464,7 @@ mod tests { assert!(!cache.contains_key(&"c")); assert!(cache.contains_key(&"d")); - verify_notification_vec(actual, &expected); + verify_notification_vec(&cache, actual, &expected); } #[tokio::test] @@ -1562,7 +1562,7 @@ mod tests { assert_eq!(cache.entry_count(), 0); assert_eq!(cache.invalidation_predicate_count(), 0); - verify_notification_vec(actual, &expected); + verify_notification_vec(&cache, actual, &expected); Ok(()) } @@ -1646,7 +1646,7 @@ mod tests { cache.sync(); assert!(cache.is_table_empty()); - verify_notification_vec(actual, &expected); + verify_notification_vec(&cache, actual, &expected); } #[tokio::test] @@ -1725,7 +1725,7 @@ mod tests { cache.sync(); assert!(cache.is_table_empty()); - verify_notification_vec(actual, &expected); + verify_notification_vec(&cache, actual, &expected); } #[tokio::test] @@ -2345,7 +2345,7 @@ mod tests { cache.sync(); assert_eq!(cache.entry_count(), 3); - verify_notification_vec(actual, &expected); + verify_notification_vec(&cache, actual, &expected); } #[tokio::test] @@ -2437,17 +2437,19 
@@ mod tests { cache.sync(); assert_eq!(cache.entry_count(), 0); - verify_notification_vec(actual, &expected); + verify_notification_vec(&cache, actual, &expected); } type NotificationTuple = (Arc, V, RemovalCause); - fn verify_notification_vec( + fn verify_notification_vec( + cache: &Cache, actual: Arc>>>, expected: &[NotificationTuple], ) where - K: Eq + std::fmt::Debug, - V: Eq + std::fmt::Debug, + K: std::hash::Hash + Eq + std::fmt::Debug + Send + Sync + 'static, + V: Eq + std::fmt::Debug + Clone + Send + Sync + 'static, + S: std::hash::BuildHasher + Clone + Send + Sync + 'static, { // Retries will be needed when testing in a QEMU VM. const MAX_RETRIES: usize = 5; @@ -2460,6 +2462,7 @@ mod tests { if actual.len() != expected.len() { if retries <= MAX_RETRIES { retries += 1; + cache.sync(); continue; } else { assert_eq!(actual.len(), expected.len(), "Retries exhausted"); diff --git a/src/sync/cache.rs b/src/sync/cache.rs index c6a092ac..f269b8b1 100644 --- a/src/sync/cache.rs +++ b/src/sync/cache.rs @@ -1121,7 +1121,7 @@ mod tests { assert_eq_with_mode!(cache.get(&"b"), None, delivery_mode); assert_with_mode!(!cache.contains_key(&"b"), delivery_mode); - verify_notification_vec(actual, &expected, delivery_mode); + verify_notification_vec(&cache, actual, &expected, delivery_mode); } } @@ -1251,7 +1251,7 @@ mod tests { assert_eq_with_mode!(cache.entry_count(), 2, delivery_mode); assert_eq_with_mode!(cache.weighted_size(), 25, delivery_mode); - verify_notification_vec(actual, &expected, delivery_mode); + verify_notification_vec(&cache, actual, &expected, delivery_mode); } } @@ -1338,7 +1338,7 @@ mod tests { assert_with_mode!(!cache.contains_key(&"c"), delivery_mode); assert_with_mode!(cache.contains_key(&"d"), delivery_mode); - verify_notification_vec(actual, &expected, delivery_mode); + verify_notification_vec(&cache, actual, &expected, delivery_mode); } } @@ -1444,7 +1444,7 @@ mod tests { assert_eq_with_mode!(cache.entry_count(), 0, delivery_mode); assert_eq_with_mode!(cache.invalidation_predicate_count(), 0, delivery_mode); - verify_notification_vec(actual, &expected, delivery_mode); + verify_notification_vec(&cache, actual, &expected, delivery_mode); Ok(()) } @@ -1538,7 +1538,7 @@ mod tests { cache.sync(); assert_with_mode!(cache.is_table_empty(), delivery_mode); - verify_notification_vec(actual, &expected, delivery_mode); + verify_notification_vec(&cache, actual, &expected, delivery_mode); } } @@ -1625,7 +1625,7 @@ mod tests { cache.sync(); assert_with_mode!(cache.is_table_empty(), delivery_mode); - verify_notification_vec(actual, &expected, delivery_mode); + verify_notification_vec(&cache, actual, &expected, delivery_mode); } } @@ -2201,7 +2201,7 @@ mod tests { cache.sync(); assert_eq_with_mode!(cache.entry_count(), 3, delivery_mode); - verify_notification_vec(actual, &expected, delivery_mode); + verify_notification_vec(&cache, actual, &expected, delivery_mode); } } @@ -2339,19 +2339,22 @@ mod tests { type NotificationTuple = (Arc, V, RemovalCause); - fn verify_notification_vec( + fn verify_notification_vec( + cache: &Cache, actual: Arc>>>, expected: &[NotificationTuple], delivery_mode: DeliveryMode, ) where - K: Eq + std::fmt::Debug, - V: Eq + std::fmt::Debug, + K: std::hash::Hash + Eq + std::fmt::Debug + Send + Sync + 'static, + V: Eq + std::fmt::Debug + Clone + Send + Sync + 'static, + S: std::hash::BuildHasher + Clone + Send + Sync + 'static, { // Retries will be needed when testing in a QEMU VM. 
const MAX_RETRIES: usize = 5; let mut retries = 0; loop { // Ensure all scheduled notifications have been processed. + cache.sync(); std::thread::sleep(Duration::from_millis(500)); let actual = &*actual.lock(); diff --git a/src/sync/segment.rs b/src/sync/segment.rs index 571e4f67..bb25a51b 100644 --- a/src/sync/segment.rs +++ b/src/sync/segment.rs @@ -742,7 +742,7 @@ mod tests { assert_eq_with_mode!(cache.get(&"b"), None, delivery_mode); assert_with_mode!(!cache.contains_key(&"b"), delivery_mode); - verify_notification_vec(actual, &expected, delivery_mode); + verify_notification_vec(&cache, actual, &expected, delivery_mode); } } @@ -891,7 +891,7 @@ mod tests { assert_eq_with_mode!(cache.entry_count(), 2, delivery_mode); assert_eq_with_mode!(cache.weighted_size(), 25, delivery_mode); - verify_notification_vec(actual, &expected, delivery_mode); + verify_notification_vec(&cache, actual, &expected, delivery_mode); } } @@ -991,7 +991,7 @@ mod tests { assert_with_mode!(!cache.contains_key(&"c"), delivery_mode); assert_with_mode!(cache.contains_key(&"d"), delivery_mode); - verify_notification_map(actual, &expected, delivery_mode); + verify_notification_map(&cache, actual, &expected, delivery_mode); } } @@ -1107,7 +1107,7 @@ mod tests { assert_eq_with_mode!(cache.entry_count(), 0, delivery_mode); assert_eq_with_mode!(cache.invalidation_predicate_count(), 0, delivery_mode); - verify_notification_map(actual, &expected, delivery_mode); + verify_notification_map(&cache, actual, &expected, delivery_mode); Ok(()) } @@ -1585,13 +1585,15 @@ mod tests { type NotificationPair = (V, RemovalCause); type NotificationTriple = (Arc, V, RemovalCause); - fn verify_notification_vec( + fn verify_notification_vec( + cache: &SegmentedCache, actual: Arc>>>, expected: &[NotificationTriple], delivery_mode: DeliveryMode, ) where - K: Eq + std::fmt::Debug, - V: Eq + std::fmt::Debug, + K: std::hash::Hash + Eq + std::fmt::Debug + Send + Sync + 'static, + V: Eq + std::fmt::Debug + Clone + Send + Sync + 'static, + S: std::hash::BuildHasher + Clone + Send + Sync + 'static, { // Retries will be needed when testing in a QEMU VM. const MAX_RETRIES: usize = 5; @@ -1604,6 +1606,7 @@ mod tests { if actual.len() != expected.len() { if retries <= MAX_RETRIES { retries += 1; + cache.sync(); continue; } else { assert_eq!( @@ -1627,13 +1630,15 @@ mod tests { } } - fn verify_notification_map( + fn verify_notification_map( + cache: &SegmentedCache, actual: Arc, NotificationPair>>>, expected: &std::collections::HashMap, NotificationPair>, delivery_mode: DeliveryMode, ) where - K: Eq + std::hash::Hash + std::fmt::Display, - V: Eq + std::fmt::Debug, + K: std::hash::Hash + Eq + std::fmt::Display + Send + Sync + 'static, + V: Eq + std::fmt::Debug + Clone + Send + Sync + 'static, + S: std::hash::BuildHasher + Clone + Send + Sync + 'static, { // Retries will be needed when testing in a QEMU VM. const MAX_RETRIES: usize = 5; @@ -1646,6 +1651,7 @@ mod tests { if actual.len() != expected.len() { if retries <= MAX_RETRIES { retries += 1; + cache.sync(); continue; } else { assert_eq!( From 9b65faa2f9ae7ec3004c0e42fe8622951e0cc382 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Wed, 22 Jun 2022 06:34:20 +0800 Subject: [PATCH 25/44] Support notification on eviction Add an unit test to ensure the key-level lock is working. 
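(The key-level lock itself was wired up in an earlier patch in this series; see the `KeyLockMap` that `base_cache.rs` creates when a blocking notifier is configured. Its implementation is not shown here, so the following is only a rough, self-contained sketch of the per-key locking idea the new test relies on, with made-up names, and not moka's actual code.)

use std::collections::HashMap;
use std::sync::{Arc, Mutex};

// Illustration only: one mutex per key, so that notification delivery for a
// given key is serialized even when several threads touch that key at once.
struct PerKeyLocks<K> {
    locks: Mutex<HashMap<K, Arc<Mutex<()>>>>,
}

impl<K: std::hash::Hash + Eq + Clone> PerKeyLocks<K> {
    fn new() -> Self {
        Self { locks: Mutex::new(HashMap::new()) }
    }

    // Return the mutex guarding `key`, creating it on first use.
    fn lock_for(&self, key: &K) -> Arc<Mutex<()>> {
        let mut map = self.locks.lock().unwrap();
        map.entry(key.clone())
            .or_insert_with(|| Arc::new(Mutex::new(())))
            .clone()
    }
}

fn main() {
    let locks = PerKeyLocks::new();
    let key_lock = locks.lock_for(&"alice");
    let _guard = key_lock.lock().unwrap();
    // While `_guard` is held, no other thread can deliver a notification for
    // "alice", so the Begin/End events asserted by the test cannot interleave.
}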
--- src/sync/cache.rs | 123 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 122 insertions(+), 1 deletion(-) diff --git a/src/sync/cache.rs b/src/sync/cache.rs index f269b8b1..97687475 100644 --- a/src/sync/cache.rs +++ b/src/sync/cache.rs @@ -2207,7 +2207,7 @@ mod tests { #[test] fn test_immediate_removal_notifications_with_updates() { - // The following `Vec`s will hold actual and expected notifications. + // The following `Vec` will hold actual notifications. let actual = Arc::new(Mutex::new(Vec::new())); // Create an eviction listener. @@ -2322,6 +2322,127 @@ mod tests { } } + // This test ensures the key-level lock for the immediate delivery mode + // is working so that the notifications for a given key should always ordered. + // This should be true even if multiple clients try to modify the entries + // for the key at the same time. + #[test] + fn test_immediate_removal_notifications_on_the_same_key() { + use std::thread::{sleep, spawn}; + + const KEY: &str = "alice"; + + type Val = &'static str; + + #[derive(PartialEq, Eq, Debug)] + enum Event { + Insert(Val), + Invalidate(Val), + BeginNotify(Val, RemovalCause), + EndNotify(Val, RemovalCause), + } + + // The following `Vec will hold actual notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + + // Create an eviction listener. + // Note that this listener is slow and will take ~100 ms to complete. + let a0 = Arc::clone(&actual); + let listener = move |_k, v, cause| { + a0.lock().push(Event::BeginNotify(v, cause)); + sleep(Duration::from_millis(100)); + a0.lock().push(Event::EndNotify(v, cause)); + }; + let listener_conf = notification::Configuration::builder() + .delivery_mode(DeliveryMode::Immediate) + .build(); + + // Create a cache with the eviction listener and also TTL. + let cache = Cache::builder() + .eviction_listener_with_conf(listener, listener_conf) + .time_to_live(Duration::from_millis(200)) + .build(); + + // Time Event + // ----- ------------------------------------- + // 0000: Insert value a0 + // 0200: a0 expired + // 0210: Insert value a1 -> expired a0 (N-A0) + // 0220: Insert value a2 (waiting) (A-A2) + // 0310: N-A0 processed + // I-A2 inserted -> replace a1 (N-A1) + // 0320: Invalidate (waiting) (R-A2) + // 0410: N-A1 processed + // R-A2 processed -> explicit a2 (N-A2) + // 0510: N-A2 processed + + // - Notifications for the same key must not overlap! + + let expected = vec![ + Event::Insert("a0"), + Event::Insert("a1"), + Event::BeginNotify("a0", RemovalCause::Expired), + Event::Insert("a2"), + Event::EndNotify("a0", RemovalCause::Expired), + Event::BeginNotify("a1", RemovalCause::Replaced), + Event::Invalidate("a2"), + Event::EndNotify("a1", RemovalCause::Replaced), + Event::BeginNotify("a2", RemovalCause::Explicit), + Event::EndNotify("a2", RemovalCause::Explicit), + ]; + + // 0000: Insert value a0 + actual.lock().push(Event::Insert("a0")); + cache.insert(KEY, "a0"); + // Call `sync` to set the last modified for the KEY immediately so that + // this entry should expire in 200 ms from now. 
+ cache.sync(); + + // 0210: Insert value a1 -> expired a0 (N-A0) + let thread1 = { + let a1 = Arc::clone(&actual); + let c1 = cache.clone(); + spawn(move || { + sleep(Duration::from_millis(210)); + a1.lock().push(Event::Insert("a1")); + c1.insert(KEY, "a1"); + }) + }; + + // 0220: Insert value a2 (waiting) (A-A2) + let thread2 = { + let a2 = Arc::clone(&actual); + let c2 = cache.clone(); + spawn(move || { + sleep(Duration::from_millis(220)); + a2.lock().push(Event::Insert("a2")); + c2.insert(KEY, "a2"); + }) + }; + + // 0320: Invalidate (waiting) (R-A2) + let thread3 = { + let a3 = Arc::clone(&actual); + let c3 = cache.clone(); + spawn(move || { + sleep(Duration::from_millis(320)); + a3.lock().push(Event::Invalidate("a2")); + c3.invalidate(&KEY); + }) + }; + + for t in vec![thread1, thread2, thread3] { + t.join().expect("Failed to join"); + } + + let actual = actual.lock(); + assert_eq!(actual.len(), expected.len()); + + for (i, (actual, expected)) in actual.iter().zip(&expected).enumerate() { + assert_eq!(actual, expected, "expected[{}]", i); + } + } + #[test] fn test_debug_format() { let cache = Cache::new(10); From b49f5e954e48aa524c552da1e9a11c06938a6e76 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Sun, 26 Jun 2022 08:03:35 +0000 Subject: [PATCH 26/44] Support notification on eviction Avoid the housekeeper and non-blocking notifier threads from stalling when the notification channel is full. --- src/sync_base/removal_notifier.rs | 34 +++++++++++++++++++++++++------ 1 file changed, 28 insertions(+), 6 deletions(-) diff --git a/src/sync_base/removal_notifier.rs b/src/sync_base/removal_notifier.rs index 3ed85181..ca7f4852 100644 --- a/src/sync_base/removal_notifier.rs +++ b/src/sync_base/removal_notifier.rs @@ -7,11 +7,14 @@ use std::{ }; use crate::{ - common::concurrent::thread_pool::{PoolName, ThreadPool, ThreadPoolRegistry}, + common::concurrent::{ + constants::WRITE_RETRY_INTERVAL_MICROS, + thread_pool::{PoolName, ThreadPool, ThreadPoolRegistry}, + }, notification::{self, DeliveryMode, EvictionListener, EvictionListenerRef, RemovalCause}, }; -use crossbeam_channel::{Receiver, Sender}; +use crossbeam_channel::{Receiver, Sender, TrySendError}; use parking_lot::Mutex; const CHANNEL_CAPACITY: usize = 1_024; @@ -156,14 +159,33 @@ where { fn add_single_notification(&self, key: Arc, value: V, cause: RemovalCause) { let entry = RemovedEntries::new_single(key, value, cause); - self.snd.send(entry).unwrap(); - self.submit_task_if_necessary(); + self.send_entries(entry) + .expect("Failed to send notification"); } fn add_multiple_notifications(&self, entries: Vec>) { let entries = RemovedEntries::new_multi(entries); - self.snd.send(entries).unwrap(); // TODO: Error handling? 
- self.submit_task_if_necessary();
+ self.send_entries(entries)
+ .expect("Failed to send notification");
+ }
+
+ fn send_entries(
+ &self,
+ entries: RemovedEntries,
+ ) -> Result<(), TrySendError>> {
+ let mut entries = entries;
+ loop {
+ self.submit_task_if_necessary();
+ match self.snd.try_send(entries) {
+ Ok(()) => break,
+ Err(TrySendError::Full(entries1)) => {
+ entries = entries1;
+ std::thread::sleep(Duration::from_millis(WRITE_RETRY_INTERVAL_MICROS));
+ }
+ Err(e @ TrySendError::Disconnected(_)) => return Err(e),
+ }
+ }
+ Ok(())
 }

 fn submit_task(&self) {

From 81d36ffe8bc65120e602d2dce09cf5479572d939 Mon Sep 17 00:00:00 2001
From: Tatsuya Kawano
Date: Sun, 26 Jun 2022 08:56:19 +0000
Subject: [PATCH 27/44] Bump the version to v0.9.0

---
 Cargo.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Cargo.toml b/Cargo.toml
index 973e8dec..831f8c06 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "moka"
-version = "0.8.6"
+version = "0.9.0"
 edition = "2018"
 rust-version = "1.51"

From 8ec7c2bd2fb0c64b38948f314f2ee25b6cd0b02d Mon Sep 17 00:00:00 2001
From: Tatsuya Kawano
Date: Sun, 26 Jun 2022 08:57:49 +0000
Subject: [PATCH 28/44] Support notification on eviction

Tweak the name of a unit test and its code comments.

---
 src/sync/cache.rs | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/src/sync/cache.rs b/src/sync/cache.rs
index 97687475..255900c6 100644
--- a/src/sync/cache.rs
+++ b/src/sync/cache.rs
@@ -2322,12 +2322,13 @@ mod tests {
 }
 }

- // This test ensures the key-level lock for the immediate delivery mode
- // is working so that the notifications for a given key should always ordered.
- // This should be true even if multiple clients try to modify the entries
- // for the key at the same time.
+ // This test ensures the key-level lock for the immediate notification
+ // delivery mode is working so that the notifications for a given key
+ // should always be ordered. This is true even if multiple client threads
+ // try to modify the entries for the key at the same time. (This test will
+ // run three client threads)
 #[test]
- fn test_immediate_removal_notifications_on_the_same_key() {
+ fn test_key_lock_used_by_immediate_removal_notifications() {
 use std::thread::{sleep, spawn};

 const KEY: &str = "alice";
@@ -2363,6 +2364,8 @@ mod tests {
 .time_to_live(Duration::from_millis(200))
 .build();

+ // - Notifications for the same key must not overlap.
+
 // Time Event
 // ----- -------------------------------------
 // 0000: Insert value a0
@@ -2370,13 +2373,12 @@ mod tests {
 // 0210: Insert value a1 -> expired a0 (N-A0)
 // 0220: Insert value a2 (waiting) (A-A2)
 // 0310: N-A0 processed
- // I-A2 inserted -> replace a1 (N-A1)
+ // A-A2 finished waiting -> replace a1 (N-A1)
 // 0320: Invalidate (waiting) (R-A2)
 // 0410: N-A1 processed
- // R-A2 processed -> explicit a2 (N-A2)
+ // R-A2 finished waiting -> explicit a2 (N-A2)
 // 0510: N-A2 processed
-
- // - Notifications for the same key must not overlap!

From a6fbf2a6892cb97b18f848849b1e62e055486969 Mon Sep 17 00:00:00 2001
From: Tatsuya Kawano
Date: Sun, 26 Jun 2022 09:48:45 +0000
Subject: [PATCH 29/44] Support notification on eviction

Add panic handling to the notifiers. When the user-provided listener
panics, the notifier will no longer call the listener, for safety.
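The diff below implements this with `std::panic::catch_unwind`. As a minimal, self-contained sketch of the pattern only (an illustration with made-up names, not moka's notifier code): catch the unwind coming out of the user-supplied closure, flip an `AtomicBool` so the listener is never invoked again, and, as the blocking notifier in this patch does, re-raise the panic on the calling thread.

use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};
use std::sync::atomic::{AtomicBool, Ordering};

struct PanicGuardedListener<F> {
    listener: F,
    is_enabled: AtomicBool,
}

impl<F: Fn(&str)> PanicGuardedListener<F> {
    fn new(listener: F) -> Self {
        Self { listener, is_enabled: AtomicBool::new(true) }
    }

    fn notify(&self, key: &str) {
        // Once the listener has panicked, silently skip all later notifications.
        if !self.is_enabled.load(Ordering::Acquire) {
            return;
        }
        // AssertUnwindSafe is acceptable here because the listener is never
        // called again after it has panicked.
        let result = catch_unwind(AssertUnwindSafe(|| (self.listener)(key)));
        if let Err(payload) = result {
            self.is_enabled.store(false, Ordering::Release);
            resume_unwind(payload); // surface the panic to the caller
        }
    }
}

fn main() {
    let notifier = PanicGuardedListener::new(|k: &str| {
        if k == "boom" {
            panic!("listener failed");
        }
        println!("notified: {}", k);
    });
    notifier.notify("ok");
    // The first panic is re-raised; catch it so the example can continue.
    let _ = catch_unwind(AssertUnwindSafe(|| notifier.notify("boom")));
    notifier.notify("ok"); // prints nothing: the listener is now disabled
}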
--- src/sync/cache.rs | 1 - src/sync_base/removal_notifier.rs | 89 +++++++++++++++++++++++-------- 2 files changed, 66 insertions(+), 24 deletions(-) diff --git a/src/sync/cache.rs b/src/sync/cache.rs index 255900c6..4aae6d2f 100644 --- a/src/sync/cache.rs +++ b/src/sync/cache.rs @@ -2379,7 +2379,6 @@ mod tests { // R-A2 finished waiting -> explicit a2 (N-A2) // 0510: N-A2 processed - let expected = vec![ Event::Insert("a0"), Event::Insert("a1"), diff --git a/src/sync_base/removal_notifier.rs b/src/sync_base/removal_notifier.rs index ca7f4852..fc4d66fe 100644 --- a/src/sync_base/removal_notifier.rs +++ b/src/sync_base/removal_notifier.rs @@ -88,23 +88,33 @@ impl RemovalNotifier { pub(crate) struct BlockingRemovalNotifier { listener: EvictionListener, + is_enabled: AtomicBool, } impl BlockingRemovalNotifier { fn new(listener: EvictionListener) -> Self { - Self { listener } + Self { + listener, + is_enabled: AtomicBool::new(true), + } } fn notify(&self, key: Arc, value: V, cause: RemovalCause) { - // use std::panic::{catch_unwind, AssertUnwindSafe}; + use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe}; - (self.listener)(key, value, cause); + if !self.is_enabled.load(Ordering::Acquire) { + return; + } + + let listener_clo = || (self.listener)(key, value, cause); - // let listener_clo = || listener(key, value, cause); - // match catch_unwind(AssertUnwindSafe(listener_clo)) { - // Ok(_) => todo!(), - // Err(_) => todo!(), - // } + // Safety: It is safe to assert unwind safety here because we will not + // call the listener again if it has been panicked. + let result = catch_unwind(AssertUnwindSafe(listener_clo)); + if let Err(payload) = result { + self.is_enabled.store(false, Ordering::Release); + resume_unwind(payload); + } } } @@ -141,6 +151,7 @@ impl ThreadPoolRemovalNotifier { task_lock: Default::default(), rcv, listener, + is_enabled: AtomicBool::new(true), is_running: Default::default(), is_shutting_down: Default::default(), }; @@ -191,12 +202,14 @@ where fn submit_task(&self) { // TODO: Use compare and exchange to ensure it was false. - if self.state.is_running() { + let state = &self.state; + + if state.is_running() || !state.is_enabled() || state.is_shutting_down() { return; } - self.state.set_running(true); + state.set_running(true); - let task = NotificationTask::new(&self.state); + let task = NotificationTask::new(state); self.thread_pool.pool.execute(move || { task.execute(); }); @@ -221,23 +234,36 @@ impl NotificationTask { } fn execute(&self) { + // Only one task can be executed at a time for a cache segment. 
let task_lock = self.state.task_lock.lock(); let mut count = 0u16; + let mut is_enabled = self.state.is_enabled(); + + if !is_enabled { + return; + } while let Ok(entries) = self.state.rcv.try_recv() { match entries { RemovedEntries::Single(entry) => { - self.notify(&self.state.listener, entry); + let result = self.notify(&self.state.listener, entry); + if result.is_err() { + is_enabled = false; + break; + } count += 1; } RemovedEntries::Multi(entries) => { for entry in entries { - self.notify(&self.state.listener, entry); - count += 1; - + let result = self.notify(&self.state.listener, entry); + if result.is_err() { + is_enabled = false; + break; + } if self.state.is_shutting_down() { break; } + count += 1; } } } @@ -247,21 +273,29 @@ impl NotificationTask { } } + if !is_enabled { + self.state.set_enabled(false); + } + std::mem::drop(task_lock); self.state.set_running(false); } - fn notify(&self, listener: EvictionListenerRef<'_, K, V>, entry: RemovedEntry) { - // use std::panic::{catch_unwind, AssertUnwindSafe}; + /// Returns `Ok(())` when calling the listener succeeded. Returns + /// `Err(panic_payload)` when the listener panicked. + fn notify( + &self, + listener: EvictionListenerRef<'_, K, V>, + entry: RemovedEntry, + ) -> Result<(), Box> { + use std::panic::{catch_unwind, AssertUnwindSafe}; let RemovedEntry { key, value, cause } = entry; - listener(key, value, cause); + let listener_clo = || (listener)(key, value, cause); - // let listener_clo = || listener(key, value, cause); - // match catch_unwind(AssertUnwindSafe(listener_clo)) { - // Ok(_) => todo!(), - // Err(_) => todo!(), - // } + // Safety: It is safe to assert unwind safety here because we will not + // call the listener again if it has been panicked. + catch_unwind(AssertUnwindSafe(listener_clo)) } } @@ -269,11 +303,20 @@ struct NotifierState { task_lock: Mutex<()>, rcv: Receiver>, listener: EvictionListener, + is_enabled: AtomicBool, is_running: AtomicBool, is_shutting_down: AtomicBool, } impl NotifierState { + fn is_enabled(&self) -> bool { + self.is_enabled.load(Ordering::Acquire) + } + + fn set_enabled(&self, value: bool) { + self.is_enabled.store(value, Ordering::Release); + } + fn is_running(&self) -> bool { self.is_running.load(Ordering::Acquire) } From 84f52ec558fdcd60cc3ef55b1787efcccca71464 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Tue, 28 Jun 2022 01:04:29 +0000 Subject: [PATCH 30/44] Merge branch 'master' into next (continued) Add `#[cfg(any(feature = ..., ))]` to a method. --- src/common/concurrent.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/common/concurrent.rs b/src/common/concurrent.rs index 3841fe0a..3da5b898 100644 --- a/src/common/concurrent.rs +++ b/src/common/concurrent.rs @@ -82,7 +82,7 @@ impl KeyDate { self.entry_info.last_modified() } - // #[cfg(any(feature = "sync", feature = "future"))] + #[cfg(any(feature = "sync", feature = "future"))] pub(crate) fn is_dirty(&self) -> bool { self.entry_info.is_dirty() } From 3b40d960e219fb421577926c5a8cd93ff398c9c0 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Tue, 28 Jun 2022 11:16:26 +0000 Subject: [PATCH 31/44] Support notification on eviction Add unit tests for testing recovery from panicking eviction listener. 
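The tests added below drive a listener that panics on purpose and then check that the cache keeps working while further notifications are suppressed. From the user's side, the guidance written later in this series is simply to keep the listener panic-free. A small sketch of such a defensive listener, using the `eviction_listener` builder API introduced in this series (the file-removal body and names are hypothetical, for illustration only):

```rust
use moka::{notification::RemovalCause, sync::Cache};
use std::{path::PathBuf, sync::Arc};

fn main() {
    // A listener that never panics: errors are reported and swallowed
    // instead of being unwrapped, so the cache keeps calling the listener.
    let listener = |key: Arc<String>, path: PathBuf, cause: RemovalCause| {
        if let Err(e) = std::fs::remove_file(&path) {
            eprintln!(
                "failed to remove {:?} for key {} (cause: {:?}): {}",
                path, key, cause, e
            );
        }
    };

    let cache: Cache<String, PathBuf> = Cache::builder()
        .max_capacity(100)
        .eviction_listener(listener)
        .build();

    cache.insert("user1".to_string(), std::env::temp_dir().join("user1.data"));
    cache.invalidate(&"user1".to_string());
}
```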
--- src/future/cache.rs | 91 ++++++++++++++++++++++++++++++++++----------- src/sync/cache.rs | 72 +++++++++++++++++++++++++++++++---- 2 files changed, 133 insertions(+), 30 deletions(-) diff --git a/src/future/cache.rs b/src/future/cache.rs index 235da758..edfef93d 100644 --- a/src/future/cache.rs +++ b/src/future/cache.rs @@ -2284,13 +2284,6 @@ mod tests { #[tokio::test] async fn test_removal_notifications() { - // NOTE: The following tests also check the notifications: - // - basic_single_thread - // - size_aware_eviction - // - invalidate_entries_if - // - time_to_live - // - time_to_idle - // The following `Vec`s will hold actual and expected notifications. let actual = Arc::new(Mutex::new(Vec::new())); let mut expected = Vec::new(); @@ -2440,6 +2433,75 @@ mod tests { verify_notification_vec(&cache, actual, &expected); } + #[tokio::test] + async fn recover_from_panicking_eviction_listener() { + use futures_util::FutureExt; + use std::panic::AssertUnwindSafe; + + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener that panics when it see + // a value "panic now!". + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| { + if v == "panic now!" { + panic!("Panic now!"); + } + a1.lock().push((k, v, cause)) + }; + + // Create a cache with the eviction listener. + let mut cache = Cache::builder().eviction_listener(listener).build(); + cache.reconfigure_for_testing(); + + // Make the cache exterior immutable. + let cache = cache; + + // Insert an okay value. + cache.insert("alice", "a0").await; + cache.sync(); + + // Insert a value that will cause the eviction listener to panic. + cache.insert("alice", "panic now!").await; + expected.push((Arc::new("alice"), "a0", RemovalCause::Replaced)); + cache.sync(); + + // Insert an okay value. This will replace the previsous + // value "panic now!" so the eviction listener will panick. + match AssertUnwindSafe(cache.insert("alice", "a2")) + .catch_unwind() + .await + { + Ok(()) => (), // pass + r => panic!("Unexpected result: {:?}", r), + } + cache.sync(); + // No more removal notification should be sent. + + // Invalidate the okay value. 
+ cache.invalidate(&"alice").await; + cache.sync(); + + verify_notification_vec(&cache, actual, &expected); + } + + #[tokio::test] + async fn test_debug_format() { + let cache = Cache::new(10); + cache.insert('a', "alice").await; + cache.insert('b', "bob").await; + cache.insert('c', "cindy").await; + + let debug_str = format!("{:?}", cache); + assert!(debug_str.starts_with('{')); + assert!(debug_str.contains(r#"'a': "alice""#)); + assert!(debug_str.contains(r#"'b': "bob""#)); + assert!(debug_str.contains(r#"'c': "cindy""#)); + assert!(debug_str.ends_with('}')); + } + type NotificationTuple = (Arc, V, RemovalCause); fn verify_notification_vec( @@ -2476,19 +2538,4 @@ mod tests { break; } } - - #[tokio::test] - async fn test_debug_format() { - let cache = Cache::new(10); - cache.insert('a', "alice").await; - cache.insert('b', "bob").await; - cache.insert('c', "cindy").await; - - let debug_str = format!("{:?}", cache); - assert!(debug_str.starts_with('{')); - assert!(debug_str.contains(r#"'a': "alice""#)); - assert!(debug_str.contains(r#"'b': "bob""#)); - assert!(debug_str.contains(r#"'c': "cindy""#)); - assert!(debug_str.ends_with('}')); - } } diff --git a/src/sync/cache.rs b/src/sync/cache.rs index 4aae6d2f..9e8f2272 100644 --- a/src/sync/cache.rs +++ b/src/sync/cache.rs @@ -2130,16 +2130,8 @@ mod tests { ); } - // TODO: In general, test both blocking and non-blocking notifications. #[test] fn test_removal_notifications() { - // NOTE: The following tests also check the notifications: - // - basic_single_thread - // - size_aware_eviction - // - invalidate_entries_if - // - time_to_live - // - time_to_idle - run_test(DeliveryMode::Immediate); run_test(DeliveryMode::Queued); @@ -2444,6 +2436,70 @@ mod tests { } } + #[test] + fn recover_from_panicking_eviction_listener() { + run_test(DeliveryMode::Immediate); + run_test(DeliveryMode::Queued); + + fn run_test(delivery_mode: DeliveryMode) { + use std::panic::{catch_unwind, AssertUnwindSafe}; + + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener that panics when it see + // a value "panic now!". + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| { + if v == "panic now!" { + panic!("Panic now!"); + } + a1.lock().push((k, v, cause)) + }; + let listener_conf = notification::Configuration::builder() + .delivery_mode(delivery_mode) + .build(); + + // Create a cache with the eviction listener. + let mut cache = Cache::builder() + .eviction_listener_with_conf(listener, listener_conf) + .build(); + cache.reconfigure_for_testing(); + + // Make the cache exterior immutable. + let cache = cache; + + // Insert an okay value. + cache.insert("alice", "a0"); + cache.sync(); + + // Insert a value that will cause the eviction listener to panic. + cache.insert("alice", "panic now!"); + expected.push((Arc::new("alice"), "a0", RemovalCause::Replaced)); + cache.sync(); + + // Insert an okay value. This will replace the previsous + // value "panic now!" so the eviction listener will panick. + match catch_unwind(AssertUnwindSafe(|| cache.insert("alice", "a2"))) { + Ok(()) if delivery_mode == DeliveryMode::Queued => (), // pass + Err(_) if delivery_mode == DeliveryMode::Immediate => (), // pass + r => panic!( + "Unexpected result for delivery mode {:?}: {:?}", + delivery_mode, r + ), + } + cache.sync(); + // No more removal notification should be sent. + + // Invalidate the okay value. 
+ cache.invalidate(&"alice"); + cache.sync(); + + verify_notification_vec(&cache, actual, &expected, delivery_mode); + } + } + #[test] fn test_debug_format() { let cache = Cache::new(10); From 09b986cbe2c62438f0b538405f6f4cec2bc8e0b6 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Sat, 2 Jul 2022 16:15:36 +0800 Subject: [PATCH 32/44] Update the trybuild expectations for Rust 1.62 --- tests/compile_tests/dash/clone/dash_cache_clone.stderr | 8 ++++++++ tests/compile_tests/default/clone/sync_cache_clone.stderr | 8 ++++++++ .../default/clone/sync_seg_cache_clone.stderr | 8 ++++++++ .../compile_tests/future/clone/future_cache_clone.stderr | 8 ++++++++ 4 files changed, 32 insertions(+) diff --git a/tests/compile_tests/dash/clone/dash_cache_clone.stderr b/tests/compile_tests/dash/clone/dash_cache_clone.stderr index c611960c..a9241c43 100644 --- a/tests/compile_tests/dash/clone/dash_cache_clone.stderr +++ b/tests/compile_tests/dash/clone/dash_cache_clone.stderr @@ -9,6 +9,10 @@ note: required by a bound in `moka::dash::Cache::::new` | | V: Clone + Send + Sync + 'static, | ^^^^^ required by this bound in `moka::dash::Cache::::new` +help: consider annotating `MyValue` with `#[derive(Clone)]` + | +41 | #[derive(Clone)] + | error[E0277]: the trait bound `MyBuildHasher1: Clone` is not satisfied --> tests/compile_tests/dash/clone/dash_cache_clone.rs:28:84 @@ -23,3 +27,7 @@ note: required by a bound in `moka::dash::CacheBuilder::>::build_with_hasher` +help: consider annotating `MyBuildHasher1` with `#[derive(Clone)]` + | +44 | #[derive(Clone)] + | diff --git a/tests/compile_tests/default/clone/sync_cache_clone.stderr b/tests/compile_tests/default/clone/sync_cache_clone.stderr index 33d5307a..63471d6a 100644 --- a/tests/compile_tests/default/clone/sync_cache_clone.stderr +++ b/tests/compile_tests/default/clone/sync_cache_clone.stderr @@ -9,6 +9,10 @@ note: required by a bound in `moka::sync::Cache::::new` | | V: Clone + Send + Sync + 'static, | ^^^^^ required by this bound in `moka::sync::Cache::::new` +help: consider annotating `MyValue` with `#[derive(Clone)]` + | +41 | #[derive(Clone)] + | error[E0277]: the trait bound `MyBuildHasher1: Clone` is not satisfied --> tests/compile_tests/default/clone/sync_cache_clone.rs:28:84 @@ -23,3 +27,7 @@ note: required by a bound in `moka::sync::CacheBuilder::>::build_with_hasher` +help: consider annotating `MyBuildHasher1` with `#[derive(Clone)]` + | +44 | #[derive(Clone)] + | diff --git a/tests/compile_tests/default/clone/sync_seg_cache_clone.stderr b/tests/compile_tests/default/clone/sync_seg_cache_clone.stderr index 587300ef..689a2267 100644 --- a/tests/compile_tests/default/clone/sync_seg_cache_clone.stderr +++ b/tests/compile_tests/default/clone/sync_seg_cache_clone.stderr @@ -9,6 +9,10 @@ note: required by a bound in `SegmentedCache::::new` | | V: Clone + Send + Sync + 'static, | ^^^^^ required by this bound in `SegmentedCache::::new` +help: consider annotating `MyValue` with `#[derive(Clone)]` + | +44 | #[derive(Clone)] + | error[E0277]: the trait bound `MyBuildHasher1: Clone` is not satisfied --> tests/compile_tests/default/clone/sync_seg_cache_clone.rs:30:56 @@ -23,3 +27,7 @@ note: required by a bound in `moka::sync::CacheBuilder::>::build_with_hasher` +help: consider annotating `MyBuildHasher1` with `#[derive(Clone)]` + | +47 | #[derive(Clone)] + | diff --git a/tests/compile_tests/future/clone/future_cache_clone.stderr b/tests/compile_tests/future/clone/future_cache_clone.stderr index 4bbfd621..f3b6d229 100644 --- 
a/tests/compile_tests/future/clone/future_cache_clone.stderr +++ b/tests/compile_tests/future/clone/future_cache_clone.stderr @@ -9,6 +9,10 @@ note: required by a bound in `moka::future::Cache::::new` | | V: Clone + Send + Sync + 'static, | ^^^^^ required by this bound in `moka::future::Cache::::new` +help: consider annotating `MyValue` with `#[derive(Clone)]` + | +42 | #[derive(Clone)] + | error[E0277]: the trait bound `MyBuildHasher1: Clone` is not satisfied --> tests/compile_tests/future/clone/future_cache_clone.rs:29:84 @@ -23,3 +27,7 @@ note: required by a bound in `moka::future::CacheBuilder::>::build_with_hasher` +help: consider annotating `MyBuildHasher1` with `#[derive(Clone)]` + | +45 | #[derive(Clone)] + | From 7911debcac67eab97dad33a269b2547e4823c93a Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Sat, 2 Jul 2022 16:45:26 +0800 Subject: [PATCH 33/44] Add `logging` feature to enable optional `log` crate dependency --- .github/workflows/CI.yml | 4 ++-- .github/workflows/CIQuantaDisabled.yml | 4 ++-- Cargo.toml | 8 ++++++++ 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 73bff17c..23d21112 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -88,11 +88,11 @@ jobs: command: test args: --no-default-features --features 'future, atomic64, quanta' - - name: Run tests (future and sync features) + - name: Run tests (future, sync and logging features) uses: actions-rs/cargo@v1 with: command: test - args: --features 'future, sync' + args: --features 'future, sync, logging' - name: Run tests (dash feature, but no sync feature) uses: actions-rs/cargo@v1 diff --git a/.github/workflows/CIQuantaDisabled.yml b/.github/workflows/CIQuantaDisabled.yml index 31b563ab..c59a7a37 100644 --- a/.github/workflows/CIQuantaDisabled.yml +++ b/.github/workflows/CIQuantaDisabled.yml @@ -82,11 +82,11 @@ jobs: command: test args: --no-default-features --features 'future, atomic64' - - name: Run tests (future feature, but no quanta feature) + - name: Run tests (future, sync and logging features, but no quanta feature) uses: actions-rs/cargo@v1 with: command: test - args: --no-default-features --features 'sync, future, atomic64' + args: --no-default-features --features 'sync, future, atomic64, logging' - name: Run tests (dash feature, but no quanta and sync features) uses: actions-rs/cargo@v1 diff --git a/Cargo.toml b/Cargo.toml index 15b527b1..c45460e7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,6 +30,11 @@ future = ["crossbeam-epoch", "thiserror", "uuid", "async-io", "async-lock", "fut # few releases. dash = ["dashmap"] +# Enable this feature to activate optional logging from caches. +# Currently cache will emit log only when it encounters a panic in user provided +# callback closure. +logging = ["log"] + # This feature is enabled by default. Disable it when the target platform does not # support `std::sync::atomic::AtomicU64`. (e.g. 
`armv5te-unknown-linux-musleabi` # or `mips-unknown-linux-musl`) @@ -73,6 +78,9 @@ async-io = { version = "1.4", optional = true } async-lock = { version = "2.4", optional = true } futures-util = { version = "0.3", optional = true } +# Optional dependencies (logging) +log = { version = "0.4", optional = true } + [dev-dependencies] actix-rt = { version = "2.7", default-features = false } async-std = { version = "1.11", features = ["attributes"] } From b97f62266d64bedab56045db5eb79b46f709f6dc Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Sat, 2 Jul 2022 10:18:00 +0000 Subject: [PATCH 34/44] Support notification on eviction Emit an error log when the user provided eviction listener panics. (Requires `logging` feature enabled) --- Cargo.toml | 1 + src/future/cache.rs | 19 +++++++------ src/sync/cache.rs | 21 +++++++-------- src/sync_base/removal_notifier.rs | 44 ++++++++++++++++++++++--------- 4 files changed, 51 insertions(+), 34 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index d3f8c0fc..ed95765b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -85,6 +85,7 @@ log = { version = "0.4", optional = true } actix-rt = { version = "2.7", default-features = false } anyhow = "1.0" async-std = { version = "1.11", features = ["attributes"] } +env_logger = "0.9" getrandom = "0.2" reqwest = "0.11.11" skeptic = "0.13" diff --git a/src/future/cache.rs b/src/future/cache.rs index 81d0c7ae..af5334d1 100644 --- a/src/future/cache.rs +++ b/src/future/cache.rs @@ -2449,10 +2449,15 @@ mod tests { verify_notification_vec(&cache, actual, &expected); } + // NOTE: To enable the panic logging, run the following command: + // + // RUST_LOG=moka=info cargo test --features 'future, logging' -- \ + // future::cache::tests::recover_from_panicking_eviction_listener --exact --nocapture + // #[tokio::test] async fn recover_from_panicking_eviction_listener() { - use futures_util::FutureExt; - use std::panic::AssertUnwindSafe; + #[cfg(feature = "logging")] + let _ = env_logger::builder().is_test(true).try_init(); // The following `Vec`s will hold actual and expected notifications. let actual = Arc::new(Mutex::new(Vec::new())); @@ -2485,14 +2490,8 @@ mod tests { cache.sync(); // Insert an okay value. This will replace the previsous - // value "panic now!" so the eviction listener will panick. - match AssertUnwindSafe(cache.insert("alice", "a2")) - .catch_unwind() - .await - { - Ok(()) => (), // pass - r => panic!("Unexpected result: {:?}", r), - } + // value "panic now!" so the eviction listener will panic. + cache.insert("alice", "a2").await; cache.sync(); // No more removal notification should be sent. diff --git a/src/sync/cache.rs b/src/sync/cache.rs index c60d374d..f43747b2 100644 --- a/src/sync/cache.rs +++ b/src/sync/cache.rs @@ -2439,14 +2439,20 @@ mod tests { } } + // NOTE: To enable the panic logging, run the following command: + // + // RUST_LOG=moka=info cargo test --features 'logging' -- \ + // sync::cache::tests::recover_from_panicking_eviction_listener --exact --nocapture + // #[test] fn recover_from_panicking_eviction_listener() { + #[cfg(feature = "logging")] + let _ = env_logger::builder().is_test(true).try_init(); + run_test(DeliveryMode::Immediate); run_test(DeliveryMode::Queued); fn run_test(delivery_mode: DeliveryMode) { - use std::panic::{catch_unwind, AssertUnwindSafe}; - // The following `Vec`s will hold actual and expected notifications. let actual = Arc::new(Mutex::new(Vec::new())); let mut expected = Vec::new(); @@ -2483,15 +2489,8 @@ mod tests { cache.sync(); // Insert an okay value. 
This will replace the previsous - // value "panic now!" so the eviction listener will panick. - match catch_unwind(AssertUnwindSafe(|| cache.insert("alice", "a2"))) { - Ok(()) if delivery_mode == DeliveryMode::Queued => (), // pass - Err(_) if delivery_mode == DeliveryMode::Immediate => (), // pass - r => panic!( - "Unexpected result for delivery mode {:?}: {:?}", - delivery_mode, r - ), - } + // value "panic now!" so the eviction listener will panic. + cache.insert("alice", "a2"); cache.sync(); // No more removal notification should be sent. diff --git a/src/sync_base/removal_notifier.rs b/src/sync_base/removal_notifier.rs index fc4d66fe..283ddd1b 100644 --- a/src/sync_base/removal_notifier.rs +++ b/src/sync_base/removal_notifier.rs @@ -23,7 +23,6 @@ const MAX_NOTIFICATIONS_PER_TASK: u16 = 5_000; pub(crate) enum RemovalNotifier { Blocking(BlockingRemovalNotifier), - // NonBlocking(NonBlockingRemovalNotifier), ThreadPool(ThreadPoolRemovalNotifier), } @@ -40,11 +39,7 @@ impl RemovalNotifier { } pub(crate) fn is_batching_supported(&self) -> bool { - matches!( - self, - // RemovalNotifier::NonBlocking(_) | RemovalNotifier::ThreadPool(_) - RemovalNotifier::ThreadPool(_) - ) + matches!(self, RemovalNotifier::ThreadPool(_)) } pub(crate) fn notify(&self, key: Arc, value: V, cause: RemovalCause) @@ -54,7 +49,6 @@ impl RemovalNotifier { { match self { RemovalNotifier::Blocking(notifier) => notifier.notify(key, value, cause), - // RemovalNotifier::NonBlocking(_) => todo!(), RemovalNotifier::ThreadPool(notifier) => { notifier.add_single_notification(key, value, cause) } @@ -68,7 +62,6 @@ impl RemovalNotifier { { match self { RemovalNotifier::Blocking(_) => unreachable!(), - // RemovalNotifier::NonBlocking(_) => todo!(), RemovalNotifier::ThreadPool(notifier) => notifier.add_multiple_notifications(entries), } } @@ -80,7 +73,6 @@ impl RemovalNotifier { { match self { RemovalNotifier::Blocking(_) => unreachable!(), - // RemovalNotifier::NonBlocking(_) => todo!(), RemovalNotifier::ThreadPool(notifier) => notifier.submit_task(), } } @@ -100,7 +92,7 @@ impl BlockingRemovalNotifier { } fn notify(&self, key: Arc, value: V, cause: RemovalCause) { - use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe}; + use std::panic::{catch_unwind, AssertUnwindSafe}; if !self.is_enabled.load(Ordering::Acquire) { return; @@ -111,9 +103,10 @@ impl BlockingRemovalNotifier { // Safety: It is safe to assert unwind safety here because we will not // call the listener again if it has been panicked. let result = catch_unwind(AssertUnwindSafe(listener_clo)); - if let Err(payload) = result { + if let Err(_payload) = result { self.is_enabled.store(false, Ordering::Release); - resume_unwind(payload); + #[cfg(feature = "logging")] + log_panic(&*_payload); } } } @@ -295,7 +288,14 @@ impl NotificationTask { // Safety: It is safe to assert unwind safety here because we will not // call the listener again if it has been panicked. 
- catch_unwind(AssertUnwindSafe(listener_clo)) + let result = catch_unwind(AssertUnwindSafe(listener_clo)); + #[cfg(feature = "logging")] + { + if let Err(payload) = &result { + log_panic(&**payload); + } + } + result } } @@ -360,3 +360,21 @@ impl RemovedEntries { Self::Multi(entries) } } + +#[cfg(feature = "logging")] +fn log_panic(payload: &(dyn std::any::Any + Send + 'static)) { + let message: Option> = if let Some(s) = payload.downcast_ref::<&str>() + { + Some((*s).into()) + } else if let Some(s) = payload.downcast_ref::() { + Some(s.into()) + } else { + None + }; + + if let Some(m) = message { + log::error!("Eviction listener panicked at '{}'", m); + } else { + log::error!("Eviction listener panicked"); + } +} From cf758d575a53db0bd9263f117ca56fe5c5050136 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Sat, 2 Jul 2022 10:35:56 +0000 Subject: [PATCH 35/44] Fix Clippy warning --- src/sync_base/removal_notifier.rs | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/src/sync_base/removal_notifier.rs b/src/sync_base/removal_notifier.rs index 283ddd1b..12aaff60 100644 --- a/src/sync_base/removal_notifier.rs +++ b/src/sync_base/removal_notifier.rs @@ -363,14 +363,13 @@ impl RemovedEntries { #[cfg(feature = "logging")] fn log_panic(payload: &(dyn std::any::Any + Send + 'static)) { - let message: Option> = if let Some(s) = payload.downcast_ref::<&str>() - { - Some((*s).into()) - } else if let Some(s) = payload.downcast_ref::() { - Some(s.into()) - } else { - None - }; + // Try to downcast the payload into &str or String. + // + // NOTE: Clippy will complain if we use `if let Some(_)` here. + // https://rust-lang.github.io/rust-clippy/master/index.html#manual_map + let message: Option> = + (payload.downcast_ref::<&str>().map(|s| (*s).into())) + .or_else(|| payload.downcast_ref::().map(Into::into)); if let Some(m) = message { log::error!("Eviction listener panicked at '{}'", m); From b206dbcc88c637acf41d7e880efaf171897f9c86 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Sun, 3 Jul 2022 22:03:32 +0800 Subject: [PATCH 36/44] Support notification on eviction - Rename the `eviction_listener` method of `future::CacheBuilder` to `eviction_listener_with_queued_delivery_mode`. - Write the documentation. --- .vscode/settings.json | 1 + src/future/builder.rs | 181 ++--------------- src/future/cache.rs | 252 ++++++++++++++++++++++-- src/notification.rs | 34 +++- src/sync/builder.rs | 198 +++---------------- src/sync/cache.rs | 443 +++++++++++++++++++++++++++++++++++++++++- 6 files changed, 756 insertions(+), 353 deletions(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index 9c0f9d19..b1b0c9f4 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -3,6 +3,7 @@ "rust-analyzer.server.extraEnv": { "CARGO_TARGET_DIR": "target/ra" }, + "editor.rulers": [85], "cSpell.words": [ "aarch", "actix", diff --git a/src/future/builder.rs b/src/future/builder.rs index 26855681..4e0ea033 100644 --- a/src/future/builder.rs +++ b/src/future/builder.rs @@ -52,168 +52,6 @@ use std::{ /// } /// ``` /// -/// # Example: Eviction Listener -/// -/// A `Cache` can be configured with an `eviction_listener`, a closure that is called -/// every time there is a cache eviction. The closure takes the key, value and -/// [`RemovalCause`](../notification/enum.RemovalCause.html) as parameters. It can be -/// used to keep other data structures in sync with the cache. 
-/// -/// The following example demonstrates how to use a cache with an `eviction_listener` -/// and `time_to_live` to manage the lifecycle of temporary files on a filesystem. -/// The cache stores the paths of the files, and when one of them has -/// expired, the eviction lister will be called with the path, so it can remove the -/// file from the filesystem. -/// -/// ```rust -/// // Cargo.toml -/// // -/// // [dependencies] -/// // anyhow = "1.0" -/// // uuid = { version = "1.1", features = ["v4"] } -/// // tokio = { version = "1.18", features = ["fs", "macros", "rt-multi-thread", "sync", "time"] } -/// -/// use moka::future::Cache; -/// -/// use anyhow::{anyhow, Context}; -/// use std::{ -/// io, -/// path::{Path, PathBuf}, -/// sync::Arc, -/// time::Duration, -/// }; -/// use tokio::{fs, sync::RwLock}; -/// use uuid::Uuid; -/// -/// /// The DataFileManager writes, reads and removes data files. -/// struct DataFileManager { -/// base_dir: PathBuf, -/// file_count: usize, -/// } -/// -/// impl DataFileManager { -/// fn new(base_dir: PathBuf) -> Self { -/// Self { -/// base_dir, -/// file_count: 0, -/// } -/// } -/// -/// async fn write_data_file(&mut self, contents: String) -> io::Result { -/// loop { -/// // Generate a unique file path. -/// let mut path = self.base_dir.to_path_buf(); -/// path.push(Uuid::new_v4().as_hyphenated().to_string()); -/// -/// if path.exists() { -/// continue; // This path is already taken by others. Retry. -/// } -/// -/// // We have got a unique file path, so create the file at -/// // the path and write the contents to the file. -/// fs::write(&path, contents).await?; -/// self.file_count += 1; -/// println!( -/// "Created a data file at {:?} (file count: {})", -/// path, self.file_count -/// ); -/// -/// // Return the path. -/// return Ok(path); -/// } -/// } -/// -/// async fn read_data_file(&self, path: impl AsRef) -> io::Result { -/// // Reads the contents of the file at the path, and return the contents. -/// fs::read_to_string(path).await -/// } -/// -/// async fn remove_data_file(&mut self, path: impl AsRef) -> io::Result<()> { -/// // Remove the file at the path. -/// fs::remove_file(path.as_ref()).await?; -/// self.file_count -= 1; -/// println!( -/// "Removed a data file at {:?} (file count: {})", -/// path.as_ref(), -/// self.file_count -/// ); -/// -/// Ok(()) -/// } -/// } -/// -/// #[tokio::main] -/// async fn main() -> anyhow::Result<()> { -/// // Create an instance of the DataFileManager and wrap it with -/// // Arc> so it can be shared across threads. -/// let file_mgr = DataFileManager::new(std::env::temp_dir()); -/// let file_mgr = Arc::new(RwLock::new(file_mgr)); -/// -/// let file_mgr1 = Arc::clone(&file_mgr); -/// let rt = tokio::runtime::Handle::current(); -/// -/// // Create an eviction lister closure. -/// let listener = move |k, v: PathBuf, cause| { -/// // Try to remove the data file at the path `v`. -/// println!( -/// "\n== An entry has been evicted. k: {:?}, v: {:?}, cause: {:?}", -/// k, v, cause -/// ); -/// rt.block_on(async { -/// // Acquire the write lock of the DataFileManager. -/// let mut mgr = file_mgr1.write().await; -/// // Remove the data file. We must handle error cases here to -/// // prevent the listener from panicking. -/// if let Err(_e) = mgr.remove_data_file(v.as_path()).await { -/// eprintln!("Failed to remove a data file at {:?}", v); -/// } -/// }); -/// }; -/// -/// // Create the cache. Set time to live for two seconds and set the -/// // eviction listener. 
-/// let cache = Cache::builder() -/// .max_capacity(100) -/// .time_to_live(Duration::from_secs(2)) -/// .eviction_listener(listener) -/// .build(); -/// -/// // Insert an entry to the cache. -/// // This will create and write a data file for the key "user1", store the -/// // path of the file to the cache, and return it. -/// println!("== try_get_with()"); -/// let path = cache -/// .try_get_with("user1", async { -/// let mut mgr = file_mgr.write().await; -/// let path = mgr -/// .write_data_file("user data".into()) -/// .await -/// .with_context(|| format!("Failed to create a data file"))?; -/// Ok(path) as anyhow::Result<_> -/// }) -/// .await -/// .map_err(|e| anyhow!("{}", e))?; -/// -/// // Read the data file at the path and print the contents. -/// println!("\n== read_data_file()"); -/// { -/// let mgr = file_mgr.read().await; -/// let contents = mgr -/// .read_data_file(path.as_path()) -/// .await -/// .with_context(|| format!("Failed to read data from {:?}", path))?; -/// println!("contents: {}", contents); -/// } -/// -/// // Sleep for five seconds. While sleeping, the cache entry for key "user1" -/// // will be expired and evicted, so the eviction lister will be called to -/// // remove the file. -/// tokio::time::sleep(Duration::from_secs(5)).await; -/// -/// Ok(()) -/// } -/// ``` -/// #[must_use] pub struct CacheBuilder { max_capacity: Option, @@ -327,7 +165,7 @@ impl CacheBuilder { } } - /// Sets the weigher closure of the cache. + /// Sets the weigher closure to the cache. /// /// The closure should take `&K` and `&V` as the arguments and returns a `u32` /// representing the relative size of the entry. @@ -338,7 +176,22 @@ impl CacheBuilder { } } - pub fn eviction_listener( + /// Sets the eviction listener closure to the cache. + /// + /// The closure should take `Arc`, `V` and [`RemovalCause`][removal-cause] as + /// the arguments. The [queued delivery mode][queued-mode] is used for the + /// listener. + /// + /// # Panics + /// + /// It is very important to make the listener closure not to panic. Otherwise, + /// the cache will stop calling the listener after a panic. This is an intended + /// behavior because the cache cannot know whether is is memory safe or not to + /// call the panicked lister again. + /// + /// [removal-cause]: ../notification/enum.RemovalCause.html + /// [queued-mode]: ../notification/enum.DeliveryMode.html#variant.Queued + pub fn eviction_listener_with_queued_delivery_mode( self, listener: impl Fn(Arc, V, RemovalCause) + Send + Sync + 'static, ) -> Self { diff --git a/src/future/cache.rs b/src/future/cache.rs index af5334d1..4d4932a1 100644 --- a/src/future/cache.rs +++ b/src/future/cache.rs @@ -40,14 +40,27 @@ use std::{ /// /// To use this cache, enable a crate feature called "future". 
/// -/// # Examples +/// # Table of Contents +/// +/// - [Example: `insert`, `get` and `invalidate`](#example-insert-get-and-invalidate) +/// - [Avoiding to clone the value at `get`](#avoiding-to-clone-the-value-at-get) +/// - [Example: Size-based Eviction](#example-size-based-eviction) +/// - [Example: Time-based Expirations](#example-time-based-expirations) +/// - [Example: Eviction Listener](#example-eviction-listener) +/// - [You should avoid eviction listener to panic](#you-should-avoid-eviction-listener-to-panic) +/// - [Delivery Modes for Eviction Listener](#delivery-modes-for-eviction-listener) +/// - [Thread Safety](#thread-safety) +/// - [Sharing a cache across threads](#sharing-a-cache-across-threads) +/// - [Hashing Algorithm](#hashing-algorithm) +/// +/// # Example: `insert`, `get` and `invalidate` /// /// Cache entries are manually added using an insert method, and are stored in the /// cache until either evicted or manually invalidated: /// /// - Inside an async context (`async fn` or `async` block), use -/// [`insert`](#method.insert), [`get_with`](#method.get_with) -/// or [`invalidate`](#method.invalidate) methods for updating the cache and `await` +/// [`insert`](#method.insert), [`get_with`](#method.get_with) or +/// [`invalidate`](#method.invalidate) methods for updating the cache and `await` /// them. /// - Outside any async context, use [`blocking`](#method.blocking) method to access /// blocking version of [`insert`](./struct.BlockingOp.html#method.insert) or @@ -123,8 +136,7 @@ use std::{ /// /// If you want to atomically initialize and insert a value when the key is not /// present, you might want to check other insertion methods -/// [`get_with`](#method.get_with) and -/// [`try_get_with`](#method.try_get_with). +/// [`get_with`](#method.get_with) and [`try_get_with`](#method.try_get_with). /// /// # Avoiding to clone the value at `get` /// @@ -141,7 +153,7 @@ use std::{ /// /// [rustdoc-std-arc]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html /// -/// # Size-based Eviction +/// # Example: Size-based Eviction /// /// ```rust /// // Cargo.toml @@ -196,7 +208,7 @@ use std::{ /// /// [builder-struct]: ./struct.CacheBuilder.html /// -/// # Time-based Expirations +/// # Example: Time-based Expirations /// /// `Cache` supports the following expiration policies: /// @@ -237,6 +249,206 @@ use std::{ /// } /// ``` /// +/// # Example: Eviction Listener +/// +/// A `Cache` can be configured with an eviction listener, a closure that is called +/// every time there is a cache eviction. The listener takes three parameters: the +/// key and value of the evicted entry, and the +/// [`RemovalCause`](../notification/enum.RemovalCause.html) to indicate why the +/// entry was evicted. +/// +/// An eviction listener can be used to keep other data structures in sync with the +/// cache. +/// +/// The following example demonstrates how to use an eviction listener with +/// time-to-live expiration to manage the lifecycle of temporary files on a +/// filesystem. The cache stores the paths of the files, and when one of them has +/// expired, the eviction lister will be called with the path, so it can remove the +/// file from the filesystem. 
+/// +/// ```rust +/// // Cargo.toml +/// // +/// // [dependencies] +/// // anyhow = "1.0" +/// // uuid = { version = "1.1", features = ["v4"] } +/// // tokio = { version = "1.18", features = ["fs", "macros", "rt-multi-thread", "sync", "time"] } +/// +/// use moka::future::Cache; +/// +/// use anyhow::{anyhow, Context}; +/// use std::{ +/// io, +/// path::{Path, PathBuf}, +/// sync::Arc, +/// time::Duration, +/// }; +/// use tokio::{fs, sync::RwLock}; +/// use uuid::Uuid; +/// +/// /// The DataFileManager writes, reads and removes data files. +/// struct DataFileManager { +/// base_dir: PathBuf, +/// file_count: usize, +/// } +/// +/// impl DataFileManager { +/// fn new(base_dir: PathBuf) -> Self { +/// Self { +/// base_dir, +/// file_count: 0, +/// } +/// } +/// +/// async fn write_data_file(&mut self, contents: String) -> io::Result { +/// loop { +/// // Generate a unique file path. +/// let mut path = self.base_dir.to_path_buf(); +/// path.push(Uuid::new_v4().as_hyphenated().to_string()); +/// +/// if path.exists() { +/// continue; // This path is already taken by others. Retry. +/// } +/// +/// // We have got a unique file path, so create the file at +/// // the path and write the contents to the file. +/// fs::write(&path, contents).await?; +/// self.file_count += 1; +/// println!( +/// "Created a data file at {:?} (file count: {})", +/// path, self.file_count +/// ); +/// +/// // Return the path. +/// return Ok(path); +/// } +/// } +/// +/// async fn read_data_file(&self, path: impl AsRef) -> io::Result { +/// // Reads the contents of the file at the path, and return the contents. +/// fs::read_to_string(path).await +/// } +/// +/// async fn remove_data_file(&mut self, path: impl AsRef) -> io::Result<()> { +/// // Remove the file at the path. +/// fs::remove_file(path.as_ref()).await?; +/// self.file_count -= 1; +/// println!( +/// "Removed a data file at {:?} (file count: {})", +/// path.as_ref(), +/// self.file_count +/// ); +/// +/// Ok(()) +/// } +/// } +/// +/// #[tokio::main] +/// async fn main() -> anyhow::Result<()> { +/// // Create an instance of the DataFileManager and wrap it with +/// // Arc> so it can be shared across threads. +/// let file_mgr = DataFileManager::new(std::env::temp_dir()); +/// let file_mgr = Arc::new(RwLock::new(file_mgr)); +/// +/// let file_mgr1 = Arc::clone(&file_mgr); +/// let rt = tokio::runtime::Handle::current(); +/// +/// // Create an eviction lister closure. +/// let listener = move |k, v: PathBuf, cause| { +/// // Try to remove the data file at the path `v`. +/// println!( +/// "\n== An entry has been evicted. k: {:?}, v: {:?}, cause: {:?}", +/// k, v, cause +/// ); +/// rt.block_on(async { +/// // Acquire the write lock of the DataFileManager. +/// let mut mgr = file_mgr1.write().await; +/// // Remove the data file. We must handle error cases here to +/// // prevent the listener from panicking. +/// if let Err(_e) = mgr.remove_data_file(v.as_path()).await { +/// eprintln!("Failed to remove a data file at {:?}", v); +/// } +/// }); +/// }; +/// +/// // Create the cache. Set time to live for two seconds and set the +/// // eviction listener. +/// let cache = Cache::builder() +/// .max_capacity(100) +/// .time_to_live(Duration::from_secs(2)) +/// .eviction_listener_with_queued_delivery_mode(listener) +/// .build(); +/// +/// // Insert an entry to the cache. +/// // This will create and write a data file for the key "user1", store the +/// // path of the file to the cache, and return it. 
+/// println!("== try_get_with()"); +/// let path = cache +/// .try_get_with("user1", async { +/// let mut mgr = file_mgr.write().await; +/// let path = mgr +/// .write_data_file("user data".into()) +/// .await +/// .with_context(|| format!("Failed to create a data file"))?; +/// Ok(path) as anyhow::Result<_> +/// }) +/// .await +/// .map_err(|e| anyhow!("{}", e))?; +/// +/// // Read the data file at the path and print the contents. +/// println!("\n== read_data_file()"); +/// { +/// let mgr = file_mgr.read().await; +/// let contents = mgr +/// .read_data_file(path.as_path()) +/// .await +/// .with_context(|| format!("Failed to read data from {:?}", path))?; +/// println!("contents: {}", contents); +/// } +/// +/// // Sleep for five seconds. While sleeping, the cache entry for key "user1" +/// // will be expired and evicted, so the eviction lister will be called to +/// // remove the file. +/// tokio::time::sleep(Duration::from_secs(5)).await; +/// +/// Ok(()) +/// } +/// ``` +/// +/// ## You should avoid eviction listener to panic +/// +/// It is very important to make an eviction listener closure not to panic. +/// Otherwise, the cache will stop calling the listener after a panic. This is an +/// intended behavior because the cache cannot know whether it is memory safe or not +/// to call the panicked lister again. +/// +/// When a listener panics, the cache will swallow the panic and disable the +/// listener. If you want to know when a listener panics and the reason of the panic, +/// you can enable an optional `logging` feature of Moka and check error-level logs. +/// +/// To enable the `logging`, do the followings: +/// +/// 1. In `Cargo.toml`, add the crate feature `logging` for `moka`. +/// 2. Set the logging level for `moka` to `error` or any lower levels (`warn`, +/// `info`, ...): +/// - If you are using the `env_logger` crate, you can achieve this by setting +/// `RUST_LOG` environment variable to `moka=error`. +/// 3. If you have more than one cache, you may want to set a distinct name for each +/// cache by using builder's [`name`](#method.name) method. (TODO: Add the `name` +/// method to the builder) +/// +/// ## Delivery Modes for Eviction Listener +/// +/// The [`DeliveryMode`][delivery-mode] specifies how and when an eviction +/// notifications should be delivered to an eviction listener. Currently, the +/// `future::Cache` supports only one delivery mode: `Queued` mode. +/// +/// For more details about the delivery modes, see [this section][sync-delivery-modes] +/// of `sync::Cache` documentation. +/// +/// [delivery-mode]: ../notification/enum.DeliveryMode.html +/// [sync-delivery-modes]: ../sync/struct.Cache.html#delivery-modes-for-eviction-listener +/// /// # Thread Safety /// /// All methods provided by the `Cache` are considered thread-safe, and can be safely @@ -272,9 +484,9 @@ use std::{ /// protect against attacks such as HashDoS. /// /// The hashing algorithm can be replaced on a per-`Cache` basis using the -/// [`build_with_hasher`][build-with-hasher-method] method of the -/// `CacheBuilder`. Many alternative algorithms are available on crates.io, such -/// as the [aHash][ahash-crate] crate. +/// [`build_with_hasher`][build-with-hasher-method] method of the `CacheBuilder`. +/// Many alternative algorithms are available on crates.io, such as the +/// [aHash][ahash-crate] crate. 
/// /// [build-with-hasher-method]: ./struct.CacheBuilder.html#method.build_with_hasher /// [ahash-crate]: https://crates.io/crates/ahash @@ -1147,7 +1359,7 @@ mod tests { // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(3) - .eviction_listener(listener) + .eviction_listener_with_queued_delivery_mode(listener) .build(); cache.reconfigure_for_testing(); @@ -1282,7 +1494,7 @@ mod tests { let mut cache = Cache::builder() .max_capacity(31) .weigher(weigher) - .eviction_listener(listener) + .eviction_listener_with_queued_delivery_mode(listener) .build(); cache.reconfigure_for_testing(); @@ -1428,7 +1640,7 @@ mod tests { // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(100) - .eviction_listener(listener) + .eviction_listener_with_queued_delivery_mode(listener) .build(); cache.reconfigure_for_testing(); @@ -1499,7 +1711,7 @@ mod tests { let mut cache = Cache::builder() .max_capacity(100) .support_invalidation_closures() - .eviction_listener(listener) + .eviction_listener_with_queued_delivery_mode(listener) .build(); cache.reconfigure_for_testing(); @@ -1597,7 +1809,7 @@ mod tests { let mut cache = Cache::builder() .max_capacity(100) .time_to_live(Duration::from_secs(10)) - .eviction_listener(listener) + .eviction_listener_with_queued_delivery_mode(listener) .build(); cache.reconfigure_for_testing(); @@ -1679,7 +1891,7 @@ mod tests { let mut cache = Cache::builder() .max_capacity(100) .time_to_idle(Duration::from_secs(10)) - .eviction_listener(listener) + .eviction_listener_with_queued_delivery_mode(listener) .build(); cache.reconfigure_for_testing(); @@ -2311,7 +2523,7 @@ mod tests { // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(3) - .eviction_listener(listener) + .eviction_listener_with_queued_delivery_mode(listener) .build(); cache.reconfigure_for_testing(); @@ -2369,7 +2581,7 @@ mod tests { // Create a cache with the eviction listener and also TTL and TTI. let mut cache = Cache::builder() - .eviction_listener(listener) + .eviction_listener_with_queued_delivery_mode(listener) .time_to_live(Duration::from_secs(7)) .time_to_idle(Duration::from_secs(5)) .build(); @@ -2474,7 +2686,9 @@ mod tests { }; // Create a cache with the eviction listener. - let mut cache = Cache::builder().eviction_listener(listener).build(); + let mut cache = Cache::builder() + .eviction_listener_with_queued_delivery_mode(listener) + .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. diff --git a/src/notification.rs b/src/notification.rs index c632fd77..d2b4effe 100644 --- a/src/notification.rs +++ b/src/notification.rs @@ -11,6 +11,11 @@ pub(crate) type EvictionListenerRef<'a, K, V> = // the notifications, but currently there is no way to know when all entries // have been invalidated and their notifications have been sent. +/// Configuration for an eviction listener of a cache. +/// +/// Currently only setting the [`DeliveryMode`][delivery-mode] is supported. +/// +/// [delivery-mode]: ./enum.DeliveryMode.html #[derive(Clone, Debug, Default)] pub struct Configuration { mode: DeliveryMode, @@ -26,6 +31,12 @@ impl Configuration { } } +/// Builds a [`Configuration`][conf] with some configuration knobs. +/// +/// Currently only setting the [`DeliveryMode`][delivery-mode] is supported. 
+///
+/// [conf]: ./struct.Configuration.html
+/// [delivery-mode]: ./enum.DeliveryMode.html
 #[derive(Default)]
 pub struct ConfigurationBuilder {
     mode: DeliveryMode,
@@ -37,14 +48,34 @@ impl ConfigurationBuilder {
     }
 
     pub fn delivery_mode(self, mode: DeliveryMode) -> Self {
-        // Self { mode, ..self }
         Self { mode }
     }
 }
 
+/// Specifies how and when eviction notifications should be delivered to an
+/// eviction listener.
+///
+/// For more details, see [the document][delivery-mode-doc] for `sync::CacheBuilder`.
+///
+/// [delivery-mode-doc]: ./sync/struct.CacheBuilder.html#delivery-modes-for-eviction-listener
 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
 pub enum DeliveryMode {
+    /// When the `Immediate` mode is used, a notification should be delivered to the
+    /// listener immediately after an entry is evicted. This mode also guarantees
+    /// that cache write operations such as `insert`, `get_with` and `invalidate`
+    /// and eviction notifications for a given cache key are ordered by the time when
+    /// they occurred.
+    ///
+    /// To guarantee the order, it adds some performance overhead to cache write
+    /// operations. Use this mode when the order is more important than the write
+    /// performance.
     Immediate,
+    /// When the `Queued` mode is used, a notification will be delivered to the
+    /// listener some time after an entry was evicted. Therefore, it does not
+    /// preserve the order of write operations and eviction notifications.
+    ///
+    /// Use this mode when write performance is more important than preserving the
+    /// order of write operations and eviction notifications.
     Queued,
 }
 
@@ -54,6 +85,7 @@ impl Default for DeliveryMode {
     }
 }
 
+/// Indicates the reason why a cached entry was removed.
 #[derive(Clone, Copy, Debug, PartialEq, Eq)]
 pub enum RemovalCause {
     /// The entry's expiration timestamp has passed.
diff --git a/src/sync/builder.rs b/src/sync/builder.rs
index 824f2c6e..c5a7ffc6 100644
--- a/src/sync/builder.rs
+++ b/src/sync/builder.rs
@@ -44,171 +44,6 @@ use std::{
 /// // after 30 minutes (TTL) from the insert().
 /// ```
 ///
-/// # Example: Eviction Listener
-///
-/// A `Cache` can be configured with an `eviction_listener`, a closure that is called
-/// every time there is a cache eviction. The closure takes the key, value and
-/// [`RemovalCause`](../notification/enum.RemovalCause.html) as parameters. It can be
-/// used to keep other data structures in sync with the cache.
-///
-/// The following example demonstrates how to use a cache with an `eviction_listener`
-/// and `time_to_live` to manage the lifecycle of temporary files on a filesystem.
-/// The cache stores the paths of the files, and when one of them has expired, the
-/// eviction lister will be called with the path, so it can remove the file from the
-/// filesystem.
-///
-/// ```rust
-/// // Cargo.toml
-/// //
-/// // [dependencies]
-/// // anyhow = "1.0"
-/// // uuid = { version = "1.1", features = ["v4"] }
-///
-/// use moka::{sync::Cache, notification};
-///
-/// use anyhow::{anyhow, Context};
-/// use std::{
-///     fs, io,
-///     path::{Path, PathBuf},
-///     sync::{Arc, RwLock},
-///     time::Duration,
-/// };
-/// use uuid::Uuid;
-///
-/// /// The DataFileManager writes, reads and removes data files.
-/// struct DataFileManager { -/// base_dir: PathBuf, -/// file_count: usize, -/// } -/// -/// impl DataFileManager { -/// fn new(base_dir: PathBuf) -> Self { -/// Self { -/// base_dir, -/// file_count: 0, -/// } -/// } -/// -/// fn write_data_file(&mut self, contents: String) -> io::Result { -/// loop { -/// // Generate a unique file path. -/// let mut path = self.base_dir.to_path_buf(); -/// path.push(Uuid::new_v4().as_hyphenated().to_string()); -/// -/// if path.exists() { -/// continue; // This path is already taken by others. Retry. -/// } -/// -/// // We have got a unique file path, so create the file at -/// // the path and write the contents to the file. -/// fs::write(&path, contents)?; -/// self.file_count += 1; -/// println!("Created a data file at {:?} (file count: {})", path, self.file_count); -/// -/// // Return the path. -/// return Ok(path); -/// } -/// } -/// -/// fn read_data_file(&self, path: impl AsRef) -> io::Result { -/// // Reads the contents of the file at the path, and return the contents. -/// fs::read_to_string(path) -/// } -/// -/// fn remove_data_file(&mut self, path: impl AsRef) -> io::Result<()> { -/// // Remove the file at the path. -/// fs::remove_file(path.as_ref())?; -/// self.file_count -= 1; -/// println!( -/// "Removed a data file at {:?} (file count: {})", -/// path.as_ref(), -/// self.file_count -/// ); -/// -/// Ok(()) -/// } -/// } -/// -/// fn main() -> anyhow::Result<()> { -/// // Create an instance of the DataFileManager and wrap it with -/// // Arc> so it can be shared across threads. -/// let file_mgr = DataFileManager::new(std::env::temp_dir()); -/// let file_mgr = Arc::new(RwLock::new(file_mgr)); -/// -/// let file_mgr1 = Arc::clone(&file_mgr); -/// -/// // Create an eviction lister closure. -/// let listener = move |k, v: PathBuf, cause| { -/// // Try to remove the data file at the path `v`. -/// println!( -/// "\n== An entry has been evicted. k: {:?}, v: {:?}, cause: {:?}", -/// k, v, cause -/// ); -/// -/// // Acquire the write lock of the DataFileManager. We must handle -/// // error cases here to prevent the listener from panicking. -/// match file_mgr1.write() { -/// Err(_e) => { -/// eprintln!("The lock has been poisoned"); -/// } -/// Ok(mut mgr) => { -/// // Remove the data file using the DataFileManager. -/// if let Err(_e) = mgr.remove_data_file(v.as_path()) { -/// eprintln!("Failed to remove a data file at {:?}", v); -/// } -/// } -/// } -/// }; -/// -/// let listener_conf = notification::Configuration::builder() -/// .delivery_mode(notification::DeliveryMode::Queued) -/// .build(); -/// -/// // Create the cache. Set time to live for two seconds and set the -/// // eviction listener. -/// let cache = Cache::builder() -/// .max_capacity(100) -/// .time_to_live(Duration::from_secs(2)) -/// .eviction_listener_with_conf(listener, listener_conf) -/// .build(); -/// -/// // Insert an entry to the cache. -/// // This will create and write a data file for the key "user1", store the -/// // path of the file to the cache, and return it. -/// println!("== try_get_with()"); -/// let path = cache -/// .try_get_with("user1", || -> anyhow::Result<_> { -/// let mut mgr = file_mgr -/// .write() -/// .map_err(|_e| anyhow::anyhow!("The lock has been poisoned"))?; -/// let path = mgr -/// .write_data_file("user data".into()) -/// .with_context(|| format!("Failed to create a data file"))?; -/// Ok(path) -/// }) -/// .map_err(|e| anyhow!("{}", e))?; -/// -/// // Read the data file at the path and print the contents. 
-/// println!("\n== read_data_file()"); -/// { -/// let mgr = file_mgr -/// .read() -/// .map_err(|_e| anyhow::anyhow!("The lock has been poisoned"))?; -/// let contents = mgr -/// .read_data_file(path.as_path()) -/// .with_context(|| format!("Failed to read data from {:?}", path))?; -/// println!("contents: {}", contents); -/// } -/// -/// // Sleep for five seconds. While sleeping, the cache entry for key "user1" -/// // will be expired and evicted, so the eviction lister will be called to -/// // remove the file. -/// std::thread::sleep(Duration::from_secs(5)); -/// -/// Ok(()) -/// } -/// ``` -/// #[must_use] pub struct CacheBuilder { max_capacity: Option, @@ -417,7 +252,7 @@ impl CacheBuilder { } } - /// Sets the weigher closure of the cache. + /// Sets the weigher closure to the cache. /// /// The closure should take `&K` and `&V` as the arguments and returns a `u32` /// representing the relative size of the entry. @@ -428,6 +263,21 @@ impl CacheBuilder { } } + /// Sets the eviction listener closure to the cache. + /// + /// The closure should take `Arc`, `V` and [`RemovalCause`][removal-cause] as + /// the arguments. The [immediate delivery mode][immediate-mode] is used for the + /// listener. + /// + /// # Panics + /// + /// It is very important to make the listener closure not to panic. Otherwise, + /// the cache will stop calling the listener after a panic. This is an intended + /// behavior because the cache cannot know whether is is memory safe or not to + /// call the panicked lister again. + /// + /// [removal-cause]: ../notification/enum.RemovalCause.html + /// [immediate-mode]: ../notification/enum.DeliveryMode.html#variant.Immediate pub fn eviction_listener( self, listener: impl Fn(Arc, V, RemovalCause) + Send + Sync + 'static, @@ -439,6 +289,22 @@ impl CacheBuilder { } } + /// Sets the eviction listener closure to the cache with a custom + /// [`Configuration`][conf]. Use this method if you want to change the delivery + /// mode to the queued mode. + /// + /// The closure should take `Arc`, `V` and [`RemovalCause`][removal-cause] as + /// the arguments. + /// + /// # Panics + /// + /// It is very important to make the listener closure not to panic. Otherwise, + /// the cache will stop calling the listener after a panic. This is an intended + /// behavior because the cache cannot know whether is is memory safe or not to + /// call the panicked lister again. + /// + /// [removal-cause]: ../notification/enum.RemovalCause.html + /// [conf]: ../notification/struct.Configuration.html pub fn eviction_listener_with_conf( self, listener: impl Fn(Arc, V, RemovalCause) + Send + Sync + 'static, diff --git a/src/sync/cache.rs b/src/sync/cache.rs index f43747b2..4f73a2fa 100644 --- a/src/sync/cache.rs +++ b/src/sync/cache.rs @@ -38,7 +38,23 @@ use std::{ /// replacement algorithm to determine which entries to evict when the capacity is /// exceeded. 
/// -/// # Examples +/// # Table of Contents +/// +/// - [Example: `insert`, `get` and `invalidate`](#example-insert-get-and-invalidate) +/// - [Avoiding to clone the value at `get`](#avoiding-to-clone-the-value-at-get) +/// - [Example: Size-based Eviction](#example-size-based-eviction) +/// - [Example: Time-based Expirations](#example-time-based-expirations) +/// - [Example: Eviction Listener](#example-eviction-listener) +/// - [You should avoid eviction listener to panic](#you-should-avoid-eviction-listener-to-panic) +/// - [Delivery Modes for Eviction Listener](#delivery-modes-for-eviction-listener) +/// - [`Immediate` Mode](#immediate-mode) +/// - [`Queued` Mode](#queued-mode) +/// - [Example: `Queued` Delivery Mode](#example-queued-delivery-mode) +/// - [Thread Safety](#thread-safety) +/// - [Sharing a cache across threads](#sharing-a-cache-across-threads) +/// - [Hashing Algorithm](#hashing-algorithm) +/// +/// # Example: `insert`, `get` and `invalidate` /// /// Cache entries are manually added using [`insert`](#method.insert) or /// [`get_with`](#method.get_with) methods, and are stored in @@ -119,7 +135,7 @@ use std::{ /// /// [rustdoc-std-arc]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html /// -/// # Size-based Eviction +/// # Example: Size-based Eviction /// /// ```rust /// use std::convert::TryInto; @@ -164,7 +180,7 @@ use std::{ /// /// [builder-struct]: ./struct.CacheBuilder.html /// -/// # Time-based Expirations +/// # Example: Time-based Expirations /// /// `Cache` supports the following expiration policies: /// @@ -195,6 +211,427 @@ use std::{ /// // after 30 minutes (TTL) from the insert(). /// ``` /// +/// # Example: Eviction Listener +/// +/// A `Cache` can be configured with an eviction listener, a closure that is called +/// every time there is a cache eviction. The listener takes three parameters: the +/// key and value of the evicted entry, and the +/// [`RemovalCause`](../notification/enum.RemovalCause.html) to indicate why the +/// entry was evicted. +/// +/// An eviction listener can be used to keep other data structures in sync with the +/// cache. +/// +/// The following example demonstrates how to use an eviction listener with +/// time-to-live expiration to manage the lifecycle of temporary files on a +/// filesystem. The cache stores the paths of the files, and when one of them has +/// expired, the eviction lister will be called with the path, so it can remove the +/// file from the filesystem. +/// +/// ```rust +/// // Cargo.toml +/// // +/// // [dependencies] +/// // anyhow = "1.0" +/// +/// use moka::{sync::Cache, notification}; +/// +/// use anyhow::{anyhow, Context}; +/// use std::{ +/// fs, io, +/// path::{Path, PathBuf}, +/// sync::{Arc, RwLock}, +/// time::Duration, +/// }; +/// +/// /// The DataFileManager writes, reads and removes data files. +/// struct DataFileManager { +/// base_dir: PathBuf, +/// file_count: usize, +/// } +/// +/// impl DataFileManager { +/// fn new(base_dir: PathBuf) -> Self { +/// Self { +/// base_dir, +/// file_count: 0, +/// } +/// } +/// +/// fn write_data_file( +/// &mut self, +/// key: impl AsRef, +/// contents: String +/// ) -> io::Result { +/// // Use the key as a part of the filename. +/// let mut path = self.base_dir.to_path_buf(); +/// path.push(key.as_ref()); +/// +/// assert!(!path.exists(), "Path already exists: {:?}", path); +/// +/// // create the file at the path and write the contents to the file. 
+/// fs::write(&path, contents)?; +/// self.file_count += 1; +/// println!("Created a data file at {:?} (file count: {})", path, self.file_count); +/// Ok(path) +/// } +/// +/// fn read_data_file(&self, path: impl AsRef) -> io::Result { +/// // Reads the contents of the file at the path, and return the contents. +/// fs::read_to_string(path) +/// } +/// +/// fn remove_data_file(&mut self, path: impl AsRef) -> io::Result<()> { +/// // Remove the file at the path. +/// fs::remove_file(path.as_ref())?; +/// self.file_count -= 1; +/// println!( +/// "Removed a data file at {:?} (file count: {})", +/// path.as_ref(), +/// self.file_count +/// ); +/// +/// Ok(()) +/// } +/// } +/// +/// fn main() -> anyhow::Result<()> { +/// // Create an instance of the DataFileManager and wrap it with +/// // Arc> so it can be shared across threads. +/// let file_mgr = DataFileManager::new(std::env::temp_dir()); +/// let file_mgr = Arc::new(RwLock::new(file_mgr)); +/// +/// let file_mgr1 = Arc::clone(&file_mgr); +/// +/// // Create an eviction lister closure. +/// let listener = move |k, v: PathBuf, cause| { +/// // Try to remove the data file at the path `v`. +/// println!( +/// "\n== An entry has been evicted. k: {:?}, v: {:?}, cause: {:?}", +/// k, v, cause +/// ); +/// +/// // Acquire the write lock of the DataFileManager. We must handle +/// // error cases here to prevent the listener from panicking. +/// match file_mgr1.write() { +/// Err(_e) => { +/// eprintln!("The lock has been poisoned"); +/// } +/// Ok(mut mgr) => { +/// // Remove the data file using the DataFileManager. +/// if let Err(_e) = mgr.remove_data_file(v.as_path()) { +/// eprintln!("Failed to remove a data file at {:?}", v); +/// } +/// } +/// } +/// }; +/// +/// let listener_conf = notification::Configuration::builder() +/// .delivery_mode(notification::DeliveryMode::Queued) +/// .build(); +/// +/// // Create the cache. Set time to live for two seconds and set the +/// // eviction listener. +/// let cache = Cache::builder() +/// .max_capacity(100) +/// .time_to_live(Duration::from_secs(2)) +/// .eviction_listener_with_conf(listener, listener_conf) +/// .build(); +/// +/// // Insert an entry to the cache. +/// // This will create and write a data file for the key "user1", store the +/// // path of the file to the cache, and return it. +/// println!("== try_get_with()"); +/// let key = "user1"; +/// let path = cache +/// .try_get_with(key, || -> anyhow::Result<_> { +/// let mut mgr = file_mgr +/// .write() +/// .map_err(|_e| anyhow::anyhow!("The lock has been poisoned"))?; +/// let path = mgr +/// .write_data_file(key, "user data".into()) +/// .with_context(|| format!("Failed to create a data file"))?; +/// Ok(path) +/// }) +/// .map_err(|e| anyhow!("{}", e))?; +/// +/// // Read the data file at the path and print the contents. +/// println!("\n== read_data_file()"); +/// { +/// let mgr = file_mgr +/// .read() +/// .map_err(|_e| anyhow::anyhow!("The lock has been poisoned"))?; +/// let contents = mgr +/// .read_data_file(path.as_path()) +/// .with_context(|| format!("Failed to read data from {:?}", path))?; +/// println!("contents: {}", contents); +/// } +/// +/// // Sleep for five seconds. While sleeping, the cache entry for key "user1" +/// // will be expired and evicted, so the eviction lister will be called to +/// // remove the file. 
+/// std::thread::sleep(Duration::from_secs(5));
+///
+/// Ok(())
+/// }
+/// ```
+///
+/// ## You should avoid eviction listener to panic
+///
+/// It is very important that an eviction listener closure does not panic.
+/// Otherwise, the cache will stop calling the listener after a panic. This is an
+/// intended behavior because the cache cannot know whether it is memory safe or not
+/// to call the panicked listener again.
+///
+/// When a listener panics, the cache will swallow the panic and disable the
+/// listener. If you want to know when a listener panics and the reason for the panic,
+/// you can enable an optional `logging` feature of Moka and check error-level logs.
+///
+/// To enable `logging`, do the following:
+///
+/// 1. In `Cargo.toml`, add the crate feature `logging` for `moka`.
+/// 2. Set the logging level for `moka` to `error` or any lower levels (`warn`,
+/// `info`, ...):
+/// - If you are using the `env_logger` crate, you can achieve this by setting
+/// `RUST_LOG` environment variable to `moka=error`.
+/// 3. If you have more than one cache, you may want to set a distinct name for each
+/// cache by using builder's [`name`](#method.name) method. (TODO: Add the `name`
+/// method to the builder)
+///
+/// ## Delivery Modes for Eviction Listener
+///
+/// The [`DeliveryMode`][delivery-mode] specifies how and when eviction
+/// notifications should be delivered to an eviction listener. The `sync` caches
+/// (`Cache` and `SegmentedCache`) support two delivery modes: `Immediate` and
+/// `Queued` modes.
+///
+/// [delivery-mode]: ../notification/enum.DeliveryMode.html
+///
+/// ### `Immediate` Mode
+///
+/// The `Immediate` mode is the default delivery mode for the `sync` caches. Use this
+/// mode when it is important to keep the order of write operations and eviction
+/// notifications.
+///
+/// This mode has the following characteristics:
+///
+/// - The listener is called immediately after an entry is evicted.
+/// - The listener is called by the thread who evicted the entry:
+/// - The calling thread can be a background eviction thread or a user thread
+/// invoking a cache write operation such as `insert`, `get_with` or
+/// `invalidate`.
+/// - The calling thread is blocked until the listener returns.
+/// - This mode guarantees that write operations and eviction notifications for a
+/// given cache key are ordered by the time when they occurred.
+/// - This mode adds some performance overhead to cache write operations as it uses
+/// an internal per-key lock to guarantee the ordering.
+///
+/// ### `Queued` Mode
+///
+/// Use this mode when write performance is more important than preserving the order
+/// of write operations and eviction notifications.
+///
+/// - The listener will be called some time after an entry was evicted.
+/// - A notification will be stashed in a queue. The queue will be processed by
+/// dedicated notification thread(s) and that thread will call the listener.
+/// - This mode does not preserve the order of write operations and eviction
+/// notifications.
+/// - This mode adds almost no performance overhead to cache write operations as it
+/// does not use the per-key lock.
+///
+/// ### Example: `Queued` Delivery Mode
+///
+/// Because the `Immediate` mode is the default mode for `sync` caches, the previous
+/// example was using it implicitly.
+///
+/// The following is the same example but modified for the `Queued` delivery mode.
+/// (Showing only changed lines) +/// +/// ```rust +/// // Cargo.toml +/// // +/// // [dependencies] +/// // anyhow = "1.0" +/// // uuid = { version = "1.1", features = ["v4"] } +/// +/// use moka::{sync::Cache, notification}; +/// +/// # use anyhow::{anyhow, Context}; +/// # use std::{ +/// # fs, io, +/// # path::{Path, PathBuf}, +/// # sync::{Arc, RwLock}, +/// # time::Duration, +/// # }; +/// // Use UUID crate to generate a random file name. +/// use uuid::Uuid; +/// +/// # struct DataFileManager { +/// # base_dir: PathBuf, +/// # file_count: usize, +/// # } +/// # +/// impl DataFileManager { +/// # fn new(base_dir: PathBuf) -> Self { +/// # Self { +/// # base_dir, +/// # file_count: 0, +/// # } +/// # } +/// # +/// fn write_data_file( +/// &mut self, +/// _key: impl AsRef, +/// contents: String +/// ) -> io::Result { +/// // We do not use the key for the filename anymore. Instead, we +/// // use UUID to generate a unique filename for each call. +/// loop { +/// // Generate a file path with unique file name. +/// let mut path = self.base_dir.to_path_buf(); +/// path.push(Uuid::new_v4().as_hyphenated().to_string()); +/// +/// if path.exists() { +/// continue; // This path is already taken by others. Retry. +/// } +/// +/// // We have got a unique file path, so create the file at +/// // the path and write the contents to the file. +/// fs::write(&path, contents)?; +/// self.file_count += 1; +/// println!("Created a data file at {:?} (file count: {})", path, self.file_count); +/// +/// // Return the path. +/// return Ok(path); +/// } +/// } +/// +/// // Other associate functions and methods are unchanged. +/// # +/// # fn read_data_file(&self, path: impl AsRef) -> io::Result { +/// # fs::read_to_string(path) +/// # } +/// # +/// # fn remove_data_file(&mut self, path: impl AsRef) -> io::Result<()> { +/// # fs::remove_file(path.as_ref())?; +/// # self.file_count -= 1; +/// # println!( +/// # "Removed a data file at {:?} (file count: {})", +/// # path.as_ref(), +/// # self.file_count +/// # ); +/// # +/// # Ok(()) +/// # } +/// } +/// +/// fn main() -> anyhow::Result<()> { +/// // (Omitted unchanged lines) +/// +/// # let file_mgr = DataFileManager::new(std::env::temp_dir()); +/// # let file_mgr = Arc::new(RwLock::new(file_mgr)); +/// # +/// # let file_mgr1 = Arc::clone(&file_mgr); +/// # +/// // Create an eviction lister closure. +/// // let listener = ... +/// +/// # let listener = move |k, v: PathBuf, cause| { +/// # println!( +/// # "\n== An entry has been evicted. k: {:?}, v: {:?}, cause: {:?}", +/// # k, v, cause +/// # ); +/// # +/// # match file_mgr1.write() { +/// # Err(_e) => { +/// # eprintln!("The lock has been poisoned"); +/// # } +/// # Ok(mut mgr) => { +/// # if let Err(_e) = mgr.remove_data_file(v.as_path()) { +/// # eprintln!("Failed to remove a data file at {:?}", v); +/// # } +/// # } +/// # } +/// # }; +/// # +/// // Create a listener configuration with Queued delivery mode. +/// let listener_conf = notification::Configuration::builder() +/// .delivery_mode(notification::DeliveryMode::Queued) +/// .build(); +/// +/// // Create the cache. +/// let cache = Cache::builder() +/// .max_capacity(100) +/// .time_to_live(Duration::from_secs(2)) +/// // Set the eviction listener with the configuration. +/// .eviction_listener_with_conf(listener, listener_conf) +/// .build(); +/// +/// // Insert an entry to the cache. +/// // ... 
+/// # println!("== try_get_with()"); +/// # let key = "user1"; +/// # let path = cache +/// # .try_get_with(key, || -> anyhow::Result<_> { +/// # let mut mgr = file_mgr +/// # .write() +/// # .map_err(|_e| anyhow::anyhow!("The lock has been poisoned"))?; +/// # let path = mgr +/// # .write_data_file(key, "user data".into()) +/// # .with_context(|| format!("Failed to create a data file"))?; +/// # Ok(path) +/// # }) +/// # .map_err(|e| anyhow!("{}", e))?; +/// # +/// // Read the data file at the path and print the contents. +/// // ... +/// # println!("\n== read_data_file()"); +/// # { +/// # let mgr = file_mgr +/// # .read() +/// # .map_err(|_e| anyhow::anyhow!("The lock has been poisoned"))?; +/// # let contents = mgr +/// # .read_data_file(path.as_path()) +/// # .with_context(|| format!("Failed to read data from {:?}", path))?; +/// # println!("contents: {}", contents); +/// # } +/// # +/// // Sleep for five seconds. +/// // ... +/// # std::thread::sleep(Duration::from_secs(5)); +/// +/// Ok(()) +/// } +/// ``` +/// +/// As you can see, `DataFileManager::write_data_file` method no longer uses the +/// cache key for the file name. Instead, it generates a UUID-based unique file name +/// on each call. This kind of treatment will be needed for `Queued` mode because +/// notifications will be delivered with some delay. +/// +/// For example, a user thread could do the followings: +/// +/// 1. `insert` an entry, and create a file. +/// 2. The entry is evicted due to size constraint: +/// - This will trigger an eviction notification but it will be fired some time +/// later. +/// - The notification listener will remove the file when it is called, but we +/// cannot predict when the call would be made. +/// 3. `insert` the entry again, and create the file again. +/// +/// In `Queued` mode, the notification of the eviction at step 2 can be delivered +/// either before or after the re-`insert` at step 3. If the `write_data_file` method +/// does not generate unique file name on each call and the notification has not been +/// delivered before step 3, the user thread could overwrite the file created at step +/// 1. And then the notification will be delivered and the eviction listener will +/// remove a wrong file created at step 3 (instead of the correct one created at step +/// 1). This will cause the cache entires and the files on the filesystem to become +/// out of sync. +/// +/// Generating unique file names prevents this problem, as the user thread will never +/// overwrite the file created at step 1 and the eviction lister will never remove a +/// wrong file. +/// /// # Thread Safety /// /// All methods provided by the `Cache` are considered thread-safe, and can be safely From e207c38897619be52b8d852a0aa2b296921b7c2a Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Sun, 3 Jul 2022 23:54:33 +0800 Subject: [PATCH 37/44] Support notification on eviction Add the `name` method to `sync` and `future` caches. 
--- .vscode/settings.json | 2 +- src/future/builder.rs | 13 ++++++++++ src/future/cache.rs | 9 +++++++ src/sync/builder.rs | 16 +++++++++++++ src/sync/cache.rs | 9 +++++++ src/sync/segment.rs | 10 ++++++++ src/sync_base/base_cache.rs | 20 +++++++++++++++- src/sync_base/removal_notifier.rs | 40 +++++++++++++++++++++++-------- 8 files changed, 107 insertions(+), 12 deletions(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index b1b0c9f4..a061a0b0 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,5 +1,5 @@ { - "rust-analyzer.cargo.features": ["future", "dash", "unstable-debug-counters"], + "rust-analyzer.cargo.features": ["future", "dash", "logging", "unstable-debug-counters"], "rust-analyzer.server.extraEnv": { "CARGO_TARGET_DIR": "target/ra" }, diff --git a/src/future/builder.rs b/src/future/builder.rs index 4e0ea033..9bade8e2 100644 --- a/src/future/builder.rs +++ b/src/future/builder.rs @@ -54,6 +54,7 @@ use std::{ /// #[must_use] pub struct CacheBuilder { + name: Option, max_capacity: Option, initial_capacity: Option, weigher: Option>, @@ -72,6 +73,7 @@ where { fn default() -> Self { Self { + name: None, max_capacity: None, initial_capacity: None, weigher: None, @@ -110,6 +112,7 @@ where let build_hasher = RandomState::default(); builder_utils::ensure_expirations_or_panic(self.time_to_live, self.time_to_idle); Cache::with_everything( + self.name, self.max_capacity, self.initial_capacity, build_hasher, @@ -135,6 +138,7 @@ where { builder_utils::ensure_expirations_or_panic(self.time_to_live, self.time_to_idle); Cache::with_everything( + self.name, self.max_capacity, self.initial_capacity, hasher, @@ -149,6 +153,15 @@ where } impl CacheBuilder { + /// Sets the name of the cache. Currently the name is used for identification + /// only in logging messages. + pub fn name(self, name: &str) -> Self { + Self { + name: Some(name.to_string()), + ..self + } + } + /// Sets the max capacity of the cache. pub fn max_capacity(self, max_capacity: u64) -> Self { Self { diff --git a/src/future/cache.rs b/src/future/cache.rs index 4d4932a1..775e3f29 100644 --- a/src/future/cache.rs +++ b/src/future/cache.rs @@ -547,6 +547,11 @@ where } impl Cache { + /// Returns cache’s name. + pub fn name(&self) -> Option<&str> { + self.base.name() + } + /// Returns a read-only cache policy of this cache. /// /// At this time, cache policy cannot be modified after cache creation. @@ -633,6 +638,7 @@ where pub fn new(max_capacity: u64) -> Self { let build_hasher = RandomState::default(); Self::with_everything( + None, Some(max_capacity), None, build_hasher, @@ -663,6 +669,7 @@ where // https://rust-lang.github.io/rust-clippy/master/index.html#too_many_arguments #[allow(clippy::too_many_arguments)] pub(crate) fn with_everything( + name: Option, max_capacity: Option, initial_capacity: Option, build_hasher: S, @@ -675,6 +682,7 @@ where ) -> Self { Self { base: BaseCache::new( + name, max_capacity, initial_capacity, build_hasher.clone(), @@ -2687,6 +2695,7 @@ mod tests { // Create a cache with the eviction listener. 
let mut cache = Cache::builder() + .name("My Future Cache") .eviction_listener_with_queued_delivery_mode(listener) .build(); cache.reconfigure_for_testing(); diff --git a/src/sync/builder.rs b/src/sync/builder.rs index c5a7ffc6..1c708c6d 100644 --- a/src/sync/builder.rs +++ b/src/sync/builder.rs @@ -46,6 +46,7 @@ use std::{ /// #[must_use] pub struct CacheBuilder { + name: Option, max_capacity: Option, initial_capacity: Option, num_segments: Option, @@ -65,6 +66,7 @@ where { fn default() -> Self { Self { + name: None, max_capacity: None, initial_capacity: None, num_segments: None, @@ -105,6 +107,7 @@ where assert!(num_segments != 0); CacheBuilder { + name: self.name, max_capacity: self.max_capacity, initial_capacity: self.initial_capacity, num_segments: Some(num_segments), @@ -132,6 +135,7 @@ where let build_hasher = RandomState::default(); builder_utils::ensure_expirations_or_panic(self.time_to_live, self.time_to_idle); Cache::with_everything( + self.name, self.max_capacity, self.initial_capacity, build_hasher, @@ -160,6 +164,7 @@ where { builder_utils::ensure_expirations_or_panic(self.time_to_live, self.time_to_idle); Cache::with_everything( + self.name, self.max_capacity, self.initial_capacity, hasher, @@ -192,6 +197,7 @@ where let build_hasher = RandomState::default(); builder_utils::ensure_expirations_or_panic(self.time_to_live, self.time_to_idle); SegmentedCache::with_everything( + self.name, self.max_capacity, self.initial_capacity, self.num_segments.unwrap(), @@ -221,6 +227,7 @@ where { builder_utils::ensure_expirations_or_panic(self.time_to_live, self.time_to_idle); SegmentedCache::with_everything( + self.name, self.max_capacity, self.initial_capacity, self.num_segments.unwrap(), @@ -236,6 +243,15 @@ where } impl CacheBuilder { + /// Sets the name of the cache. Currently the name is used for identification + /// only in logging messages. + pub fn name(self, name: &str) -> Self { + Self { + name: Some(name.to_string()), + ..self + } + } + /// Sets the max capacity of the cache. pub fn max_capacity(self, max_capacity: u64) -> Self { Self { diff --git a/src/sync/cache.rs b/src/sync/cache.rs index 4f73a2fa..b99e13ef 100644 --- a/src/sync/cache.rs +++ b/src/sync/cache.rs @@ -730,6 +730,11 @@ where } impl Cache { + /// Returns cache’s name. + pub fn name(&self) -> Option<&str> { + self.base.name() + } + /// Returns a read-only cache policy of this cache. /// /// At this time, cache policy cannot be modified after cache creation. @@ -802,6 +807,7 @@ where pub fn new(max_capacity: u64) -> Self { let build_hasher = RandomState::default(); Self::with_everything( + None, Some(max_capacity), None, build_hasher, @@ -832,6 +838,7 @@ where // https://rust-lang.github.io/rust-clippy/master/index.html#too_many_arguments #[allow(clippy::too_many_arguments)] pub(crate) fn with_everything( + name: Option, max_capacity: Option, initial_capacity: Option, build_hasher: S, @@ -844,6 +851,7 @@ where ) -> Self { Self { base: BaseCache::new( + name, max_capacity, initial_capacity, build_hasher.clone(), @@ -2909,6 +2917,7 @@ mod tests { // Create a cache with the eviction listener. 
let mut cache = Cache::builder() + .name("My Sync Cache") .eviction_listener_with_conf(listener, listener_conf) .build(); cache.reconfigure_for_testing(); diff --git a/src/sync/segment.rs b/src/sync/segment.rs index 0efbe8ae..1aa984a8 100644 --- a/src/sync/segment.rs +++ b/src/sync/segment.rs @@ -96,6 +96,7 @@ where pub fn new(max_capacity: u64, num_segments: usize) -> Self { let build_hasher = RandomState::default(); Self::with_everything( + None, Some(max_capacity), None, num_segments, @@ -119,6 +120,11 @@ where } impl SegmentedCache { + /// Returns cache’s name. + pub fn name(&self) -> Option<&str> { + self.inner.segments[0].name() + } + /// Returns a read-only cache policy of this cache. /// /// At this time, cache policy cannot be modified after cache creation. @@ -199,6 +205,7 @@ where /// Panics if `num_segments` is 0. #[allow(clippy::too_many_arguments)] pub(crate) fn with_everything( + name: Option, max_capacity: Option, initial_capacity: Option, num_segments: usize, @@ -212,6 +219,7 @@ where ) -> Self { Self { inner: Arc::new(Inner::new( + name, max_capacity, initial_capacity, num_segments, @@ -578,6 +586,7 @@ where /// Panics if `num_segments` is 0. #[allow(clippy::too_many_arguments)] fn new( + name: Option, max_capacity: Option, initial_capacity: Option, num_segments: usize, @@ -601,6 +610,7 @@ where let segments = (0..actual_num_segments) .map(|_| { Cache::with_everything( + name.clone(), seg_max_capacity, seg_init_capacity, build_hasher.clone(), diff --git a/src/sync_base/base_cache.rs b/src/sync_base/base_cache.rs index 2b8c9d30..bb643a3f 100644 --- a/src/sync_base/base_cache.rs +++ b/src/sync_base/base_cache.rs @@ -84,6 +84,10 @@ impl Drop for BaseCache { } impl BaseCache { + pub(crate) fn name(&self) -> Option<&str> { + self.inner.name() + } + pub(crate) fn policy(&self) -> Policy { self.inner.policy() } @@ -140,6 +144,7 @@ where // https://rust-lang.github.io/rust-clippy/master/index.html#too_many_arguments #[allow(clippy::too_many_arguments)] pub(crate) fn new( + name: Option, max_capacity: Option, initial_capacity: Option, build_hasher: S, @@ -153,6 +158,7 @@ where let (r_snd, r_rcv) = crossbeam_channel::bounded(READ_LOG_SIZE); let (w_snd, w_rcv) = crossbeam_channel::bounded(WRITE_LOG_SIZE); let inner = Arc::new(Inner::new( + name, max_capacity, initial_capacity, build_hasher, @@ -660,6 +666,7 @@ enum AdmissionResult { type CacheStore = crate::cht::SegmentedHashMap, TrioArc>, S>; pub(crate) struct Inner { + name: Option, max_capacity: Option, entry_count: AtomicCell, weighted_size: AtomicCell, @@ -684,6 +691,10 @@ pub(crate) struct Inner { // functions/methods used by BaseCache impl Inner { + fn name(&self) -> Option<&str> { + self.name.as_deref() + } + fn policy(&self) -> Policy { Policy::new(self.max_capacity, 1, self.time_to_live, self.time_to_idle) } @@ -802,6 +813,7 @@ where // https://rust-lang.github.io/rust-clippy/master/index.html#too_many_arguments #[allow(clippy::too_many_arguments)] fn new( + name: Option, max_capacity: Option, initial_capacity: Option, build_hasher: S, @@ -824,7 +836,11 @@ where build_hasher.clone(), ); let (removal_notifier, key_locks) = if let Some(listener) = eviction_listener { - let rn = RemovalNotifier::new(listener, eviction_listener_conf.unwrap_or_default()); + let rn = RemovalNotifier::new( + listener, + eviction_listener_conf.unwrap_or_default(), + name.clone(), + ); if rn.is_blocking() { let kl = KeyLockMap::with_hasher(build_hasher.clone()); (Some(rn), Some(kl)) @@ -836,6 +852,7 @@ where }; Self { + name, max_capacity: 
max_capacity.map(|n| n as u64), entry_count: Default::default(), weighted_size: Default::default(), @@ -2014,6 +2031,7 @@ mod tests { let ensure_sketch_len = |max_capacity, len, name| { let cache = BaseCache::::new( + None, Some(max_capacity), None, RandomState::default(), diff --git a/src/sync_base/removal_notifier.rs b/src/sync_base/removal_notifier.rs index 12aaff60..079a6f54 100644 --- a/src/sync_base/removal_notifier.rs +++ b/src/sync_base/removal_notifier.rs @@ -27,10 +27,18 @@ pub(crate) enum RemovalNotifier { } impl RemovalNotifier { - pub(crate) fn new(listener: EvictionListener, conf: notification::Configuration) -> Self { + pub(crate) fn new( + listener: EvictionListener, + conf: notification::Configuration, + cache_name: Option, + ) -> Self { match conf.delivery_mode() { - DeliveryMode::Immediate => Self::Blocking(BlockingRemovalNotifier::new(listener)), - DeliveryMode::Queued => Self::ThreadPool(ThreadPoolRemovalNotifier::new(listener)), + DeliveryMode::Immediate => { + Self::Blocking(BlockingRemovalNotifier::new(listener, cache_name)) + } + DeliveryMode::Queued => { + Self::ThreadPool(ThreadPoolRemovalNotifier::new(listener, cache_name)) + } } } @@ -81,13 +89,15 @@ impl RemovalNotifier { pub(crate) struct BlockingRemovalNotifier { listener: EvictionListener, is_enabled: AtomicBool, + cache_name: Option, } impl BlockingRemovalNotifier { - fn new(listener: EvictionListener) -> Self { + fn new(listener: EvictionListener, cache_name: Option) -> Self { Self { listener, is_enabled: AtomicBool::new(true), + cache_name, } } @@ -106,7 +116,7 @@ impl BlockingRemovalNotifier { if let Err(_payload) = result { self.is_enabled.store(false, Ordering::Release); #[cfg(feature = "logging")] - log_panic(&*_payload); + log_panic(&*_payload, self.cache_name.as_deref()); } } } @@ -137,13 +147,14 @@ impl Drop for ThreadPoolRemovalNotifier { } impl ThreadPoolRemovalNotifier { - fn new(listener: EvictionListener) -> Self { + fn new(listener: EvictionListener, cache_name: Option) -> Self { let (snd, rcv) = crossbeam_channel::bounded(CHANNEL_CAPACITY); let thread_pool = ThreadPoolRegistry::acquire_pool(PoolName::RemovalNotifier); let state = NotifierState { task_lock: Default::default(), rcv, listener, + cache_name, is_enabled: AtomicBool::new(true), is_running: Default::default(), is_shutting_down: Default::default(), @@ -292,7 +303,7 @@ impl NotificationTask { #[cfg(feature = "logging")] { if let Err(payload) = &result { - log_panic(&**payload); + log_panic(&**payload, self.state.cache_name.as_deref()); } } result @@ -303,6 +314,7 @@ struct NotifierState { task_lock: Mutex<()>, rcv: Receiver>, listener: EvictionListener, + cache_name: Option, is_enabled: AtomicBool, is_running: AtomicBool, is_shutting_down: AtomicBool, @@ -362,7 +374,7 @@ impl RemovedEntries { } #[cfg(feature = "logging")] -fn log_panic(payload: &(dyn std::any::Any + Send + 'static)) { +fn log_panic(payload: &(dyn std::any::Any + Send + 'static), cache_name: Option<&str>) { // Try to downcast the payload into &str or String. // // NOTE: Clippy will complain if we use `if let Some(_)` here. 
@@ -371,9 +383,17 @@ fn log_panic(payload: &(dyn std::any::Any + Send + 'static)) { (payload.downcast_ref::<&str>().map(|s| (*s).into())) .or_else(|| payload.downcast_ref::().map(Into::into)); + let cn = cache_name + .map(|name| format!("[{}] ", name)) + .unwrap_or_default(); + if let Some(m) = message { - log::error!("Eviction listener panicked at '{}'", m); + log::error!( + "{}Disabled the eviction listener because it panicked at '{}'", + cn, + m + ); } else { - log::error!("Eviction listener panicked"); + log::error!("{}Disabled the eviction listener because it panicked", cn); } } From 5e0b9789812b7d47f8b32576c1dc86cac2b27047 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Mon, 4 Jul 2022 08:34:16 +0800 Subject: [PATCH 38/44] Support notification on eviction Write the documentation. --- src/future/cache.rs | 10 ++++++---- src/notification.rs | 33 +++++++++++++++++++-------------- src/sync/cache.rs | 37 +++++++++++++++++++------------------ 3 files changed, 44 insertions(+), 36 deletions(-) diff --git a/src/future/cache.rs b/src/future/cache.rs index 775e3f29..f0c5c31c 100644 --- a/src/future/cache.rs +++ b/src/future/cache.rs @@ -258,7 +258,7 @@ use std::{ /// entry was evicted. /// /// An eviction listener can be used to keep other data structures in sync with the -/// cache. +/// cache, for example. /// /// The following example demonstrates how to use an eviction listener with /// time-to-live expiration to manage the lifecycle of temporary files on a @@ -433,9 +433,11 @@ use std::{ /// `info`, ...): /// - If you are using the `env_logger` crate, you can achieve this by setting /// `RUST_LOG` environment variable to `moka=error`. -/// 3. If you have more than one cache, you may want to set a distinct name for each -/// cache by using builder's [`name`](#method.name) method. (TODO: Add the `name` -/// method to the builder) +/// 3. If you have more than one caches, you may want to set a distinct name for each +/// cache by using cache builder's [`name`][builder-name-method] method. The name +/// will appear in the log. +/// +/// [builder-name-method]: ./struct.CacheBuilder.html#method.name /// /// ## Delivery Modes for Eviction Listener /// diff --git a/src/notification.rs b/src/notification.rs index d2b4effe..2e9391e3 100644 --- a/src/notification.rs +++ b/src/notification.rs @@ -1,3 +1,5 @@ +//! Common data types for notifications. + use std::sync::Arc; pub(crate) type EvictionListener = @@ -55,27 +57,30 @@ impl ConfigurationBuilder { /// Specifies how and when an eviction notifications should be delivered to an /// eviction listener. /// -/// For more details, see [the document][delivery-mode-doc] for `sync::CacheBuilder`. +/// For more details, see [the document][delivery-mode-doc] of `sync::Cache`. /// -/// [delivery-mode-doc]: ./sync/struct.CacheBuilder.html#delivery-modes-for-eviction-listener +/// [delivery-mode-doc]: ../sync/struct.Cache.html#delivery-modes-for-eviction-listener #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum DeliveryMode { - /// When the `Immediate` mode is used, a notification should be delivered to the - /// listener immediately after an entry is evicted. This mode also guarantees - /// that cache write operations such and `insert`, `get_with` and `invalidate` - /// and eviction notifications for a given cache key are ordered by the time when - /// they occurred. + /// With mode, a notification should be delivered to the listener immediately + /// after an entry was evicted. 
It also guarantees that eviction notifications + /// and cache write operations such and `insert`, `get_with` and `invalidate` for + /// a given cache key are ordered by the time when they occurred. + /// + /// To guarantee the order, cache maintains key-level lock, which will reduce + /// concurrent write performance. /// - /// To guarantee the order, it adds some performance overheads to cache write - /// operations. Use this mode when the order is more import than the write - /// performance. + /// Use this mode when the order is more import than the write performance. Immediate, - /// When tne `Queued` mode is used, a notification will be delivered to the - /// listener some time after an entry was evicted. Therefore, it does not - /// preserve the order of write operations and eviction notifications. + /// With this mode, a notification will be delivered to the listener some time + /// after an entry was evicted. Therefore, it does not preserve the order of + /// eviction notifications and write operations. + /// + /// On the other hand, cache does not maintain key-level lock, so there will be + /// no overhead on write performance. /// /// Use this mode when write performance is more important than preserving the - /// order of write operations and eviction notifications. + /// order of eviction notifications and write operations. Queued, } diff --git a/src/sync/cache.rs b/src/sync/cache.rs index b99e13ef..79cc2f4e 100644 --- a/src/sync/cache.rs +++ b/src/sync/cache.rs @@ -57,8 +57,8 @@ use std::{ /// # Example: `insert`, `get` and `invalidate` /// /// Cache entries are manually added using [`insert`](#method.insert) or -/// [`get_with`](#method.get_with) methods, and are stored in -/// the cache until either evicted or manually invalidated. +/// [`get_with`](#method.get_with) methods, and are stored in the cache until either +/// evicted or manually invalidated. /// /// Here's an example of reading and updating a cache by using multiple threads: /// @@ -117,8 +117,7 @@ use std::{ /// /// If you want to atomically initialize and insert a value when the key is not /// present, you might want to check other insertion methods -/// [`get_with`](#method.get_with) and -/// [`try_get_with`](#method.try_get_with). +/// [`get_with`](#method.get_with) and [`try_get_with`](#method.try_get_with). /// /// # Avoiding to clone the value at `get` /// @@ -220,7 +219,7 @@ use std::{ /// entry was evicted. /// /// An eviction listener can be used to keep other data structures in sync with the -/// cache. +/// cache, for example. /// /// The following example demonstrates how to use an eviction listener with /// time-to-live expiration to manage the lifecycle of temporary files on a @@ -394,9 +393,11 @@ use std::{ /// `info`, ...): /// - If you are using the `env_logger` crate, you can achieve this by setting /// `RUST_LOG` environment variable to `moka=error`. -/// 3. If you have more than one cache, you may want to set a distinct name for each -/// cache by using builder's [`name`](#method.name) method. (TODO: Add the `name` -/// method to the builder) +/// 3. If you have more than one caches, you may want to set a distinct name for each +/// cache by using cache builder's [`name`][builder-name-method] method. The name +/// will appear in the log. 
+/// +/// [builder-name-method]: ./struct.CacheBuilder.html#method.name /// /// ## Delivery Modes for Eviction Listener /// @@ -415,7 +416,7 @@ use std::{ /// /// This mode has the following characteristics: /// -/// - The listener is called immediately after an entry is evicted. +/// - The listener is called immediately after an entry was evicted. /// - The listener is called by the thread who evicted the entry: /// - The calling thread can be a background eviction thread or a user thread /// invoking a cache write operation such as `insert`, `get_with` or @@ -445,7 +446,7 @@ use std::{ /// example was using it implicitly. /// /// The following is the same example but modified for the `Queued` delivery mode. -/// (Showing only changed lines) +/// (Showing changed lines only) /// /// ```rust /// // Cargo.toml @@ -622,11 +623,11 @@ use std::{ /// In `Queued` mode, the notification of the eviction at step 2 can be delivered /// either before or after the re-`insert` at step 3. If the `write_data_file` method /// does not generate unique file name on each call and the notification has not been -/// delivered before step 3, the user thread could overwrite the file created at step -/// 1. And then the notification will be delivered and the eviction listener will -/// remove a wrong file created at step 3 (instead of the correct one created at step -/// 1). This will cause the cache entires and the files on the filesystem to become -/// out of sync. +/// delivered before step 3, the user thread could overwrite the file created at +/// step 1. And then the notification will be delivered and the eviction listener +/// will remove a wrong file created at step 3 (instead of the correct one created at +/// step 1). This will cause the cache entires and the files on the filesystem to +/// become out of sync. /// /// Generating unique file names prevents this problem, as the user thread will never /// overwrite the file created at step 1 and the eviction lister will never remove a @@ -667,9 +668,9 @@ use std::{ /// protect against attacks such as HashDoS. /// /// The hashing algorithm can be replaced on a per-`Cache` basis using the -/// [`build_with_hasher`][build-with-hasher-method] method of the -/// `CacheBuilder`. Many alternative algorithms are available on crates.io, such -/// as the [aHash][ahash-crate] crate. +/// [`build_with_hasher`][build-with-hasher-method] method of the `CacheBuilder`. +/// Many alternative algorithms are available on crates.io, such as the +/// [aHash][ahash-crate] crate. /// /// [build-with-hasher-method]: ./struct.CacheBuilder.html#method.build_with_hasher /// [ahash-crate]: https://crates.io/crates/ahash From 6a5b974937c938cae02aec4919157090124f6b97 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Mon, 4 Jul 2022 20:44:26 +0800 Subject: [PATCH 39/44] Support notification on eviction Rename `sync_base::removal_notifier` module to `notification::notifier`. --- src/notification.rs | 2 ++ .../removal_notifier.rs => notification/notifier.rs} | 0 src/sync_base.rs | 1 - src/sync_base/base_cache.rs | 8 +++++--- 4 files changed, 7 insertions(+), 4 deletions(-) rename src/{sync_base/removal_notifier.rs => notification/notifier.rs} (100%) diff --git a/src/notification.rs b/src/notification.rs index 2e9391e3..ec9728e5 100644 --- a/src/notification.rs +++ b/src/notification.rs @@ -1,5 +1,7 @@ //! Common data types for notifications. 
+pub(crate) mod notifier; + use std::sync::Arc; pub(crate) type EvictionListener = diff --git a/src/sync_base/removal_notifier.rs b/src/notification/notifier.rs similarity index 100% rename from src/sync_base/removal_notifier.rs rename to src/notification/notifier.rs diff --git a/src/sync_base.rs b/src/sync_base.rs index f41aa74e..3ea511f7 100644 --- a/src/sync_base.rs +++ b/src/sync_base.rs @@ -2,7 +2,6 @@ pub(crate) mod base_cache; mod invalidator; pub(crate) mod iter; mod key_lock; -mod removal_notifier; /// The type of the unique ID to identify a predicate used by /// [`Cache#invalidate_entries_if`][invalidate-if] method. diff --git a/src/sync_base/base_cache.rs b/src/sync_base/base_cache.rs index bb643a3f..df16cd2f 100644 --- a/src/sync_base/base_cache.rs +++ b/src/sync_base/base_cache.rs @@ -2,7 +2,6 @@ use super::{ invalidator::{GetOrRemoveEntry, InvalidationResult, Invalidator, KeyDateLite, PredicateFun}, iter::ScanningGet, key_lock::{KeyLock, KeyLockMap}, - removal_notifier::RemovedEntry, PredicateId, }; @@ -26,8 +25,11 @@ use crate::{ time::{CheckedTimeOps, Clock, Instant}, CacheRegion, }, - notification::{self, EvictionListener, RemovalCause}, - sync_base::removal_notifier::RemovalNotifier, + notification::{ + self, + notifier::{RemovalNotifier, RemovedEntry}, + EvictionListener, RemovalCause, + }, Policy, PredicateError, }; From 823f7e339a7839f5c6bb8b0c92652c7d047a1dbc Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Mon, 4 Jul 2022 21:19:33 +0800 Subject: [PATCH 40/44] Support notification on eviction Write the documentation. --- src/common/error.rs | 2 +- src/future.rs | 2 +- src/future/cache.rs | 12 +++++++++++- src/sync_base.rs | 2 +- 4 files changed, 14 insertions(+), 4 deletions(-) diff --git a/src/common/error.rs b/src/common/error.rs index 7fa5a3d6..526dd8d1 100644 --- a/src/common/error.rs +++ b/src/common/error.rs @@ -1,5 +1,5 @@ /// The error type for the functionalities around -/// [`Cache#invalidate_entries_if`][invalidate-if] method. +/// [`Cache::invalidate_entries_if`][invalidate-if] method. /// /// [invalidate-if]: ./sync/struct.Cache.html#method.invalidate_entries_if #[derive(thiserror::Error, Debug)] diff --git a/src/future.rs b/src/future.rs index 04319ccf..0c70334c 100644 --- a/src/future.rs +++ b/src/future.rs @@ -15,7 +15,7 @@ pub use { }; /// The type of the unique ID to identify a predicate used by -/// [`Cache#invalidate_entries_if`][invalidate-if] method. +/// [`Cache::invalidate_entries_if`][invalidate-if] method. /// /// A `PredicateId` is a `String` of UUID (version 4). /// diff --git a/src/future/cache.rs b/src/future/cache.rs index f0c5c31c..6816fb6a 100644 --- a/src/future/cache.rs +++ b/src/future/cache.rs @@ -445,9 +445,19 @@ use std::{ /// notifications should be delivered to an eviction listener. Currently, the /// `future::Cache` supports only one delivery mode: `Queued` mode. /// +/// A future version of `future::Cache` will support `Immediate` mode, which will be +/// easier to use in many use cases than queued mode. Unlike the `future::Cache`, +/// the `sync::Cache` already supports it. +/// +/// Once `future::Cache` supports the immediate mode, the `eviction_listener` and +/// `eviction_listener_with_conf` methods will be added to the +/// `future::CacheBuilder`. The former will use the immediate mode, and the latter +/// will take a custom configurations to specify the queued mode. The current method +/// `eviction_listener_with_queued_delivery_mode` will be deprecated. 
+/// /// For more details about the delivery modes, see [this section][sync-delivery-modes] /// of `sync::Cache` documentation. -/// +/// /// [delivery-mode]: ../notification/enum.DeliveryMode.html /// [sync-delivery-modes]: ../sync/struct.Cache.html#delivery-modes-for-eviction-listener /// diff --git a/src/sync_base.rs b/src/sync_base.rs index 3ea511f7..8ca085f9 100644 --- a/src/sync_base.rs +++ b/src/sync_base.rs @@ -4,7 +4,7 @@ pub(crate) mod iter; mod key_lock; /// The type of the unique ID to identify a predicate used by -/// [`Cache#invalidate_entries_if`][invalidate-if] method. +/// [`Cache::invalidate_entries_if`][invalidate-if] method. /// /// A `PredicateId` is a `String` of UUID (version 4). /// From fc8d7ee7c21ec70c559796a02d33640f98d68502 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Mon, 4 Jul 2022 21:42:00 +0800 Subject: [PATCH 41/44] Support notification on eviction Fix "unused field" warnings in `notification::notifier` module. --- src/notification/notifier.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/notification/notifier.rs b/src/notification/notifier.rs index 079a6f54..884ec247 100644 --- a/src/notification/notifier.rs +++ b/src/notification/notifier.rs @@ -89,15 +89,17 @@ impl RemovalNotifier { pub(crate) struct BlockingRemovalNotifier { listener: EvictionListener, is_enabled: AtomicBool, + #[cfg(feature = "logging")] cache_name: Option, } impl BlockingRemovalNotifier { - fn new(listener: EvictionListener, cache_name: Option) -> Self { + fn new(listener: EvictionListener, _cache_name: Option) -> Self { Self { listener, is_enabled: AtomicBool::new(true), - cache_name, + #[cfg(feature = "logging")] + cache_name: _cache_name, } } @@ -147,14 +149,15 @@ impl Drop for ThreadPoolRemovalNotifier { } impl ThreadPoolRemovalNotifier { - fn new(listener: EvictionListener, cache_name: Option) -> Self { + fn new(listener: EvictionListener, _cache_name: Option) -> Self { let (snd, rcv) = crossbeam_channel::bounded(CHANNEL_CAPACITY); let thread_pool = ThreadPoolRegistry::acquire_pool(PoolName::RemovalNotifier); let state = NotifierState { task_lock: Default::default(), rcv, listener, - cache_name, + #[cfg(feature = "logging")] + cache_name: _cache_name, is_enabled: AtomicBool::new(true), is_running: Default::default(), is_shutting_down: Default::default(), @@ -314,6 +317,7 @@ struct NotifierState { task_lock: Mutex<()>, rcv: Receiver>, listener: EvictionListener, + #[cfg(feature = "logging")] cache_name: Option, is_enabled: AtomicBool, is_running: AtomicBool, From 2aff444fa88861e60cb9cff020e197b248d6c7ab Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Mon, 4 Jul 2022 21:59:56 +0800 Subject: [PATCH 42/44] Support notification on eviction Fix some typos in the documentation. --- src/notification.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/notification.rs b/src/notification.rs index ec9728e5..672eb7f8 100644 --- a/src/notification.rs +++ b/src/notification.rs @@ -56,7 +56,7 @@ impl ConfigurationBuilder { } } -/// Specifies how and when an eviction notifications should be delivered to an +/// Specifies how and when an eviction notification should be delivered to an /// eviction listener. /// /// For more details, see [the document][delivery-mode-doc] of `sync::Cache`. 
@@ -64,10 +64,11 @@ impl ConfigurationBuilder { /// [delivery-mode-doc]: ../sync/struct.Cache.html#delivery-modes-for-eviction-listener #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum DeliveryMode { - /// With mode, a notification should be delivered to the listener immediately - /// after an entry was evicted. It also guarantees that eviction notifications - /// and cache write operations such and `insert`, `get_with` and `invalidate` for - /// a given cache key are ordered by the time when they occurred. + /// With this mode, a notification should be delivered to the listener + /// immediately after an entry was evicted. It also guarantees that eviction + /// notifications and cache write operations such and `insert`, `get_with` and + /// `invalidate` for a given cache key are ordered by the time when they + /// occurred. /// /// To guarantee the order, cache maintains key-level lock, which will reduce /// concurrent write performance. From 317b0abd7b44bffd76b7f1039937e7288464720f Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Tue, 5 Jul 2022 06:51:36 +0800 Subject: [PATCH 43/44] Support notification on eviction Remove unused function and struct. --- src/future/cache.rs | 2 +- src/notification/notifier.rs | 4 ---- src/sync_base/base_cache.rs | 4 ---- 3 files changed, 1 insertion(+), 9 deletions(-) diff --git a/src/future/cache.rs b/src/future/cache.rs index 6816fb6a..2b6240eb 100644 --- a/src/future/cache.rs +++ b/src/future/cache.rs @@ -442,7 +442,7 @@ use std::{ /// ## Delivery Modes for Eviction Listener /// /// The [`DeliveryMode`][delivery-mode] specifies how and when an eviction -/// notifications should be delivered to an eviction listener. Currently, the +/// notification should be delivered to an eviction listener. Currently, the /// `future::Cache` supports only one delivery mode: `Queued` mode. /// /// A future version of `future::Cache` will support `Immediate` mode, which will be diff --git a/src/notification/notifier.rs b/src/notification/notifier.rs index 884ec247..5c2c0940 100644 --- a/src/notification/notifier.rs +++ b/src/notification/notifier.rs @@ -123,10 +123,6 @@ impl BlockingRemovalNotifier { } } -// pub(crate) struct NonBlockingRemovalNotifier { -// _phantom: std::marker::PhantomData<(K, V)>, -// } - pub(crate) struct ThreadPoolRemovalNotifier { snd: Sender>, state: Arc>, diff --git a/src/sync_base/base_cache.rs b/src/sync_base/base_cache.rs index df16cd2f..b659bde4 100644 --- a/src/sync_base/base_cache.rs +++ b/src/sync_base/base_cache.rs @@ -566,10 +566,6 @@ impl<'a, K, V> EvictionState<'a, K, V> { self.notifier.is_some() } - // fn is_batch_notification_supported(&self) -> bool { - // self.removed_entries.is_some() - // } - fn add_removed_entry( &mut self, key: Arc, From a158e82122c33e9e176f3c14a2fc3782077d113f Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Tue, 5 Jul 2022 07:24:39 +0800 Subject: [PATCH 44/44] Update the change log (v0.9.0) --- CHANGELOG.md | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8b4dc2ce..79a297a5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,25 @@ # Moka Cache — Change Log +## Version 0.9.0 + +### Added + +- Add support for eviction listener to the following caches ([#145][gh-pull-0145]). + Eviction listener is a callback function that will be called when an entry is + removed from the cache. + - `sync::Cache` + - `sync::SegmentedCache` + - `future::Cache` +- Add a crate feature `sync` for enabling and disabling `sync` caches. 
+ ([#143][gh-pull-0143]) + - This feature is enabled by default. + - When using experimental `dash` cache, opting out of `sync` will reduce the + number of dependencies. +- Add a crate feature `logging` to enable optional log crate dependency. + ([#159][gh-pull-0159]) + - Currently log will be emitted only when an eviction listener has panicked. + + ## Version 0.8.6 ### Fixed @@ -392,6 +412,9 @@ The minimum supported Rust version (MSRV) is now 1.51.0 (2021-03-25). [gh-issue-0034]: https://github.com/moka-rs/moka/issues/34/ [gh-issue-0031]: https://github.com/moka-rs/moka/issues/31/ +[gh-pull-0159]: https://github.com/moka-rs/moka/pull/159/ +[gh-pull-0145]: https://github.com/moka-rs/moka/pull/145/ +[gh-pull-0143]: https://github.com/moka-rs/moka/pull/143/ [gh-pull-0138]: https://github.com/moka-rs/moka/pull/138/ [gh-pull-0137]: https://github.com/moka-rs/moka/pull/137/ [gh-pull-0133]: https://github.com/moka-rs/moka/pull/133/