diff --git a/.config/nextest.toml b/.config/nextest.toml
new file mode 100644
index 0000000..ce8c7c0
--- /dev/null
+++ b/.config/nextest.toml
@@ -0,0 +1,3 @@
+[profile.default-miri]
+test-threads = "num-cpus"
+slow-timeout = { period = "120s", terminate-after = 2 }
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index aad2d47..7b32e9a 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -37,7 +37,7 @@ jobs:
         run: rustup target add ${{ env.NO_STD_TARGET }}
       - name: Install cargo-hack
         uses: taiki-e/install-action@cargo-hack
-      - name: Build with no_std compatible features
+      - name: Build on no_std environment
         run: >
           cargo hack build
           --target ${{ env.NO_STD_TARGET }}
@@ -54,17 +54,18 @@ jobs:
       - name: Checkout repository
         uses: actions/checkout@v3
       - name: Install Rust 1.65.0
-        # NOTE: We actually installing the first nightly 1.66.0. It should be
-        # equivalent to stable 1.65.0. We need the nightly version to use the
+        # NOTE: We are actually installing the first nightly 1.66.0. It should
+        # be equivalent to stable 1.65.0. We need the nightly version to use the
         # sparse registry feature. This massively improves the index download.
+        # Link: https://blog.rust-lang.org/2022/06/22/sparse-registry-testing.html
         run: rustup toolchain install nightly-2022-09-18
       - name: Set Rust 1.65.0 as default
         run: rustup default nightly-2022-09-18
       - name: Check MSRV
         run: cargo check --all-features

-  docs:
-    name: Docs
+  docsrs:
+    name: Build doc
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
@@ -78,6 +79,17 @@ jobs:
           RUSTDOCFLAGS: --cfg docsrs -D warnings
         run: cargo doc --all-features

+  doc:
+    name: Test doc
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v3
+      - name: Install Rust stable
+        run: rustup toolchain install stable
+      - name: Run doc snippets
+        run: cargo test --doc --all-features
+
   examples:
     name: Examples
     runs-on: ubuntu-latest
@@ -109,6 +121,10 @@ jobs:
         env:
           RUSTFLAGS: -D warnings -D clippy::pedantic -D clippy::nursery
         run: cargo hack clippy --feature-powerset --no-dev-deps
+      - name: Lint test profile
+        env:
+          RUSTFLAGS: -D warnings -D clippy::pedantic -D clippy::nursery
+        run: cargo hack clippy --profile test --feature-powerset --no-dev-deps
       - name: Lint loom
         env:
           RUSTFLAGS: --cfg loom -D warnings -D clippy::pedantic -D clippy::nursery
@@ -146,8 +162,11 @@ jobs:
         run: rustup toolchain install nightly --component miri
       - name: Set Rust nightly as default
         run: rustup default nightly
+      # NOTE: Nextest is configured to run Miri against `num-cpus` threads.
+      - name: Install latest nextest release
+        uses: taiki-e/install-action@nextest
       - name: Miri test
-        run: cargo miri test --all-features
+        run: cargo miri nextest run --all-features

   loom:
     name: Loom
diff --git a/.github/workflows/semver.yml b/.github/workflows/semver.yml
new file mode 100644
index 0000000..e39eba2
--- /dev/null
+++ b/.github/workflows/semver.yml
@@ -0,0 +1,19 @@
+name: Semver
+
+on:
+  workflow_dispatch:
+
+env:
+  CARGO_TERM_COLOR: always
+
+jobs:
+  semver:
+    name: Check semver
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v3
+      - name: Install Rust stable
+        run: rustup toolchain install stable
+      - name: Check semver violations
+        uses: obi1kenobi/cargo-semver-checks-action@v2
diff --git a/Cargo.toml b/Cargo.toml
index 3fc8146..dbd8bae 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,7 +1,7 @@
 [package]
 description = """
 An implementation of Mellor-Crummey and Scott contention-free
-spin-lock for mutual exclusion, referred to as MCS lock.
+lock for mutual exclusion, referred to as MCS lock.
 """
 name = "mcslock"
 version = "0.3.0"
@@ -13,29 +13,32 @@
 readme = "README.md"
 documentation = "https://docs.rs/mcslock"
 repository = "https://github.com/pedromfedricci/mcslock"
 authors = ["Pedro de Matos Fedricci "]
-categories = ["no-std", "concurrency"]
-keywords = ["no_std", "mutex", "spin-lock", "mcs-lock"]
+categories = ["algorithms", "concurrency", "no-std", "no-std::no-alloc"]
+keywords = ["mutex", "no_std", "spinlock", "synchronization"]

 [features]
-# NOTE: Features `yield` and `thread_local` require std.
 yield = []
 thread_local = []
 barging = []
-# NOTE: The `dep:` syntax requires Rust 1.60.
-lock_api = ["barging", "dep:lock_api"]
+lock_api = ["dep:lock_api"]

 [dependencies.lock_api]
 version = "0.4"
 default-features = false
 optional = true

-[target.'cfg(loom)'.dev-dependencies]
-loom = { version = "0.7" }
+[target.'cfg(loom)'.dev-dependencies.loom]
+version = "0.7"

 [package.metadata.docs.rs]
 all-features = true
 rustdoc-args = ["--cfg", "docsrs"]

+[lints.rust.unexpected_cfgs]
+level = "warn"
+check-cfg = ["cfg(loom)", "cfg(tarpaulin)", "cfg(tarpaulin_include)"]
+
 [[example]]
 name = "barging"
 required-features = ["barging"]
diff --git a/Makefile.toml b/Makefile.toml
index 7e1064d..d65e2b1 100644
--- a/Makefile.toml
+++ b/Makefile.toml
@@ -23,16 +23,25 @@
 args = ["rustdoc", "--all-features", "--open", "--", "--default-theme", "ayu"]

 # Check MSRV.
 [tasks.msrv]
-# NOTE: We actually installing the first nightly 1.66.0. It should be
-# equivalent to stable 1.65.0. We need the nightly version to use the
-# sparse registry feature. This massively improves the index download.
+# NOTE: We are actually installing the first nightly 1.66.0. It should be
+# equivalent to stable 1.65.0. We need the nightly version to use the sparse
+# registry feature. This massively improves the index download.
+# Link: https://blog.rust-lang.org/2022/06/22/sparse-registry-testing.html
-toolchain = "nightly-2022-09-17"
+toolchain = "nightly-2022-09-18"
 command = "cargo"
 env = { "CARGO_UNSTABLE_SPARSE_REGISTRY" = "true" }
-# TODO: add `--all` for benches.
 args = ["check", "--all-features"]

+# Check semver violations.
+[tasks.semver]
+command = "cargo"
+args = ["semver-checks", "${@}"]
+
+# Run all documentation snippets.
+[tasks.doc-test]
+command = "cargo"
+args = ["test", "--doc", "--all-features"]
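For reference, the new `doc-test` task simply forwards to `cargo test --doc --all-features`, which compiles and runs every documentation snippet in the crate. A minimal sketch of the kind of doctest it exercises, using the public `raw::spins::Mutex` API shown in the README changes below:

```rust
use mcslock::raw::{spins::Mutex, MutexNode};

let mutex = Mutex::new(0);
// Raw locking APIs require exclusive access to a queue node.
let mut node = MutexNode::new();
*mutex.lock(&mut node) += 1;
// The guard returned by `lock` is dropped at the end of the statement
// above, so the same node can be reused for the next acquisition.
assert_eq!(*mutex.lock(&mut node), 1);
```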
 # Lint all feature combinations with cargo-hack.
 [tasks.lint]
 command = "cargo"
@@ -46,7 +55,7 @@
 env = { "RUSTFLAGS" = "${CLIPPY_FLAGS}" }
 args = ["run", "--example", "${@}", "--all-features"]

 # Lint all feature combinations with cargo-hack on test profile.
-[tasks.lint-test]
+[tasks.test-lint]
 command = "cargo"
 env = { "RUSTFLAGS" = "${CLIPPY_FLAGS}" }
 args = ["hack", "clippy", "--profile", "test", "--feature-powerset",
@@ -58,7 +67,7 @@
 toolchain = "nightly"
 install_crate = { rustup_component_name = "miri" }
 command = "cargo"
-args = ["miri", "test", "--all-features", "${@}"]
+args = ["miri", "nextest", "run", "--all-features", "${@}"]

 # Check code coverage with tarpaulin (all features).
 [tasks.tarpaulin]
diff --git a/README.md b/README.md
index ab854df..14552e9 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-# A simple and correct implementation of MCS lock
+# A simple and correct implementation of the MCS lock

 [![MIT][mit-badge]][mit]
 [![Apache 2.0][apache2-badge]][apache2]
@@ -8,9 +8,8 @@
 [![Codecov][codecov-badge]][codecov]
 ![No_std][no_std-badge]

-MCS lock is a List-Based Queuing Lock that avoids network contention by
-having threads spin on local memory locations. The main properties of this
-mechanism are:
+MCS lock is a List-Based Queuing Lock that avoids network contention by having
+threads spin on local memory locations. The main properties of this mechanism are:

 - guarantees FIFO ordering of lock acquisitions;
 - spins on locally-accessible flag variables only;
@@ -18,16 +17,16 @@
 - works equally well (requiring only O(1) network transactions per lock
   acquisition) on machines with and without coherent caches.

-This algorithm and several others were introduced by [Mellor-Crummey and Scott] paper.
-And a simpler correctness proof of the MCS lock was proposed by [Johnson and Harathi].
+This algorithm and several others were introduced in the [Mellor-Crummey and Scott]
+paper. A simpler correctness proof of the MCS lock was proposed by
+[Johnson and Harathi].

-## Use cases
+## Spinlock use cases

 It is worth mentioning that [spinlocks are usually not what you want]. The
-majority of use cases are well covered by OS-based mutexes like
-[`std::sync::Mutex`] or [`parking_lot::Mutex`]. These implementations will notify
-the system that the waiting thread should be parked, freeing the processor to
-work on something else.
+majority of use cases are well covered by OS-based mutexes like [`std::sync::Mutex`]
+and [`parking_lot::Mutex`]. These implementations will notify the system that the
+waiting thread should be parked, freeing the processor to work on something else.

 Spinlocks are only efficient in very few circumstances where the overhead
 of context switching or process rescheduling is greater than busy waiting
@@ -52,7 +51,7 @@
 Or add an entry under the `[dependencies]` section in your `Cargo.toml`:

 ```toml
 [dependencies]
 # Available features: `yield`, `barging`, `thread_local` and `lock_api`.
-mcslock = { version = "0.3", features = ["barging"] }
+mcslock = { version = "0.3", features = ["thread_local"] }
 ```

 ## Documentation
@@ -64,18 +63,19 @@
 locally with the following command:

 ```bash
 RUSTDOCFLAGS="--cfg docsrs" cargo +nightly doc --all-features --open
 ```

-## Raw MCS lock
+## Locking with a raw MCS spinlock

 This implementation operates under FIFO. Raw locking APIs require exclusive
 access to a locally accessible queue node. This node is represented by the
-[`MutexNode`] type. Callers are responsible for instantiating the queue nodes
-themselves. This implementation is `no_std` compatible. See [`raw`] module for
-more information.
+[`raw::MutexNode`] type. Callers are responsible for instantiating the queue
+nodes themselves. This implementation is `no_std` compatible. See the [`raw`]
+module for more information.

 ```rust
 use std::sync::Arc;
 use std::thread;

+// `spins::Mutex` simply spins during contention.
 use mcslock::raw::{spins::Mutex, MutexNode};

 fn main() {
@@ -84,22 +84,24 @@
     thread::spawn(move || {
         // A queue node must be mutably accessible.
+        // Critical section must be defined as a closure.
         let mut node = MutexNode::new();
         *c_mutex.lock(&mut node) = 10;
     })
     .join().expect("thread::spawn failed");

     // A queue node must be mutably accessible.
+    // Critical section must be defined as a closure.
     let mut node = MutexNode::new();
     assert_eq!(*mutex.try_lock(&mut node).unwrap(), 10);
 }
 ```

-## Thread local MCS queue nodes
+## Thread local queue nodes

 Enables [`raw::Mutex`] locking APIs that operate over queue nodes that are
 stored in thread local storage. These locking APIs require a static
-reference to a [`LocalMutexNode`] key. Keys must be generated by the
+reference to a [`raw::LocalMutexNode`] key. Keys must be generated by the
 [`thread_local_node!`] macro. Thread local nodes are not `no_std` compatible
 and can be enabled through the `thread_local` feature.

@@ -107,6 +109,7 @@
 use std::sync::Arc;
 use std::thread;

+// `spins::Mutex` simply spins during contention.
 use mcslock::raw::spins::Mutex;

 // Requires `thread_local` feature.
@@ -117,31 +120,32 @@
 fn main() {
     let c_mutex = Arc::clone(&mutex);

     thread::spawn(move || {
-        // Local nodes handles are provided by reference.
-        // Critical section must be defined as closure.
-        c_mutex.lock_with_local(&NODE, |mut guard| *guard = 10);
+        // Local node handles are provided by reference.
+        // Critical section must be defined as a closure.
+        c_mutex.lock_with_local_then(&NODE, |data| *data = 10);
     })
     .join().expect("thread::spawn failed");

-    // Local nodes handles are provided by reference.
-    // Critical section must be defined as closure.
-    assert_eq!(mutex.try_lock_with_local(&NODE, |g| *g.unwrap()), 10);
+    // Local node handles are provided by reference.
+    // Critical section must be defined as a closure.
+    assert_eq!(mutex.try_lock_with_local_then(&NODE, |g| *g.unwrap()), 10);
 }
 ```

-## Barging MCS lock
+## Locking with a barging MCS spinlock

 This implementation will have non-waiting threads race for the lock against
 the front of the waiting queue thread, which means it is an unfair lock.
 This implementation is suitable for `no_std` environments, and the locking
-APIs are compatible with the [lock_api] crate. See [`barging`] and [`lock_api`]
-modules for more information.
+APIs are compatible with the [lock_api] crate. See the [`barging`] and
+[`barging::lock_api`] modules for more information.

 ```rust
 use std::sync::Arc;
 use std::thread;

 // Requires `barging` feature.
+// `spins::backoff::Mutex` spins with exponential backoff during contention.
 use mcslock::barging::spins::backoff::Mutex;

 fn main() {
@@ -171,28 +175,27 @@
 of busy-waiting during lock acquisitions and releases, this will call the OS
 scheduler. This may cause a context switch, so you may not want to enable
 this feature if your intention is to actually do optimistic spinning. The
 default implementation calls [`core::hint::spin_loop`], which in fact
-just simply busy-waits.
+simply busy-waits. This feature **is not** `no_std` compatible.
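Since the relax policy is selected purely at the type level, opting into yielding is a one-line change at the use site. A minimal sketch, assuming the `yield` feature is enabled and using the `raw::yields` alias it provides:

```rust
// Requires the `yield` feature, so this is std-only code.
use mcslock::raw::{yields::Mutex, MutexNode};

let mutex = Mutex::new(0);
let mut node = MutexNode::new();
// Under contention, waiters call `std::thread::yield_now` instead of
// busy-waiting on `core::hint::spin_loop`.
*mutex.lock(&mut node) = 10;
assert_eq!(*mutex.try_lock(&mut node).unwrap(), 10);
```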
 ### thread_local

-The `thread_local` feature enables [`raw::Mutex`] locking APIs that operate
-over queue nodes that are stored at the thread local storage. These locking APIs
-require a static reference to a [`LocalMutexNode`] key. Keys must be generated
-by the [`thread_local_node!`] macro. This feature is not `no_std` compatible.
+The `thread_local` feature enables [`raw::Mutex`] locking APIs that operate over
+queue nodes that are stored in thread local storage. These locking APIs
+require a static reference to a [`raw::LocalMutexNode`] key. Keys must be generated
+by the [`thread_local_node!`] macro. This feature **is not** `no_std` compatible.

 ### barging

-The `barging` feature provides locking APIs that are compatible with the
-[lock_api] crate. It does not require node allocations from the caller,
-and it is suitable for `no_std` environments. This implementation is not
-fair (does not guarantee FIFO), but can improve throughput when the lock
-is heavily contended.
+The `barging` feature provides locking APIs that are compatible with the [lock_api]
+crate. It does not require node allocations from the caller. The [`barging`] module
+is suitable for `no_std` environments. This implementation **is not** fair (does not
+guarantee FIFO), but can improve throughput when the lock is heavily contended.

 ### lock_api

 This feature implements the [`RawMutex`] trait from the [lock_api] crate for
-[`barging::Mutex`]. Aliases are provided by the [`lock_api`] module. This feature
-is `no_std` compatible.
+[`barging::Mutex`]. Aliases are provided by the `no_std` compatible
+[`barging::lock_api`] module.

 ## Minimum Supported Rust Version (MSRV)
@@ -245,24 +248,25 @@
 each of your dependencies, including this one.

 [codecov]: https://codecov.io/gh/pedromfedricci/mcslock
 [cargo-crev]: https://github.com/crev-dev/cargo-crev

-[`MutexNode`]: https://docs.rs/mcslock/latest/mcslock/raw/struct.MutexNode.html
-[`LocalMutexNode`]: https://docs.rs/mcslock/latest/mcslock/raw/struct.LocalMutexNode.html
-[`raw::Mutex`]: https://docs.rs/mcslock/latest/mcslock/raw/struct.Mutex.html
-[`barging::Mutex`]: https://docs.rs/mcslock/latest/mcslock/barging/struct.Mutex.html
+[Mellor-Crummey and Scott]: https://www.cs.rochester.edu/~scott/papers/1991_TOCS_synch.pdf
+[Johnson and Harathi]: https://web.archive.org/web/20140411142823/http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf
+[spinlocks are usually not what you want]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html
+[Linux kernel mutexes]: https://www.kernel.org/doc/html/latest/locking/mutex-design.html
+
 [`raw`]: https://docs.rs/mcslock/latest/mcslock/raw/index.html
+[`raw::Mutex`]: https://docs.rs/mcslock/latest/mcslock/raw/struct.Mutex.html
+[`raw::MutexNode`]: https://docs.rs/mcslock/latest/mcslock/raw/struct.MutexNode.html
+[`raw::LocalMutexNode`]: https://docs.rs/mcslock/latest/mcslock/raw/struct.LocalMutexNode.html
 [`barging`]: https://docs.rs/mcslock/latest/mcslock/barging/index.html
-[`lock_api`]: https://docs.rs/mcslock/latest/mcslock/barging/lock_api/index.html
+[`barging::lock_api`]: https://docs.rs/mcslock/latest/mcslock/barging/lock_api/index.html
+[`barging::Mutex`]: https://docs.rs/mcslock/latest/mcslock/barging/struct.Mutex.html
 [`thread_local_node!`]: https://docs.rs/mcslock/latest/mcslock/macro.thread_local_node.html
+
 [`std::sync::Mutex`]: https://doc.rust-lang.org/std/sync/struct.Mutex.html
-[`parking_lot::Mutex`]: https://docs.rs/parking_lot/latest/parking_lot/type.Mutex.html
-[`RawMutex`]: https://docs.rs/lock_api/latest/lock_api/trait.RawMutex.html
-[`RawMutexFair`]: https://docs.rs/lock_api/latest/lock_api/trait.RawMutexFair.html
 [`std::thread::yield_now`]: https://doc.rust-lang.org/std/thread/fn.yield_now.html
 [`core::hint::spin_loop`]: https://doc.rust-lang.org/core/hint/fn.spin_loop.html
-[spin-lock]: https://en.wikipedia.org/wiki/Spinlock
-[spin-rs]: https://docs.rs/spin/latest/spin
+
 [lock_api]: https://docs.rs/lock_api/latest/lock_api
-[Linux kernel mutexes]: https://www.kernel.org/doc/html/latest/locking/mutex-design.html
-[spinlocks are usually not what you want]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html
-[Mellor-Crummey and Scott]: https://www.cs.rochester.edu/~scott/papers/1991_TOCS_synch.pdf
-[Johnson and Harathi]: https://web.archive.org/web/20140411142823/http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf
+[`RawMutex`]: https://docs.rs/lock_api/latest/lock_api/trait.RawMutex.html
+[`RawMutexFair`]: https://docs.rs/lock_api/latest/lock_api/trait.RawMutexFair.html
+[`parking_lot::Mutex`]: https://docs.rs/parking_lot/latest/parking_lot/type.Mutex.html
diff --git a/examples/raw.rs b/examples/raw.rs
index 754f511..d918ec5 100644
--- a/examples/raw.rs
+++ b/examples/raw.rs
@@ -26,19 +26,20 @@ fn main() {
             //
             // We unwrap() the return value to assert that we are not expecting
            // threads to ever fail while holding the lock.
-            let mut data = data.lock(&mut node);
-            *data += 1;
-            if *data == N {
-                tx.send(()).unwrap();
-            }
-            // the lock is unlocked here when `data` goes out of scope.
+            data.lock_with_then(&mut node, |data| {
+                *data += 1;
+                if *data == N {
+                    tx.send(()).unwrap();
+                }
+                // The lock is unlocked here at the end of the closure scope.
+            });
+            // The node can now be reused for other locking operations.
+            let _ = data.lock_with_then(&mut node, |data| *data);
        });
    }

    let _message = rx.recv();

-    // A queue node must be mutably accessible.
-    let mut node = MutexNode::new();
-    let count = data.lock(&mut node);
-    assert_eq!(*count, N);
-    // lock is unlock here when `count` goes out of scope.
+    // A queue node is transparently allocated on the stack.
+    let count = data.lock_then(|data| *data);
+    assert_eq!(count, N);
 }
diff --git a/examples/thread_local.rs b/examples/thread_local.rs
index 5c8db73..6c646b8 100644
--- a/examples/thread_local.rs
+++ b/examples/thread_local.rs
@@ -35,7 +35,7 @@ fn main() {
            // threads to ever fail while holding the lock.
            //
            // Data is exclusively accessed by the guard argument.
-            data.lock_with_local(&NODE, |mut data| {
+            data.lock_with_local_then(&NODE, |data| {
                *data += 1;
                if *data == N {
                    tx.send(()).unwrap();
@@ -46,6 +46,6 @@
    }

    let _message = rx.recv();
-    let count = data.lock_with_local(&NODE, |guard| *guard);
+    let count = data.lock_with_local_then(&NODE, |data| *data);
    assert_eq!(count, N);
 }
diff --git a/src/barging/lock_api/mod.rs b/src/barging/lock_api/mod.rs
index 6f73f2d..7dc74f5 100644
--- a/src/barging/lock_api/mod.rs
+++ b/src/barging/lock_api/mod.rs
@@ -1,34 +1,51 @@
-//! Locking interfaces for MCS lock that are compatible with [lock_api].
+//! Unfair MCS lock aliases for [`lock_api::Mutex`].
 //!
 //! This module exports [`lock_api::Mutex`] and [`lock_api::MutexGuard`] type
 //! aliases with a `barging` MCS lock and guard as their inner types. The
 //! [`barging::Mutex`] type will implement the [`lock_api::RawMutex`] trait when
-//! this feature is enabled.
+//! this feature is enabled. The `barging` MCS lock is an unfair lock.
 //!
-//! The following modules provide type aliases for [`lock_api::Mutex`] and
-//! [`lock_api::MutexGuard`] that are associated with a relax strategy. See
-//! their documentation for more information.
+//! This module provides an implementation that is `no_std` compatible.
+//!
+//! The lock is held for as long as its associated RAII guard is in scope. Once
+//! the guard is dropped, the mutex is freed. Mutex guards are returned by
+//! [`lock`] and [`try_lock`].
+//!
+//! This Mutex is generic over the two layers of relax policies. Users may
+//! choose a policy as long as it implements the [`Relax`] trait. The shared
+//! lock relax policy is associated with the `Rs` generic parameter. The
+//! handoff relax policy is then associated with the `Rq` generic parameter.
+//! Backoff relax policies are usually preferred for shared lock contention,
+//! while non-backoff relax policies are usually preferred for handoffs.
+//!
+//! There are a number of relax policies provided by the [`relax`] module. The
+//! following modules provide type aliases for [`lock_api::Mutex`] and
+//! [`lock_api::MutexGuard`] associated with a relax policy. See their
+//! documentation for more information.
 //!
 //! [`relax`]: crate::relax
 //! [`Relax`]: crate::relax::Relax
 //! [`barging::Mutex`]: crate::barging::Mutex
+//!
 //! [lock_api]: https://crates.io/crates/lock_api
 //! [`lock_api::Mutex`]: https://docs.rs/lock_api/latest/lock_api/struct.Mutex.html
 //! [`lock_api::MutexGuard`]: https://docs.rs/lock_api/latest/lock_api/struct.MutexGuard.html
 //! [`lock_api::RawMutex`]: https://docs.rs/lock_api/latest/lock_api/trait.RawMutex.html
-//! [`RawMutexFair`]: https://docs.rs/lock_api/latest/lock_api/trait.RawMutexFair.html
+//! [`lock`]: https://docs.rs/lock_api/latest/lock_api/struct.Mutex.html#method.lock
+//! [`try_lock`]: https://docs.rs/lock_api/latest/lock_api/struct.Mutex.html#method.try_lock

 mod mutex;
 pub use mutex::{Mutex, MutexGuard};

-/// A `barging` MCS lock alias that signals the processor that it is running
-/// a busy-wait spin-loop during lock contention.
+/// An unfair MCS lock that implements a `spin` relax policy.
+///
+/// During lock contention, this lock spins while signaling the processor that
+/// it is running a busy-wait spin-loop.
 pub mod spins {
     use super::mutex;
     use crate::relax::Spin;

-    /// A `barging` MCS lock that implements the [`Spin`] relax strategy
-    /// and compatible with the `lock_api` crate.
+    /// A [`lock_api::Mutex`] that implements the [`Spin`] relax policy.
     ///
     /// # Example
     ///
     /// ```
     /// let guard = mutex.lock();
     /// assert_eq!(*guard, 0);
     /// ```
+    /// [`lock_api::Mutex`]: mutex::Mutex
     pub type Mutex<T> = mutex::Mutex<T, Spin, Spin>;

-    /// A `barging` MCS guard that implements the [`Spin`] relax strategy
-    /// and compatible with the `lock_api` crate.
+    /// A [`lock_api::MutexGuard`] that implements the [`Spin`] relax policy.
+    ///
+    /// [`lock_api::MutexGuard`]: mutex::MutexGuard
     pub type MutexGuard<'a, T> = mutex::MutexGuard<'a, T, Spin, Spin>;

-    /// A `barging` MCS lock alias that, during lock contention, will perform
-    /// exponential backoff while signaling the processor that it is running a
-    /// busy-wait spin-loop.
+    /// An unfair MCS lock that implements a `spin with backoff` relax policy.
+    ///
+    /// During lock contention, this lock will perform exponential backoff
+    /// while spinning, signaling the processor that it is running a busy-wait
+    /// spin-loop.
     pub mod backoff {
         use super::mutex;
         use crate::relax::{Spin, SpinBackoff};

-        /// A `barging` MCS lock that implements the [`SpinBackoff`] relax
-        /// strategy and compatible with the `lock_api` crate.
+        /// A [`lock_api::Mutex`] that implements the [`SpinBackoff`] relax
+        /// policy.
         ///
         /// # Example
         ///
         /// ```
         /// let guard = mutex.lock();
         /// assert_eq!(*guard, 0);
         /// ```
+        /// [`lock_api::Mutex`]: mutex::Mutex
         pub type Mutex<T> = mutex::Mutex<T, SpinBackoff, Spin>;

-        /// A `barging` MCS guard that implements the [`SpinBackoff`] relax
-        /// strategy and compatible with the `lock_api` crate.
+        /// A [`lock_api::MutexGuard`] that implements the [`SpinBackoff`]
+        /// relax policy.
+        ///
+        /// [`lock_api::MutexGuard`]: mutex::MutexGuard
         pub type MutexGuard<'a, T> = mutex::MutexGuard<'a, T, SpinBackoff, Spin>;
     }
 }

-/// A `barging` MCS lock alias that yields the current time slice to the
-/// OS scheduler during lock contention.
+/// An unfair MCS lock that implements a `yield` relax policy.
+///
+/// During lock contention, this lock will yield the current time slice to the
+/// OS scheduler.
 #[cfg(any(feature = "yield", loom, test))]
 #[cfg_attr(docsrs, doc(cfg(feature = "yield")))]
 pub mod yields {
     use super::mutex;
     use crate::relax::Yield;

-    /// A `barging` MCS lock that implements the [`Yield`] relax strategy
-    /// and compatible with the `lock_api` crate.
+    /// A [`lock_api::Mutex`] that implements the [`Yield`] relax policy.
     ///
     /// # Example
     ///
     /// ```
     /// let guard = mutex.lock();
     /// assert_eq!(*guard, 0);
     /// ```
+    /// [`lock_api::Mutex`]: mutex::Mutex
     pub type Mutex<T> = mutex::Mutex<T, Yield, Yield>;

-    /// A `barging` MCS guard that implements the [`Yield`] relax strategy
-    /// and compatible with the `lock_api` crate.
+    /// A [`lock_api::MutexGuard`] that implements the [`Yield`] relax policy.
+    ///
+    /// [`lock_api::MutexGuard`]: mutex::MutexGuard
     pub type MutexGuard<'a, T> = mutex::MutexGuard<'a, T, Yield, Yield>;

-    /// A `barging` MCS lock alias that, during lock contention, will perform
-    /// exponential backoff while spinning up to a threshold, then yields back to
-    /// the OS scheduler.
-    #[cfg(feature = "yield")]
+    /// An unfair MCS lock that implements a `yield with backoff` relax policy.
+    ///
+    /// During lock contention, this lock will perform exponential backoff while
+    /// spinning, up to a threshold, and then yield back to the OS scheduler.
     pub mod backoff {
         use super::mutex;
         use crate::relax::{Yield, YieldBackoff};

-        /// A `barging` MCS lock that implements the [`YieldBackoff`] relax
-        /// strategy and compatible with the `lock_api` crate.
+        /// A [`lock_api::Mutex`] that implements the [`YieldBackoff`] relax
+        /// policy.
         ///
         /// # Example
         ///
         /// ```
         /// let guard = mutex.lock();
         /// assert_eq!(*guard, 0);
         /// ```
+        /// [`lock_api::Mutex`]: mutex::Mutex
         pub type Mutex<T> = mutex::Mutex<T, YieldBackoff, Yield>;

-        /// A `barging` MCS guard that implements the [`YieldBackoff`] relax
-        /// strategy and compatible with the `lock_api` crate.
+        /// A [`lock_api::MutexGuard`] that implements the [`YieldBackoff`]
+        /// relax policy.
+        ///
+        /// [`lock_api::MutexGuard`]: mutex::MutexGuard
         pub type MutexGuard<'a, T> = mutex::MutexGuard<'a, T, YieldBackoff, Yield>;
     }
 }

-/// A `barging` MCS lock alias that rapidly spins without telling the CPU
-/// to do any power down during lock contention.
+/// An unfair MCS lock that implements a `loop` relax policy.
+///
+/// During lock contention, this lock will rapidly spin without signaling the
+/// CPU to power down.
 pub mod loops {
     use super::mutex;
     use crate::relax::Loop;

-    /// A `barging` MCS lock that implements the [`Loop`] relax strategy
-    /// and compatible with the `lock_api` crate.
+    /// A [`lock_api::Mutex`] that implements the [`Loop`] relax policy.
     ///
     /// # Example
     ///
     /// ```
     /// let guard = mutex.lock();
     /// assert_eq!(*guard, 0);
     /// ```
+    /// [`lock_api::Mutex`]: mutex::Mutex
     pub type Mutex<T> = mutex::Mutex<T, Loop, Loop>;

-    /// A `barging` MCS guard that implements the [`Loop`] relax strategy
-    /// and compatible with the `lock_api` crate.
+    /// A [`lock_api::MutexGuard`] that implements the [`Loop`] relax policy.
+    ///
+    /// [`lock_api::MutexGuard`]: mutex::MutexGuard
     pub type MutexGuard<'a, T> = mutex::MutexGuard<'a, T, Loop, Loop>;
 }
diff --git a/src/barging/lock_api/mutex.rs b/src/barging/lock_api/mutex.rs
index b36761f..fbbf25d 100644
--- a/src/barging/lock_api/mutex.rs
+++ b/src/barging/lock_api/mutex.rs
@@ -3,14 +3,16 @@ use crate::barging;
 #[cfg(test)]
 use crate::relax::Relax;
 #[cfg(test)]
-use crate::test::{LockData, LockNew, LockWith};
+use crate::test::{LockData, LockNew, LockThen, TryLockThen};

-/// A lock that provides mutually exclusive data access that is compatible with
-/// [`lock_api`](https://crates.io/crates/lock_api).
+/// A [`lock_api::Mutex`] alias that wraps a [`barging::Mutex`].
+///
+/// [`lock_api::Mutex`]: https://docs.rs/lock_api/latest/lock_api/struct.Mutex.html
 pub type Mutex<T, Rs, Rq> = lock_api::Mutex<barging::Mutex<(), Rs, Rq>, T>;

-/// A guard that provides mutable data access that is compatible with
-/// [`lock_api`](https://crates.io/crates/lock_api).
+/// A [`lock_api::MutexGuard`] alias that wraps a [`barging::MutexGuard`].
+///
+/// [`lock_api::MutexGuard`]: https://docs.rs/lock_api/latest/lock_api/struct.MutexGuard.html
 pub type MutexGuard<'a, T, Rs, Rq> = lock_api::MutexGuard<'a, barging::Mutex<(), Rs, Rq>, T>;

 #[cfg(test)]
 impl<T, Rs: Relax, Rq: Relax> LockNew for Mutex<T, Rs, Rq> {
     type Target = T;

     fn new(value: Self::Target) -> Self
     where
     {
         Self::new(value)
     }
 }

 #[cfg(test)]
@@ -26,24 +28,27 @@
-impl<T, Rs: Relax, Rq: Relax> LockWith for Mutex<T, Rs, Rq> {
-    type Guard<'a> = MutexGuard<'a, Self::Target, Rs, Rq>
+impl<T, Rs: Relax, Rq: Relax> LockThen for Mutex<T, Rs, Rq> {
+    type Guard<'a> = &'a mut Self::Target
     where
         Self: 'a,
         Self::Target: 'a;

-    fn try_lock_with<F, Ret>(&self, f: F) -> Ret
+    fn lock_then<F, Ret>(&self, f: F) -> Ret
     where
-        F: FnOnce(Option<MutexGuard<'_, T, Rs, Rq>>) -> Ret,
+        F: FnOnce(&mut Self::Target) -> Ret,
     {
-        f(self.try_lock())
+        f(&mut *self.lock())
     }
+}

-    fn lock_with<F, Ret>(&self, f: F) -> Ret
+#[cfg(test)]
+impl<T, Rs: Relax, Rq: Relax> TryLockThen for Mutex<T, Rs, Rq> {
+    fn try_lock_then<F, Ret>(&self, f: F) -> Ret
     where
-        F: FnOnce(MutexGuard<'_, T, Rs, Rq>) -> Ret,
+        F: FnOnce(Option<&mut Self::Target>) -> Ret,
     {
-        f(self.lock())
+        f(self.try_lock().as_deref_mut())
     }

     fn is_locked(&self) -> bool {
@@ -71,8 +76,18 @@
     use crate::test::tests;

     #[test]
-    fn lots_and_lots() {
-        tests::lots_and_lots::<Mutex<_>>();
+    fn lots_and_lots_lock() {
+        tests::lots_and_lots_lock::<Mutex<_>>();
+    }
+
+    #[test]
+    fn lots_and_lots_try_lock() {
+        tests::lots_and_lots_try_lock::<Mutex<_>>();
+    }
+
+    #[test]
+    fn lots_and_lots_mixed_lock() {
+        tests::lots_and_lots_mixed_lock::<Mutex<_>>();
     }

     #[test]
diff --git a/src/barging/mod.rs b/src/barging/mod.rs
index 5f2f9e2..5d5fca5 100644
--- a/src/barging/mod.rs
+++ b/src/barging/mod.rs
@@ -1,36 +1,39 @@
-//! A barging MCS lock implementation that is compliant with the [lock_api] crate.
+//! Unfair MCS lock implementation.
 //!
 //! This implementation will have non-waiting threads race for the lock against
 //! the front of the waiting queue thread. If the front of the queue thread
 //! loses the race, it will simply keep spinning, while holding its position
-//! in the queue. By allowing barging instead of forcing FIFO, a higher throughput
-//! can be achieved when the lock is heavily contended. This implementation is
-//! suitable for `no_std` environments, and the locking APIs are compatible with
-//! the [lock_api] crate (see `lock_api` feature).
+//! in the queue. By allowing barging instead of forcing FIFO, a higher
+//! throughput can be achieved when the lock is heavily contended.
+//!
+//! This module provides an implementation that is `no_std` compatible; it does
+//! not require queue nodes to be allocated by the callers, and so it is
+//! compatible with the [lock_api] crate (see `lock_api` feature).
 //!
 //! The lock is held for as long as its associated RAII guard is in scope. Once
 //! the guard is dropped, the mutex is freed. Mutex guards are returned by
 //! [`lock`] and [`try_lock`]. Guards are also accessible as the closure argument
-//! for [`lock_with`] and [`try_lock_with`] methods.
+//! for [`lock_then`] and [`try_lock_then`] methods.
 //!
-//! This Mutex is generic over the two layers of relax strategies. User may
-//! choose a strategy as long as it implements the [`Relax`] trait. The shared
-//! lock relax strategy is associated with the `Rs` generic paramater. The
-//! handoff relax strategy is then associated with the `Rq` generic parameter.
-//! Backoff relax strategies are usually prefered for shared lock contention,
-//! while non-backoff relax strategies are usually prefered for handoffs.
+//! This Mutex is generic over the two layers of relax policies. Users may
+//! choose a policy as long as it implements the [`Relax`] trait. The shared
+//! lock relax policy is associated with the `Rs` generic parameter. The
+//! handoff relax policy is then associated with the `Rq` generic parameter.
+//! Backoff relax policies are usually preferred for shared lock contention,
+//! while non-backoff relax policies are usually preferred for handoffs.
 //!
-//! There is a number of strategies provided by the [`relax`] module. Each
-//! submodule provides type aliases for [`Mutex`] and [`MutexGuard`] associated
-//! with one relax strategy. See their documentation for more information.
+//! There are a number of policies provided by the [`relax`] module. The
+//! following modules provide type aliases for [`Mutex`] and [`MutexGuard`]
+//! associated with a relax policy. See their documentation for more information.
 //!
-//! [lock_api]: https://crates.io/crates/lock_api
 //! [`lock`]: Mutex::lock
 //! [`try_lock`]: Mutex::try_lock
-//! [`lock_with`]: Mutex::lock_with
-//! [`try_lock_with`]: Mutex::try_lock_with
+//! [`lock_then`]: Mutex::lock_then
+//! [`try_lock_then`]: Mutex::try_lock_then
 //! [`relax`]: crate::relax
 //! [`Relax`]: crate::relax::Relax
+//!
+//! [lock_api]: https://crates.io/crates/lock_api

 mod mutex;
 pub use mutex::{Mutex, MutexGuard};

 #[cfg(feature = "lock_api")]
 #[cfg_attr(docsrs, doc(cfg(feature = "lock_api")))]
 pub mod lock_api;

-/// A `barging` MCS lock alias that signals the processor that it is running
-/// a busy-wait spin-loop during lock contention.
+/// An unfair MCS lock that implements a `spin` relax policy.
+///
+/// During lock contention, this lock spins while signaling the processor that
+/// it is running a busy-wait spin-loop.
 pub mod spins {
     use super::mutex;
     use crate::relax::Spin;

-    /// A `barging` MCS lock that implements the [`Spin`] relax strategy.
+    /// A [`barging::Mutex`] that implements the [`Spin`] relax policy.
     ///
     /// # Example
     ///
     /// ```
     /// let guard = mutex.lock();
     /// assert_eq!(*guard, 0);
     /// ```
+    /// [`barging::Mutex`]: mutex::Mutex
     pub type Mutex<T> = mutex::Mutex<T, Spin, Spin>;

-    /// A `barging` MCS guard that implements the [`Spin`] relax strategy.
+    /// A [`barging::MutexGuard`] that implements the [`Spin`] relax policy.
+    ///
+    /// [`barging::MutexGuard`]: mutex::MutexGuard
     pub type MutexGuard<'a, T> = mutex::MutexGuard<'a, T, Spin, Spin>;

-    /// A `barging` MCS lock alias that, during lock contention, will perform
-    /// exponential backoff while signaling the processor that it is running a
-    /// busy-wait spin-loop.
+    /// An unfair MCS lock that implements a `spin with backoff` relax policy.
+    ///
+    /// During lock contention, this lock will perform exponential backoff
+    /// while spinning, signaling the processor that it is running a busy-wait
+    /// spin-loop.
     pub mod backoff {
         use super::mutex;
         use crate::relax::{Spin, SpinBackoff};

-        /// A `barging` MCS lock that implements the [`SpinBackoff`] relax
-        /// strategy.
+        /// A [`barging::Mutex`] that implements the [`SpinBackoff`] relax
+        /// policy.
         ///
         /// # Example
         ///
         /// ```
         /// let guard = mutex.lock();
         /// assert_eq!(*guard, 0);
         /// ```
+        /// [`barging::Mutex`]: mutex::Mutex
         pub type Mutex<T> = mutex::Mutex<T, SpinBackoff, Spin>;

-        /// A `barging` MCS guard that implements the [`SpinBackoff`] relax
-        /// strategy.
+        /// A [`barging::MutexGuard`] that implements the [`SpinBackoff`] relax
+        /// policy.
+        ///
+        /// [`barging::MutexGuard`]: mutex::MutexGuard
         pub type MutexGuard<'a, T> = mutex::MutexGuard<'a, T, SpinBackoff, Spin>;
     }
 }

-/// A `barging` MCS lock alias that yields the current time slice to the
-/// OS scheduler during lock contention.
+/// An unfair MCS lock that implements a `yield` relax policy.
+///
+/// During lock contention, this lock will yield the current time slice to the
+/// OS scheduler.
 #[cfg(any(feature = "yield", loom, test))]
 #[cfg_attr(docsrs, doc(cfg(feature = "yield")))]
 pub mod yields {
     use super::mutex;
     use crate::relax::Yield;

-    /// A `barging` MCS lock that implements the [`Yield`] relax strategy.
+    /// A [`barging::Mutex`] that implements the [`Yield`] relax policy.
     ///
     /// # Example
     ///
     /// ```
     /// let guard = mutex.lock();
     /// assert_eq!(*guard, 0);
     /// ```
+    /// [`barging::Mutex`]: mutex::Mutex
     pub type Mutex<T> = mutex::Mutex<T, Yield, Yield>;

-    /// A `barging` MCS guard that implements the [`Yield`] relax strategy.
+    /// A [`barging::MutexGuard`] that implements the [`Yield`] relax policy.
+    ///
+    /// [`barging::MutexGuard`]: mutex::MutexGuard
     pub type MutexGuard<'a, T> = mutex::MutexGuard<'a, T, Yield, Yield>;

-    /// A `barging` MCS lock alias that, during lock contention, will perform
-    /// exponential backoff while spinning up to a threshold, then yields back
-    /// to the OS scheduler.
-    #[cfg(feature = "yield")]
+    /// An unfair MCS lock that implements a `yield with backoff` relax policy.
+    ///
+    /// During lock contention, this lock will perform exponential backoff while
+    /// spinning, up to a threshold, and then yield back to the OS scheduler.
     pub mod backoff {
         use super::mutex;
         use crate::relax::{Yield, YieldBackoff};

-        /// A `barging` MCS lock that implements the [`YieldBackoff`] relax
-        /// strategy.
+        /// A [`barging::Mutex`] that implements the [`YieldBackoff`] relax
+        /// policy.
         ///
         /// # Example
         ///
         /// ```
         /// let guard = mutex.lock();
         /// assert_eq!(*guard, 0);
         /// ```
+        /// [`barging::Mutex`]: mutex::Mutex
         pub type Mutex<T> = mutex::Mutex<T, YieldBackoff, Yield>;

-        /// A `barging` MCS guard that implements the [`YieldBackoff`] relax
-        /// strategy.
+        /// A [`barging::MutexGuard`] that implements the [`YieldBackoff`]
+        /// relax policy.
+        ///
+        /// [`barging::MutexGuard`]: mutex::MutexGuard
         pub type MutexGuard<'a, T> = mutex::MutexGuard<'a, T, YieldBackoff, Yield>;
     }
 }

-/// A `barging` MCS lock alias that rapidly spins without telling the CPU
-/// to do any power down during lock contention.
+/// An unfair MCS lock that implements a `loop` relax policy.
+///
+/// During lock contention, this lock will rapidly spin without signaling the
+/// CPU to power down.
 pub mod loops {
     use super::mutex;
     use crate::relax::Loop;

-    /// A `barging` MCS lock that implements the [`Loop`] relax strategy.
+    /// A [`barging::Mutex`] that implements the [`Loop`] relax policy.
     ///
     /// # Example
     ///
     /// ```
     /// let guard = mutex.lock();
     /// assert_eq!(*guard, 0);
     /// ```
+    /// [`barging::Mutex`]: mutex::Mutex
     pub type Mutex<T> = mutex::Mutex<T, Loop, Loop>;

-    /// A `barging` MCS guard that implements the [`Loop`] relax strategy.
+    /// A [`barging::MutexGuard`] that implements the [`Loop`] relax policy.
+    ///
+    /// [`barging::MutexGuard`]: mutex::MutexGuard
     pub type MutexGuard<'a, T> = mutex::MutexGuard<'a, T, Loop, Loop>;
 }
diff --git a/src/barging/mutex.rs b/src/barging/mutex.rs
index cb56a37..7a86fb1 100644
--- a/src/barging/mutex.rs
+++ b/src/barging/mutex.rs
@@ -1,11 +1,19 @@
-use core::fmt;
-use core::marker::PhantomData;
-use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
+use core::fmt::{self, Debug, Display, Formatter};

 use crate::cfg::atomic::AtomicBool;
-use crate::cfg::cell::{UnsafeCell, WithUnchecked};
-use crate::raw::{Mutex as RawMutex, MutexNode};
-use crate::relax::Relax;
+use crate::inner::barging as inner;
+use crate::relax::{Relax, RelaxWait};
+
+#[cfg(test)]
+use crate::test::{LockNew, LockThen, TryLockThen};
+
+#[cfg(all(loom, test))]
+use crate::loom::{Guard, GuardDeref, GuardDerefMut};
+#[cfg(all(loom, test))]
+use crate::test::{AsDeref, AsDerefMut};
+
+// The inner type of mutex, with a boolean as the atomic data.
+type MutexInner<T, Rs, Rq> = inner::Mutex<T, AtomicBool, RelaxWait<Rs>, RelaxWait<Rq>>;

 /// A mutual exclusion primitive useful for protecting shared data.
 ///
 /// [`lock`]: Mutex::lock
 /// [`try_lock`]: Mutex::try_lock
@@ -62,13 +70,10 @@
 pub struct Mutex<T: ?Sized, Rs, Rq> {
-    locked: AtomicBool,
-    marker: PhantomData<Rs>,
-    raw: RawMutex<(), Rq>,
-    data: UnsafeCell<T>,
+    inner: MutexInner<T, Rs, Rq>,
 }

-// Same unsafe impls as `crate::raw::Mutex`.
+// Same unsafe impls as `crate::inner::barging::Mutex`.
 unsafe impl<T: ?Sized + Send, Rs, Rq> Send for Mutex<T, Rs, Rq> {}
 unsafe impl<T: ?Sized + Send, Rs, Rq> Sync for Mutex<T, Rs, Rq> {}

 impl<T, Rs, Rq> Mutex<T, Rs, Rq> {
@@ -89,20 +94,14 @@
     #[cfg(not(all(loom, test)))]
     #[inline]
     pub const fn new(value: T) -> Self {
-        let locked = AtomicBool::new(false);
-        let raw = RawMutex::new(());
-        let data = UnsafeCell::new(value);
-        Self { locked, raw, data, marker: PhantomData }
+        Self { inner: inner::Mutex::new(value) }
     }

     /// Creates a new unlocked mutex with Loom primitives (non-const).
     #[cfg(all(loom, test))]
     #[cfg(not(tarpaulin_include))]
     fn new(value: T) -> Self {
-        let locked = AtomicBool::new(false);
-        let raw = RawMutex::new(());
-        let data = UnsafeCell::new(value);
-        Self { locked, raw, data, marker: PhantomData }
+        Self { inner: inner::Mutex::new(value) }
     }
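The hunks below migrate the closure-based locking API from `lock_with`/`try_lock_with` to `lock_then`/`try_lock_then`. A short sketch of the renamed methods (guard semantics are unchanged; the mutex is released when the closure returns):

```rust
use mcslock::barging::spins::Mutex;

let mutex = Mutex::new(0);
// The closure receives the guard; the lock is released at the end of
// the closure scope.
mutex.lock_then(|mut guard| *guard = 10);
// `try_lock_then` hands the closure an `Option` over the guard.
assert_eq!(mutex.try_lock_then(|guard| *guard.unwrap()), 10);
```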
     /// Consumes this mutex, returning the underlying data.
@@ -120,7 +119,7 @@
     /// ```
     #[inline(always)]
     pub fn into_inner(self) -> T {
-        self.data.into_inner()
+        self.inner.into_inner()
     }
 }

@@ -157,19 +156,7 @@
     /// ```
     #[inline]
     pub fn lock(&self) -> MutexGuard<'_, T, Rs, Rq> {
-        if self.try_lock_fast() {
-            return MutexGuard::new(self);
-        }
-        let mut node = MutexNode::new();
-        let guard = self.raw.lock(&mut node);
-        while !self.try_lock_fast() {
-            let mut relax = Rs::new();
-            while self.locked.load(Relaxed) {
-                relax.relax();
-            }
-        }
-        drop(guard);
-        MutexGuard::new(self)
+        self.inner.lock().into()
     }

     /// Acquires this mutex and then runs the closure against its guard.
@@ -196,11 +183,11 @@
     ///     let c_mutex = Arc::clone(&mutex);
     ///
     ///     thread::spawn(move || {
-    ///         c_mutex.lock_with(|mut guard| *guard = 10);
+    ///         c_mutex.lock_then(|mut guard| *guard = 10);
     ///     })
     ///     .join().expect("thread::spawn failed");
     ///
-    ///     assert_eq!(mutex.lock_with(|guard| *guard), 10);
+    ///     assert_eq!(mutex.lock_then(|guard| *guard), 10);
     /// ```
     ///
     /// Compile fail: borrows of the guard or its data cannot escape the given
@@ -210,10 +197,10 @@
     /// closure:
     ///
     /// ```
     /// use mcslock::barging::spins::Mutex;
     ///
     /// let mutex = Mutex::new(1);
-    /// let data = mutex.lock_with(|guard| &*guard);
+    /// let data = mutex.lock_then(|guard| &*guard);
     /// ```
     #[inline]
-    pub fn lock_with<F, Ret>(&self, f: F) -> Ret
+    pub fn lock_then<F, Ret>(&self, f: F) -> Ret
     where
         F: FnOnce(MutexGuard<'_, T, Rs, Rq>) -> Ret,
     {
@@ -258,10 +245,7 @@
     /// ```
     #[inline]
     pub fn try_lock(&self) -> Option<MutexGuard<'_, T, Rs, Rq>> {
-        self.locked
-            .compare_exchange(false, true, Acquire, Relaxed)
-            .map(|_| MutexGuard::new(self))
-            .ok()
+        self.inner.try_lock().map(From::from)
     }

     /// Attempts to acquire this mutex and then runs a closure against its guard.
@@ -288,17 +272,17 @@
     ///     let c_mutex = Arc::clone(&mutex);
     ///
     ///     thread::spawn(move || {
-    ///         c_mutex.try_lock_with(|guard| {
+    ///         c_mutex.try_lock_then(|guard| {
     ///             if let Some(mut guard) = guard {
     ///                 *guard = 10;
     ///             } else {
-    ///                 println!("try_lock_with failed");
+    ///                 println!("try_lock_then failed");
     ///             }
     ///         });
     ///     })
     ///     .join().expect("thread::spawn failed");
     ///
-    ///     assert_eq!(mutex.lock_with(|guard| *guard), 10);
+    ///     assert_eq!(mutex.lock_then(|guard| *guard), 10);
     /// ```
     ///
     /// Compile fail: borrows of the guard or its data cannot escape the given
@@ -308,10 +292,10 @@
     /// closure:
     ///
     /// ```
     /// use mcslock::barging::spins::Mutex;
     ///
     /// let mutex = Mutex::new(1);
-    /// let data = mutex.try_lock_with(|guard| &*guard.unwrap());
+    /// let data = mutex.try_lock_then(|guard| &*guard.unwrap());
     /// ```
     #[inline]
-    pub fn try_lock_with<F, Ret>(&self, f: F) -> Ret
+    pub fn try_lock_then<F, Ret>(&self, f: F) -> Ret
     where
         F: FnOnce(Option<MutexGuard<'_, T, Rs, Rq>>) -> Ret,
     {
@@ -339,8 +323,7 @@
     /// ```
     #[inline]
     pub fn is_locked(&self) -> bool {
-        // Relaxed is sufficient because this method only guarantees atomicity.
-        self.locked.load(Relaxed)
+        self.inner.is_locked()
     }

     /// Returns a mutable reference to the underlying data.
@@ -364,23 +347,12 @@
     #[cfg(not(all(loom, test)))]
     #[inline(always)]
     pub fn get_mut(&mut self) -> &mut T {
-        // SAFETY: We hold exclusive access to the Mutex data.
-        unsafe { &mut *self.data.get() }
-    }
-
-    /// Tries to lock this mutex with a weak exchange.
-    fn try_lock_fast(&self) -> bool {
-        self.locked.compare_exchange_weak(false, true, Acquire, Relaxed).is_ok()
-    }
-
-    /// Unlocks this mutex.
-    fn unlock(&self) {
-        self.locked.store(false, Release);
+        self.inner.get_mut()
     }
 }

-impl<T: ?Sized + Default, Rs, Rq> Default for Mutex<T, Rs, Rq> {
-    /// Creates a `Mutex`, with the `Default` value for `T`.
+impl<T: Default, Rs, Rq> Default for Mutex<T, Rs, Rq> {
+    /// Creates a `Mutex<T>`, with the `Default` value for `T`.
     #[inline]
     fn default() -> Self {
         Self::new(Default::default())
@@ -388,26 +360,21 @@
     }
 }

 impl<T, Rs, Rq> From<T> for Mutex<T, Rs, Rq> {
-    /// Creates a `Mutex` from a instance of `T`.
+    /// Creates a `Mutex<T>` from an instance of `T`.
     #[inline]
     fn from(data: T) -> Self {
         Self::new(data)
     }
 }

-impl<T: ?Sized + fmt::Debug, Rs, Rq> fmt::Debug for Mutex<T, Rs, Rq> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        let mut d = f.debug_struct("Mutex");
-        match self.try_lock() {
-            Some(guard) => guard.with(|data| d.field("data", &data)),
-            None => d.field("data", &format_args!("<locked>")),
-        };
-        d.finish()
+impl<T: ?Sized + Debug, Rs, Rq> Debug for Mutex<T, Rs, Rq> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        self.inner.fmt(f)
     }
 }

 #[cfg(test)]
-impl<T, Rs, Rq> crate::test::LockNew for Mutex<T, Rs, Rq> {
+impl<T, Rs, Rq> LockNew for Mutex<T, Rs, Rq> {
     type Target = T;

     fn new(value: Self::Target) -> Self
     where
@@ -419,24 +386,27 @@
     }
 }

 #[cfg(test)]
-impl<T, Rs: Relax, Rq: Relax> crate::test::LockWith for Mutex<T, Rs, Rq> {
-    type Guard<'a> = MutexGuard<'a, Self::Target, Rs, Rq>
+impl<T, Rs: Relax, Rq: Relax> LockThen for Mutex<T, Rs, Rq> {
+    type Guard<'a> = MutexGuard<'a, T, Rs, Rq>
     where
         Self: 'a,
         Self::Target: 'a;

-    fn try_lock_with<F, Ret>(&self, f: F) -> Ret
+    fn lock_then<F, Ret>(&self, f: F) -> Ret
     where
-        F: FnOnce(Option<MutexGuard<'_, T, Rs, Rq>>) -> Ret,
+        F: FnOnce(MutexGuard<'_, T, Rs, Rq>) -> Ret,
     {
-        self.try_lock_with(f)
+        self.lock_then(f)
     }
+}

-    fn lock_with<F, Ret>(&self, f: F) -> Ret
+#[cfg(test)]
+impl<T, Rs: Relax, Rq: Relax> TryLockThen for Mutex<T, Rs, Rq> {
+    fn try_lock_then<F, Ret>(&self, f: F) -> Ret
     where
-        F: FnOnce(MutexGuard<'_, T, Rs, Rq>) -> Ret,
+        F: FnOnce(Option<MutexGuard<'_, T, Rs, Rq>>) -> Ret,
     {
-        self.lock_with(f)
+        self.try_lock_then(f)
     }

     fn is_locked(&self) -> bool {
@@ -480,7 +450,7 @@
 unsafe impl<Rs: Relax, Rq: Relax> lock_api::RawMutex for Mutex<(), Rs, Rq> {
     #[inline]
     unsafe fn unlock(&self) {
-        self.unlock();
+        self.inner.unlock();
     }

     #[inline]
@@ -489,6 +459,9 @@
     }
 }

+// The inner type of mutex's guard, with a boolean as the atomic data.
+type GuardInner<'a, T, Rs, Rq> = inner::MutexGuard<'a, T, AtomicBool, RelaxWait<Rs>, RelaxWait<Rq>>;
+
 /// An RAII implementation of a "scoped lock" of a mutex. When this structure is
 /// dropped (falls out of scope), the lock will be unlocked.
 ///
 /// [`Deref`] and [`DerefMut`] implementations.
 ///
 /// This structure is returned by [`lock`] and [`try_lock`] methods on [`Mutex`].
-/// It is also given as closure argument by [`lock_with`] and [`try_lock_with`]
+/// It is also given as closure parameter by [`lock_then`] and [`try_lock_then`]
 /// methods.
 ///
 /// [`Deref`]: core::ops::Deref
 /// [`DerefMut`]: core::ops::DerefMut
 /// [`lock`]: Mutex::lock
 /// [`try_lock`]: Mutex::try_lock
-/// [`lock_with`]: Mutex::lock_with
-/// [`try_lock_with`]: Mutex::try_lock_with
+/// [`lock_then`]: Mutex::lock_then
+/// [`try_lock_then`]: Mutex::try_lock_then
 #[must_use = "if unused the Mutex will immediately unlock"]
 pub struct MutexGuard<'a, T: ?Sized, Rs, Rq> {
-    lock: &'a Mutex<T, Rs, Rq>,
+    inner: GuardInner<'a, T, Rs, Rq>,
 }

-// Same unsafe impls as `crate::raw::MutexGuard`.
+// Same unsafe impls as `crate::inner::barging::MutexGuard`.
 unsafe impl<T: ?Sized + Send, Rs, Rq> Send for MutexGuard<'_, T, Rs, Rq> {}
 unsafe impl<T: ?Sized + Sync, Rs, Rq> Sync for MutexGuard<'_, T, Rs, Rq> {}

@@ -496,56 +469,40 @@
-impl<'a, T: ?Sized, Rs, Rq> MutexGuard<'a, T, Rs, Rq> {
-    /// Creates a new `MutexGuard` instance.
-    const fn new(lock: &'a Mutex<T, Rs, Rq>) -> Self {
-        Self { lock }
-    }
-
-    /// Runs `f` against an shared reference pointing to the underlying data.
-    fn with<F, Ret>(&self, f: F) -> Ret
-    where
-        F: FnOnce(&T) -> Ret,
-    {
-        // SAFETY: A guard instance holds the lock locked.
-        unsafe { self.lock.data.with_unchecked(f) }
-    }
-}
-
-impl<T: ?Sized, Rs, Rq> Drop for MutexGuard<'_, T, Rs, Rq> {
-    #[inline(always)]
-    fn drop(&mut self) {
-        self.lock.unlock();
+#[doc(hidden)]
+impl<'a, T: ?Sized, Rs, Rq> From<GuardInner<'a, T, Rs, Rq>> for MutexGuard<'a, T, Rs, Rq> {
+    fn from(inner: GuardInner<'a, T, Rs, Rq>) -> Self {
+        Self { inner }
     }
 }

-impl<'a, T: ?Sized + fmt::Debug, Rs, Rq> fmt::Debug for MutexGuard<'a, T, Rs, Rq> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        self.with(|data| fmt::Debug::fmt(data, f))
+impl<'a, T: ?Sized + Debug, Rs, Rq> Debug for MutexGuard<'a, T, Rs, Rq> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        self.inner.fmt(f)
     }
 }

-impl<'a, T: ?Sized + fmt::Display, Rs, Rq> fmt::Display for MutexGuard<'a, T, Rs, Rq> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        self.with(|data| fmt::Display::fmt(data, f))
+impl<'a, T: ?Sized + Display, Rs, Rq> Display for MutexGuard<'a, T, Rs, Rq> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        self.inner.fmt(f)
     }
 }

@@ -556,8 +513,7 @@
 impl<'a, T: ?Sized, Rs, Rq> core::ops::Deref for MutexGuard<'a, T, Rs, Rq> {
     type Target = T;

     /// Dereferences the guard to access the underlying data.
     #[inline(always)]
     fn deref(&self) -> &T {
-        // SAFETY: A guard instance holds the lock locked.
-        unsafe { &*self.lock.data.get() }
+        &self.inner
     }
 }

@@ -566,8 +522,7 @@
 impl<'a, T: ?Sized, Rs, Rq> core::ops::DerefMut for MutexGuard<'a, T, Rs, Rq> {
     /// Mutably dereferences the guard to access the underlying data.
     #[inline(always)]
     fn deref_mut(&mut self) -> &mut T {
-        // SAFETY: A guard instance holds the lock locked.
-        unsafe { &mut *self.lock.data.get() }
+        &mut self.inner
     }
 }

@@ -575,11 +530,39 @@
 /// underlying data.
 #[cfg(all(loom, test))]
 #[cfg(not(tarpaulin_include))]
-unsafe impl<T: ?Sized, Rs, Rq> crate::loom::Guard for MutexGuard<'_, T, Rs, Rq> {
+unsafe impl<T: ?Sized, Rs, Rq> Guard for MutexGuard<'_, T, Rs, Rq> {
     type Target = T;

     fn get(&self) -> &loom::cell::UnsafeCell<Self::Target> {
-        &self.lock.data
+        self.inner.get()
     }
 }

+#[cfg(all(loom, test))]
+#[cfg(not(tarpaulin_include))]
+impl<T: ?Sized, Rs, Rq> AsDeref for MutexGuard<'_, T, Rs, Rq> {
+    type Target = T;
+
+    type Deref<'a> = GuardDeref<'a, Self>
+    where
+        Self: 'a,
+        Self::Target: 'a;
+
+    fn as_deref(&self) -> Self::Deref<'_> {
+        self.get_ref()
+    }
+}
+
+#[cfg(all(loom, test))]
+#[cfg(not(tarpaulin_include))]
+impl<T: ?Sized, Rs, Rq> AsDerefMut for MutexGuard<'_, T, Rs, Rq> {
+    type DerefMut<'a> = GuardDerefMut<'a, Self>
+    where
+        Self: 'a,
+        Self::Target: 'a;
+
+    fn as_deref_mut(&mut self) -> Self::DerefMut<'_> {
+        self.get_mut()
+    }
+}
+
@@ -589,8 +572,23 @@
 mod test {
     use crate::test::tests;

     #[test]
-    fn lots_and_lots() {
-        tests::lots_and_lots::<Mutex<_>>();
+    fn node_waiter_drop_does_not_matter() {
+        tests::node_waiter_drop_does_not_matter::<Mutex<_>>();
+    }
+
+    #[test]
+    fn lots_and_lots_lock() {
+        tests::lots_and_lots_lock::<Mutex<_>>();
+    }
+
+    #[test]
+    fn lots_and_lots_try_lock() {
+        tests::lots_and_lots_try_lock::<Mutex<_>>();
+    }
+
+    #[test]
+    fn lots_and_lots_mixed_lock() {
+        tests::lots_and_lots_mixed_lock::<Mutex<_>>();
     }

     #[test]
diff --git a/src/cfg.rs b/src/cfg.rs
index bf3bb5b..38c6adb 100644
--- a/src/cfg.rs
+++ b/src/cfg.rs
@@ -1,13 +1,50 @@
 pub mod atomic {
+    pub use sealed::AtomicPtrNull;
+
     #[cfg(not(all(loom, test)))]
     pub use core::sync::atomic::{fence, AtomicBool, AtomicPtr};

     #[cfg(all(loom, test))]
     pub use loom::sync::atomic::{fence, AtomicBool, AtomicPtr};
+
+    impl<T> AtomicPtrNull for AtomicPtr<T> {
+        type Target = T;
+
+        #[rustfmt::skip]
+        #[cfg(not(all(loom, test)))]
+        const NULL_MUT: AtomicPtr<T> = {
+            Self::new(core::ptr::null_mut())
+        };
+
+        #[cfg(all(loom, test))]
+        #[cfg(not(tarpaulin_include))]
+        fn null_mut() -> AtomicPtr<T> {
+            Self::new(core::ptr::null_mut())
+        }
+    }
+
+    mod sealed {
+        use super::AtomicPtr;
+
+        /// A trait that extends [`AtomicPtr`] to allow creating `null` values.
+        pub trait AtomicPtrNull {
+            /// The type of the data pointed to.
+            type Target;
+
+            /// A compile-time evaluable [`AtomicPtr`] pointing to `null`.
+            #[cfg(not(all(loom, test)))]
+            #[allow(clippy::declare_interior_mutable_const)]
+            const NULL_MUT: AtomicPtr<Self::Target>;
+
+            /// Returns a Loom-based [`AtomicPtr`] pointing to `null` (non-const).
+            #[cfg(all(loom, test))]
+            fn null_mut() -> AtomicPtr<Self::Target>;
+        }
+    }
 }

 pub mod cell {
-    pub use sealed::WithUnchecked;
+    pub use sealed::{UnsafeCellOptionWith, UnsafeCellWith};

     #[cfg(not(all(loom, test)))]
     pub use core::cell::UnsafeCell;

     #[cfg(all(loom, test))]
     pub use loom::cell::UnsafeCell;

-    #[cfg(not(all(loom, test)))]
-    impl<T: ?Sized> WithUnchecked for UnsafeCell<T> {
+    impl<T: ?Sized> UnsafeCellWith for UnsafeCell<T> {
         type Target = T;

+        #[cfg(not(all(loom, test)))]
         unsafe fn with_unchecked<F, Ret>(&self, f: F) -> Ret
         where
             F: FnOnce(&Self::Target) -> Ret,
         {
-            // SAFETY: Caller must guarantee there are no mutable aliases.
+            // SAFETY: Caller guaranteed that there are no mutable aliases.
             f(unsafe { &*self.get() })
         }
-    }
-
-    #[cfg(all(loom, test))]
-    #[cfg(not(tarpaulin_include))]
-    impl<T: ?Sized> WithUnchecked for UnsafeCell<T> {
-        type Target = T;

+        #[cfg(all(loom, test))]
+        #[cfg(not(tarpaulin_include))]
         unsafe fn with_unchecked<F, Ret>(&self, f: F) -> Ret
         where
             F: FnOnce(&Self::Target) -> Ret,
         {
-            // SAFETY: Caller must guarantee there are no mutable aliases.
+            // SAFETY: Caller guaranteed that there are no mutable aliases.
self.with(|ptr| f(unsafe { &*ptr })) } + + #[cfg(not(all(loom, test)))] + unsafe fn with_mut_unchecked(&self, f: F) -> Ret + where + F: FnOnce(&mut Self::Target) -> Ret, + { + // SAFETY: Caller guaranteed that there are no mutable aliases. + f(unsafe { &mut *self.get() }) + } + + #[cfg(all(loom, test))] + #[cfg(not(tarpaulin_include))] + unsafe fn with_mut_unchecked(&self, f: F) -> Ret + where + F: FnOnce(&mut Self::Target) -> Ret, + { + // SAFETY: Caller guaranteed that there are no mutable aliases. + self.with_mut(|ptr| f(unsafe { &mut *ptr })) + } + } + + impl UnsafeCellOptionWith for Option<&UnsafeCell> { + type Target = T; + + #[cfg(not(all(loom, test)))] + unsafe fn as_deref_with_mut_unchecked(&self, f: F) -> Ret + where + F: FnOnce(Option<&mut T>) -> Ret, + { + let ptr = self.map(UnsafeCell::get); + // SAFETY: Caller guaranteed that there are no mutable aliases. + f(ptr.map(|ptr| unsafe { &mut *ptr })) + } + + #[cfg(all(loom, test))] + #[cfg(not(tarpaulin_include))] + unsafe fn as_deref_with_mut_unchecked(&self, f: F) -> Ret + where + F: FnOnce(Option<&mut T>) -> Ret, + { + let ptr = self.map(UnsafeCell::get_mut); + // SAFETY: Caller guaranteed that there are no mutable aliases. + f(ptr.as_ref().map(|ptr| unsafe { ptr.deref() })) + } } mod sealed { /// A trait that extends [`UnsafeCell`] to allow running closures against /// its underlying data. - pub trait WithUnchecked { + pub trait UnsafeCellWith { /// The type of the underlying data. type Target: ?Sized; @@ -58,6 +135,74 @@ pub mod cell { unsafe fn with_unchecked(&self, f: F) -> Ret where F: FnOnce(&Self::Target) -> Ret; + + /// Runs `f` against a mutable reference borrowed from a [`UnsafeCell`]. + /// + /// # Safety + /// + /// Caller must guarantee there are no mutable aliases to the + /// underlying data. + unsafe fn with_mut_unchecked(&self, f: F) -> Ret + where + F: FnOnce(&mut Self::Target) -> Ret; + } + + /// A trait that extends `Option<&UnsafeCell>` to allow running closures + /// against its underlying data. + pub trait UnsafeCellOptionWith { + type Target: ?Sized; + + /// Runs `f` against a mutable reference borrowed from a + /// [`Option<&UnsafeCell>`]. + /// + /// # Safety + /// + /// Caller must guarantee there are no mutable aliases to the + /// underlying data. + unsafe fn as_deref_with_mut_unchecked(&self, f: F) -> Ret + where + F: FnOnce(Option<&mut Self::Target>) -> Ret; + } + } +} + +pub mod debug_abort { + #[cfg(all(test, panic = "unwind"))] + use std::panic::Location; + + /// Runs the closure, aborting the process if a unwinding panic occurs. + #[cfg(all(test, panic = "unwind"))] + #[track_caller] + pub fn on_unwind T>(may_unwind: F) -> T { + let location = Location::caller(); + let abort = DebugAbort { location }; + let value = may_unwind(); + core::mem::forget(abort); + value + } + + /// Runs the closure, without aborting the process if a unwinding panic + /// occurs. + #[cfg(not(all(test, panic = "unwind")))] + #[cfg(not(tarpaulin_include))] + pub fn on_unwind T>(may_unwind: F) -> T { + may_unwind() + } + + /// A test only type that will abort the program execution once dropped. + /// + /// To avoid aborting the proccess, callers must `forget` all instances of + /// the `Abort` type. 
+    #[cfg(all(test, panic = "unwind"))]
+    struct DebugAbort {
+        location: &'static Location<'static>,
+    }
+
+    #[cfg(all(test, panic = "unwind"))]
+    #[cfg(not(tarpaulin_include))]
+    impl Drop for DebugAbort {
+        fn drop(&mut self) {
+            panic!("thread exits are forbidden inside {:?}, aborting", self.location);
+        }
+    }
+}

@@ -71,13 +216,13 @@ pub mod hint {
 }

 pub mod thread {
-    #[cfg(all(any(feature = "yield", test), not(all(loom, test))))]
+    #[cfg(all(any(feature = "yield", test), not(loom)))]
     pub use std::thread::yield_now;

     #[cfg(all(loom, test))]
     pub use loom::thread::yield_now;

-    #[cfg(all(feature = "thread_local", not(all(loom, test))))]
+    #[cfg(all(feature = "thread_local", not(loom)))]
     pub use std::thread::LocalKey;

     #[cfg(all(feature = "thread_local", loom, test))]
diff --git a/src/inner/barging/mod.rs b/src/inner/barging/mod.rs
new file mode 100644
index 0000000..90b416f
--- /dev/null
+++ b/src/inner/barging/mod.rs
@@ -0,0 +1,179 @@
+use core::fmt::{self, Debug, Display, Formatter};
+use core::marker::PhantomData;
+
+use crate::cfg::cell::{UnsafeCell, UnsafeCellWith};
+use crate::inner::raw;
+use crate::lock::{Lock, Wait};
+
+/// A mutual exclusion primitive implementing a barging MCS lock protocol, useful
+/// for protecting shared data.
+pub struct Mutex {
+    lock: L,
+    queue: raw::Mutex<(), L, Wq>,
+    marker: PhantomData,
+    data: UnsafeCell,
+}
+
+// Same unsafe impls as `crate::inner::raw::Mutex`.
+unsafe impl Send for Mutex {}
+unsafe impl Sync for Mutex {}
+
+impl Mutex {
+    /// Creates a new, unlocked and core based mutex (const).
+    #[cfg(not(all(loom, test)))]
+    pub const fn new(value: T) -> Self {
+        let lock = Lock::UNLOCKED;
+        let queue = raw::Mutex::new(());
+        let data = UnsafeCell::new(value);
+        Self { lock, queue, data, marker: PhantomData }
+    }
+
+    /// Creates a new, unlocked and loom based mutex (non-const).
+    #[cfg(all(loom, test))]
+    #[cfg(not(tarpaulin_include))]
+    pub fn new(value: T) -> Self {
+        let lock = Lock::unlocked();
+        let queue = raw::Mutex::new(());
+        let data = UnsafeCell::new(value);
+        Self { lock, queue, data, marker: PhantomData }
+    }
+}
+
+impl Mutex {
+    /// Acquires this mutex, blocking the current thread until it is able to do so.
+    pub fn lock(&self) -> MutexGuard<'_, T, L, Ws, Wq> {
+        if self.lock.try_lock_acquire_weak() {
+            return MutexGuard::new(self);
+        }
+        let mut node = raw::MutexNode::new();
+        self.queue.lock_with_then(&mut node, |()| {
+            while !self.lock.try_lock_acquire_weak() {
+                self.lock.lock_wait_relaxed::();
+            }
+        });
+        MutexGuard::new(self)
+    }
+}
+
+impl Mutex {
+    /// Attempts to acquire this mutex without blocking the thread.
+    pub fn try_lock(&self) -> Option> {
+        self.lock.try_lock_acquire().then(|| MutexGuard::new(self))
+    }
+
+    /// Returns `true` if the lock is currently held.
+    ///
+    /// This function does not guarantee strong ordering, only atomicity.
+    pub fn is_locked(&self) -> bool {
+        self.lock.is_locked()
+    }
+
+    /// Unlocks this mutex.
+    pub fn unlock(&self) {
+        self.lock.notify();
+    }
+}
+
+impl Mutex {
+    /// Consumes this mutex, returning the underlying data.
+    pub fn into_inner(self) -> T {
+        self.data.into_inner()
+    }
+}
+
+impl Mutex {
+    /// Returns a mutable reference to the underlying data.
+    #[cfg(not(all(loom, test)))]
+    pub fn get_mut(&mut self) -> &mut T {
+        // SAFETY: We hold exclusive access to the Mutex data.
+        unsafe { &mut *self.data.get() }
+    }
+}
+
+impl Debug for Mutex {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        let mut d = f.debug_struct("Mutex");
+        match self.try_lock() {
+            Some(guard) => guard.with(|data| d.field("data", &data)),
+            None => d.field("data", &format_args!("<locked>")),
+        };
+        d.finish()
+    }
+}
+
+/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
+/// dropped (falls out of scope), the lock will be unlocked.
+#[must_use = "if unused the Mutex will immediately unlock"]
+pub struct MutexGuard<'a, T: ?Sized, L: Lock, Ws, Wq> {
+    lock: &'a Mutex,
+}
+
+// Same unsafe impls as `crate::inner::raw::MutexGuard`.
+unsafe impl Send for MutexGuard<'_, T, L, Ws, Wq> {}
+unsafe impl Sync for MutexGuard<'_, T, L, Ws, Wq> {}
+
+impl<'a, T: ?Sized, L: Lock, Ws, Wq> MutexGuard<'a, T, L, Ws, Wq> {
+    /// Creates a new `MutexGuard` instance.
+    const fn new(lock: &'a Mutex) -> Self {
+        Self { lock }
+    }
+
+    /// Runs `f` against a shared reference pointing to the underlying data.
+    fn with(&self, f: F) -> Ret
+    where
+        F: FnOnce(&T) -> Ret,
+    {
+        // SAFETY: A guard instance holds the lock locked.
+        unsafe { self.lock.data.with_unchecked(f) }
+    }
+}
+
+impl<'a, T: ?Sized + Debug, L: Lock, Ws, Wq> Debug for MutexGuard<'a, T, L, Ws, Wq> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        self.with(|data| data.fmt(f))
+    }
+}
+
+impl<'a, T: ?Sized + Display, L: Lock, Ws, Wq> Display for MutexGuard<'a, T, L, Ws, Wq> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        self.with(|data| data.fmt(f))
+    }
+}
+
+#[cfg(not(all(loom, test)))]
+impl<'a, T: ?Sized, L: Lock, Ws, Wq> core::ops::Deref for MutexGuard<'a, T, L, Ws, Wq> {
+    type Target = T;
+
+    /// Dereferences the guard to access the underlying data.
+    fn deref(&self) -> &T {
+        // SAFETY: A guard instance holds the lock locked.
+        unsafe { &*self.lock.data.get() }
+    }
+}
+
+#[cfg(not(all(loom, test)))]
+impl<'a, T: ?Sized, L: Lock, Ws, Wq> core::ops::DerefMut for MutexGuard<'a, T, L, Ws, Wq> {
+    /// Mutably dereferences the guard to access the underlying data.
+    fn deref_mut(&mut self) -> &mut T {
+        // SAFETY: A guard instance holds the lock locked.
+        unsafe { &mut *self.lock.data.get() }
+    }
+}
+
+impl Drop for MutexGuard<'_, T, L, Ws, Wq> {
+    fn drop(&mut self) {
+        self.lock.unlock();
+    }
+}
+
+/// SAFETY: A guard instance holds the lock locked, with exclusive access to the
+/// underlying data.
+#[cfg(all(loom, test))]
+#[cfg(not(tarpaulin_include))]
+unsafe impl crate::loom::Guard for MutexGuard<'_, T, L, Ws, Wq> {
+    type Target = T;
+
+    fn get(&self) -> &loom::cell::UnsafeCell {
+        &self.lock.data
+    }
+}
diff --git a/src/inner/mod.rs b/src/inner/mod.rs
new file mode 100644
index 0000000..7d9bcf5
--- /dev/null
+++ b/src/inner/mod.rs
@@ -0,0 +1,4 @@
+pub mod raw;
+
+#[cfg(feature = "barging")]
+pub mod barging;
diff --git a/src/inner/raw/mod.rs b/src/inner/raw/mod.rs
new file mode 100644
index 0000000..990f4ac
--- /dev/null
+++ b/src/inner/raw/mod.rs
@@ -0,0 +1,357 @@
+use core::fmt::{self, Debug, Display, Formatter};
+use core::marker::PhantomData;
+use core::mem::MaybeUninit;
+use core::ptr;
+use core::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release};
+
+use crate::cfg::atomic::{fence, AtomicPtr, AtomicPtrNull};
+use crate::cfg::cell::{UnsafeCell, UnsafeCellOptionWith, UnsafeCellWith};
+use crate::lock::{Lock, Wait};
+use crate::relax::Relax;
+
+#[cfg(feature = "thread_local")]
+mod thread_local;
+#[cfg(feature = "thread_local")]
+pub use thread_local::LocalMutexNode;
+
+/// The inner definition of [`MutexNode`], which is known to be in an initialized
+/// state.
+#[derive(Debug)]
+pub struct MutexNodeInit {
+    next: AtomicPtr,
+    lock: L,
+}
+
+impl MutexNodeInit {
+    /// Returns a raw mutable pointer of this node.
+    const fn as_ptr(&self) -> *mut Self {
+        (self as *const Self).cast_mut()
+    }
+}
+
+impl MutexNodeInit {
+    /// Creates a new, locked, initialized and core based node (const).
+    #[cfg(not(all(loom, test)))]
+    const fn locked() -> Self {
+        let next = AtomicPtr::NULL_MUT;
+        let lock = Lock::LOCKED;
+        Self { next, lock }
+    }
+
+    /// Creates a new, locked, initialized and loom based node (non-const).
+    #[cfg(all(loom, test))]
+    #[cfg(not(tarpaulin_include))]
+    fn locked() -> Self {
+        let next = AtomicPtr::null_mut();
+        let lock = Lock::locked();
+        Self { next, lock }
+    }
+}
+
+/// A locally-accessible record for forming the waiting queue.
+///
+/// The inner state is never dropped, only overwritten. This is desirable and
+/// well suited for our use cases, since all `L` types used are only composed
+/// of `no drop glue` types (e.g. atomic types).
+///
+/// `L` must fail the [`core::mem::needs_drop`] check, else `L` will leak.
+#[derive(Debug)]
+#[repr(transparent)]
+pub struct MutexNode {
+    inner: MaybeUninit>,
+}
+
+impl MutexNode {
+    /// Creates a new `MutexNode` instance.
+    pub const fn new() -> Self {
+        Self { inner: MaybeUninit::uninit() }
+    }
+}
+
+impl MutexNode {
+    /// Initializes this node's inner state, returning a shared reference
+    /// pointing to it.
+    fn initialize(&mut self) -> &MutexNodeInit {
+        self.inner.write(MutexNodeInit::locked())
+    }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl Default for MutexNode {
+    #[inline(always)]
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+/// A mutual exclusion primitive implementing the MCS lock protocol, useful for
+/// protecting shared data.
+pub struct Mutex {
+    tail: AtomicPtr>,
+    wait: PhantomData,
+    data: UnsafeCell,
+}
+
+// Same unsafe impls as `std::sync::Mutex`.
+unsafe impl Send for Mutex {}
+unsafe impl Sync for Mutex {}
+
+impl Mutex {
+    /// Creates a new, unlocked and core based mutex (const).
+    #[cfg(not(all(loom, test)))]
+    pub const fn new(value: T) -> Self {
+        let tail = AtomicPtr::NULL_MUT;
+        let data = UnsafeCell::new(value);
+        Self { tail, data, wait: PhantomData }
+    }
+
+    /// Creates a new, unlocked and loom based mutex (non-const).
+    #[cfg(all(loom, test))]
+    #[cfg(not(tarpaulin_include))]
+    pub fn new(value: T) -> Self {
+        let tail = AtomicPtr::null_mut();
+        let data = UnsafeCell::new(value);
+        Self { tail, data, wait: PhantomData }
+    }
+
+    /// Consumes this mutex, returning the underlying data.
+    pub fn into_inner(self) -> T {
+        self.data.into_inner()
+    }
+}
+
+impl Mutex {
+    /// Attempts to acquire this mutex without blocking the thread.
+    ///
+    /// # Safety
+    ///
+    /// The returned guard instance **must** be dropped, that is, it **must not**
+    /// be "forgotten" (e.g. `core::mem::forget`), or be targeted by any
+    /// other operation that would prevent it from executing its `drop` call.
+    unsafe fn try_lock<'a>(&'a self, n: &'a mut MutexNode) -> Option> {
+        let node = n.initialize();
+        self.tail
+            .compare_exchange(ptr::null_mut(), node.as_ptr(), AcqRel, Relaxed)
+            .map(|_| MutexGuard::new(self, node))
+            .ok()
+    }
+
+    /// Acquires this mutex, blocking the current thread until it is able to do so.
+    ///
+    /// # Safety
+    ///
+    /// The returned guard instance **must** be dropped, that is, it **must not**
+    /// be "forgotten" (e.g. `core::mem::forget`), or be targeted by any
+    /// other operation that would prevent it from executing its `drop` call.
+    unsafe fn lock<'a>(&'a self, n: &'a mut MutexNode) -> MutexGuard<'a, T, L, W> {
+        let node = n.initialize();
+        let pred = self.tail.swap(node.as_ptr(), AcqRel);
+        // If we have a predecessor, complete the link so it will notify us.
+        if !pred.is_null() {
+            // SAFETY: Already verified that our predecessor is not null.
+            unsafe { &*pred }.next.store(node.as_ptr(), Release);
+            // Verify the lock hand-off, while applying some waiting policy.
+            node.lock.lock_wait_relaxed::();
+            fence(Acquire);
+        }
+        MutexGuard::new(self, node)
+    }
+
+    /// Unlocks this mutex. If there is a successor node in the queue, the lock
+    /// is passed directly to them.
+    fn unlock(&self, head: &MutexNodeInit) {
+        let mut next = head.next.load(Relaxed);
+        // If we don't have a known successor currently,
+        if next.is_null() {
+            // and we are the tail, then dequeue and free the lock.
+            let false = self.try_unlock_release(head.as_ptr()) else { return };
+            // But if we are not the tail, then we have a pending successor. We
+            // must wait for them to finish linking with us.
+            next = wait_next_relaxed::(&head.next);
+        }
+        fence(Acquire);
+        // Notify our successor that they hold the lock.
+        // SAFETY: We already verified that our successor is not null.
+        unsafe { &*next }.lock.notify();
+    }
+}
+
+/// A relaxed loop that returns a pointer to the successor once it finishes
+/// linking with the current thread.
+///
+/// The successor node is loaded with a relaxed ordering.
+fn wait_next_relaxed(next: &AtomicPtr>) -> *mut MutexNodeInit {
+    let mut relax = R::new();
+    loop {
+        let ptr = next.load(Relaxed);
+        let true = ptr.is_null() else { return ptr };
+        relax.relax();
+    }
+}
+
+impl Mutex {
+    /// Returns `true` if the lock is currently held.
+    ///
+    /// This function does not guarantee strong ordering, only atomicity.
+    pub fn is_locked(&self) -> bool {
+        !self.tail.load(Relaxed).is_null()
+    }
+
+    /// Returns a mutable reference to the underlying data.
+    #[cfg(not(all(loom, test)))]
+    pub fn get_mut(&mut self) -> &mut T {
+        // SAFETY: We hold exclusive access to the Mutex data.
+        unsafe { &mut *self.data.get() }
+    }
+
+    /// Unlocks the lock if the candidate node is the queue's tail.
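+    ///
+    /// This is a single release-ordered compare-and-swap: if `tail` still
+    /// points to `node`, no successor has enqueued and the lock is freed;
+    /// otherwise a successor is pending, and `unlock` must instead wait for
+    /// the link to complete and then hand the lock off.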
+    fn try_unlock_release(&self, node: *mut MutexNodeInit) -> bool {
+        self.tail.compare_exchange(node, ptr::null_mut(), Release, Relaxed).is_ok()
+    }
+}
+
+impl Mutex {
+    /// Attempts to acquire this mutex and then runs a closure against the
+    /// protected data.
+    ///
+    /// This function does not block.
+    pub fn try_lock_with_then(&self, node: &mut MutexNode, f: F) -> Ret
+    where
+        F: FnOnce(Option<&mut T>) -> Ret,
+    {
+        // SAFETY: The guard's `drop` call is executed within this scope.
+        unsafe { self.try_lock(node) }.as_deref_mut_with_mut(f)
+    }
+
+    /// Acquires this mutex and then runs the closure against the protected data.
+    ///
+    /// This function will block if the lock is unavailable.
+    pub fn lock_with_then(&self, node: &mut MutexNode, f: F) -> Ret
+    where
+        F: FnOnce(&mut T) -> Ret,
+    {
+        // SAFETY: The guard's `drop` call is executed within this scope.
+        unsafe { self.lock(node) }.with_mut(f)
+    }
+}
+
+impl Debug for Mutex {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        let mut d = f.debug_struct("Mutex");
+        let mut node = MutexNode::new();
+        self.try_lock_with_then(&mut node, |data| match data {
+            Some(data) => d.field("data", &data),
+            None => d.field("data", &format_args!("<locked>")),
+        });
+        d.finish()
+    }
+}
+
+/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
+/// dropped (falls out of scope), the lock will be unlocked.
+#[must_use = "if unused the Mutex will immediately unlock"]
+struct MutexGuard<'a, T: ?Sized, L: Lock, W: Wait> {
+    lock: &'a Mutex,
+    head: &'a MutexNodeInit,
+}
+
+// Rust's `std::sync::MutexGuard` is not Send for pthread compatibility, but this
+// impl is safe to be Send. Same unsafe Sync impl as `std::sync::MutexGuard`.
+unsafe impl Send for MutexGuard<'_, T, L, W> {}
+unsafe impl Sync for MutexGuard<'_, T, L, W> {}
+
+impl<'a, T: ?Sized, L: Lock, W: Wait> MutexGuard<'a, T, L, W> {
+    /// Creates a new `MutexGuard` instance.
+    const fn new(lock: &'a Mutex, head: &'a MutexNodeInit) -> Self {
+        Self { lock, head }
+    }
+
+    /// Runs `f` against a shared reference pointing to the underlying data.
+    #[cfg(not(tarpaulin_include))]
+    fn with(&self, f: F) -> Ret
+    where
+        F: FnOnce(&T) -> Ret,
+    {
+        // SAFETY: A guard instance holds the lock locked.
+        unsafe { self.lock.data.with_unchecked(f) }
+    }
+
+    /// Runs `f` against a mutable reference pointing to the underlying data.
+    fn with_mut(&mut self, f: F) -> Ret
+    where
+        F: FnOnce(&mut T) -> Ret,
+    {
+        // SAFETY: A guard instance holds the lock locked.
+        unsafe { self.lock.data.with_mut_unchecked(f) }
+    }
+}
+
+/// A trait that converts a `&mut Self` to an `Option<&mut Self::Target>` and
+/// then runs closures against it.
+trait AsDerefMutWithMut {
+    type Target: ?Sized;
+
+    /// Converts `&mut Self` to `Option<&mut Self::Target>` and then runs `f`
+    /// against it.
+    fn as_deref_mut_with_mut(&mut self, f: F) -> Ret
+    where
+        F: FnOnce(Option<&mut Self::Target>) -> Ret;
+}
+
+impl AsDerefMutWithMut for Option> {
+    type Target = T;
+
+    fn as_deref_mut_with_mut(&mut self, f: F) -> Ret
+    where
+        F: FnOnce(Option<&mut Self::Target>) -> Ret,
+    {
+        let data = self.as_ref().map(|guard| &guard.lock.data);
+        // SAFETY: A guard instance holds the lock locked.
+ unsafe { data.as_deref_with_mut_unchecked(f) } + } +} + +#[cfg(not(tarpaulin_include))] +impl<'a, T: ?Sized + Debug, L: Lock, W: Wait> Debug for MutexGuard<'a, T, L, W> { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + self.with(|data| data.fmt(f)) + } +} + +#[cfg(not(tarpaulin_include))] +impl<'a, T: ?Sized + Display, L: Lock, W: Wait> Display for MutexGuard<'a, T, L, W> { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + self.with(|data| data.fmt(f)) + } +} + +#[cfg(not(all(loom, test)))] +#[cfg(not(tarpaulin_include))] +impl<'a, T: ?Sized, L: Lock, W: Wait> core::ops::Deref for MutexGuard<'a, T, L, W> { + type Target = T; + + /// Dereferences the guard to access the underlying data. + #[inline(always)] + fn deref(&self) -> &T { + // SAFETY: A guard instance holds the lock locked. + unsafe { &*self.lock.data.get() } + } +} + +#[cfg(not(all(loom, test)))] +#[cfg(not(tarpaulin_include))] +impl<'a, T: ?Sized, L: Lock, W: Wait> core::ops::DerefMut for MutexGuard<'a, T, L, W> { + /// Mutably dereferences the guard to access the underlying data. + #[inline(always)] + fn deref_mut(&mut self) -> &mut T { + // SAFETY: A guard instance holds the lock locked. + unsafe { &mut *self.lock.data.get() } + } +} + +impl<'a, T: ?Sized, L: Lock, W: Wait> Drop for MutexGuard<'a, T, L, W> { + #[inline] + fn drop(&mut self) { + self.lock.unlock(self.head); + } +} diff --git a/src/inner/raw/thread_local.rs b/src/inner/raw/thread_local.rs new file mode 100644 index 0000000..8e44515 --- /dev/null +++ b/src/inner/raw/thread_local.rs @@ -0,0 +1,158 @@ +use core::cell::{RefCell, RefMut}; +use core::ops::DerefMut; +use core::panic::Location; + +use super::{Mutex, MutexNode}; +use crate::cfg::thread::LocalKey; +use crate::lock::{Lock, Wait}; + +type Key = &'static LocalMutexNode; + +/// A handle to a [`MutexNode`] stored at the thread local storage. +#[derive(Debug)] +#[repr(transparent)] +pub struct LocalMutexNode { + #[cfg(not(all(loom, test)))] + key: LocalKey>, + + // We can't take ownership of Loom's `thread_local!` value since it is a + // `static`, non-copy value, so we just point to it. + #[cfg(all(loom, test))] + key: &'static LocalKey>, +} + +#[cfg(not(tarpaulin_include))] +impl LocalMutexNode { + /// Creates a new `LocalMutexNode` key from the provided thread local node + /// key. + #[cfg(not(all(loom, test)))] + pub const fn new(key: LocalKey>) -> Self { + Self { key } + } + + /// Creates a new Loom based `LocalMutexNode` key from the provided thread + /// local node key. + #[cfg(all(loom, test))] + pub const fn new(key: &'static LocalKey>) -> Self { + Self { key } + } +} + +/// Panics the thread with a message pointing to the panic location. +#[inline(never)] +#[cold] +fn panic_already_borrowed(caller: &Location<'static>) -> ! { + panic!("{}, conflict at: {}", already_borrowed_error!(), caller) +} + +impl Mutex { + /// Attempts to acquire this mutex with a thread local node and then runs + /// a closure against the protected data. + /// + /// # Panics + /// + /// See: `with_local_node_then`. + #[track_caller] + pub fn try_lock_with_local_then(&self, node: Key, f: F) -> Ret + where + N: DerefMut>, + F: FnOnce(Option<&mut T>) -> Ret, + { + self.with_local_node_then(node, |m, n| m.try_lock_with_then(n, f)) + } + + /// Attempts to acquire this mutex with a thread local node and then runs + /// a closure against the protected data. + /// + /// # Safety + /// + /// See: `with_local_node_then_unchecked`. + /// + /// # Panics + /// + /// See: `with_local_node_then_unchecked`. 
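+    ///
+    /// A sketch of the reentrant misuse that this contract forbids, assuming
+    /// a `NODE` key generated by `thread_local_node!` (do not do this):
+    ///
+    /// ```ignore
+    /// // Undefined behavior: the nested call mutably aliases the same
+    /// // thread local node while the outer closure still borrows it.
+    /// mutex.try_lock_with_local_then_unchecked(&NODE, |_| {
+    ///     mutex.try_lock_with_local_then_unchecked(&NODE, |_| ());
+    /// });
+    /// ```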
+    pub unsafe fn try_lock_with_local_then_unchecked(&self, node: Key, f: F) -> Ret
+    where
+        N: DerefMut>,
+        F: FnOnce(Option<&mut T>) -> Ret,
+    {
+        self.with_local_node_then_unchecked(node, |m, n| m.try_lock_with_then(n, f))
+    }
+
+    /// Acquires this mutex with a thread local node and then runs a closure
+    /// against the protected data.
+    ///
+    /// # Panics
+    ///
+    /// See: `with_local_node_then`.
+    #[track_caller]
+    pub fn lock_with_local_then(&self, node: Key, f: F) -> Ret
+    where
+        N: DerefMut>,
+        F: FnOnce(&mut T) -> Ret,
+    {
+        self.with_local_node_then(node, |m, n| m.lock_with_then(n, f))
+    }
+
+    /// Acquires this mutex with a thread local node and then runs a closure
+    /// against the protected data.
+    ///
+    /// # Safety
+    ///
+    /// See: `with_local_node_then_unchecked`.
+    ///
+    /// # Panics
+    ///
+    /// See: `with_local_node_then_unchecked`.
+    pub unsafe fn lock_with_local_then_unchecked(&self, node: Key, f: F) -> Ret
+    where
+        N: DerefMut>,
+        F: FnOnce(&mut T) -> Ret,
+    {
+        self.with_local_node_then_unchecked(node, |m, n| m.lock_with_then(n, f))
+    }
+
+    /// Runs `f` over a raw mutex and a thread local node as arguments.
+    ///
+    /// # Panics
+    ///
+    /// Will panic if the thread local node is already mutably borrowed.
+    ///
+    /// Panics if the key currently has its destructor running, and it **may**
+    /// panic if the destructor has previously been run for this thread.
+    #[track_caller]
+    fn with_local_node_then(&self, node: Key, f: F) -> Ret
+    where
+        N: DerefMut>,
+        F: FnOnce(&Self, &mut MutexNode) -> Ret,
+    {
+        let caller = Location::caller();
+        let panic = |_| panic_already_borrowed(caller);
+        let f = |mut node: RefMut| f(self, &mut node);
+        node.key.with(|node| node.try_borrow_mut().map_or_else(panic, f))
+    }
+
+    /// Runs `f` over a raw mutex and a thread local node as arguments, without
+    /// checking if the node is currently mutably borrowed.
+    ///
+    /// # Safety
+    ///
+    /// Mutably borrowing a [`RefCell`] while references are still live is
+    /// undefined behaviour. Therefore, caller must guarantee that the thread
+    /// local node is not already in use for the current thread. A thread local
+    /// node is released to the current thread once the associated `with_local`'s
+    /// `f` closure goes out of scope.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the key currently has its destructor running, and it **may**
+    /// panic if the destructor has previously been run for this thread.
+    unsafe fn with_local_node_then_unchecked(&self, node: Key, f: F) -> Ret
+    where
+        N: DerefMut>,
+        F: FnOnce(&Self, &mut MutexNode) -> Ret,
+    {
+        // SAFETY: Caller guaranteed that no other references are live.
+        node.key.with(|node| f(self, unsafe { &mut *node.as_ptr() }))
+    }
+}
diff --git a/src/lib.rs b/src/lib.rs
index bb8fdfa..4ed5dd1 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,5 +1,5 @@
 //! A simple and correct implementation of Mellor-Crummey and Scott
-//! contention-free [spin-lock] for mutual exclusion, referred to as MCS lock.
+//! contention-free [lock] for mutual exclusion, referred to as MCS lock.
 //!
 //! MCS lock is a List-Based Queuing Lock that avoids network contention by
 //! having threads spin on local memory locations. The main properties of this
@@ -11,17 +11,17 @@
 //! - works equally well (requiring only O(1) network transactions per lock
 //!   acquisition) on machines with and without coherent caches.
 //!
-//! This algorithm and serveral others were introduced by [Mellor-Crummey and Scott]
-//! paper.
And a simpler correctness proof of the MCS lock was proposed by
-//! [Johnson and Harathi].
+//! This algorithm and several others were introduced by
+//! [Mellor-Crummey and Scott] paper. And a simpler correctness proof of the
+//! MCS lock was proposed by [Johnson and Harathi].
 //!
-//! ## Use cases
+//! ## Spinlock use cases
 //!
 //! It is noteworthy to mention that [spinlocks are usually not what you want].
 //! The majority of use cases are well covered by OS-based mutexes like
-//! [`std::sync::Mutex`] or [`parking_lot::Mutex`]. These implementations will
-//! notify the system that the waiting thread should be parked, freeing the
-//! processor to work on something else.
+//! [`std::sync::Mutex`] and [`parking_lot::Mutex`]. These implementations will
+//! notify the system that the waiting thread should be parked, freeing the processor
+//! to work on something else.
 //!
 //! Spinlocks are only efficient in very few circunstances where the overhead
 //! of context switching or process rescheduling are greater than busy waiting
@@ -31,18 +31,19 @@
 //! tailored for optimistic spinning during contention before actually sleeping.
 //! This implementation is `no_std` by default, so it's useful in those environments.
 //!
-//! ## Raw MCS lock
+//! ## Locking with a raw MCS spinlock
 //!
 //! This implementation operates under FIFO. Raw locking APIs require exclusive
 //! access to a locally accessible queue node. This node is represented by the
-//! [`MutexNode`] type. Callers are responsible for instantiating the queue nodes
-//! themselves. This implementation is `no_std` compatible. See [`mod@raw`]
+//! [`raw::MutexNode`] type. Callers are responsible for instantiating the queue
+//! nodes themselves. This implementation is `no_std` compatible. See the [`raw`]
 //! module for more information.
 //!
 //! ```
 //! use std::sync::Arc;
 //! use std::thread;
 //!
+//! // `spins::Mutex` simply spins during contention.
 //! use mcslock::raw::{spins::Mutex, MutexNode};
 //!
 //! let mutex = Arc::new(Mutex::new(0));
@@ -50,21 +51,23 @@
 //!
 //! thread::spawn(move || {
 //!     // A queue node must be mutably accessible.
+//!     // Critical section must be defined as a closure.
 //!     let mut node = MutexNode::new();
-//!     *c_mutex.lock(&mut node) = 10;
+//!     c_mutex.lock_with_then(&mut node, |data| {
+//!         *data = 10;
+//!     });
 //! })
 //! .join().expect("thread::spawn failed");
 //!
-//! // A queue node must be mutably accessible.
-//! let mut node = MutexNode::new();
-//! assert_eq!(*mutex.try_lock(&mut node).unwrap(), 10);
+//! // Critical section must be defined as a closure.
+//! assert_eq!(mutex.try_lock_then(|data| *data.unwrap()), 10);
 //! ```
 //!
-//! ## Thread local MCS queue nodes
+//! ## Thread local queue nodes
 //!
 //! Enables [`raw::Mutex`] locking APIs that operate over queue nodes that are
 //! stored at the thread local storage. These locking APIs require a static
-//! reference to a [`LocalMutexNode`] key. Keys must be generated by the
+//! reference to a [`raw::LocalMutexNode`] key. Keys must be generated by the
 //! [`thread_local_node!`] macro. Thread local nodes are not `no_std` compatible
 //! and can be enabled through the `thread_local` feature.
 //!
@@ -74,6 +77,7 @@
 //! use std::sync::Arc;
 //! use std::thread;
 //!
+//! // `spins::Mutex` simply spins during contention.
 //! use mcslock::raw::spins::Mutex;
 //!
 //! // Requires `thread_local` feature.
@@ -83,28 +87,28 @@
 //! let c_mutex = Arc::clone(&mutex);
 //!
 //! thread::spawn(move || {
-//!     // Local nodes handles are provided by reference.
-//! 
// Critical section must be defined as closure.
-//!     c_mutex.lock_with_local(&NODE, |mut guard| *guard = 10);
+//!     // Local node handles are provided by reference.
+//!     // Critical section must be defined as a closure.
+//!     c_mutex.lock_with_local_then(&NODE, |data| *data = 10);
 //! })
 //! .join().expect("thread::spawn failed");
 //!
-//! // Local nodes handles are provided by reference.
-//! // Critical section must be defined as closure.
-//! assert_eq!(mutex.try_lock_with_local(&NODE, |g| *g.unwrap()), 10);
+//! // Local node handles are provided by reference.
+//! // Critical section must be defined as a closure.
+//! assert_eq!(mutex.try_lock_with_local_then(&NODE, |data| *data.unwrap()), 10);
 //! # }
 //! # #[cfg(not(feature = "thread_local"))]
 //! # fn main() {}
 //! ```
 //!
-//! ## Barging MCS lock
+//! ## Locking with a barging MCS spinlock
 //!
 //! This implementation will have non-waiting threads race for the lock against
 //! the front of the waiting queue thread, which means this it is an unfair lock.
 //! This implementation can be enabled through the `barging` feature, it is
 //! suitable for `no_std` environments, and the locking APIs are compatible with
-//! the `lock_api` crate. See [`mod@barging`] and [`lock_api`] modules for more
-//! information.
+//! the `lock_api` crate. See [`barging`] and [`lock_api`] modules for
+//! more information.
 //!
 //! ```
 //! # #[cfg(feature = "barging")]
@@ -112,6 +116,7 @@
 //! use std::sync::Arc;
 //! use std::thread;
 //!
+//! // Requires `barging` feature.
 //! use mcslock::barging::spins::backoff::Mutex;
 //!
 //! let mutex = Arc::new(Mutex::new(0));
@@ -142,28 +147,29 @@
 //! OS scheduler. This may cause a context switch, so you may not want to enable
 //! this feature if your intention is to to actually do optimistic spinning. The
 //! default implementation calls [`core::hint::spin_loop`], which does in fact
-//! just simply busy-waits. This feature is not `not_std` compatible.
+//! just simply busy-waits. This feature **is not** `no_std` compatible.
 //!
 //! ### thread_local
 //!
 //! The `thread_local` feature enables [`raw::Mutex`] locking APIs that operate
-//! over queue nodes that are stored at the thread local storage. These locking APIs
-//! require a static reference to a [`LocalMutexNode`] key. Keys must be generated
-//! by the [`thread_local_node!`] macro. This feature is not `no_std` compatible.
+//! over queue nodes that are stored at the thread local storage. These locking
+//! APIs require a static reference to a [`raw::LocalMutexNode`] key. Keys must
+//! be generated by the [`thread_local_node!`] macro. This feature **is not**
+//! `no_std` compatible.
 //!
 //! ### barging
 //!
 //! The `barging` feature provides locking APIs that are compatible with the
-//! [lock_api] crate. It does not require node allocations from the caller,
-//! and it is suitable for `no_std` environments. This implementation is not
-//! fair (does not guarantee FIFO), but can improve throughput when the lock
-//! is heavily contended.
+//! [lock_api] crate. It does not require node allocations from the caller. The
+//! [`barging`] module is suitable for `no_std` environments. This implementation
+//! **is not** fair (it does not guarantee FIFO), but can improve throughput when
+//! the lock is heavily contended.
 //!
 //! ### lock_api
 //!
-//! This feature implements the [`RawMutex`] trait from the [lock_api]
-//! crate for [`barging::Mutex`]. Aliases are provided by the
-//! [`lock_api`] module. This feature is `no_std` compatible.
+//! 
This feature implements the [`RawMutex`] trait from the [lock_api] crate for
+//! [`barging::Mutex`]. Aliases are provided by the [`barging::lock_api`]
+//! module, which is `no_std` compatible.
 //!
 //! ## Related projects
 //!
@@ -174,48 +180,43 @@
 //! - mcs-rs: 
 //! - libmcs: 
 //!
-//! [`MutexNode`]: raw::MutexNode
-//! [`LocalMutexNode`]: raw::LocalMutexNode
-//! [`thread_local_node!`]: crate::thread_local_node
-//! [`lock_api`]: barging::lock_api
-//! [`std::sync::Mutex`]: https://doc.rust-lang.org/std/sync/struct.Mutex.html
+//! [lock]: https://en.wikipedia.org/wiki/Lock_(computer_science)
+//! [Mellor-Crummey and Scott]: https://www.cs.rochester.edu/~scott/papers/1991_TOCS_synch.pdf
+//! [Johnson and Harathi]: https://web.archive.org/web/20140411142823/http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf
+//! [spinlocks are usually not what you want]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html
+//! [Linux kernel mutexes]: https://www.kernel.org/doc/html/latest/locking/mutex-design.html
+//!
 //! [`parking_lot::Mutex`]: https://docs.rs/parking_lot/latest/parking_lot/type.Mutex.html
+//! [lock_api]: https://docs.rs/lock_api/latest/lock_api
 //! [`RawMutex`]: https://docs.rs/lock_api/latest/lock_api/trait.RawMutex.html
 //! [`RawMutexFair`]: https://docs.rs/lock_api/latest/lock_api/trait.RawMutexFair.html
-//! [`std::thread::yield_now`]: https://doc.rust-lang.org/std/thread/fn.yield_now.html
-//! [spin-lock]: https://en.wikipedia.org/wiki/Spinlock
-//! [spin-rs]: https://docs.rs/spin/latest/spin
-//! [lock_api]: https://docs.rs/lock_api/latest/lock_api
-//! [Linux kernel mutexes]: https://www.kernel.org/doc/html/latest/locking/mutex-design.html
-//! [spinlocks are usually not what you want]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html
-//! [Mellor-Crummey and Scott]: https://www.cs.rochester.edu/~scott/papers/1991_TOCS_synch.pdf
-//! [Johnson and Harathi]: https://web.archive.org/web/20140411142823/http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf

 #![cfg_attr(
-    all(not(any(feature = "yield", feature = "thread_local")), not(loom), not(test)),
+    all(not(feature = "yield"), not(feature = "thread_local"), not(loom), not(test)),
     no_std
 )]
 #![cfg_attr(docsrs, feature(doc_cfg))]
-#![allow(unexpected_cfgs)]
 #![allow(clippy::module_name_repetitions)]
 #![allow(clippy::inline_always)]
 #![allow(clippy::doc_markdown)]
 #![warn(rust_2021_compatibility)]
 #![warn(missing_docs)]

+#[cfg(feature = "thread_local")]
+#[macro_use]
+pub(crate) mod thread_local;
+
 pub mod raw;
 pub mod relax;

+pub(crate) mod cfg;
+pub(crate) mod inner;
+pub(crate) mod lock;
+
 #[cfg(feature = "barging")]
 #[cfg_attr(docsrs, doc(cfg(feature = "barging")))]
 pub mod barging;

-#[cfg(feature = "thread_local")]
-#[cfg_attr(docsrs, doc(cfg(feature = "thread_local")))]
-mod thread_local;
-
-pub(crate) mod cfg;
-
 #[cfg(test)]
 pub(crate) mod test;
diff --git a/src/lock.rs b/src/lock.rs
new file mode 100644
index 0000000..9c79496
--- /dev/null
+++ b/src/lock.rs
@@ -0,0 +1,120 @@
+use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
+
+use crate::cfg::atomic::AtomicBool;
+use crate::relax::Relax;
+
+/// A `Lock` is some arbitrary data type used by a lock implementation to
+/// manage the state of the lock.
+pub trait Lock {
+    /// Creates a new locked `Lock` instance.
+    ///
+    /// It's expected for an implementing type to be compile-time evaluable,
+    /// since it composes node types that do require it (except Loom based
+    /// nodes).
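+    ///
+    /// For reference, the [`AtomicBool`] implementation further down in this
+    /// file defines this as a plain `Self::new(true)` constant.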
+    #[cfg(not(all(loom, test)))]
+    const LOCKED: Self;
+
+    /// Creates a new unlocked `Lock` instance.
+    ///
+    /// It's expected for an implementing type to be compile-time evaluable,
+    /// since it composes node types that do require it (except Loom based
+    /// nodes).
+    #[cfg(not(all(loom, test)))]
+    const UNLOCKED: Self;
+
+    /// Creates a new locked `Lock` instance with Loom primitives (non-const).
+    ///
+    /// Loom primitives are not compile-time evaluable.
+    #[cfg(all(loom, test))]
+    fn locked() -> Self;
+
+    /// Creates a new unlocked `Lock` instance with Loom primitives (non-const).
+    ///
+    /// Loom primitives are not compile-time evaluable.
+    #[cfg(all(loom, test))]
+    fn unlocked() -> Self;
+
+    /// Tries to lock the mutex with acquire ordering.
+    ///
+    /// Returns `true` if successfully moved from unlocked state to locked
+    /// state, `false` otherwise.
+    fn try_lock_acquire(&self) -> bool;
+
+    /// Tries to lock the mutex with acquire ordering and weak exchange.
+    ///
+    /// Returns `true` if successfully moved from unlocked state to locked
+    /// state, `false` otherwise.
+    fn try_lock_acquire_weak(&self) -> bool;
+
+    /// Blocks the thread until the lock is acquired, applying some arbitrary
+    /// waiting policy while the lock is still held somewhere else.
+    ///
+    /// The lock is loaded with a relaxed ordering.
+    fn lock_wait_relaxed(&self);
+
+    /// Returns `true` if the lock is currently held.
+    ///
+    /// This function does not guarantee strong ordering, only atomicity.
+    fn is_locked(&self) -> bool;
+
+    /// Changes the state of the lock and, possibly, notifies that change
+    /// to some other interested party.
+    fn notify(&self);
+}
+
+/// The waiting policy that should be applied while the lock state has not
+/// reached some target state.
+pub trait Wait {
+    /// The relax operation that will be executed during lock waiting loops.
+    type LockRelax: Relax;
+
+    /// The relax operation that will be executed during unlock waiting loops.
+    type UnlockRelax: Relax;
+}
+
+impl Lock for AtomicBool {
+    #[cfg(not(all(loom, test)))]
+    #[allow(clippy::declare_interior_mutable_const)]
+    const LOCKED: Self = Self::new(true);
+
+    #[cfg(not(all(loom, test)))]
+    #[allow(clippy::declare_interior_mutable_const)]
+    const UNLOCKED: Self = Self::new(false);
+
+    #[cfg(all(loom, test))]
+    #[cfg(not(tarpaulin_include))]
+    fn locked() -> Self {
+        Self::new(true)
+    }
+
+    #[cfg(all(loom, test))]
+    #[cfg(not(tarpaulin_include))]
+    fn unlocked() -> Self {
+        Self::new(false)
+    }
+
+    fn try_lock_acquire(&self) -> bool {
+        self.compare_exchange(false, true, Acquire, Relaxed).is_ok()
+    }
+
+    fn try_lock_acquire_weak(&self) -> bool {
+        self.compare_exchange_weak(false, true, Acquire, Relaxed).is_ok()
+    }
+
+    fn lock_wait_relaxed(&self) {
+        // Block the thread with a relaxed loop until the load returns `false`,
+        // indicating that the lock was handed off to the current thread.
+        let mut relax = W::LockRelax::new();
+        while self.load(Relaxed) {
+            relax.relax();
+        }
+    }
+
+    fn is_locked(&self) -> bool {
+        self.load(Relaxed)
+    }
+
+    fn notify(&self) {
+        self.store(false, Release);
+    }
+}
diff --git a/src/loom.rs b/src/loom.rs
index 51d2ae5..61d19a4 100644
--- a/src/loom.rs
+++ b/src/loom.rs
@@ -1,81 +1,88 @@
-use core::marker::PhantomData;
-use core::ops::{Deref, DerefMut};
-
-use loom::cell::{ConstPtr, MutPtr, UnsafeCell};
-
-/// A trait for guard types that hold exclusive access to the underlying data
-/// behind Loom's [`UnsafeCell`].
-///
-/// # Safety
-///
-/// Must guarantee that an instance of the guard holds exclusive access to its
-/// underlying data through all its lifetime.
-pub unsafe trait Guard: Sized {
-    /// The target type after dereferencing [`GuardDeref`] or [`GuardDerefMut`].
-    type Target: ?Sized;
-
-    /// Returns a shared reference to the underlying [`UnsafeCell`].
-    fn get(&self) -> &UnsafeCell;
-
-    /// Get a Loom immutable pointer bounded by this guard lifetime.
-    fn deref(&self) -> GuardDeref<'_, Self> {
-        GuardDeref::new(self)
+#[cfg(feature = "barging")]
+pub use guard::{Guard, GuardDeref, GuardDerefMut};
+
+#[cfg(feature = "barging")]
+mod guard {
+    use core::marker::PhantomData;
+    use core::ops::{Deref, DerefMut};
+
+    use loom::cell::{ConstPtr, MutPtr, UnsafeCell};
+
+    /// A trait for guard types that protect access to the underlying data
+    /// behind Loom's [`UnsafeCell`].
+    ///
+    /// # Safety
+    ///
+    /// Must guarantee that an instance of the guard is the only access point
+    /// to the underlying data throughout its lifetime.
+    pub unsafe trait Guard: Sized {
+        /// The target type after dereferencing [`GuardDeref`] or [`GuardDerefMut`].
+        type Target: ?Sized;
+
+        /// Returns a shared reference to the underlying [`UnsafeCell`].
+        fn get(&self) -> &UnsafeCell;
+
+        /// Get a Loom immutable pointer bounded by this guard lifetime.
+        fn get_ref(&self) -> GuardDeref<'_, Self> {
+            GuardDeref::new(self)
+        }
+
+        /// Get a Loom mutable pointer bounded by this guard lifetime.
+        fn get_mut(&mut self) -> GuardDerefMut<'_, Self> {
+            GuardDerefMut::new(self)
+        }
     }

-    /// Get a Loom mutable pointer bounded by this guard lifetime.
-    fn deref_mut(&self) -> GuardDerefMut<'_, Self> {
-        GuardDerefMut::new(self)
+    /// A Loom immutable pointer borrowed from a guard instance.
+    pub struct GuardDeref<'a, G: Guard> {
+        ptr: ConstPtr,
+        marker: PhantomData<(&'a G::Target, &'a G)>,
     }
-}
-
-/// A Loom immutable pointer borrowed from a guard instance.
-pub struct GuardDeref<'a, G: Guard> {
-    ptr: ConstPtr,
-    marker: PhantomData<(&'a G::Target, &'a G)>,
-}

-impl GuardDeref<'_, G> {
-    fn new(guard: &G) -> Self {
-        let ptr = guard.get().get();
-        Self { ptr, marker: PhantomData }
+    impl GuardDeref<'_, G> {
+        fn new(guard: &G) -> Self {
+            let ptr = guard.get().get();
+            Self { ptr, marker: PhantomData }
+        }
     }
-}

-impl Deref for GuardDeref<'_, G> {
-    type Target = G::Target;
+    impl Deref for GuardDeref<'_, G> {
+        type Target = G::Target;

-    fn deref(&self) -> &Self::Target {
-        // SAFETY: Our lifetime is bounded by the guard borrow.
-        unsafe { self.ptr.deref() }
+        fn deref(&self) -> &Self::Target {
+            // SAFETY: Our lifetime is bounded by the guard borrow.
+            unsafe { self.ptr.deref() }
+        }
     }
-}

-/// A Loom mutable pointer borrowed from a guard instance.
-pub struct GuardDerefMut<'a, G: Guard> {
-    ptr: MutPtr,
-    marker: PhantomData<(&'a G::Target, &'a G)>,
-}
+    /// A Loom mutable pointer borrowed from a guard instance.
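+    ///
+    /// The `&'a mut` markers in the `PhantomData` below keep this pointer
+    /// invariant over `G::Target` and tie it to an exclusive borrow of the
+    /// guard.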
+ pub struct GuardDerefMut<'a, G: Guard> { + ptr: MutPtr, + marker: PhantomData<(&'a mut G::Target, &'a mut G)>, + } -impl GuardDerefMut<'_, G> { - fn new(guard: &G) -> Self { - let ptr = guard.get().get_mut(); - Self { ptr, marker: PhantomData } + impl GuardDerefMut<'_, G> { + #[allow(clippy::needless_pass_by_ref_mut)] + fn new(guard: &mut G) -> Self { + let ptr = guard.get().get_mut(); + Self { ptr, marker: PhantomData } + } } -} -impl Deref for GuardDerefMut<'_, G> { - type Target = G::Target; + impl Deref for GuardDerefMut<'_, G> { + type Target = G::Target; - fn deref(&self) -> &Self::Target { - // SAFETY: Our lifetime is bounded by the guard borrow. - unsafe { self.ptr.deref() } + fn deref(&self) -> &Self::Target { + // SAFETY: Our lifetime is bounded by the guard borrow. + unsafe { self.ptr.deref() } + } } -} -impl DerefMut for GuardDerefMut<'_, G> { - fn deref_mut(&mut self) -> &mut Self::Target { - // SAFETY: Our lifetime is bounded by the guard borrow. - unsafe { self.ptr.deref() } + impl DerefMut for GuardDerefMut<'_, G> { + fn deref_mut(&mut self) -> &mut Self::Target { + // SAFETY: Our lifetime is bounded by the guard borrow. + unsafe { self.ptr.deref() } + } } } @@ -85,7 +92,7 @@ pub mod models { use loom::sync::Arc; use loom::{model, thread}; - use crate::test::{Guard, LockWith}; + use crate::test::{AsDeref, AsDerefMut, LockThen, TryLockThen}; type Int = usize; // TODO: Three or more threads make lock models run for too long. It would @@ -96,72 +103,72 @@ pub mod models { const TRY_LOCKS: Int = 3; /// Increments a shared integer. - fn inc>(lock: &Arc) { - lock.lock_with(|guard| *guard.deref_mut() += 1); + fn inc>(lock: &Arc) { + lock.lock_then(|mut data| *data.as_deref_mut() += 1); } /// Tries to increment a shared integer. - fn try_inc>(lock: &Arc) { - lock.try_lock_with(|opt| opt.map(|guard| *guard.deref_mut() += 1)); + fn try_inc>(lock: &Arc) { + lock.try_lock_then(|opt| opt.map(|mut data| *data.as_deref_mut() += 1)); } /// Get the shared integer. - fn get>(lock: &Arc) -> Int { - lock.lock_with(|guard| *guard.deref()) + fn get>(lock: &Arc) -> Int { + lock.lock_then(|data| *data.as_deref()) } /// Evaluates that concurrent `try_lock` calls will serialize all mutations /// against the shared data, therefore no data races. - pub fn try_lock_join + 'static>() { + pub fn try_lock_join + 'static>() { model(|| { const RUNS: Int = TRY_LOCKS; - let data = Arc::new(L::new(0)); + let lock = Arc::new(L::new(0)); let handles: [_; RUNS] = array::from_fn(|_| { - let data = Arc::clone(&data); - thread::spawn(move || try_inc(&data)) + let lock = Arc::clone(&lock); + thread::spawn(move || try_inc(&lock)) }); for handle in handles { handle.join().unwrap(); } - let data = get(&data); - assert!((1..=RUNS).contains(&data)); + let value = get(&lock); + assert!((1..=RUNS).contains(&value)); }); } /// Evaluates that concurrent `lock` calls will serialize all mutations /// against the shared data, therefore no data races. 
-    pub fn lock_join + 'static>() {
+    pub fn lock_join + 'static>() {
         model(|| {
             const RUNS: Int = LOCKS;
-            let data = Arc::new(L::new(0));
+            let lock = Arc::new(L::new(0));
             let handles: [_; RUNS] = array::from_fn(|_| {
-                let data = Arc::clone(&data);
-                thread::spawn(move || inc(&data))
+                let lock = Arc::clone(&lock);
+                thread::spawn(move || inc(&lock))
             });
             for handle in handles {
                 handle.join().unwrap();
             }
-            let data = get(&data);
-            assert_eq!(RUNS, data);
+            let value = get(&lock);
+            assert_eq!(RUNS, value);
         });
     }

     /// Evaluates that concurrent `lock` and `try_lock` calls will serialize
     /// all mutations against the shared data, therefore no data races.
-    pub fn mixed_lock_join + 'static>() {
+    pub fn mixed_lock_join + 'static>() {
         model(|| {
             const RUNS: Int = LOCKS;
-            let data = Arc::new(L::new(0));
+            let lock = Arc::new(L::new(0));
             let handles: [_; RUNS] = array::from_fn(|run| {
-                let data = Arc::clone(&data);
+                let lock = Arc::clone(&lock);
                 let f = if run % 2 == 0 { inc } else { try_inc };
-                thread::spawn(move || f(&data))
+                thread::spawn(move || f(&lock))
             });
             for handle in handles {
                 handle.join().unwrap();
             }
-            let data = get(&data);
-            assert!((1..=RUNS).contains(&data));
+            let value = get(&lock);
+            assert!((1..=RUNS).contains(&value));
         });
     }
 }
diff --git a/src/raw/mod.rs b/src/raw/mod.rs
index f23e06a..1ea7164 100644
--- a/src/raw/mod.rs
+++ b/src/raw/mod.rs
@@ -1,46 +1,50 @@
-//! A MCS lock implementation that requires exclusive access to a locally
-//! accessible queue node.
+//! MCS lock implementation.
 //!
 //! The `raw` implementation of MCS lock is fair, that is, it guarantees that
 //! thread that have waited for longer will be scheduled first (FIFO). Each
 //! waiting thread will spin against its own, locally-accessible atomic lock
 //! state, which then avoids the network contention of the state access.
 //!
-//! This module provides an implementation that is `no_std` compatible, but
-//! also requires that queue nodes must be allocated by the callers. Queue
+//! This module provides an implementation that is `no_std` compatible, and
+//! it also requires that queue nodes be allocated by the callers. Queue
 //! nodes are represented by the [`MutexNode`] type.
 //!
-//! The lock is hold for as long as its associated RAII guard is in scope. Once
-//! the guard is dropped, the mutex is freed. Mutex guards are returned by
-//! [`lock`] and [`try_lock`]. Guards are also accessible as the closure argument
-//! for [`lock_with`] and [`try_lock_with`] methods.
+//! The lock is held for the duration of the locking closure scope provided
+//! to [`Mutex`]'s [`try_lock_then`], [`try_lock_with_then`], [`lock_then`] and
+//! [`lock_with_then`] methods.
 //!
-//! The Mutex is generic over the relax strategy. User may choose a strategy
-//! as long as it implements the [`Relax`] trait. There is a number of strategies
-//! provided by the [`relax`] module. Each module in `raw` provides type aliases
-//! for [`Mutex`] and [`MutexGuard`] associated with one relax strategy. See
-//! their documentation for more information.
+//! This Mutex is generic over the relax policy. Users may choose a policy as long
+//! as it implements the [`Relax`] trait.
 //!
-//! [`lock`]: Mutex::lock
-//! [`try_lock`]: Mutex::try_lock
-//! [`lock_with`]: Mutex::lock_with
-//! [`try_lock_with`]: Mutex::try_lock_with
+//! There are a number of relax policies provided by the [`relax`] module. The
+//! following modules provide type aliases for [`Mutex`] associated with a relax
+//! policy.
See their documentation for more information. +//! +//! [`try_lock_then`]: Mutex::try_lock_then +//! [`try_lock_with_then`]: Mutex::try_lock_with_then +//! [`lock_then`]: Mutex::lock_then +//! [`lock_with_then`]: Mutex::lock_with_then //! [`relax`]: crate::relax //! [`Relax`]: crate::relax::Relax mod mutex; -pub use mutex::{Mutex, MutexGuard, MutexNode}; +pub use mutex::{Mutex, MutexNode}; #[cfg(feature = "thread_local")] -pub use crate::thread_local::LocalMutexNode; +#[cfg_attr(docsrs, doc(cfg(feature = "thread_local")))] +mod thread_local; +#[cfg(feature = "thread_local")] +pub use thread_local::LocalMutexNode; -/// A `raw` MCS lock alias that signals the processor that it is running a -/// busy-wait spin-loop during lock contention. +/// A MCS lock that implements a `spin` relax policy. +/// +/// During lock contention, this lock spins while signaling the processor that +/// it is running a busy-wait spin-loop. pub mod spins { use super::mutex; use crate::relax::Spin; - /// A `raw` MCS lock that implements the [`Spin`] relax strategy. + /// A [`raw::Mutex`] that implements the [`Spin`] relax policy. /// /// # Example /// @@ -49,22 +53,22 @@ pub mod spins { /// /// let mutex = Mutex::new(0); /// let mut node = MutexNode::new(); - /// let guard = mutex.lock(&mut node); - /// assert_eq!(*guard, 0); + /// let value = mutex.lock_with_then(&mut node, |data| *data); + /// assert_eq!(value, 0); /// ``` + /// [`raw::Mutex`]: mutex::Mutex pub type Mutex = mutex::Mutex; - /// A `raw` MCS guard that implements the [`Spin`] relax strategy. - pub type MutexGuard<'a, T> = mutex::MutexGuard<'a, T, Spin>; - - /// A `raw` MCS lock alias that, during lock contention, will perform - /// exponential backoff while signaling the processor that it is running a - /// busy-wait spin-loop. + /// A MCS lock that implements a `spin with backoff` relax policy. + /// + /// During lock contention, this lock will perform exponential backoff + /// while spinning, signaling the processor that it is running a busy-wait + /// spin-loop. pub mod backoff { use super::mutex; use crate::relax::SpinBackoff; - /// A `raw` MCS lock that implements the [`SpinBackoff`] relax strategy. + /// A [`raw::Mutex`] that implements the [`SpinBackoff`] relax policy. /// /// # Example /// @@ -73,25 +77,25 @@ pub mod spins { /// /// let mutex = Mutex::new(0); /// let mut node = MutexNode::new(); - /// let guard = mutex.lock(&mut node); - /// assert_eq!(*guard, 0); + /// let value = mutex.lock_with_then(&mut node, |data| *data); + /// assert_eq!(value, 0); /// ``` + /// [`raw::Mutex`]: mutex::Mutex pub type Mutex = mutex::Mutex; - - /// A `raw` MCS guard that implements the [`SpinBackoff`] relax strategy. - pub type MutexGuard<'a, T> = mutex::MutexGuard<'a, T, SpinBackoff>; } } -/// A `raw` MCS lock alias that yields the current time slice to the OS scheduler -/// during lock contention. +/// A MCS lock that implements a `yield` relax policy. +/// +/// During lock contention, this lock will yield the current time slice to the +/// OS scheduler. #[cfg(any(feature = "yield", loom, test))] #[cfg_attr(docsrs, doc(cfg(feature = "yield")))] pub mod yields { use super::mutex; use crate::relax::Yield; - /// A `raw` MCS lock that implements the [`Yield`] relax strategy. + /// A [`raw::Mutex`] that implements the [`Yield`] relax policy. 
/// /// # Example /// @@ -100,23 +104,21 @@ pub mod yields { /// /// let mutex = Mutex::new(0); /// let mut node = MutexNode::new(); - /// let guard = mutex.lock(&mut node); - /// assert_eq!(*guard, 0); + /// let value = mutex.lock_with_then(&mut node, |data| *data); + /// assert_eq!(value, 0); /// ``` + /// [`raw::Mutex`]: mutex::Mutex pub type Mutex = mutex::Mutex; - /// A `raw` MCS guard that implements the [`Yield`] relax strategy. - pub type MutexGuard<'a, T> = mutex::MutexGuard<'a, T, Yield>; - - /// A `raw` MCS lock alias that, during lock contention, will perform - /// exponential backoff while spinning up to a threshold, then yields - /// back to the OS scheduler. - #[cfg(feature = "yield")] + /// A MCS lock that implements a `yield with backoff` relax policy. + /// + /// During lock contention, this lock will perform exponential backoff while + /// spinning, up to a threshold, then yields back to the OS scheduler. pub mod backoff { use super::mutex; use crate::relax::YieldBackoff; - /// A `raw` MCS lock that implements the [`YieldBackoff`] relax strategy. + /// A [`raw::Mutex`] that implements the [`YieldBackoff`] relax policy. /// /// # Example /// @@ -125,23 +127,23 @@ pub mod yields { /// /// let mutex = Mutex::new(0); /// let mut node = MutexNode::new(); - /// let guard = mutex.lock(&mut node); - /// assert_eq!(*guard, 0); + /// let value = mutex.lock_with_then(&mut node, |data| *data); + /// assert_eq!(value, 0); /// ``` + /// [`raw::Mutex`]: mutex::Mutex pub type Mutex = mutex::Mutex; - - /// A `raw` MCS guard that implements the [`YieldBackoff`] relax strategy. - pub type MutexGuard<'a, T> = mutex::MutexGuard<'a, T, YieldBackoff>; } } -/// A `raw` MCS lock alias that rapidly spins without telling the CPU to do any -/// power down during lock contention. +/// A MCS lock that implements a `loop` relax policy. +/// +/// During lock contention, this lock will rapidly spin without telling the CPU +/// to do any power down. pub mod loops { use super::mutex; use crate::relax::Loop; - /// A `raw` MCS lock that implements the [`Loop`] relax strategy. + /// A [`raw::Mutex`] that implements the [`Loop`] relax policy. /// /// # Example /// @@ -150,11 +152,9 @@ pub mod loops { /// /// let mutex = Mutex::new(0); /// let mut node = MutexNode::new(); - /// let guard = mutex.lock(&mut node); - /// assert_eq!(*guard, 0); + /// let value = mutex.lock_with_then(&mut node, |data| *data); + /// assert_eq!(value, 0); /// ``` + /// [`raw::Mutex`]: mutex::Mutex pub type Mutex = mutex::Mutex; - - /// A `raw` MCS guard that implements the [`Loop`] relax strategy. - pub type MutexGuard<'a, T> = mutex::MutexGuard<'a, T, Loop>; } diff --git a/src/raw/mutex.rs b/src/raw/mutex.rs index 07e0f90..9a2ace3 100644 --- a/src/raw/mutex.rs +++ b/src/raw/mutex.rs @@ -1,61 +1,28 @@ -use core::fmt; -use core::marker::PhantomData; -use core::mem::MaybeUninit; -use core::ptr; -use core::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release}; - -use crate::cfg::atomic::{fence, AtomicBool, AtomicPtr}; -use crate::cfg::cell::{UnsafeCell, WithUnchecked}; -use crate::relax::Relax; - -/// The inner definition of [`MutexNode`], which is known to be in a initialized -/// state. -#[derive(Debug)] -struct MutexNodeInit { - next: AtomicPtr, - locked: AtomicBool, -} +use core::fmt::{self, Debug, Formatter}; +use core::ops::{Deref, DerefMut}; -impl MutexNodeInit { - /// Crates a new `MutexNodeInit` instance. 
-    #[cfg(not(all(loom, test)))]
-    #[inline]
-    const fn new() -> Self {
-        let next = AtomicPtr::new(ptr::null_mut());
-        let locked = AtomicBool::new(true);
-        Self { next, locked }
-    }
-
-    /// Creates a new Loom based `MutexNodeInit` instance (non-const).
-    #[cfg(all(loom, test))]
-    #[cfg(not(tarpaulin_include))]
-    fn new() -> Self {
-        let next = AtomicPtr::new(ptr::null_mut());
-        let locked = AtomicBool::new(true);
-        Self { next, locked }
-    }
+use crate::cfg::atomic::AtomicBool;
+use crate::inner::raw as inner;
+use crate::relax::{Relax, RelaxWait};

-    /// Returns a raw mutable pointer of this node.
-    const fn as_ptr(&self) -> *mut Self {
-        (self as *const Self).cast_mut()
-    }
-}
+#[cfg(test)]
+use crate::test::{LockNew, LockThen, TryLockThen};

 /// A locally-accessible record for forming the waiting queue.
 ///
 /// `MutexNode` is an opaque type that holds metadata for the [`Mutex`]'s
 /// waiting queue. To acquire a MCS lock, an instance of queue node must be
-/// reachable and mutably borrowed for the duration of some associated
-/// [`MutexGuard`]. Once the guard is dropped, a node instance can be reused as
-/// the backing allocation for another lock acquisition. See [`lock`] and
-/// [`try_lock`] methods on [`Mutex`].
+/// reachable and mutably borrowed for the duration of some associated locking
+/// closure. Once the closure goes out of scope, a node instance can be reused
+/// as the backing allocation for another lock acquisition. See [`lock_with_then`]
+/// and [`try_lock_with_then`] methods on [`Mutex`].
 ///
-/// [`lock`]: Mutex::lock
-/// [`try_lock`]: Mutex::try_lock
-#[repr(transparent)]
+/// [`lock_with_then`]: Mutex::lock_with_then
+/// [`try_lock_with_then`]: Mutex::try_lock_with_then
 #[derive(Debug)]
+#[repr(transparent)]
 pub struct MutexNode {
-    inner: MaybeUninit,
+    inner: inner::MutexNode,
 }

 impl MutexNode {
@@ -71,17 +38,32 @@ impl MutexNode {
     #[must_use]
     #[inline(always)]
     pub const fn new() -> Self {
-        Self { inner: MaybeUninit::uninit() }
+        Self { inner: inner::MutexNode::new() }
+    }
+}
+
+#[cfg(not(tarpaulin_include))]
+#[doc(hidden)]
+impl Deref for MutexNode {
+    type Target = inner::MutexNode;

-    /// Initializes this node and returns a exclusive reference to the initialized
-    /// inner state.
-    fn initialize(&mut self) -> &mut MutexNodeInit {
-        self.inner.write(MutexNodeInit::new())
+    #[inline(always)]
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+
+#[doc(hidden)]
+impl DerefMut for MutexNode {
+    #[inline(always)]
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
     }
 }

+#[cfg(not(tarpaulin_include))]
 impl Default for MutexNode {
+    #[inline(always)]
     fn default() -> Self {
         Self::new()
     }
@@ -92,9 +74,10 @@ impl Default for MutexNode {
 /// This mutex will block threads waiting for the lock to become available. The
 /// mutex can also be statically initialized or created via a [`new`]
 /// constructor. Each mutex has a type parameter which represents the data that
-/// it is protecting. The data can only be accessed through the RAII guards
-/// returned from [`lock`] and [`try_lock`], which guarantees that the data is only
-/// ever accessed when the mutex is locked.
+/// it is protecting. The data can only be accessed through closure parameters
+/// provided by [`lock_then`], [`lock_with_then`], [`try_lock_then`] and
+/// [`try_lock_with_then`], which guarantee that the data is only ever accessed
+/// when the mutex is locked.
 ///
 /// # Examples
 ///
 /// ```
 /// use std::sync::Arc;
 /// use std::thread;
 /// use std::sync::mpsc::channel;
 ///
-/// use mcslock::raw::{Mutex, MutexNode};
+/// use mcslock::raw::{self, MutexNode};
 /// use mcslock::relax::Spin;
 ///
-/// type SpinMutex = Mutex;
+/// type Mutex = raw::Mutex;
 ///
 /// const N: usize = 10;
 ///
@@ -115,7 +98,7 @@ impl Default for MutexNode {
 /// //
 /// // Here we're using an Arc to share memory among threads, and the data inside
 /// // the Arc is protected with a mutex.
-/// let data = Arc::new(SpinMutex::new(0));
+/// let data = Arc::new(Mutex::new(0));
 ///
 /// let (tx, rx) = channel();
 /// for _ in 0..N {
@@ -129,27 +112,28 @@
 ///         //
 ///         // We unwrap() the return value to assert that we are not expecting
 ///         // threads to ever fail while holding the lock.
-///         let mut data = data.lock(&mut node);
-///         *data += 1;
-///         if *data == N {
-///             tx.send(()).unwrap();
-///         }
-///         // the lock is unlocked here when `data` goes out of scope.
+///         data.lock_with_then(&mut node, |data| {
+///             *data += 1;
+///             if *data == N {
+///                 tx.send(()).unwrap();
+///             }
+///             // The lock is unlocked here at the end of the closure scope.
+///         });
 ///     });
 /// }
 ///
 /// rx.recv().unwrap();
 /// ```
 /// [`new`]: Mutex::new
-/// [`lock`]: Mutex::lock
-/// [`try_lock`]: Mutex::try_lock
+/// [`lock_then`]: Mutex::lock_then
+/// [`lock_with_then`]: Mutex::lock_with_then
+/// [`try_lock_then`]: Mutex::try_lock_then
+/// [`try_lock_with_then`]: Mutex::try_lock_with_then
 pub struct Mutex {
-    tail: AtomicPtr,
-    marker: PhantomData,
-    data: UnsafeCell,
+    pub(super) inner: inner::Mutex>,
 }

-// Same unsafe impls as `std::sync::Mutex`.
+// Same unsafe impls as `crate::inner::raw::Mutex`.
 unsafe impl Send for Mutex {}
 unsafe impl Sync for Mutex {}

@@ -159,29 +143,25 @@ impl Mutex {
     /// # Examples
     ///
     /// ```
-    /// use mcslock::raw::Mutex;
+    /// use mcslock::raw;
     /// use mcslock::relax::Spin;
     ///
-    /// type SpinMutex = Mutex;
+    /// type Mutex = raw::Mutex;
     ///
-    /// const MUTEX: SpinMutex = SpinMutex::new(0);
-    /// let mutex = SpinMutex::new(0);
+    /// const MUTEX: Mutex = Mutex::new(0);
+    /// let mutex = Mutex::new(0);
     /// ```
     #[cfg(not(all(loom, test)))]
     #[inline]
     pub const fn new(value: T) -> Self {
-        let tail = AtomicPtr::new(ptr::null_mut());
-        let data = UnsafeCell::new(value);
-        Self { tail, data, marker: PhantomData }
+        Self { inner: inner::Mutex::new(value) }
     }

     /// Creates a new unlocked mutex with Loom primitives (non-const).
     #[cfg(all(loom, test))]
     #[cfg(not(tarpaulin_include))]
     pub(crate) fn new(value: T) -> Self {
-        let tail = AtomicPtr::new(ptr::null_mut());
-        let data = UnsafeCell::new(value);
-        Self { tail, data, marker: PhantomData }
+        Self { inner: inner::Mutex::new(value) }
     }

     /// Consumes this mutex, returning the underlying data.
@@ -189,30 +169,32 @@ impl Mutex {
     /// # Examples
     ///
     /// ```
-    /// use mcslock::raw::Mutex;
+    /// use mcslock::raw;
     /// use mcslock::relax::Spin;
     ///
-    /// type SpinMutex = Mutex;
+    /// type Mutex = raw::Mutex;
     ///
-    /// let mutex = SpinMutex::new(0);
+    /// let mutex = Mutex::new(0);
     /// assert_eq!(mutex.into_inner(), 0);
     /// ```
     #[inline(always)]
     pub fn into_inner(self) -> T {
-        self.data.into_inner()
+        self.inner.into_inner()
     }
 }

 impl Mutex {
-    /// Attempts to acquire this mutex without blocking the thread.
+    /// Attempts to acquire this mutex and then runs a closure against the
+    /// protected data.
     ///
     /// If the lock could not be acquired at this time, then [`None`] is returned.
-    /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
-    /// guard is dropped.
+    /// Otherwise, a [`Some`] with a `&mut T` is returned. The lock will be
+    /// unlocked once the closure goes out of scope.
     ///
-    /// To acquire a MCS lock through this function, it's also required a mutably
-    /// borrowed queue node, which is a record that keeps a link for forming the
-    /// queue, see [`MutexNode`].
+    /// This function transparently allocates a [`MutexNode`] on the stack for
+    /// each call, and so it will not reuse the same node for other calls.
+    /// Consider calling [`try_lock_with_then`] if you want to reuse node
+    /// allocations.
     ///
     /// This function does not block.
     ///
@@ -222,106 +204,119 @@ impl Mutex {
     /// use std::sync::Arc;
     /// use std::thread;
     ///
-    /// use mcslock::raw::{Mutex, MutexNode};
+    /// use mcslock::raw;
     /// use mcslock::relax::Spin;
     ///
-    /// type SpinMutex = Mutex;
+    /// type Mutex = raw::Mutex;
     ///
-    /// let mutex = Arc::new(SpinMutex::new(0));
+    /// let mutex = Arc::new(Mutex::new(0));
     /// let c_mutex = Arc::clone(&mutex);
     ///
     /// thread::spawn(move || {
-    ///     let mut node = MutexNode::new();
-    ///     let mut guard = c_mutex.try_lock(&mut node);
-    ///     if let Some(mut guard) = guard {
-    ///         *guard = 10;
-    ///     } else {
-    ///         println!("try_lock failed");
-    ///     }
+    ///     c_mutex.try_lock_then(|data| {
+    ///         if let Some(data) = data {
+    ///             *data = 10;
+    ///         } else {
+    ///             println!("try_lock failed");
+    ///         }
+    ///     });
     /// })
     /// .join().expect("thread::spawn failed");
     ///
-    /// let mut node = MutexNode::new();
-    /// assert_eq!(*mutex.lock(&mut node), 10);
+    /// let value = mutex.lock_then(|data| *data);
+    /// assert_eq!(value, 10);
+    /// ```
+    ///
+    /// Compile fail: borrows of the data cannot escape the given closure:
+    ///
+    /// ```compile_fail,E0515
+    /// use mcslock::raw::spins::Mutex;
+    ///
+    /// let mutex = Mutex::new(1);
+    /// let borrow = mutex.try_lock_then(|data| &*data.unwrap());
     /// ```
+    /// [`try_lock_with_then`]: Mutex::try_lock_with_then
     #[inline]
-    pub fn try_lock<'a>(&'a self, node: &'a mut MutexNode) -> Option> {
-        let node = node.initialize();
-        self.tail
-            .compare_exchange(ptr::null_mut(), node.as_ptr(), AcqRel, Relaxed)
-            .map(|_| MutexGuard::new(self, node))
-            .ok()
+    pub fn try_lock_then(&self, f: F) -> Ret
+    where
+        F: FnOnce(Option<&mut T>) -> Ret,
+    {
+        let mut node = MutexNode::new();
+        self.try_lock_with_then(&mut node, f)
     }

-    /// Attempts to acquire this mutex and then runs a closure against its guard.
+    /// Attempts to acquire this mutex and then runs a closure against the
+    /// protected data.
     ///
-    /// If the lock could not be acquired at this time, then a [`None`] value is
-    /// given back as the closure argument. If the lock has been acquired, then
-    /// a [`Some`] value with the mutex guard is given instead. The lock will be
-    /// unlocked when the guard is dropped.
+    /// If the lock could not be acquired at this time, then [`None`] is returned.
+    /// Otherwise, a [`Some`] with a `&mut T` is returned. The lock will be
+    /// unlocked once the closure goes out of scope.
    ///
-    /// This function instantiates a [`MutexNode`] for each call, which is
-    /// convenient for one-liners by not particularly efficient on hot paths.
-    /// If that is your use case, consider calling [`try_lock`] in busy loops
-    /// while reusing one single node allocation.
+    /// To acquire an MCS lock through this function, a mutably borrowed queue
+    /// node is also required; the node is a record that keeps a link for
+    /// forming the queue, see [`MutexNode`].
     ///
     /// This function does not block.
     ///
+    /// # Examples
+    ///
     /// ```
     /// use std::sync::Arc;
     /// use std::thread;
     ///
-    /// use mcslock::raw::Mutex;
+    /// use mcslock::raw::{self, MutexNode};
     /// use mcslock::relax::Spin;
     ///
-    /// type SpinMutex = Mutex;
+    /// type Mutex = raw::Mutex;
     ///
-    /// let mutex = Arc::new(SpinMutex::new(0));
+    /// let mutex = Arc::new(Mutex::new(0));
     /// let c_mutex = Arc::clone(&mutex);
     ///
     /// thread::spawn(move || {
-    ///     c_mutex.try_lock_with(|guard| {
-    ///         if let Some(mut guard) = guard {
-    ///             *guard = 10;
+    ///     let mut node = MutexNode::new();
+    ///     c_mutex.try_lock_with_then(&mut node, |data| {
+    ///         if let Some(data) = data {
+    ///             *data = 10;
     ///         } else {
-    ///             println!("try_lock_with failed");
+    ///             println!("try_lock failed");
     ///         }
     ///     });
     /// })
     /// .join().expect("thread::spawn failed");
     ///
-    /// assert_eq!(mutex.lock_with(|guard| *guard), 10);
+    /// let mut node = MutexNode::new();
+    /// let value = mutex.lock_with_then(&mut node, |data| *data);
+    /// assert_eq!(value, 10);
     /// ```
     ///
-    /// Compile fail: borrows of the guard or its data cannot escape the given
-    /// closure:
+    /// Compile fail: borrows of the data cannot escape the given closure:
     ///
     /// ```compile_fail,E0515
-    /// use mcslock::raw::spins::Mutex;
+    /// use mcslock::raw::{spins::Mutex, MutexNode};
     ///
     /// let mutex = Mutex::new(1);
-    /// let data = mutex.try_lock_with(|guard| &*guard.unwrap());
+    /// let mut node = MutexNode::new();
+    /// let borrow = mutex.try_lock_with_then(&mut node, |data| &*data.unwrap());
     /// ```
-    /// [`try_lock`]: Mutex::try_lock
     #[inline]
-    pub fn try_lock_with(&self, f: F) -> Ret
+    pub fn try_lock_with_then<'a, F, Ret>(&'a self, node: &'a mut MutexNode, f: F) -> Ret
     where
-        F: FnOnce(Option>) -> Ret,
+        F: FnOnce(Option<&mut T>) -> Ret,
     {
-        let mut node = MutexNode::new();
-        f(self.try_lock(&mut node))
+        self.inner.try_lock_with_then(&mut node.inner, f)
     }

-    /// Acquires this mutex, blocking the current thread until it is able to do so.
+    /// Acquires this mutex and then runs the closure against the protected data.
     ///
     /// This function will block the local thread until it is available to acquire
-    /// the mutex. Upon returning, the thread is the only thread with the lock
-    /// held. An RAII guard is returned to allow scoped unlock of the lock. When
-    /// the guard goes out of scope, the mutex will be unlocked.
+    /// the mutex. Upon acquiring the mutex, the user provided closure will be
+    /// executed against the mutex protected data. Once the closure goes out of
+    /// scope, it will unlock the mutex.
     ///
-    /// To acquire a MCS lock through this function, it's also required a mutably
-    /// borrowed queue node, which is a record that keeps a link for forming the
-    /// queue, see [`MutexNode`].
+    /// This function transparently allocates a [`MutexNode`] on the stack for
+    /// each call, and so it will not reuse the same node for other calls.
+    /// Consider calling [`lock_with_then`] if you want to reuse node
+    /// allocations.
     ///
     /// This function will block if the lock is unavailable.
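The `compile_fail` examples above show that borrows cannot leave the closure; the working pattern is to return an owned value instead. A hedged sketch, reusing the same assumed type alias as earlier:

```rust
use mcslock::raw::{self, MutexNode};
use mcslock::relax::Spin;

type Mutex<T> = raw::Mutex<T, Spin>;

fn main() {
    let mutex = Mutex::new(String::from("mcs"));
    let mut node = MutexNode::new();
    // Clone (or otherwise take ownership) inside the closure; the
    // `&mut String` itself never escapes, so this compiles while the
    // E0515 examples above do not.
    let snapshot = mutex.lock_with_then(&mut node, |data| data.clone());
    assert_eq!(snapshot, "mcs");
}
```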
     ///
@@ -331,51 +326,50 @@ impl Mutex {
     /// use std::sync::Arc;
     /// use std::thread;
     ///
-    /// use mcslock::raw::{Mutex, MutexNode};
+    /// use mcslock::raw;
     /// use mcslock::relax::Spin;
     ///
-    /// type SpinMutex = Mutex;
+    /// type Mutex = raw::Mutex;
     ///
-    /// let mutex = Arc::new(SpinMutex::new(0));
+    /// let mutex = Arc::new(Mutex::new(0));
     /// let c_mutex = Arc::clone(&mutex);
     ///
     /// thread::spawn(move || {
-    ///     let mut node = MutexNode::new();
-    ///     *c_mutex.lock(&mut node) = 10;
+    ///     c_mutex.lock_then(|data| *data = 10);
     /// })
     /// .join().expect("thread::spawn failed");
     ///
-    /// let mut node = MutexNode::new();
-    /// assert_eq!(*mutex.lock(&mut node), 10);
+    /// assert_eq!(mutex.lock_then(|data| *data), 10);
+    /// ```
+    ///
+    /// Compile fail: borrows of the data cannot escape the given closure:
+    ///
+    /// ```compile_fail,E0515
+    /// use mcslock::raw::spins::Mutex;
+    ///
+    /// let mutex = Mutex::new(1);
+    /// let borrow = mutex.lock_then(|data| &*data);
     /// ```
+    /// [`lock_with_then`]: Mutex::lock_with_then
     #[inline]
-    pub fn lock<'a>(&'a self, node: &'a mut MutexNode) -> MutexGuard<'a, T, R> {
-        let node = node.initialize();
-        let pred = self.tail.swap(node.as_ptr(), AcqRel);
-        // If we have a predecessor, complete the link so it will notify us.
-        if !pred.is_null() {
-            // SAFETY: Already verified that predecessor is not null.
-            unsafe { &*pred }.next.store(node.as_ptr(), Release);
-            let mut relax = R::new();
-            while node.locked.load(Relaxed) {
-                relax.relax();
-            }
-            fence(Acquire);
-        }
-        MutexGuard::new(self, node)
-    }
-
-    /// Acquires this mutex and then runs the closure against its guard.
+    pub fn lock_then(&self, f: F) -> Ret
+    where
+        F: FnOnce(&mut T) -> Ret,
+    {
+        let mut node = MutexNode::new();
+        self.lock_with_then(&mut node, f)
+    }
+
+    /// Acquires this mutex and then runs the closure against the protected data.
     ///
     /// This function will block the local thread until it is available to acquire
     /// the mutex. Upon acquiring the mutex, the user provided closure will be
-    /// executed against the mutex guard. Once the guard goes out of scope, it
-    /// will unlock the mutex.
+    /// executed against the mutex protected data. Once the closure goes out of
+    /// scope, it will unlock the mutex.
     ///
-    /// This function instantiates a [`MutexNode`] for each call, which is
-    /// convenient for one-liners by not particularly efficient on hot paths.
-    /// If that is your use case, consider calling [`lock`] in the busy loop
-    /// while reusing one single node allocation.
+    /// To acquire an MCS lock through this function, a mutably borrowed queue
+    /// node is also required; the node is a record that keeps a link for
+    /// forming the queue, see [`MutexNode`].
    ///
     /// This function will block if the lock is unavailable.
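The docs above recommend `lock_with_then` over `lock_then` when a node can be reused. A sketch of that hot-path pattern, under the same assumed alias:

```rust
use mcslock::raw::{self, MutexNode};
use mcslock::relax::Spin;

type Mutex<T> = raw::Mutex<T, Spin>;

fn main() {
    let mutex = Mutex::new(0u64);
    // A single stack allocation backs every acquisition in the loop,
    // avoiding the per-call node that `lock_then` transparently creates.
    let mut node = MutexNode::new();
    for _ in 0..1_000 {
        mutex.lock_with_then(&mut node, |data| *data += 1);
    }
    assert_eq!(mutex.lock_with_then(&mut node, |data| *data), 1_000);
}
```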
/// @@ -385,61 +379,39 @@ impl Mutex { /// use std::sync::Arc; /// use std::thread; /// - /// use mcslock::raw::Mutex; + /// use mcslock::raw::{self, MutexNode}; /// use mcslock::relax::Spin; /// - /// type SpinMutex = Mutex; + /// type Mutex = raw::Mutex; /// - /// let mutex = Arc::new(SpinMutex::new(0)); + /// let mutex = Arc::new(Mutex::new(0)); /// let c_mutex = Arc::clone(&mutex); /// /// thread::spawn(move || { - /// c_mutex.lock_with(|mut guard| *guard = 10); + /// let mut node = MutexNode::new(); + /// c_mutex.lock_with_then(&mut node, |data| *data = 10); /// }) /// .join().expect("thread::spawn failed"); /// - /// assert_eq!(mutex.lock_with(|guard| *guard), 10); + /// let mut node = MutexNode::new(); + /// assert_eq!(mutex.lock_with_then(&mut node, |data| *data), 10); /// ``` /// - /// Compile fail: borrows of the guard or its data cannot escape the given - /// closure: + /// Compile fail: borrows of the data cannot escape the given closure: /// /// ```compile_fail,E0515 - /// use mcslock::raw::spins::Mutex; + /// use mcslock::raw::{spins::Mutex, MutexNode}; /// /// let mutex = Mutex::new(1); - /// let data = mutex.lock_with(|guard| &*guard); + /// let mut node = MutexNode::new(); + /// let borrow = mutex.lock_with_then(&mut node, |data| &*data); /// ``` - /// [`lock`]: Mutex::lock #[inline] - pub fn lock_with(&self, f: F) -> Ret + pub fn lock_with_then<'a, F, Ret>(&'a self, node: &'a mut MutexNode, f: F) -> Ret where - F: FnOnce(MutexGuard<'_, T, R>) -> Ret, + F: FnOnce(&mut T) -> Ret, { - let mut node = MutexNode::new(); - f(self.lock(&mut node)) - } - - /// Unlocks this mutex. If there is a successor node in the queue, the lock - /// is passed directly to them. - fn unlock(&self, node: &MutexNodeInit) { - let mut next = node.next.load(Relaxed); - // If we don't have a known successor currently, - if next.is_null() { - // and we are the tail, then dequeue and free the lock. - let false = self.try_unlock(node.as_ptr()) else { return }; - // But if we are not the tail, then we have a pending successor. We - // must wait for them to finish linking with us. - let mut relax = R::new(); - loop { - next = node.next.load(Relaxed); - let true = next.is_null() else { break }; - relax.relax(); - } - } - fence(Acquire); - // SAFETY: We already verified that our successor is not null. - unsafe { &*next }.locked.store(false, Release); + self.inner.lock_with_then(&mut node.inner, f) } } @@ -452,23 +424,23 @@ impl Mutex { /// # Example /// /// ``` - /// use mcslock::raw::{Mutex, MutexNode}; + /// use mcslock::raw; /// use mcslock::relax::Spin; /// - /// type SpinMutex = Mutex; + /// type Mutex = raw::Mutex; /// - /// let mutex = SpinMutex::new(0); - /// let mut node = MutexNode::new(); + /// let mutex = Mutex::new(0); /// - /// let guard = mutex.lock(&mut node); - /// drop(guard); + /// mutex.lock_then(|_data| { + /// assert_eq!(mutex.is_locked(), true); + /// }); /// /// assert_eq!(mutex.is_locked(), false); /// ``` #[inline] pub fn is_locked(&self) -> bool { // Relaxed is sufficient because this method only guarantees atomicity. - !self.tail.load(Relaxed).is_null() + self.inner.is_locked() } /// Returns a mutable reference to the underlying data. 
@@ -479,31 +451,24 @@ impl Mutex { /// # Examples /// /// ``` - /// use mcslock::raw::{Mutex, MutexNode}; + /// use mcslock::raw; /// use mcslock::relax::Spin; /// - /// type SpinMutex = Mutex; + /// type Mutex = raw::Mutex; /// - /// let mut mutex = SpinMutex::new(0); + /// let mut mutex = Mutex::new(0); /// *mutex.get_mut() = 10; /// - /// let mut node = MutexNode::new(); - /// assert_eq!(*mutex.lock(&mut node), 10); + /// assert_eq!(mutex.lock_then(|data| *data), 10); /// ``` #[cfg(not(all(loom, test)))] #[inline(always)] pub fn get_mut(&mut self) -> &mut T { - // SAFETY: We hold exclusive access to the Mutex data. - unsafe { &mut *self.data.get() } - } - - /// Unlocks the lock if the candidate node is the queue's tail. - fn try_unlock(&self, node: *mut MutexNodeInit) -> bool { - self.tail.compare_exchange(node, ptr::null_mut(), Release, Relaxed).is_ok() + self.inner.get_mut() } } -impl Default for Mutex { +impl Default for Mutex { /// Creates a `Mutex`, with the `Default` value for `T`. #[inline] fn default() -> Self { @@ -519,20 +484,14 @@ impl From for Mutex { } } -impl fmt::Debug for Mutex { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut node = MutexNode::new(); - let mut d = f.debug_struct("Mutex"); - match self.try_lock(&mut node) { - Some(guard) => guard.with(|data| d.field("data", &data)), - None => d.field("data", &format_args!("")), - }; - d.finish() +impl Debug for Mutex { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + self.inner.fmt(f) } } #[cfg(test)] -impl crate::test::LockNew for Mutex { +impl LockNew for Mutex { type Target = T; fn new(value: Self::Target) -> Self @@ -544,24 +503,27 @@ impl crate::test::LockNew for Mutex { } #[cfg(test)] -impl crate::test::LockWith for Mutex { - type Guard<'a> = MutexGuard<'a, Self::Target, R> +impl LockThen for Mutex { + type Guard<'a> = &'a mut Self::Target where Self: 'a, Self::Target: 'a; - fn try_lock_with(&self, f: F) -> Ret + fn lock_then(&self, f: F) -> Ret where - F: FnOnce(Option>) -> Ret, + F: FnOnce(&mut Self::Target) -> Ret, { - self.try_lock_with(f) + self.lock_then(f) } +} - fn lock_with(&self, f: F) -> Ret +#[cfg(test)] +impl TryLockThen for Mutex { + fn try_lock_then(&self, f: F) -> Ret where - F: FnOnce(MutexGuard<'_, T, R>) -> Ret, + F: FnOnce(Option<&mut Self::Target>) -> Ret, { - self.lock_with(f) + self.try_lock_then(f) } fn is_locked(&self) -> bool { @@ -583,142 +545,34 @@ impl crate::test::LockData for Mutex { } } -/// An RAII implementation of a "scoped lock" of a mutex. When this structure is -/// dropped (falls out of scope), the lock will be unlocked. -/// -/// The data protected by the mutex can be access through this guard via its -/// [`Deref`] and [`DerefMut`] implementations. -/// -/// This structure is returned by [`lock`] and [`try_lock`] methods on [`Mutex`]. -/// It is also given as closure argument by [`lock_with`] and [`try_lock_with`] -/// methods. -/// -/// [`Deref`]: core::ops::Deref -/// [`DerefMut`]: core::ops::DerefMut -/// [`lock`]: Mutex::lock -/// [`try_lock`]: Mutex::lock -/// [`lock_with`]: Mutex::lock_with -/// [`try_lock_with`]: Mutex::try_lock_with -#[must_use = "if unused the Mutex will immediately unlock"] -pub struct MutexGuard<'a, T: ?Sized, R: Relax> { - lock: &'a Mutex, - node: &'a MutexNodeInit, -} - -// `std::sync::MutexGuard` is not Send for pthread compatibility, but this -// implementation is safe to be Send. -unsafe impl Send for MutexGuard<'_, T, R> {} -// Same unsafe Sync impl as `std::sync::MutexGuard`. 
-unsafe impl Sync for MutexGuard<'_, T, R> {} - -impl<'a, T: ?Sized, R: Relax> MutexGuard<'a, T, R> { - /// Creates a new `MutexGuard` instance. - const fn new(lock: &'a Mutex, node: &'a MutexNodeInit) -> Self { - Self { lock, node } - } - - /// Runs `f` against an shared reference pointing to the underlying data. - fn with(&self, f: F) -> Ret - where - F: FnOnce(&T) -> Ret, - { - // SAFETY: A guard instance holds the lock locked. - unsafe { self.lock.data.with_unchecked(f) } - } -} - -impl<'a, T: ?Sized, R: Relax> Drop for MutexGuard<'a, T, R> { - #[inline] - fn drop(&mut self) { - self.lock.unlock(self.node); - } -} - -impl<'a, T: ?Sized + fmt::Debug, R: Relax> fmt::Debug for MutexGuard<'a, T, R> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.with(|data| fmt::Debug::fmt(data, f)) - } -} - -impl<'a, T: ?Sized + fmt::Display, R: Relax> fmt::Display for MutexGuard<'a, T, R> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.with(|data| fmt::Display::fmt(data, f)) - } -} - -#[cfg(not(all(loom, test)))] -impl<'a, T: ?Sized, R: Relax> core::ops::Deref for MutexGuard<'a, T, R> { - type Target = T; - - /// Dereferences the guard to access the underlying data. - #[inline(always)] - fn deref(&self) -> &T { - // SAFETY: A guard instance holds the lock locked. - unsafe { &*self.lock.data.get() } - } -} - -#[cfg(not(all(loom, test)))] -impl<'a, T: ?Sized, R: Relax> core::ops::DerefMut for MutexGuard<'a, T, R> { - /// Mutably dereferences the guard to access the underlying data. - #[inline(always)] - fn deref_mut(&mut self) -> &mut T { - // SAFETY: A guard instance holds the lock locked. - unsafe { &mut *self.lock.data.get() } - } -} - -/// SAFETY: A guard instance hold the lock locked, with exclusive access to the -/// underlying data. 
-#[cfg(all(loom, test))]
-#[cfg(not(tarpaulin_include))]
-unsafe impl crate::loom::Guard for MutexGuard<'_, T, R> {
-    type Target = T;
-
-    fn get(&self) -> &loom::cell::UnsafeCell {
-        &self.lock.data
-    }
-}
-
 #[cfg(all(not(loom), test))]
 mod test {
-    use super::{MutexNode, MutexNodeInit};
-    use crate::raw::yields::Mutex;
     use crate::test::tests;

     #[test]
-    fn node_drop_does_not_matter() {
-        assert!(!core::mem::needs_drop::());
-        assert!(!core::mem::needs_drop::());
+    fn node_waiter_drop_does_not_matter() {
+        tests::node_waiter_drop_does_not_matter::();
     }

     #[test]
-    fn node_default_and_new_init() {
-        let mut d = MutexNode::default();
-        let d_init = d.initialize();
-        assert!(d_init.next.get_mut().is_null());
-        assert!(*d_init.locked.get_mut());
-
-        let mut n = MutexNode::new();
-        let n_init = n.initialize();
-        assert!(n_init.next.get_mut().is_null());
-        assert!(*n_init.locked.get_mut());
+    fn lots_and_lots_lock() {
+        tests::lots_and_lots_lock::>();
     }

     #[test]
-    fn lots_and_lots() {
-        tests::lots_and_lots::>();
+    fn lots_and_lots_try_lock() {
+        tests::lots_and_lots_try_lock::>();
     }

     #[test]
-    fn smoke() {
-        tests::smoke::>();
+    fn lots_and_lots_mixed_lock() {
+        tests::lots_and_lots_mixed_lock::>();
     }

     #[test]
-    fn test_guard_debug_display() {
-        tests::test_guard_debug_display::>();
+    fn smoke() {
+        tests::smoke::>();
     }

     #[test]
diff --git a/src/raw/thread_local.rs b/src/raw/thread_local.rs
new file mode 100644
index 0000000..0eba7ed
--- /dev/null
+++ b/src/raw/thread_local.rs
@@ -0,0 +1,742 @@
+use core::cell::RefCell;
+
+use super::{Mutex, MutexNode};
+use crate::cfg::thread::LocalKey;
+use crate::inner::raw as inner;
+use crate::relax::Relax;
+
+#[cfg(test)]
+use crate::test::{LockNew, LockThen, TryLockThen};
+
+type Key = &'static LocalMutexNode;
+
+/// Declares a new [`raw::LocalMutexNode`] key, which is a handle to the thread
+/// local node of the currently running thread.
+///
+/// The macro wraps any number of static declarations and makes them thread
+/// local. Each provided name is associated with a single thread local key. The
+/// keys are wrapped and managed by the [`LocalMutexNode`] type, which provides
+/// the actual handles meant to be used with the `lock_with_local_then` API
+/// family from [`raw::Mutex`]. Handles are provided by reference to functions.
+///
+/// See: [`try_lock_with_local_then`], [`lock_with_local_then`],
+/// [`try_lock_with_local_then_unchecked`] or [`lock_with_local_then_unchecked`].
+///
+/// The thread local node definition generated by this macro avoids lazy
+/// initialization and does not need to be dropped, which enables a more
+/// efficient underlying implementation. See [`std::thread_local!`] macro.
+///
+/// # Syntax
+///
+/// * Allows multiple static definitions, which must be separated by semicolons.
+/// * Visibility is optional (private by default).
+/// * Requires the `static` keyword and an **UPPER_SNAKE_CASE** name.
+///
+/// # Example
+///
+/// ```
+/// use mcslock::raw::spins::Mutex;
+///
+/// // Multiple definitions.
+/// mcslock::thread_local_node! {
+///     pub static NODE;
+///     static OTHER_NODE1;
+/// }
+///
+/// // Single definition.
+/// mcslock::thread_local_node!(pub static OTHER_NODE2);
+///
+/// let mutex = Mutex::new(0);
+/// // Keys are provided to APIs by reference.
+/// mutex.lock_with_local_then(&NODE, |data| *data = 10);
+/// assert_eq!(mutex.lock_with_local_then(&NODE, |data| *data), 10);
+/// ```
+/// [`raw::Mutex`]: Mutex
+/// [`raw::LocalMutexNode`]: LocalMutexNode
+/// [`std::thread_local!`]: https://doc.rust-lang.org/std/macro.thread_local.html
+/// [`try_lock_with_local_then`]: Mutex::try_lock_with_local_then
+/// [`lock_with_local_then`]: Mutex::lock_with_local_then
+/// [`try_lock_with_local_then_unchecked`]: Mutex::try_lock_with_local_then_unchecked
+/// [`lock_with_local_then_unchecked`]: Mutex::lock_with_local_then_unchecked
+#[macro_export]
+macro_rules! thread_local_node {
+    // Empty (base for recursion).
+    () => {};
+    // Process multiple definitions (recursive).
+    ($vis:vis static $node:ident; $($rest:tt)*) => {
+        $crate::__thread_local_node_inner!($vis $node, raw);
+        $crate::thread_local_node!($($rest)*);
+    };
+    // Process single declaration.
+    ($vis:vis static $node:ident) => {
+        $crate::__thread_local_node_inner!($vis $node, raw);
+    };
+}
+
+/// A handle to a [`MutexNode`] stored in the thread local storage.
+///
+/// Thread local nodes can be claimed for temporary, exclusive access during
+/// runtime for locking purposes. Node handles refer to the node stored in
+/// the currently running thread.
+///
+/// Just like `MutexNode`, this is an opaque type that holds metadata for the
+/// [`raw::Mutex`]'s waiting queue. You must declare a thread local node with
+/// the [`thread_local_node!`] macro, and provide the generated handle to the
+/// appropriate [`raw::Mutex`] locking APIs. Attempting to lock a mutex with a
+/// thread local node that is already in use by the locking thread will cause
+/// a panic. Handles are provided by reference to functions.
+///
+/// See: [`try_lock_with_local_then`], [`lock_with_local_then`],
+/// [`try_lock_with_local_then_unchecked`] or [`lock_with_local_then_unchecked`].
+///
+/// [`MutexNode`]: MutexNode
+/// [`raw::Mutex`]: Mutex
+/// [`try_lock_with_local_then`]: Mutex::try_lock_with_local_then
+/// [`lock_with_local_then`]: Mutex::lock_with_local_then
+/// [`try_lock_with_local_then_unchecked`]: Mutex::try_lock_with_local_then_unchecked
+/// [`lock_with_local_then_unchecked`]: Mutex::lock_with_local_then_unchecked
+#[derive(Debug)]
+#[repr(transparent)]
+pub struct LocalMutexNode {
+    inner: inner::LocalMutexNode,
+}
+
+#[cfg(not(tarpaulin_include))]
+impl LocalMutexNode {
+    /// Creates a new `LocalMutexNode` key from the provided thread local node
+    /// key.
+    ///
+    /// This function is **NOT** part of the public API and so must not be
+    /// called directly by the user's code. It is subject to change **WITHOUT**
+    /// prior notice or accompanying SemVer changes.
+    #[cfg(not(all(loom, test)))]
+    #[doc(hidden)]
+    #[must_use]
+    #[inline(always)]
+    pub const fn __new(key: LocalKey>) -> Self {
+        let inner = inner::LocalMutexNode::new(key);
+        Self { inner }
+    }
+
+    /// Creates a new Loom based `LocalMutexNode` key from the provided thread
+    /// local node key.
+    #[cfg(all(loom, test))]
+    #[must_use]
+    pub(crate) const fn new(key: &'static LocalKey>) -> Self {
+        let inner = inner::LocalMutexNode::new(key);
+        Self { inner }
+    }
+}
+
+impl Mutex {
+    /// Attempts to acquire this mutex and then runs a closure against the
+    /// protected data.
+    ///
+    /// If the lock could not be acquired at this time, then a [`None`] value is
+    /// given back as the closure argument. If the lock has been acquired, then
+    /// a [`Some`] value with the mutex protected data is given instead. The lock
+    /// will be unlocked when the closure scope ends.
+    ///
+    /// To acquire an MCS lock through this function, a queue node is also
+    /// required; the node is a record that keeps a link for forming the queue,
+    /// and it must be stored in the locking thread's local storage. See
+    /// [`LocalMutexNode`] and [`thread_local_node!`].
+    ///
+    /// This function does not block.
+    ///
+    /// # Panics
+    ///
+    /// Will panic if the thread local node is already mutably borrowed.
+    ///
+    /// Panics if the key currently has its destructor running, and it **may**
+    /// panic if the destructor has previously been run for this thread.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::Arc;
+    /// use std::thread;
+    ///
+    /// use mcslock::raw::spins::Mutex;
+    ///
+    /// mcslock::thread_local_node!(static NODE);
+    ///
+    /// let mutex = Arc::new(Mutex::new(0));
+    /// let c_mutex = Arc::clone(&mutex);
+    ///
+    /// thread::spawn(move || {
+    ///     c_mutex.try_lock_with_local_then(&NODE, |data| {
+    ///         if let Some(data) = data {
+    ///             *data = 10;
+    ///         } else {
+    ///             println!("try_lock_with_local_then failed");
+    ///         }
+    ///     });
+    /// })
+    /// .join().expect("thread::spawn failed");
+    ///
+    /// assert_eq!(mutex.lock_with_local_then(&NODE, |data| *data), 10);
+    /// ```
+    ///
+    /// Compile fail: borrows of the data cannot escape the given closure:
+    ///
+    /// ```compile_fail,E0515
+    /// use mcslock::raw::spins::Mutex;
+    ///
+    /// mcslock::thread_local_node!(static NODE);
+    ///
+    /// let mutex = Mutex::new(1);
+    /// let borrow = mutex.try_lock_with_local_then(&NODE, |data| &*data.unwrap());
+    /// ```
+    ///
+    /// Panic: thread local node cannot be borrowed more than once at the same
+    /// time:
+    ///
+    #[doc = concat!("```should_panic,", already_borrowed_error!())]
+    /// use mcslock::raw::spins::Mutex;
+    ///
+    /// mcslock::thread_local_node!(static NODE);
+    ///
+    /// let mutex = Mutex::new(0);
+    ///
+    /// mutex.lock_with_local_then(&NODE, |_data| {
+    ///     // `NODE` is already mutably borrowed in this thread by the
+    ///     // enclosing `lock_with_local_then`, the borrow is live for the full
+    ///     // duration of this closure scope.
+    ///     let mutex = Mutex::new(());
+    ///     mutex.try_lock_with_local_then(&NODE, |_data| ());
+    /// });
+    /// ```
+    #[inline]
+    #[track_caller]
+    pub fn try_lock_with_local_then(&self, node: Key, f: F) -> Ret
+    where
+        F: FnOnce(Option<&mut T>) -> Ret,
+    {
+        self.inner.try_lock_with_local_then(&node.inner, f)
+    }
+
+    /// Attempts to acquire this mutex and then runs a closure against the
+    /// protected data.
+    ///
+    /// If the lock could not be acquired at this time, then a [`None`] value is
+    /// given back as the closure argument. If the lock has been acquired, then
+    /// a [`Some`] value with the mutex protected data is given instead. The lock
+    /// will be unlocked when the closure scope ends.
+    ///
+    /// To acquire an MCS lock through this function, a queue node is also
+    /// required; the node is a record that keeps a link for forming the queue,
+    /// and it must be stored in the locking thread's local storage. See
+    /// [`LocalMutexNode`] and [`thread_local_node!`].
+    ///
+    /// This function does not block.
+    ///
+    /// # Safety
+    ///
+    /// Unlike [`try_lock_with_local_then`], this method is unsafe because it does
+    /// not check if the current thread local node is already mutably borrowed.
+    /// If the current thread local node is already borrowed, calling this
+    /// function is undefined behavior.
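One way to keep the unchecked variant sound is to confine it to a leaf function that provably performs no nested locking through the same thread-local node. A sketch under that assumption (the `try_increment` helper is hypothetical, not part of this patch):

```rust
use mcslock::raw::spins::Mutex;

mcslock::thread_local_node!(static NODE);

// Hypothetical leaf helper: it never re-enters a locking API, so `NODE`
// cannot already be mutably borrowed while this function runs.
fn try_increment(counter: &Mutex<u32>) -> Option<u32> {
    // SAFETY: `NODE` is only borrowed here, for the duration of this
    // call, and the closure performs no nested locking.
    unsafe {
        counter.try_lock_with_local_then_unchecked(&NODE, |data| {
            data.map(|value| {
                *value += 1;
                *value
            })
        })
    }
}

fn main() {
    let counter = Mutex::new(0u32);
    assert_eq!(try_increment(&counter), Some(1));
}
```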
+    ///
+    /// # Panics
+    ///
+    /// Panics if the key currently has its destructor running, and it **may**
+    /// panic if the destructor has previously been run for this thread.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::Arc;
+    /// use std::thread;
+    ///
+    /// use mcslock::raw::spins::Mutex;
+    ///
+    /// mcslock::thread_local_node!(static NODE);
+    ///
+    /// let mutex = Arc::new(Mutex::new(0));
+    /// let c_mutex = Arc::clone(&mutex);
+    ///
+    /// thread::spawn(move || unsafe {
+    ///     c_mutex.try_lock_with_local_then_unchecked(&NODE, |data| {
+    ///         if let Some(data) = data {
+    ///             *data = 10;
+    ///         } else {
+    ///             println!("try_lock_with_local_then_unchecked failed");
+    ///         }
+    ///     });
+    /// })
+    /// .join().expect("thread::spawn failed");
+    ///
+    /// assert_eq!(mutex.lock_with_local_then(&NODE, |d| *d), 10);
+    /// ```
+    ///
+    /// Compile fail: borrows of the data cannot escape the given closure:
+    ///
+    /// ```compile_fail,E0515
+    /// use mcslock::raw::spins::Mutex;
+    ///
+    /// mcslock::thread_local_node!(static NODE);
+    ///
+    /// let mutex = Mutex::new(1);
+    /// let data = unsafe {
+    ///     mutex.try_lock_with_local_then_unchecked(&NODE, |g| &*g.unwrap())
+    /// };
+    /// ```
+    ///
+    /// Undefined behavior: thread local node cannot be borrowed more than once
+    /// at the same time:
+    ///
+    /// ```no_run
+    /// use mcslock::raw::spins::Mutex;
+    ///
+    /// mcslock::thread_local_node!(static NODE);
+    ///
+    /// let mutex = Mutex::new(0);
+    ///
+    /// mutex.lock_with_local_then(&NODE, |_data| unsafe {
+    ///     // UB: `NODE` is already mutably borrowed in this thread by the
+    ///     // enclosing `lock_with_local_then`, the borrow is live for the full
+    ///     // duration of this closure scope.
+    ///     let mutex = Mutex::new(());
+    ///     mutex.try_lock_with_local_then_unchecked(&NODE, |_data| ());
+    /// });
+    /// ```
+    /// [`try_lock_with_local_then`]: Mutex::try_lock_with_local_then
+    #[inline]
+    pub unsafe fn try_lock_with_local_then_unchecked(&self, node: Key, f: F) -> Ret
+    where
+        F: FnOnce(Option<&mut T>) -> Ret,
+    {
+        self.inner.try_lock_with_local_then_unchecked(&node.inner, f)
+    }
+
+    /// Acquires this mutex and then runs the closure against the protected data.
+    ///
+    /// This function will block the local thread until it is available to acquire
+    /// the mutex. Upon acquiring the mutex, the user provided closure will be
+    /// executed against the mutex protected data. Once the closure goes out of
+    /// scope, it will unlock the mutex.
+    ///
+    /// To acquire an MCS lock through this function, a queue node is also
+    /// required; the node is a record that keeps a link for forming the queue,
+    /// and it must be stored in the locking thread's local storage. See
+    /// [`LocalMutexNode`] and [`thread_local_node!`].
+    ///
+    /// This function will block if the lock is unavailable.
+    ///
+    /// # Panics
+    ///
+    /// Will panic if the thread local node is already mutably borrowed.
+    ///
+    /// Panics if the key currently has its destructor running, and it **may**
+    /// panic if the destructor has previously been run for this thread.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::Arc;
+    /// use std::thread;
+    ///
+    /// use mcslock::raw::spins::Mutex;
+    ///
+    /// mcslock::thread_local_node!(static NODE);
+    ///
+    /// let mutex = Arc::new(Mutex::new(0));
+    /// let c_mutex = Arc::clone(&mutex);
+    ///
+    /// thread::spawn(move || {
+    ///     c_mutex.lock_with_local_then(&NODE, |data| *data = 10);
+    /// })
+    /// .join().expect("thread::spawn failed");
+    ///
+    /// assert_eq!(mutex.lock_with_local_then(&NODE, |data| *data), 10);
+    /// ```
+    ///
+    /// Compile fail: borrows of the data cannot escape the given closure:
+    ///
+    /// ```compile_fail,E0515
+    /// use mcslock::raw::spins::Mutex;
+    ///
+    /// mcslock::thread_local_node!(static NODE);
+    ///
+    /// let mutex = Mutex::new(1);
+    /// let borrow = mutex.lock_with_local_then(&NODE, |data| &*data);
+    /// ```
+    ///
+    /// Panic: thread local node cannot be borrowed more than once at the same
+    /// time:
+    ///
+    #[doc = concat!("```should_panic,", already_borrowed_error!())]
+    /// use mcslock::raw::spins::Mutex;
+    ///
+    /// mcslock::thread_local_node!(static NODE);
+    ///
+    /// let mutex = Mutex::new(0);
+    ///
+    /// mutex.lock_with_local_then(&NODE, |_data| {
+    ///     // `NODE` is already mutably borrowed in this thread by the
+    ///     // enclosing `lock_with_local_then`, the borrow is live for the full
+    ///     // duration of this closure scope.
+    ///     let mutex = Mutex::new(());
+    ///     mutex.lock_with_local_then(&NODE, |_data| ());
+    /// });
+    /// ```
+    #[inline]
+    #[track_caller]
+    pub fn lock_with_local_then(&self, node: Key, f: F) -> Ret
+    where
+        F: FnOnce(&mut T) -> Ret,
+    {
+        self.inner.lock_with_local_then(&node.inner, f)
+    }
+
+    /// Acquires this mutex and then runs the closure against the protected data.
+    ///
+    /// This function will block the local thread until it is available to acquire
+    /// the mutex. Upon acquiring the mutex, the user provided closure will be
+    /// executed against the mutex protected data. Once the closure goes out of
+    /// scope, it will unlock the mutex.
+    ///
+    /// To acquire an MCS lock through this function, a queue node is also
+    /// required; the node is a record that keeps a link for forming the queue,
+    /// and it must be stored in the locking thread's local storage. See
+    /// [`LocalMutexNode`] and [`thread_local_node!`].
+    ///
+    /// This function will block if the lock is unavailable.
+    ///
+    /// # Safety
+    ///
+    /// Unlike [`lock_with_local_then`], this method is unsafe because it does not
+    /// check if the current thread local node is already mutably borrowed. If
+    /// the current thread local node is already borrowed, calling this
+    /// function is undefined behavior.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the key currently has its destructor running, and it **may**
+    /// panic if the destructor has previously been run for this thread.
+ /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// use std::thread; + /// + /// use mcslock::raw::spins::Mutex; + /// + /// mcslock::thread_local_node!(static NODE); + /// + /// let mutex = Arc::new(Mutex::new(0)); + /// let c_mutex = Arc::clone(&mutex); + /// + /// thread::spawn(move || unsafe { + /// c_mutex.lock_with_local_then_unchecked(&NODE, |data| *data = 10); + /// }) + /// .join().expect("thread::spawn failed"); + /// + /// assert_eq!(mutex.lock_with_local_then(&NODE, |data| *data), 10); + /// ``` + /// + /// Compile fail: borrows of the data cannot escape the given closure: + /// + /// ```compile_fail,E0515 + /// use mcslock::raw::spins::Mutex; + /// + /// mcslock::thread_local_node!(static NODE); + /// + /// let mutex = Mutex::new(1); + /// let data = unsafe { + /// mutex.lock_with_local_then_unchecked(&NODE, |data| &*data) + /// }; + /// ``` + /// + /// Undefined behavior: thread local node cannot be borrowed more than once + /// at the same time: + /// + /// ```no_run + /// use mcslock::raw::spins::Mutex; + /// + /// mcslock::thread_local_node!(static NODE); + /// + /// let mutex = Mutex::new(0); + /// + /// mutex.lock_with_local_then(&NODE, |_data| unsafe { + /// // UB: `NODE` is already mutably borrowed in this thread by the + /// // enclosing `lock_with_local_then`, the borrow is live for the full + /// // duration of this closure scope. + /// let mutex = Mutex::new(()); + /// mutex.lock_with_local_then_unchecked(&NODE, |_data| ()); + /// }); + /// ``` + /// [`lock_with_local_then`]: Mutex::lock_with_local_then + #[inline] + pub unsafe fn lock_with_local_then_unchecked(&self, node: Key, f: F) -> Ret + where + F: FnOnce(&mut T) -> Ret, + { + self.inner.lock_with_local_then_unchecked(&node.inner, f) + } + + /// Mutable borrows must not escape the closure. + /// + /// ```compile_fail + /// use mcslock::raw::spins::Mutex; + /// mcslock::thread_local_node!(static NODE); + /// + /// let mutex = Mutex::new(1); + /// let borrow = mutex.lock_with_local_then(&NODE, |data| data); + /// ``` + /// + /// ```compile_fail,E0521 + /// use std::thread; + /// use mcslock::raw::spins::Mutex; + /// mcslock::thread_local_node!(static NODE); + /// + /// let mutex = Mutex::new(1); + /// mutex.lock_with_local_then(&NODE, |data| { + /// thread::spawn(move || { + /// let data = data; + /// }); + /// }); + /// ``` + #[cfg(not(tarpaulin_include))] + const fn __borrows_must_not_escape_closure() {} +} + +// A thread local node definition used for testing. +#[cfg(test)] +#[cfg(not(tarpaulin_include))] +thread_local_node!(static TEST_NODE); + +/// A Mutex wrapper type that calls `lock_with_local_then` and +/// `try_lock_with_local_then` when implementing testing traits. 
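Before the test wrappers below, it is worth illustrating why this design scales: every thread owns its own copy of a thread-local node, so many threads can lock through the same `NODE` key concurrently without contending over the node itself. A sketch using only APIs shown in this file:

```rust
use std::sync::Arc;
use std::thread;

use mcslock::raw::spins::Mutex;

mcslock::thread_local_node!(static NODE);

fn main() {
    let mutex = Arc::new(Mutex::new(0));
    let handles: Vec<_> = (0..4)
        .map(|_| {
            let mutex = Arc::clone(&mutex);
            // Each spawned thread borrows its *own* thread-local node,
            // so these lock calls never clash over `NODE`.
            thread::spawn(move || {
                mutex.lock_with_local_then(&NODE, |data| *data += 1);
            })
        })
        .collect();
    for handle in handles {
        handle.join().expect("thread::spawn failed");
    }
    assert_eq!(mutex.lock_with_local_then(&NODE, |data| *data), 4);
}
```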
+#[cfg(test)] +struct MutexPanic(Mutex); + +#[cfg(test)] +impl LockNew for MutexPanic { + type Target = T; + + fn new(value: Self::Target) -> Self + where + Self::Target: Sized, + { + Self(Mutex::new(value)) + } +} + +#[cfg(test)] +impl LockThen for MutexPanic { + type Guard<'a> = &'a mut Self::Target + where + Self: 'a, + Self::Target: 'a; + + fn lock_then(&self, f: F) -> Ret + where + F: FnOnce(&mut Self::Target) -> Ret, + { + self.0.lock_with_local_then(&TEST_NODE, f) + } +} + +#[cfg(test)] +impl TryLockThen for MutexPanic { + fn try_lock_then(&self, f: F) -> Ret + where + F: FnOnce(Option<&mut Self::Target>) -> Ret, + { + self.0.try_lock_with_local_then(&TEST_NODE, f) + } + + fn is_locked(&self) -> bool { + self.0.is_locked() + } +} + +/// A Mutex wrapper type that calls `lock_with_local_then_unchecked` and +/// `try_lock_with_local_then_unchecked` when implementing testing traits. +#[cfg(test)] +struct MutexUnchecked(Mutex); + +#[cfg(test)] +impl LockNew for MutexUnchecked { + type Target = T; + + fn new(value: Self::Target) -> Self + where + Self::Target: Sized, + { + Self(Mutex::new(value)) + } +} + +#[cfg(test)] +impl LockThen for MutexUnchecked { + type Guard<'a> = &'a mut Self::Target + where + Self: 'a, + Self::Target: 'a; + + fn lock_then(&self, f: F) -> Ret + where + F: FnOnce(&mut Self::Target) -> Ret, + { + // SAFETY: caller must guarantee that this thread local node is not + // already mutably borrowed for some other lock acquisition. + unsafe { self.0.lock_with_local_then_unchecked(&TEST_NODE, f) } + } +} + +#[cfg(test)] +impl TryLockThen for MutexUnchecked { + fn try_lock_then(&self, f: F) -> Ret + where + F: FnOnce(Option<&mut T>) -> Ret, + { + // SAFETY: caller must guarantee that this thread local node is not + // already mutably borrowed for some other lock acquisition. 
+ unsafe { self.0.try_lock_with_local_then_unchecked(&TEST_NODE, f) } + } + + fn is_locked(&self) -> bool { + self.0.is_locked() + } +} + +#[cfg(all(not(loom), test))] +mod test { + use crate::raw::MutexNode; + use crate::relax::Yield; + use crate::test::tests; + + type MutexPanic = super::MutexPanic; + type MutexUnchecked = super::MutexUnchecked; + + #[test] + fn ref_cell_node_drop_does_not_matter() { + use core::{cell::RefCell, mem}; + assert!(!mem::needs_drop::>()); + } + + #[test] + fn lots_and_lots_lock() { + tests::lots_and_lots_lock::>(); + } + + #[test] + fn lots_and_lots_lock_unchecked() { + tests::lots_and_lots_lock::>(); + } + + #[test] + fn lots_and_lots_try_lock() { + tests::lots_and_lots_try_lock::>(); + } + + #[test] + fn lots_and_lots_try_lock_unchecked() { + tests::lots_and_lots_try_lock::>(); + } + + #[test] + fn lots_and_lots_mixed_lock() { + tests::lots_and_lots_mixed_lock::>(); + } + + #[test] + fn lots_and_lots_mixed_lock_unchecked() { + tests::lots_and_lots_mixed_lock::>(); + } + + #[test] + fn smoke() { + tests::smoke::>(); + } + + #[test] + fn smoke_unchecked() { + tests::smoke::>(); + } + + #[test] + fn test_try_lock() { + tests::test_try_lock::>(); + } + + #[test] + fn test_try_lock_unchecked() { + tests::test_try_lock::>(); + } + + #[test] + #[should_panic = already_borrowed_error!()] + fn test_lock_arc_nested() { + tests::test_lock_arc_nested::, MutexPanic<_>>(); + } + + #[test] + #[should_panic = already_borrowed_error!()] + fn test_acquire_more_than_one_lock() { + tests::test_acquire_more_than_one_lock::>(); + } + + #[test] + fn test_lock_arc_access_in_unwind() { + tests::test_lock_arc_access_in_unwind::>(); + } + + #[test] + fn test_lock_arc_access_in_unwind_unchecked() { + tests::test_lock_arc_access_in_unwind::>(); + } + + #[test] + fn test_lock_unsized() { + tests::test_lock_unsized::>(); + } + + #[test] + fn test_lock_unsized_unchecked() { + tests::test_lock_unsized::>(); + } +} + +#[cfg(all(loom, test))] +mod model { + use crate::loom::models; + use crate::relax::Yield; + + type MutexPanic = super::MutexPanic; + type MutexUnchecked = super::MutexUnchecked; + + #[test] + fn try_lock_join() { + models::try_lock_join::>(); + } + + #[test] + fn try_lock_join_unchecked() { + models::try_lock_join::>(); + } + + #[test] + fn lock_join() { + models::lock_join::>(); + } + + #[test] + fn lock_join_unchecked() { + models::lock_join::>(); + } + + #[test] + fn mixed_lock_join() { + models::mixed_lock_join::>(); + } + + #[test] + fn mixed_lock_join_unchecked() { + models::mixed_lock_join::>(); + } +} diff --git a/src/relax.rs b/src/relax.rs index 22a9303..b888ddb 100644 --- a/src/relax.rs +++ b/src/relax.rs @@ -1,5 +1,5 @@ -// Modified version of relax.rs from spin-rs to support Loom yielding, -// exponential backoff and requires unsafe for `Relax`. +// Heavily modified version of relax.rs from spin-rs to support Loom yielding, +// exponential backoff, abort on unwind (debug) and requires unsafe for `Relax`. // // Original file at its most recent change (at the time of writing): // https://github.com/mvdnes/spin-rs/blob/5860ee114094cf200b97348ff332155fbd7159b4/src/relax.rs @@ -14,10 +14,14 @@ //! Strategies that determine the behaviour of locks when encountering contention. +use crate::cfg::debug_abort; use crate::cfg::hint; + #[cfg(any(feature = "yield", test))] use crate::cfg::thread; +pub(crate) use wait::RelaxWait; + /// A trait implemented by spinning relax strategies. 
 ///
 /// # Example
@@ -54,6 +58,31 @@ pub unsafe trait Relax {
     fn relax(&mut self);
 }

+/// The actual implementation of this crate's `Relax` types.
+trait RelaxImpl {
+    /// The actual `new` implementation.
+    fn new() -> Self;
+
+    /// The actual `relax` implementation.
+    fn relax(&mut self);
+}
+
+// SAFETY: Both `new` and `relax` function implementations are protected with a
+// process abort (under test with unwind on panic configuration) in case any of
+// them were to panic and exit the thread.
+#[doc(hidden)]
+unsafe impl Relax for R {
+    #[inline(always)]
+    fn new() -> Self {
+        debug_abort::on_unwind(|| R::new())
+    }
+
+    #[inline(always)]
+    fn relax(&mut self) {
+        debug_abort::on_unwind(|| R::relax(self));
+    }
+}
+
 /// A strategy that rapidly spins while informing the CPU that it should power
 /// down non-essential components via [`core::hint::spin_loop`].
 ///
@@ -72,15 +101,11 @@ pub unsafe trait Relax {
 /// [priority inversion]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html
 pub struct Spin;

-// SAFETY: None of the associated function implementations contain any code
-// that could cause a thread exit.
-unsafe impl Relax for Spin {
-    #[inline(always)]
+impl RelaxImpl for Spin {
     fn new() -> Self {
         Self
     }

-    #[inline(always)]
     fn relax(&mut self) {
         hint::spin_loop();
     }
@@ -97,16 +122,12 @@ unsafe impl Relax for Spin {
 #[cfg_attr(docsrs, doc(cfg(feature = "yield")))]
 pub struct Yield;

-// SAFETY: None of the associated function implementations contain any code
-// that could cause a thread exit.
 #[cfg(any(feature = "yield", test))]
-unsafe impl Relax for Yield {
-    #[inline(always)]
+impl RelaxImpl for Yield {
     fn new() -> Self {
         Self
     }

-    #[inline(always)]
     fn relax(&mut self) {
         thread::yield_now();
     }
@@ -120,30 +141,14 @@ unsafe impl Relax for Yield {
 /// (i.e: this is a workaround for possible compiler bugs).
 pub struct Loop;

-// SAFETY: None of the associated function implementations contain any code
-// that could cause a thread exit.
-unsafe impl Relax for Loop {
-    #[inline(always)]
+impl RelaxImpl for Loop {
     fn new() -> Self {
         Self
     }

-    #[inline(always)]
     fn relax(&mut self) {}
 }

-// Exponential backoff is based on the crossbeam-utils implementation.
-// link to most recent change (as the time of writing):
-// https://github.com/crossbeam-rs/crossbeam/blob/371de8c2d304db07662450995848f3dc9598ac99/crossbeam-utils/src/backoff.rs
-//
-// Copyright (c) 2019 The Crossbeam Project Developers
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
 /// A strategy that, as [`Spin`], will run a busy-wait spin-loop, except this
 /// implementation will perform exponential backoff.
 ///
@@ -153,25 +158,28 @@ unsafe impl Relax for Loop {
 /// subject to priority inversion problems, you may want to consider a yielding
 /// strategy or using a scheduler-aware lock.
 pub struct SpinBackoff {
-    step: Step,
+    inner: Backoff<{ Self::MAX }>,
 }

 impl SpinBackoff {
-    const SPIN_LIMIT: u32 = 6;
+    /// The largest value the inner backoff counter can reach.
+    const MAX: Uint = DEFAULT_SHIFTS;
 }

-// SAFETY: None of the associated function implementations contain any code
-// that could cause a thread exit.
-unsafe impl Relax for SpinBackoff {
-    #[inline(always)]
+// The maximum inner value **must** be smaller than Uint::BITS, or else the
+// bitshift operation will overflow, which is not only incorrect but will
+// also result in UB when executed under `Relax::relax` in debug mode, since it
+// will panic and exit the thread, which is forbidden by `Relax`.
+const _: () = assert!(SpinBackoff::MAX < Uint::BITS);
+
+impl RelaxImpl for SpinBackoff {
     fn new() -> Self {
-        Self { step: Step::default() }
+        Self { inner: Backoff::new() }
     }

-    #[inline(always)]
     fn relax(&mut self) {
-        self.step.spin_to(Self::SPIN_LIMIT);
-        self.step.step_to(Self::SPIN_LIMIT);
+        self.inner.saturating_spin();
+        self.inner.saturating_step();
     }
 }

@@ -187,97 +195,173 @@ unsafe impl Relax for SpinBackoff {
 #[cfg(any(feature = "yield", test))]
 #[cfg_attr(docsrs, doc(cfg(feature = "yield")))]
 pub struct YieldBackoff {
-    step: Step,
+    inner: Backoff<{ Self::MAX }>,
 }

 #[cfg(any(feature = "yield", test))]
 impl YieldBackoff {
-    const SPIN_LIMIT: u32 = SpinBackoff::SPIN_LIMIT;
-    const YIELD_LIMIT: u32 = 10;
+    /// The largest value the inner backoff counter can reach.
+    const MAX: Uint = DEFAULT_SHIFTS;
 }

-// SAFETY: None of the associated function implementations contain any code
-// that could cause a thread exit.
+// The maximum inner value **must** be smaller than Uint::BITS, or else the
+// bitshift operation will overflow, which is not only incorrect but will
+// also result in UB when executed under `Relax::relax` in debug mode, since it
+// will panic and exit the thread, which is forbidden by `Relax`.
 #[cfg(any(feature = "yield", test))]
-unsafe impl Relax for YieldBackoff {
-    #[inline(always)]
+const _: () = assert!(YieldBackoff::MAX < Uint::BITS);
+
+#[cfg(any(feature = "yield", test))]
+impl RelaxImpl for YieldBackoff {
     fn new() -> Self {
-        Self { step: Step::default() }
+        Self { inner: Backoff::new() }
     }

-    #[inline(always)]
     fn relax(&mut self) {
-        if self.step.0 <= Self::SPIN_LIMIT {
-            self.step.spin();
+        if self.inner.0 < Self::MAX {
+            self.inner.wrapping_spin();
         } else {
             thread::yield_now();
         }
-        self.step.step_to(Self::YIELD_LIMIT);
+        self.inner.saturating_step();
     }
 }

-/// Keeps count of the number of steps taken.
-#[derive(Default)]
-struct Step(u32);
+// Exponential backoff is inspired by the crossbeam-utils implementation.
+// Link to most recent change (at the time of writing):
+// https://github.com/crossbeam-rs/crossbeam/blob/371de8c2d304db07662450995848f3dc9598ac99/crossbeam-utils/src/backoff.rs
+//
+// Copyright (c) 2019 The Crossbeam Project Developers
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.

-impl Step {
-    /// Unbounded backoff spinning.
-    #[cfg(any(feature = "yield", test))]
-    #[inline(always)]
-    fn spin(&self) {
-        for _ in 0..1 << self.0 {
+/// An unsigned integer type used as the inner type for [`Backoff`].
+///
+/// All backoff related arithmetic operations (e.g. left shift, sum) should only
+/// use this same type as the right-hand and left-hand side types.
+type Uint = u32;
+
+/// The default max number of shifts the inner value of `Backoff` will produce.
+#[cfg(not(miri))]
+const DEFAULT_SHIFTS: Uint = 6;
+
+/// The default max number of shifts the inner value of `Backoff` will produce.
+///
+/// For testing purposes, let's make this very small, otherwise Miri runs will
+/// take far more time without much benefit.
+#[cfg(miri)]
+const DEFAULT_SHIFTS: Uint = 1;
+
+/// Inner backoff counter that keeps track of the number of shifts applied.
+///
+/// The maximum value the inner shift counter can take is defined by `MAX`.
+struct Backoff(Uint);
+
+impl Backoff {
+    /// Creates a new `Backoff` instance with the counter initialized to 0.
+    const fn new() -> Self {
+        Self(0)
+    }
+
+    /// The number of iterations that the backoff spin loop will execute; the
+    /// result of the expression may overflow.
+    ///
+    /// # Panics
+    ///
+    /// Panics on `shl` arithmetic overflow under debug profile.
+    const fn end(shifts: Uint) -> Uint {
+        1 << shifts
+    }
+
+    /// Runs a bounded spin loop `1 << self.0` times, clamping the number of
+    /// shifts at `MAX`.
+    fn saturating_spin(&self) {
+        let shifts = self.0.min(MAX);
+        for _ in 0..Self::end(shifts) {
             hint::spin_loop();
         }
     }

-    /// Bounded backoff spinning.
-    #[inline(always)]
-    fn spin_to(&self, max: u32) {
-        for _ in 0..1 << self.0.min(max) {
+    /// Runs an unbounded spin loop `1 << self.0` times; the result of the
+    /// expression may overflow.
+    ///
+    /// # Panics
+    ///
+    /// Panics on `shl` arithmetic overflow under debug profile.
+    #[cfg(any(feature = "yield", test))]
+    fn wrapping_spin(&self) {
+        for _ in 0..Self::end(self.0) {
             hint::spin_loop();
         }
     }

-    /// Bounded step increment.
-    #[inline(always)]
-    fn step_to(&mut self, end: u32) {
-        if self.0 <= end {
-            self.0 += 1;
-        }
+    /// Increments the inner counter by one, saturating at `MAX`.
+    fn saturating_step(&mut self) {
+        (self.0 < MAX).then(|| self.0 += 1);
+    }
+}
+
+mod wait {
+    use core::marker::PhantomData;
+
+    use crate::lock::Wait;
+    use crate::relax::Relax;
+
+    /// A generic relaxed waiter that implements [`Relax`] so long as `R`
+    /// implements it too.
+    ///
+    /// This saves us from defining a blanket [`Wait`] impl for a generic `T` where
+    /// `T` implements [`Relax`], because that would prevent us from implementing
+    /// `Wait` for `T` where `T` implements some other target trait, since they
+    /// would conflict.
+    pub struct RelaxWait(PhantomData);
+
+    impl Wait for RelaxWait {
+        type LockRelax = R;
+        type UnlockRelax = R;
+    }
 }

 #[cfg(all(not(loom), test))]
 mod test {
-    fn returns() {
+    use super::{Relax, Uint};
+
+    fn returns() {
         let mut relax = R::new();
-        for _ in 0..10 {
+        for _ in 0..=MAX.saturating_mul(10) {
             relax.relax();
         }
     }

     #[test]
     fn spins() {
-        returns::();
+        returns::();
     }

     #[test]
     fn spins_backoff() {
-        returns::();
+        use super::SpinBackoff;
+        const MAX: Uint = SpinBackoff::MAX;
+        returns::();
     }

     #[test]
     fn yields() {
-        returns::();
+        returns::();
     }

     #[test]
     fn yields_backoff() {
-        returns::();
+        use super::YieldBackoff;
+        const MAX: u32 = YieldBackoff::MAX;
+        returns::();
     }

     #[test]
     fn loops() {
-        returns::();
+        returns::();
     }
 }
diff --git a/src/test.rs b/src/test.rs
index fb24638..bc9b645 100644
--- a/src/test.rs
+++ b/src/test.rs
@@ -1,8 +1,33 @@
-#[cfg(all(not(loom), test))]
-pub use core::ops::DerefMut as Guard;
+pub use core::ops::{Deref, DerefMut};

-#[cfg(all(loom, test))]
-pub use crate::loom::Guard;
+/// A trait for conversion from `&Self` to a type that implements the [`Deref`]
+/// trait.
+pub trait AsDeref {
+    /// The type of the value that `Self::Deref` dereferences to.
+    type Target: ?Sized;
+
+    /// The type that implements the [`Deref`] trait.
+    type Deref<'a>: Deref
+    where
+        Self: 'a,
+        Self::Target: 'a;
+
+    /// Returns an instance of the type that implements the [`Deref`] trait.
+    fn as_deref(&self) -> Self::Deref<'_>;
+}
+
+/// A trait for conversion from `&mut Self` to a type that implements the
+/// [`DerefMut`] trait.
+pub trait AsDerefMut: AsDeref {
+    /// The type that implements the [`DerefMut`] trait.
+    type DerefMut<'a>: DerefMut
+    where
+        Self: 'a,
+        Self::Target: 'a;
+
+    /// Returns an instance of the type that implements the [`DerefMut`] trait.
+    fn as_deref_mut(&mut self) -> Self::DerefMut<'_>;
+}

 /// A trait for lock types that can hold user defined values.
 pub trait LockNew {
@@ -15,24 +40,29 @@ pub trait LockNew {
         Self::Target: Sized;
 }

-/// A trait for lock types that can run closures against the guard.
-pub trait LockWith: LockNew {
-    /// The guard type that holds exclusive access to the underlying data.
-    type Guard<'a>: Guard
+/// A trait for lock types that can run closures against the protected data.
+pub trait LockThen: LockNew {
+    /// A `guard` has access to a type that can give shared and exclusive
+    /// references to the protected data.
+    type Guard<'a>: AsDerefMut
     where
         Self: 'a,
         Self::Target: 'a;

-    // Attempts to acquire this lock and then runs the closure against its
-    // guard.
-    fn try_lock_with(&self, f: F) -> Ret
+    /// Acquires a mutex and then runs the closure against the protected data.
+    fn lock_then(&self, f: F) -> Ret
     where
-        F: FnOnce(Option>) -> Ret;
+        F: FnOnce(Self::Guard<'_>) -> Ret;
+}

-    /// Acquires a mutex and then runs the closure against its guard.
-    fn lock_with(&self, f: F) -> Ret
+/// A trait for lock types that can test if the lock is busy and run closures
+/// against the protected data in case of success.
+pub trait TryLockThen: LockThen {
+    /// Attempts to acquire this lock and then runs the closure against the
+    /// protected data if successful.
+    fn try_lock_then(&self, f: F) -> Ret
     where
-        F: FnOnce(Self::Guard<'_>) -> Ret;
+        F: FnOnce(Option>) -> Ret;

     /// Returns `true` if the lock is currently held.
     #[allow(dead_code)]
@@ -52,6 +82,30 @@ pub trait LockData: LockNew {
     fn get_mut(&mut self) -> &mut Self::Target;
 }

+// Trivial implementation of `AsDeref` for `T` where `T: Deref`.
+impl AsDeref for T {
+    type Target = ::Target;
+
+    type Deref<'a> = &'a ::Target
+    where
+        Self: 'a;
+
+    fn as_deref(&self) -> Self::Deref<'_> {
+        self
+    }
+}
+
+// Trivial implementation of `AsDerefMut` for `T` where `T: DerefMut`.
+impl AsDerefMut for T {
+    type DerefMut<'a> = &'a mut ::Target
+    where
+        Self: 'a;
+
+    fn as_deref_mut(&mut self) -> Self::DerefMut<'_> {
+        self
+    }
+}
+
 #[cfg(all(not(loom), test))]
 pub mod tests {
     // Test suite from the Rust's Mutex implementation with minor modifications
@@ -66,13 +120,17 @@ pub mod tests {
     // option. This file may not be copied, modified, or distributed
     // except according to those terms.
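As an illustration of how these traits compose (not part of the patch, and written as if it lived inside `src/test.rs`), a suite helper can stay generic over both guard flavors, whether a lock hands out an RAII guard or a plain `&mut T`:

```rust
use super::{AsDeref, AsDerefMut, LockThen};

// Sketch: works for any lock implementing the traits above; the generic
// bounds are assumptions, since this patch view elides generic parameters.
#[allow(dead_code)]
fn add_one<L: LockThen<Target = u32>>(lock: &L) -> u32 {
    lock.lock_then(|mut guard| {
        // `as_deref_mut`/`as_deref` abstract over the concrete guard
        // type, which is exactly why the shared tests route through them.
        *guard.as_deref_mut() += 1;
        *guard.as_deref()
    })
}
```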
- use std::fmt::{Debug, Display}; + use core::ops::RangeInclusive; + use std::fmt::Debug; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::mpsc::channel; use std::sync::Arc; use std::thread; - use super::{LockData, LockWith}; + #[cfg(feature = "barging")] + use std::fmt::Display; + + use super::{AsDeref, AsDerefMut, LockData, LockThen, TryLockThen}; type Int = u32; @@ -87,32 +145,55 @@ pub mod tests { } } - pub fn lots_and_lots() - where - L: LockWith + Send + Sync + 'static, - { - const ITERS: u32 = 1000; - const CONCURRENCY: u32 = 3; + const ITERS: Int = 1000; + const CONCURRENCY: Int = 3; + const EXPECTED_VALUE: Int = ITERS * CONCURRENCY * 2; + const EXPECTED_RANGE: RangeInclusive = 1..=EXPECTED_VALUE; - fn inc>(data: &Arc) { - for _ in 0..ITERS { - data.lock_with(|mut guard| *guard += 1); - } + fn inc>(mutex: &Arc) { + mutex.lock_then(|mut data| *data.as_deref_mut() += 1); + } + + fn try_inc>(mutex: &Arc) { + mutex.try_lock_then(|opt| opt.map(|mut data| *data.as_deref_mut() += 1)); + } + + fn inc_for>(mutex: &Arc) { + for _ in 0..ITERS { + inc::(mutex); } + } + + fn try_inc_for>(mutex: &Arc) { + for _ in 0..ITERS { + try_inc::(mutex); + } + } - let data = Arc::new(L::new(0)); + fn mixed_inc_for>(mutex: &Arc) { + for run in 0..ITERS { + let f = if run % 2 == 0 { inc } else { try_inc }; + f(mutex); + } + } + + fn lots_and_lots(f: fn(&Arc)) -> Int + where + L: LockThen + Send + Sync + 'static, + { + let mutex = Arc::new(L::new(0)); let (tx, rx) = channel(); for _ in 0..CONCURRENCY { - let data1 = Arc::clone(&data); + let c_mutex1 = Arc::clone(&mutex); let tx2 = tx.clone(); thread::spawn(move || { - inc(&data1); + f(&c_mutex1); tx2.send(()).unwrap(); }); - let data2 = Arc::clone(&data); + let c_mutex2 = Arc::clone(&mutex); let tx2 = tx.clone(); thread::spawn(move || { - inc(&data2); + f(&c_mutex2); tx2.send(()).unwrap(); }); } @@ -121,44 +202,75 @@ pub mod tests { for _ in 0..2 * CONCURRENCY { rx.recv().unwrap(); } - let value = data.lock_with(|guard| *guard); - assert_eq!(value, ITERS * CONCURRENCY * 2); + mutex.lock_then(|data| *data.as_deref()) + } + + pub fn node_waiter_drop_does_not_matter() { + use crate::inner::raw::{MutexNode, MutexNodeInit}; + assert!(!core::mem::needs_drop::()); + assert!(!core::mem::needs_drop::>()); + assert!(!core::mem::needs_drop::>()); + } + + pub fn lots_and_lots_lock() + where + L: LockThen + Send + Sync + 'static, + { + let value = lots_and_lots(inc_for::); + assert_eq!(value, EXPECTED_VALUE); + } + + pub fn lots_and_lots_try_lock() + where + L: TryLockThen + Send + Sync + 'static, + { + let value = lots_and_lots(try_inc_for::); + assert!(EXPECTED_RANGE.contains(&value)); + } + + pub fn lots_and_lots_mixed_lock() + where + L: TryLockThen + Send + Sync + 'static, + { + let value = lots_and_lots(mixed_inc_for::); + assert!(EXPECTED_RANGE.contains(&value)); } pub fn smoke() where - L: LockWith, + L: LockThen, { let mutex = L::new(1); - mutex.lock_with(|guard| drop(guard)); - mutex.lock_with(|guard| drop(guard)); + mutex.lock_then(|data| drop(data)); + mutex.lock_then(|data| drop(data)); } + #[cfg(feature = "barging")] pub fn test_guard_debug_display() where - L: LockWith, - for<'a> ::Guard<'a>: Debug + Display, + L: LockThen, + for<'a> ::Guard<'a>: Debug + Display, { - let data = 42; - let mutex = L::new(data); - mutex.lock_with(|guard| { - assert_eq!(format!("{data:?}"), format!("{guard:?}")); - assert_eq!(format!("{data}"), format!("{guard}")); + let value = 42; + let mutex = L::new(value); + mutex.lock_then(|data| { + 
+            assert_eq!(format!("{value:?}"), format!("{data:?}"));
+            assert_eq!(format!("{value}"), format!("{data}"));
         });
     }
 
     pub fn test_mutex_debug<L>()
     where
-        L: LockWith<Target = Int> + Debug + Send + Sync + 'static,
+        L: LockThen<Target = Int> + Debug + Send + Sync + 'static,
     {
-        let data = 42;
-        let mutex = Arc::new(L::new(data));
-        let msg = format!("Mutex {{ data: {data:?} }}");
+        let value = 42;
+        let mutex = Arc::new(L::new(value));
+        let msg = format!("Mutex {{ data: {value:?} }}");
         assert_eq!(msg, format!("{mutex:?}"));
 
         let c_mutex = Arc::clone(&mutex);
         let msg = "Mutex { data: <locked> }".to_string();
-        mutex.lock_with(|_guard| {
+        mutex.lock_then(|_data| {
             assert_eq!(msg, format!("{:?}", *c_mutex));
         });
     }
@@ -175,21 +287,21 @@ pub mod tests {
     where
         L: LockData<Target = Int> + From<Int>,
     {
-        let data = 42;
-        let mutex = L::from(data);
-        assert_eq!(data, mutex.into_inner());
+        let value = 42;
+        let mutex = L::from(value);
+        assert_eq!(value, mutex.into_inner());
     }
 
     pub fn test_try_lock<L>()
     where
-        L: LockWith<Target = ()>,
+        L: TryLockThen<Target = ()>,
     {
         use std::rc::Rc;
         let mutex = Rc::new(L::new(()));
         let c_mutex = Rc::clone(&mutex);
-        mutex.try_lock_with(|guard| {
+        mutex.try_lock_then(|data| {
             assert!(c_mutex.is_locked());
-            *guard.unwrap() = ();
+            *data.unwrap().as_deref_mut() = ();
         });
         assert!(!mutex.is_locked());
     }
@@ -227,8 +339,8 @@ pub mod tests {
 
     pub fn test_lock_arc_nested<L1, L2>()
     where
-        L1: LockWith<Target = Int>,
-        L2: LockWith<Target = Arc<L1>> + Send + Sync + 'static,
+        L1: LockThen<Target = Int>,
+        L2: LockThen<Target = Arc<L1>> + Send + Sync + 'static,
     {
         // Tests nested locks and access
         // to underlying data.
         let arc = Arc::new(L1::new(1));
         let arc2 = Arc::new(L2::new(arc));
         let (tx, rx) = channel();
         let _t = thread::spawn(move || {
-            let val = arc2.lock_with(|arc2| arc2.lock_with(|g| *g));
+            let val = arc2.lock_then(|arc2| {
+                let deref = arc2.as_deref();
+                deref.lock_then(|d| *d.as_deref())
+            });
             assert_eq!(val, 1);
             tx.send(()).unwrap();
         });
         rx.recv().unwrap();
     }
@@ -245,7 +360,7 @@ pub mod tests {
 
     pub fn test_acquire_more_than_one_lock<L>()
     where
-        L: LockWith<Target = Int> + Send + Sync + 'static,
+        L: LockThen<Target = Int> + Send + Sync + 'static,
     {
         let arc = Arc::new(L::new(1));
         let (tx, rx) = channel();
@@ -253,52 +368,54 @@ pub mod tests {
             let tx2 = tx.clone();
             let c_arc = Arc::clone(&arc);
             let _t = thread::spawn(move || {
-                c_arc.lock_with(|_g| {
+                c_arc.lock_then(|_d| {
                     let mutex = L::new(1);
-                    mutex.lock_with(|_g| ());
+                    mutex.lock_then(|_d| ());
                 });
                 tx2.send(()).unwrap();
             });
         }
 
         drop(tx);
-        rx.recv().unwrap();
+        for _ in 0..4 {
+            rx.recv().unwrap();
+        }
     }
 
     pub fn test_lock_arc_access_in_unwind<L>()
     where
-        L: LockWith<Target = Int> + Send + Sync + 'static,
+        L: LockThen<Target = Int> + Send + Sync + 'static,
     {
         let arc = Arc::new(L::new(1));
         let arc2 = arc.clone();
         let _ = thread::spawn(move || {
-            struct Unwinder<T: LockWith<Target = Int>> {
+            struct Unwinder<T: LockThen<Target = Int>> {
                 i: Arc<T>,
             }
-            impl<T: LockWith<Target = Int>> Drop for Unwinder<T> {
+            impl<T: LockThen<Target = Int>> Drop for Unwinder<T> {
                 fn drop(&mut self) {
-                    self.i.lock_with(|mut g| *g += 1);
+                    self.i.lock_then(|mut d| *d.as_deref_mut() += 1);
                 }
             }
             let _u = Unwinder { i: arc2 };
             panic!();
         })
         .join();
-        let value = arc.lock_with(|g| *g);
+        let value = arc.lock_then(|d| *d.as_deref());
         assert_eq!(value, 2);
     }
 
     pub fn test_lock_unsized<L>()
     where
-        L: LockWith<Target = [Int; 3]>,
+        L: LockThen<Target = [Int; 3]>,
     {
-        let lock: &L = &L::new([1, 2, 3]);
+        let mutex: &L = &L::new([1, 2, 3]);
         {
-            lock.lock_with(|mut g| {
-                g[0] = 4;
-                g[2] = 5;
+            mutex.lock_then(|mut d| {
+                d.as_deref_mut()[0] = 4;
+                d.as_deref_mut()[2] = 5;
             });
         }
         let comp: &[Int] = &[4, 2, 5];
-        lock.lock_with(|g| assert_eq!(&*g, comp));
+        mutex.lock_then(|d| assert_eq!(&*d.as_deref(), comp));
     }
 }
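The arithmetic behind `EXPECTED_VALUE` in the test suite above: `lots_and_lots` spawns two threads per `CONCURRENCY` iteration, each thread applies `f` once, and `inc_for` takes the lock `ITERS` times, so a run with no failed acquisitions totals `ITERS * CONCURRENCY * 2` increments; the `try_*` variants may skip increments when the lock is busy, hence the `EXPECTED_RANGE` check instead of an exact assert. A self-contained sketch of the same fan-out pattern, shown here with `std::sync::Mutex` purely for illustration:

```rust
use std::sync::{mpsc::channel, Arc, Mutex};
use std::thread;

const ITERS: u32 = 1000;
const CONCURRENCY: u32 = 3;

fn main() {
    let mutex = Arc::new(Mutex::new(0u32));
    let (tx, rx) = channel();
    for _ in 0..CONCURRENCY {
        // Two threads per iteration, matching the helper above.
        for _ in 0..2 {
            let mutex = Arc::clone(&mutex);
            let tx = tx.clone();
            thread::spawn(move || {
                for _ in 0..ITERS {
                    *mutex.lock().unwrap() += 1;
                }
                tx.send(()).unwrap();
            });
        }
    }
    drop(tx);
    // Wait for every spawned thread before reading the final value.
    for _ in 0..2 * CONCURRENCY {
        rx.recv().unwrap();
    }
    // Every increment happened under the lock, so the total is exact.
    assert_eq!(*mutex.lock().unwrap(), ITERS * CONCURRENCY * 2);
}
```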
diff --git a/src/thread_local.rs b/src/thread_local.rs
index 6045cb1..868167b 100644
--- a/src/thread_local.rs
+++ b/src/thread_local.rs
@@ -1,64 +1,3 @@
-use core::cell::{RefCell, RefMut};
-use core::panic::Location;
-
-use crate::cfg::thread::LocalKey;
-use crate::raw::{Mutex, MutexGuard, MutexNode};
-use crate::relax::Relax;
-
-type StaticNode = &'static LocalMutexNode;
-
-/// A handle to a [`MutexNode`] stored at the thread local storage.
-///
-/// Thread local nodes can be claimed for temporary, exclusive access during
-/// runtime for locking purposes. Node handles refer to the node stored at
-/// the current running thread.
-///
-/// Just like `MutexNode`, this is an opaque type that holds metadata for the
-/// [`raw::Mutex`]'s waiting queue. You must declare a thread local node with
-/// the [`thread_local_node!`] macro, and provide the generated handle to the
-/// appropriate [`raw::Mutex`] locking APIs. Attempting to lock a mutex with a
-/// thread local node that already is in use for the locking thread will cause
-/// a panic. Handles are provided by reference to functions.
-///
-/// See: [`try_lock_with_local`], [`lock_with_local`],
-/// [`try_lock_with_local_unchecked`] or [`lock_with_local_unchecked`].
-///
-/// [`MutexNode`]: MutexNode
-/// [`raw::Mutex`]: Mutex
-/// [`thread_local_node!`]: crate::thread_local_node
-/// [`try_lock_with_local`]: Mutex::try_lock_with_local
-/// [`lock_with_local`]: Mutex::lock_with_local
-/// [`try_lock_with_local_unchecked`]: Mutex::try_lock_with_local_unchecked
-/// [`lock_with_local_unchecked`]: Mutex::lock_with_local_unchecked
-#[repr(transparent)]
-#[derive(Debug)]
-pub struct LocalMutexNode {
-    #[cfg(not(all(loom, test)))]
-    key: LocalKey<RefCell<MutexNode>>,
-
-    // We can't take ownership of Loom's `thread_local!` value since it is a
-    // `static`, non-copy value, so we just point to it.
-    #[cfg(all(loom, test))]
-    key: &'static LocalKey<RefCell<MutexNode>>,
-}
-
-impl LocalMutexNode {
-    /// Creates a new `LocalMutexNode` key from the provided thread local node
-    /// key.
-    ///
-    /// This function is **NOT** part of the public API and so must not be
-    /// called directly by user's code. It is subjected to changes **WITHOUT**
-    /// prior notice or accompanied with relevant SemVer changes.
-    #[cfg(not(all(loom, test)))]
-    #[cfg(not(tarpaulin_include))]
-    #[doc(hidden)]
-    #[must_use]
-    #[inline(always)]
-    pub const fn __new(key: LocalKey<RefCell<MutexNode>>) -> Self {
-        Self { key }
-    }
-}
-
 /// Non-recursive, inner definition of `thread_local_node!`.
 ///
 /// This macro is **NOT** part of the public API and so must not be called
@@ -68,14 +7,14 @@ impl LocalMutexNode {
 #[doc(hidden)]
 #[macro_export]
 macro_rules! __thread_local_node_inner {
-    ($vis:vis $node:ident) => {
-        $vis const $node: $crate::raw::LocalMutexNode = {
+    ($vis:vis $node:ident, $($mod:ident$(::)?)+) => {
+        $vis const $node: $crate::$($mod::)+LocalMutexNode = {
             ::std::thread_local! {
-                static NODE: ::core::cell::RefCell<$crate::raw::MutexNode> = const {
-                    ::core::cell::RefCell::new($crate::raw::MutexNode::new())
+                static NODE: ::core::cell::RefCell<$crate::$($mod::)+MutexNode> = const {
+                    ::core::cell::RefCell::new($crate::$($mod::)+MutexNode::new())
                 };
             }
-            $crate::raw::LocalMutexNode::__new(NODE)
+            $crate::$($mod::)+LocalMutexNode::__new(NODE)
         };
     };
 }
@@ -88,714 +27,21 @@ macro_rules! __thread_local_node_inner {
 #[cfg(all(loom, test))]
 #[macro_export]
 macro_rules! __thread_local_node_inner {
-    ($vis:vis $node:ident) => {
-        $vis static $node: $crate::raw::LocalMutexNode = {
+    ($vis:vis $node:ident, $($mod:ident)::+) => {
+        $vis static $node: $crate::$($mod::)+LocalMutexNode = {
             ::loom::thread_local! {
-                static NODE: ::core::cell::RefCell<$crate::raw::MutexNode> = {
-                    ::core::cell::RefCell::new($crate::raw::MutexNode::new())
+                static NODE: ::core::cell::RefCell<$crate::$($mod::)+MutexNode> = {
+                    ::core::cell::RefCell::new($crate::$($mod::)+MutexNode::new())
                 };
             }
-            $crate::raw::LocalMutexNode { key: &NODE }
+            $crate::$($mod::)+LocalMutexNode::new(&NODE)
         };
     };
 }
 
-/// Declares a new [`LocalMutexNode`] key, which is a handle to the thread local
-/// node of the currently running thread.
-///
-/// The macro wraps any number of static declarations and make them thread
-/// local. Each provided name is associated with a single thread local key. The
-/// keys are wrapped and managed by the [`LocalMutexNode`] type, which are the
-/// actual handles meant to be used with the `lock_with_local` API family from
-/// [`raw::Mutex`]. Handles are provided by reference to functions.
-///
-/// See: [`try_lock_with_local`], [`lock_with_local`],
-/// [`try_lock_with_local_unchecked`] or [`lock_with_local_unchecked`].
-///
-/// The thread local node definition generated by this macro avoids lazy
-/// initialization and does not need to be dropped, which enables a more
-/// efficient underlying implementation. See [`std::thread_local!`] macro.
-///
-/// # Sintax
-///
-/// * Allows multiple static definitions, must be separated with semicolons.
-/// * Visibility is optional (private by default).
-/// * Requires `static` keyword and a **UPPER_SNAKE_CASE** name.
-///
-/// # Example
-///
-/// ```
-/// use mcslock::raw::spins::Mutex;
-///
-/// // Multiple difenitions.
-/// mcslock::thread_local_node! {
-///     pub static NODE;
-///     static OTHER_NODE1;
-/// }
-///
-/// // Single definition.
-/// mcslock::thread_local_node!(pub static OTHER_NODE2);
-///
-/// let mutex = Mutex::new(0);
-/// // Keys are provided to APIs by reference.
-/// mutex.lock_with_local(&NODE, |mut guard| *guard = 10);
-/// assert_eq!(mutex.lock_with_local(&NODE, |guard| *guard), 10);
-/// ```
-/// [`raw::Mutex`]: Mutex
-/// [`std::thread_local!`]: https://doc.rust-lang.org/std/macro.thread_local.html
-/// [`try_lock_with_local`]: Mutex::try_lock_with_local
-/// [`lock_with_local`]: Mutex::lock_with_local
-/// [`try_lock_with_local_unchecked`]: Mutex::try_lock_with_local_unchecked
-/// [`lock_with_local_unchecked`]: Mutex::lock_with_local_unchecked
-#[macro_export]
-macro_rules! thread_local_node {
-    // Empty (base for recursion).
-    () => {};
-    // Process multiply definitions (recursive).
-    ($vis:vis static $node:ident; $($rest:tt)*) => {
-        $crate::__thread_local_node_inner!($vis $node);
-        $crate::thread_local_node!($($rest)*);
-    };
-    // Process single declaration.
-    ($vis:vis static $node:ident) => {
-        $crate::__thread_local_node_inner!($vis $node);
-    };
-}
-
 /// The local node error message as a string literal.
 macro_rules! already_borrowed_error {
     () => {
-        "thread local MCS lock node is already mutably borrowed"
+        "mcslock::raw::LocalMutexNode is already mutably borrowed"
     };
 }
-
-/// Panics the thread with a message pointing to the panic location.
-#[inline(never)]
-#[cold]
-fn panic_already_borrowed(caller: &Location<'static>) -> ! {
-    panic!("{}, conflict at: {}", already_borrowed_error!(), caller)
-}
-
-impl<T: ?Sized, R: Relax> Mutex<T, R> {
-    /// Attempts to acquire this mutex and then runs a closure against its guard.
-    ///
-    /// If the lock could not be acquired at this time, then a [`None`] value is
-    /// given back as the closure argument. If the lock has been acquired, then
-    /// a [`Some`] value with the mutex guard is given instead. The lock will be
-    /// unlocked when the guard is dropped.
-    ///
-    /// To acquire a MCS lock through this function, it's also required a
-    /// queue node, which is a record that keeps a link for forming the queue,
-    /// to be stored in the current locking thread local storage. See
-    /// [`LocalMutexNode`] and [`thread_local_node!`].
-    ///
-    /// This function does not block.
-    ///
-    /// # Panics
-    ///
-    /// Will panic if the thread local node is already mutably borrowed.
-    ///
-    /// Panics if the key currently has its destructor running, and it **may**
-    /// panic if the destructor has previously been run for this thread.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use std::sync::Arc;
-    /// use std::thread;
-    ///
-    /// use mcslock::raw::spins::Mutex;
-    ///
-    /// mcslock::thread_local_node!(static NODE);
-    ///
-    /// let mutex = Arc::new(Mutex::new(0));
-    /// let c_mutex = Arc::clone(&mutex);
-    ///
-    /// thread::spawn(move || {
-    ///     c_mutex.try_lock_with_local(&NODE, |guard| {
-    ///         if let Some(mut guard) = guard {
-    ///             *guard = 10;
-    ///         } else {
-    ///             println!("try_lock_with_local failed");
-    ///         }
-    ///     });
-    /// })
-    /// .join().expect("thread::spawn failed");
-    ///
-    /// assert_eq!(mutex.lock_with_local(&NODE, |guard| *guard), 10);
-    /// ```
-    ///
-    /// Compile fail: borrows of the guard or its data cannot escape the given
-    /// closure:
-    ///
-    /// ```compile_fail,E0515
-    /// use mcslock::raw::spins::Mutex;
-    ///
-    /// mcslock::thread_local_node!(static NODE);
-    ///
-    /// let mutex = Mutex::new(1);
-    /// let data = mutex.try_lock_with_local(&NODE, |guard| &*guard.unwrap());
-    /// ```
-    ///
-    /// Panic: thread local node cannot be borrowed more than once at the same
-    /// time:
-    ///
-    #[doc = concat!("```should_panic,", already_borrowed_error!())]
-    /// use mcslock::raw::spins::Mutex;
-    ///
-    /// mcslock::thread_local_node!(static NODE);
-    ///
-    /// let mutex = Mutex::new(0);
-    ///
-    /// mutex.lock_with_local(&NODE, |_guard| {
-    ///     // `NODE` is already mutably borrowed in this thread by the
-    ///     // enclosing `lock_with_local`, the borrow is live for the full
-    ///     // duration of this closure scope.
-    ///     let mutex = Mutex::new(());
-    ///     mutex.try_lock_with_local(&NODE, |_guard| ());
-    /// });
-    /// ```
-    /// [`LocalMutexNode`]: LocalMutexNode
-    /// [`thread_local_node!`]: crate::thread_local_node
-    #[inline]
-    #[track_caller]
-    pub fn try_lock_with_local<F, Ret>(&self, node: StaticNode, f: F) -> Ret
-    where
-        F: FnOnce(Option<MutexGuard<'_, T, R>>) -> Ret,
-    {
-        self.with_local_node(node, |mutex, node| f(mutex.try_lock(node)))
-    }
-
-    /// Attempts to acquire this mutex and then runs a closure against its guard.
-    ///
-    /// If the lock could not be acquired at this time, then a [`None`] value is
-    /// given back as the closure argument. If the lock has been acquired, then
-    /// a [`Some`] value with the mutex guard is given instead. The lock will be
-    /// unlocked when the guard is dropped.
-    ///
-    /// To acquire a MCS lock through this function, it's also required a
-    /// queue node, which is a record that keeps a link for forming the queue,
-    /// to be stored in the current locking thread local storage. See
-    /// [`LocalMutexNode`] and [`thread_local_node!`].
-    ///
-    /// This function does not block.
-    ///
-    /// # Safety
-    ///
-    /// Unlike [`try_lock_with_local`], this method is unsafe because it does
-    /// not check if the current thread local node is already mutably borrowed.
-    /// If the current thread local node is already borrowed, calling this
-    /// function is undefined behavior.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use std::sync::Arc;
-    /// use std::thread;
-    ///
-    /// use mcslock::raw::spins::Mutex;
-    ///
-    /// mcslock::thread_local_node!(static NODE);
-    ///
-    /// let mutex = Arc::new(Mutex::new(0));
-    /// let c_mutex = Arc::clone(&mutex);
-    ///
-    /// thread::spawn(move || unsafe {
-    ///     c_mutex.try_lock_with_local_unchecked(&NODE, |guard| {
-    ///         if let Some(mut guard) = guard {
-    ///             *guard = 10;
-    ///         } else {
-    ///             println!("try_lock_with_local_unchecked failed");
-    ///         }
-    ///     });
-    /// })
-    /// .join().expect("thread::spawn failed");
-    ///
-    /// assert_eq!(mutex.lock_with_local(&NODE, |guard| *guard), 10);
-    /// ```
-    ///
-    /// Compile fail: borrows of the guard or its data cannot escape the given
-    /// closure:
-    ///
-    /// ```compile_fail,E0515
-    /// use mcslock::raw::spins::Mutex;
-    ///
-    /// mcslock::thread_local_node!(static NODE);
-    ///
-    /// let mutex = Mutex::new(1);
-    /// let data = unsafe {
-    ///     mutex.try_lock_with_local_unchecked(&NODE, |g| &*g.unwrap())
-    /// };
-    /// ```
-    ///
-    /// Undefined behavior: thread local node cannot be borrowed more than once
-    /// at the same time:
-    ///
-    /// ```no_run
-    /// use mcslock::raw::spins::Mutex;
-    ///
-    /// mcslock::thread_local_node!(static NODE);
-    ///
-    /// let mutex = Mutex::new(0);
-    ///
-    /// mutex.lock_with_local(&NODE, |_guard| unsafe {
-    ///     // UB: `NODE` is already mutably borrowed in this thread by the
-    ///     // enclosing `lock_with_local`, the borrow is live for the full
-    ///     // duration of this closure scope.
-    ///     let mutex = Mutex::new(());
-    ///     mutex.try_lock_with_local_unchecked(&NODE, |_guard| ());
-    /// });
-    /// ```
-    /// [`try_lock_with_local`]: Mutex::try_lock_with_local
-    #[inline]
-    pub unsafe fn try_lock_with_local_unchecked<F, Ret>(&self, node: StaticNode, f: F) -> Ret
-    where
-        F: FnOnce(Option<MutexGuard<'_, T, R>>) -> Ret,
-    {
-        self.with_local_node_unchecked(node, |mutex, node| f(mutex.try_lock(node)))
-    }
-
-    /// Acquires this mutex and then runs the closure against its guard.
-    ///
-    /// This function will block the local thread until it is available to acquire
-    /// the mutex. Upon acquiring the mutex, the user provided closure will be
-    /// executed against the mutex guard. Once the guard goes out of scope, it
-    /// will unlock the mutex.
-    ///
-    /// To acquire a MCS lock through this function, it's also required a
-    /// queue node, which is a record that keeps a link for forming the queue,
-    /// to be stored in the current locking thread local storage. See
-    /// [`LocalMutexNode`] and [`thread_local_node!`].
-    ///
-    /// This function will block if the lock is unavailable.
-    ///
-    /// # Panics
-    ///
-    /// Will panic if the thread local node is already mutably borrowed.
-    ///
-    /// Panics if the key currently has its destructor running, and it **may**
-    /// panic if the destructor has previously been run for this thread.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use std::sync::Arc;
-    /// use std::thread;
-    ///
-    /// use mcslock::raw::spins::Mutex;
-    ///
-    /// mcslock::thread_local_node!(static NODE);
-    ///
-    /// let mutex = Arc::new(Mutex::new(0));
-    /// let c_mutex = Arc::clone(&mutex);
-    ///
-    /// thread::spawn(move || {
-    ///     c_mutex.lock_with_local(&NODE, |mut guard| *guard = 10);
-    /// })
-    /// .join().expect("thread::spawn failed");
-    ///
-    /// assert_eq!(mutex.lock_with_local(&NODE, |guard| *guard), 10);
-    /// ```
-    ///
-    /// Compile fail: borrows of the guard or its data cannot escape the given
-    /// closure:
-    ///
-    /// ```compile_fail,E0515
-    /// use mcslock::raw::spins::Mutex;
-    ///
-    /// mcslock::thread_local_node!(static NODE);
-    ///
-    /// let mutex = Mutex::new(1);
-    /// let data = mutex.lock_with_local(&NODE, |guard| &*guard);
-    /// ```
-    ///
-    /// Panic: thread local node cannot be borrowed more than once at the same
-    /// time:
-    ///
-    #[doc = concat!("```should_panic,", already_borrowed_error!())]
-    /// use mcslock::raw::spins::Mutex;
-    ///
-    /// mcslock::thread_local_node!(static NODE);
-    ///
-    /// let mutex = Mutex::new(0);
-    ///
-    /// mutex.lock_with_local(&NODE, |_guard| {
-    ///     // `NODE` is already mutably borrowed in this thread by the
-    ///     // enclosing `lock_with_local`, the borrow is live for the full
-    ///     // duration of this closure scope.
-    ///     let mutex = Mutex::new(());
-    ///     mutex.lock_with_local(&NODE, |_guard| ());
-    /// });
-    /// ```
-    /// [`LocalMutexNode`]: LocalMutexNode
-    /// [`thread_local_node!`]: crate::thread_local_node
-    #[inline]
-    #[track_caller]
-    pub fn lock_with_local<F, Ret>(&self, node: StaticNode, f: F) -> Ret
-    where
-        F: FnOnce(MutexGuard<'_, T, R>) -> Ret,
-    {
-        self.with_local_node(node, |mutex, node| f(mutex.lock(node)))
-    }
-
-    /// Acquires this mutex and then runs the closure against its guard.
-    ///
-    /// This function will block the local thread until it is available to acquire
-    /// the mutex. Upon acquiring the mutex, the user provided closure will be
-    /// executed against the mutex guard. Once the guard goes out of scope, it
-    /// will unlock the mutex.
-    ///
-    /// To acquire a MCS lock through this function, it's also required a
-    /// queue node, which is a record that keeps a link for forming the queue,
-    /// to be stored in the current locking thread local storage. See
-    /// [`LocalMutexNode`] and [`thread_local_node!`].
-    ///
-    /// This function will block if the lock is unavailable.
-    ///
-    /// # Safety
-    ///
-    /// Unlike [`lock_with_local`], this method is unsafe because it does not
-    /// check if the current thread local node is already mutably borrowed. If
-    /// the current thread local node is already borrowed, calling this
-    /// function is undefined behavior.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use std::sync::Arc;
-    /// use std::thread;
-    ///
-    /// use mcslock::raw::spins::Mutex;
-    ///
-    /// mcslock::thread_local_node!(static NODE);
-    ///
-    /// let mutex = Arc::new(Mutex::new(0));
-    /// let c_mutex = Arc::clone(&mutex);
-    ///
-    /// thread::spawn(move || unsafe {
-    ///     c_mutex.lock_with_local_unchecked(&NODE, |mut guard| *guard = 10);
-    /// })
-    /// .join().expect("thread::spawn failed");
-    ///
-    /// assert_eq!(mutex.lock_with_local(&NODE, |guard| *guard), 10);
-    /// ```
-    ///
-    /// Compile fail: borrows of the guard or its data cannot escape the given
-    /// closure:
-    ///
-    /// ```compile_fail,E0515
-    /// use mcslock::raw::spins::Mutex;
-    ///
-    /// mcslock::thread_local_node!(static NODE);
-    ///
-    /// let mutex = Mutex::new(1);
-    /// let data = unsafe {
-    ///     mutex.lock_with_local_unchecked(&NODE, |g| &*g)
-    /// };
-    /// ```
-    ///
-    /// Undefined behavior: thread local node cannot be borrowed more than once
-    /// at the same time:
-    ///
-    /// ```no_run
-    /// use mcslock::raw::spins::Mutex;
-    ///
-    /// mcslock::thread_local_node!(static NODE);
-    ///
-    /// let mutex = Mutex::new(0);
-    ///
-    /// mutex.lock_with_local(&NODE, |_guard| unsafe {
-    ///     // UB: `NODE` is already mutably borrowed in this thread by the
-    ///     // enclosing `lock_with_local`, the borrow is live for the full
-    ///     // duration of this closure scope.
-    ///     let mutex = Mutex::new(());
-    ///     mutex.lock_with_local_unchecked(&NODE, |_guard| ());
-    /// });
-    /// ```
-    /// [`lock_with_local`]: Mutex::lock_with_local
-    #[inline]
-    pub unsafe fn lock_with_local_unchecked<F, Ret>(&self, node: StaticNode, f: F) -> Ret
-    where
-        F: FnOnce(MutexGuard<'_, T, R>) -> Ret,
-    {
-        self.with_local_node_unchecked(node, |mutex, node| f(mutex.lock(node)))
-    }
-
-    /// Guard cannot outlive the closure or else it would allow the guard drop
-    /// call to access the thread local node even though its exclusive borrow
-    /// has already expired at the end of the closure.
-    ///
-    /// ```compile_fail
-    /// use mcslock::raw::spins::Mutex;
-    /// mcslock::thread_local_node!(static NODE);
-    ///
-    /// let mutex = Mutex::new(1);
-    /// let guard = mutex.lock_with_local(&NODE, |guard| guard);
-    /// ```
-    ///
-    /// ```compile_fail,E0521
-    /// use std::thread;
-    /// use mcslock::raw::spins::Mutex;
-    /// mcslock::thread_local_node!(static NODE);
-    ///
-    /// let mutex = Mutex::new(1);
-    /// mutex.lock_with_local(&NODE, |guard| {
-    ///     thread::spawn(move || {
-    ///         let guard = guard;
-    ///     });
-    /// });
-    /// ```
-    #[cfg(not(tarpaulin_include))]
-    const fn __guard_cant_escape_closure() {}
-}
-
-impl<T: ?Sized, R> Mutex<T, R> {
-    /// Runs `f` over a raw mutex and a thread local node as arguments.
-    ///
-    /// # Panics
-    ///
-    /// Will panic if the thread local node is already mutably borrowed.
-    ///
-    /// Panics if the key currently has its destructor running, and it **may**
-    /// panic if the destructor has previously been run for this thread.
-    #[track_caller]
-    fn with_local_node<F, Ret>(&self, node: StaticNode, f: F) -> Ret
-    where
-        F: FnOnce(&Self, &mut MutexNode) -> Ret,
-    {
-        let caller = Location::caller();
-        let panic = |_| panic_already_borrowed(caller);
-        let f = |mut node: RefMut<_>| f(self, &mut node);
-        node.key.with(|node| node.try_borrow_mut().map_or_else(panic, f))
-    }
-
-    /// Runs 'f' over the a raw mutex and thread local node as arguments without
-    /// checking if the node is currently mutably borrowed.
-    ///
-    /// # Safety
-    ///
-    /// Mutably borrowing a [`RefCell`] while references are still live is
-    /// undefined behaviour. Threfore, caller must guarantee that the thread
-    /// local node is not already in use for the current thread. A thread local
-    /// node is release to the current thread once the associated `with_local`'s
-    /// f closure runs out of scope.
-    unsafe fn with_local_node_unchecked<F, Ret>(&self, node: StaticNode, f: F) -> Ret
-    where
-        F: FnOnce(&Self, &mut MutexNode) -> Ret,
-    {
-        // SAFETY: Caller guaranteed that no other references are live.
-        node.key.with(|node| f(self, unsafe { &mut *node.as_ptr() }))
-    }
-}
-
-// A thread local node definition used for testing.
-#[cfg(test)]
-#[cfg(not(tarpaulin_include))]
-thread_local_node!(static TEST_NODE);
-
-/// A Mutex wrapper type that calls `lock_with_local` and
-/// `try_lock_with_local` when implementing testing traits.
-#[cfg(test)]
-struct MutexPanic<T: ?Sized, R>(Mutex<T, R>);
-
-#[cfg(test)]
-impl<T: ?Sized, R> crate::test::LockNew for MutexPanic<T, R> {
-    type Target = T;
-
-    fn new(value: Self::Target) -> Self
-    where
-        Self::Target: Sized,
-    {
-        Self(Mutex::new(value))
-    }
-}
-
-#[cfg(test)]
-impl<T: ?Sized, R: Relax> crate::test::LockWith for MutexPanic<T, R> {
-    type Guard<'a> = MutexGuard<'a, Self::Target, R>
-    where
-        Self: 'a,
-        Self::Target: 'a;
-
-    fn try_lock_with<F, Ret>(&self, f: F) -> Ret
-    where
-        F: FnOnce(Option<MutexGuard<'_, T, R>>) -> Ret,
-    {
-        self.0.try_lock_with_local(&TEST_NODE, f)
-    }
-
-    fn lock_with<F, Ret>(&self, f: F) -> Ret
-    where
-        F: FnOnce(MutexGuard<'_, T, R>) -> Ret,
-    {
-        self.0.lock_with_local(&TEST_NODE, f)
-    }
-
-    fn is_locked(&self) -> bool {
-        self.0.is_locked()
-    }
-}
-
-/// A Mutex wrapper type that calls `lock_with_local_unchecked` and
-/// `try_lock_with_local_unchecked` when implementing testing traits.
-#[cfg(test)]
-struct MutexUnchecked<T: ?Sized, R>(Mutex<T, R>);
-
-#[cfg(test)]
-impl<T: ?Sized, R> crate::test::LockNew for MutexUnchecked<T, R> {
-    type Target = T;
-
-    fn new(value: Self::Target) -> Self
-    where
-        Self::Target: Sized,
-    {
-        Self(Mutex::new(value))
-    }
-}
-
-#[cfg(test)]
-impl<T: ?Sized, R: Relax> crate::test::LockWith for MutexUnchecked<T, R> {
-    type Guard<'a> = MutexGuard<'a, Self::Target, R>
-    where
-        Self: 'a,
-        Self::Target: 'a;
-
-    fn try_lock_with<F, Ret>(&self, f: F) -> Ret
-    where
-        F: FnOnce(Option<MutexGuard<'_, T, R>>) -> Ret,
-    {
-        // SAFETY: caller must guarantee that this thread local node is not
-        // already mutably borrowed for some other lock acquisition.
-        unsafe { self.0.try_lock_with_local_unchecked(&TEST_NODE, f) }
-    }
-
-    fn lock_with<F, Ret>(&self, f: F) -> Ret
-    where
-        F: FnOnce(MutexGuard<'_, T, R>) -> Ret,
-    {
-        // SAFETY: caller must guarantee that this thread local node is not
-        // already mutably borrowed for some other lock acquisition.
-        unsafe { self.0.lock_with_local_unchecked(&TEST_NODE, f) }
-    }
-
-    fn is_locked(&self) -> bool {
-        self.0.is_locked()
-    }
-}
-
-#[cfg(all(not(loom), test))]
-mod test {
-    use crate::raw::MutexNode;
-    use crate::relax::Yield;
-    use crate::test::tests;
-
-    type MutexPanic<T> = super::MutexPanic<T, Yield>;
-    type MutexUnchecked<T> = super::MutexUnchecked<T, Yield>;
-
-    #[test]
-    fn ref_cell_node_drop_does_not_matter() {
-        use core::{cell::RefCell, mem};
-        assert!(!mem::needs_drop::<RefCell<MutexNode>>());
-    }
-
-    #[test]
-    fn lots_and_lots() {
-        tests::lots_and_lots::<MutexPanic<u32>>();
-    }
-
-    #[test]
-    fn lots_and_lots_unchecked() {
-        tests::lots_and_lots::<MutexUnchecked<u32>>();
-    }
-
-    #[test]
-    fn smoke() {
-        tests::smoke::<MutexPanic<u32>>();
-    }
-
-    #[test]
-    fn smoke_unchecked() {
-        tests::smoke::<MutexUnchecked<u32>>();
-    }
-
-    #[test]
-    fn test_try_lock() {
-        tests::test_try_lock::<MutexPanic<()>>();
-    }
-
-    #[test]
-    fn test_try_lock_unchecked() {
-        tests::test_try_lock::<MutexUnchecked<()>>();
-    }
-
-    #[test]
-    #[should_panic = already_borrowed_error!()]
-    fn test_lock_arc_nested() {
-        tests::test_lock_arc_nested::<MutexPanic<u32>, MutexPanic<_>>();
-    }
-
-    #[test]
-    #[should_panic = already_borrowed_error!()]
-    fn test_acquire_more_than_one_lock() {
-        tests::test_acquire_more_than_one_lock::<MutexPanic<u32>>();
-    }
-
-    #[test]
-    fn test_lock_arc_access_in_unwind() {
-        tests::test_lock_arc_access_in_unwind::<MutexPanic<u32>>();
-    }
-
-    #[test]
-    fn test_lock_arc_access_in_unwind_unchecked() {
-        tests::test_lock_arc_access_in_unwind::<MutexUnchecked<u32>>();
-    }
-
-    #[test]
-    fn test_lock_unsized() {
-        tests::test_lock_unsized::<MutexPanic<[u32; 3]>>();
-    }
-
-    #[test]
-    fn test_lock_unsized_unchecked() {
-        tests::test_lock_unsized::<MutexUnchecked<[u32; 3]>>();
-    }
-}
-
-#[cfg(all(loom, test))]
-mod model {
-    use crate::loom::models;
-    use crate::relax::Yield;
-
-    type MutexPanic<T> = super::MutexPanic<T, Yield>;
-    type MutexUnchecked<T> = super::MutexUnchecked<T, Yield>;
-
-    #[test]
-    fn try_lock_join() {
-        models::try_lock_join::<MutexPanic<u32>>();
-    }
-
-    #[test]
-    fn try_lock_join_unchecked() {
-        models::try_lock_join::<MutexUnchecked<u32>>();
-    }
-
-    #[test]
-    fn lock_join() {
-        models::lock_join::<MutexPanic<u32>>();
-    }
-
-    #[test]
-    fn lock_join_unchecked() {
-        models::lock_join::<MutexUnchecked<u32>>();
-    }
-
-    #[test]
-    fn mixed_lock_join() {
-        models::mixed_lock_join::<MutexPanic<u32>>();
-    }
-
-    #[test]
-    fn mixed_lock_join_unchecked() {
-        models::mixed_lock_join::<MutexUnchecked<u32>>();
-    }
-}
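One note on the macro change above: `__thread_local_node_inner!` now takes a module path, presumably so a single inner macro can mint `LocalMutexNode` constants for nodes defined in different modules rather than hard-coding `raw`. As a rough sketch, a hypothetical invocation `__thread_local_node_inner!(static NODE, raw)` would expand (in a user crate, where `$crate` resolves to `mcslock`) to approximately:

```rust
// Sketch only: approximate expansion of the non-loom macro arm above,
// with the assumed module path `raw` substituted for `$($mod::)+`.
const NODE: mcslock::raw::LocalMutexNode = {
    ::std::thread_local! {
        // One queue node per thread, created without lazy initialization.
        static NODE: ::core::cell::RefCell<mcslock::raw::MutexNode> = const {
            ::core::cell::RefCell::new(mcslock::raw::MutexNode::new())
        };
    }
    // Wrap the thread-local key in the hidden constructor referenced above.
    mcslock::raw::LocalMutexNode::__new(NODE)
};
```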