From 96c2788e98ff1b3e2229f2001d5e16e1a455ff26 Mon Sep 17 00:00:00 2001 From: "Cliff L. Biffle" Date: Wed, 1 May 2024 17:33:03 -0700 Subject: [PATCH] New list implementation. This replaces the five-year-old lilos::list module with a new implementation derived from my "lilist" sketch. Compared to the original, the new list has the following advantages: - Its implementation is shorter. - Its API is simpler. - It no longer requires two-phase init, and so things using it _also_ don't require two-phase init. - It can be tested under Miri to catch pointer abuses. - It's exposed as a separate crate, which means I can update it without having to rev the OS. --- Cargo.lock | 17 +- Cargo.toml | 2 + RELEASE-NOTES.mkdn | 42 +- build-all.sh | 2 +- clippy-all.sh | 2 +- list/Cargo.toml | 16 + list/README.mkdn | 23 + list/src/lib.rs | 891 +++++++++++++++++++++++++++++++++++++ msrv-all.sh | 2 +- os/Cargo.toml | 3 +- os/src/exec.rs | 32 +- os/src/lib.rs | 12 +- os/src/list.rs | 14 +- os/src/mutex.rs | 127 +++--- os/src/time.rs | 4 +- rwlock/Cargo.toml | 3 +- rwlock/src/lib.rs | 75 +--- semaphore/Cargo.toml | 3 +- semaphore/src/lib.rs | 110 +---- testsuite/Cargo.toml | 1 + testsuite/src/lib.rs | 1 - testsuite/src/list.rs | 89 ++-- testsuite/src/rwlock.rs | 16 +- testsuite/src/semaphore.rs | 32 +- 24 files changed, 1159 insertions(+), 360 deletions(-) create mode 100644 list/Cargo.toml create mode 100644 list/README.mkdn create mode 100644 list/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 99061e7..3e4791c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -164,11 +164,12 @@ dependencies = [ [[package]] name = "lilos" -version = "1.1.0" +version = "1.2.0" dependencies = [ "cfg-if", "cortex-m", "cortex-m-rt", + "lilos-list", "pin-project", ] @@ -262,19 +263,28 @@ dependencies = [ ] [[package]] -name = "lilos-rwlock" +name = "lilos-list" version = "0.1.0" +dependencies = [ + "pin-project", +] + +[[package]] +name = "lilos-rwlock" +version = "0.2.0" dependencies = [ "lilos", + "lilos-list", "pin-project", "scopeguard", ] [[package]] name = "lilos-semaphore" -version = "0.1.0" +version = "0.2.0" dependencies = [ "lilos", + "lilos-list", "pin-project", ] @@ -288,6 +298,7 @@ dependencies = [ "futures", "lilos", "lilos-handoff", + "lilos-list", "lilos-rwlock", "lilos-semaphore", "panic-semihosting", diff --git a/Cargo.toml b/Cargo.toml index e57c355..64b2e17 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,7 @@ [workspace] resolver = "2" members = [ + "list", "os", "handoff", "semaphore", @@ -23,6 +24,7 @@ rust-version = "1.69" [workspace.dependencies] # Internal lilos = { path = "os", version = "1.1.0", default-features = false } +lilos-list = { path = "list", version = "0.1.0" } lilos-testsuite = { path = "testsuite" } lilos-handoff = { path = "handoff" } lilos-semaphore = { path = "semaphore" } diff --git a/RELEASE-NOTES.mkdn b/RELEASE-NOTES.mkdn index f57ff63..657565d 100644 --- a/RELEASE-NOTES.mkdn +++ b/RELEASE-NOTES.mkdn @@ -1,14 +1,46 @@ # Release Notes -## Version 1.1.1 (in progress) +## Version 1.2.0 (in progress) + +### Core API changes + +- The original `list` module, and the `List` and `Node` types it contained, are + now deprecated. The new `lilos-list` crate provides a dramatically improved + version. In particular, this eliminates the need for the "two-phase init" + dance with pinned OS types that has haunted `lilos` from the early days. + - Uses of the old `List` type should use the new one from `lilos-list`. + - Instead of using macros to create a list, write: `pin!(List::new())`. 
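+
+    A minimal before/after sketch of that migration (assuming the list is
+    used as a plain `()` wait queue):
+
+    ```rust
+    // Before (lilos 1.1): macro-based two-phase init.
+    lilos::create_list!(my_list);
+
+    // After (lilos 1.2): a plain pinned value from `lilos-list`.
+    let my_list = core::pin::pin!(lilos_list::List::<()>::new());
+    let my_list = my_list.into_ref(); // drop mutability to share it
+    ```
+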
+ - The `Node` type is now private. Instead, the "contents" field that used to + be passed to `create_node!` is now a parameter to the `List::wait` function. + +- Two-phase init of `Mutex` is now deprecated. You can now just write + `pin!(Mutex::create(something))`. (It's `create` and not `new` because I'm + preserving `new` in deprecated form for backwards compatibility with 1.1.) The + mutex creation macros still work. + +- It is now possible to create a `Mutex` or `List` in a `const fn` context, + including in a static initializer. + +### Non-Core API + +- The `lilos-list` crate exists now. + +- `rwlock` 0.2 uses the new `lilos-list` internally, and no longer requires (or + supports) two-phase init. I've removed the creation macro. + +- `semaphore` 0.2 uses the new `lilos-list` internally, and no longer requires (or + supports) two-phase init. I've removed the creation macro here, too. ### Internal improvements -- All code using lists, particularly timer lists, should now be slightly - smaller. +- All internal use of lists has been rewritten to use `waitlist`. + +- All code using any kind of lists, particularly timer lists, should now be + slightly smaller. -- Removed an internal unused `Waker` implementation, reducing the base flash - requirement of the OS. +- Removed an internal mostly-unused `Waker` implementation, reducing the base + flash requirement of the OS for applications that use the new `waitlist` + module. - `spsc` now uses `get_unchecked` in a couple of places to avoid generating bounds checks. This is the first place in `lilos` where I've used unsafe code diff --git a/build-all.sh b/build-all.sh index 6431498..6eeb799 100755 --- a/build-all.sh +++ b/build-all.sh @@ -20,7 +20,7 @@ for k in ${!MODES[@]}; do popd > /dev/null done -DIRS="handoff semaphore rwlock testsuite/stm32f4 testsuite/stm32g0 testsuite/stm32f3 testsuite/lm3s6965 examples/*/*/" +DIRS="handoff semaphore rwlock list testsuite/stm32f4 testsuite/stm32g0 testsuite/stm32f3 testsuite/lm3s6965 examples/*/*/" for d in $DIRS; do if [[ $d == *memory.x ]]; then diff --git a/clippy-all.sh b/clippy-all.sh index 56bbcc5..12fe468 100755 --- a/clippy-all.sh +++ b/clippy-all.sh @@ -2,7 +2,7 @@ set -euo pipefail -DIRS="os handoff semaphore rwlock testsuite/stm32f4 testsuite/stm32g0 testsuite/stm32f3 examples/*/*/" +DIRS="os list handoff semaphore rwlock testsuite/stm32f4 testsuite/stm32g0 testsuite/stm32f3 examples/*/*/" for d in $DIRS; do echo "---- clipping in $d" diff --git a/list/Cargo.toml b/list/Cargo.toml new file mode 100644 index 0000000..ba01707 --- /dev/null +++ b/list/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "lilos-list" +version = "0.1.0" +authors = ["Cliff L. Biffle "] +description = "Allocation-free intrusive doubly-linked wait queues for lilos." +keywords = ["async", "embedded", "os"] +categories = ["embedded", "no-std"] +readme = "README.mkdn" + +edition.workspace = true +license.workspace = true +repository.workspace = true +rust-version.workspace = true + +[dependencies] +pin-project.workspace = true diff --git a/list/README.mkdn b/list/README.mkdn new file mode 100644 index 0000000..bf8f65e --- /dev/null +++ b/list/README.mkdn @@ -0,0 +1,23 @@ +# Allocation-free doubly-linked intrusive lists + +This is the list type used to implement timer lists and wait queues in +[`lilos`]. It takes an unusual approach to implementing a sound doubly-linked +intrusive list in Rust without allocation, which is otherwise quite difficult: +it presents a different API that's easier to make sound. 
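+
+As a rough usage sketch (the method names are the ones this crate's own tests
+use; the executor and `Waker` plumbing needed to actually poll the future are
+omitted):
+
+```rust
+use core::pin::pin;
+use lilos_list::List;
+
+// Create and pin a plain wait queue, ordered only by insertion.
+let queue = pin!(List::<()>::new());
+let queue = queue.into_ref(); // drop mutability so it can be shared
+
+// A waiter gets a future that resolves once it has been woken. The node is
+// linked into the queue the first time this future is polled.
+let waiter = queue.join(());
+
+// The queue's owner wakes the longest-waiting node, if there is one.
+queue.wake_one();
+```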
+ +This data structure can be built for any platform, and has tests that can run +both hosted and under Miri (to check for pointer abuses). + +See the rustdoc for more details. + +## Versioning + +It's not important for applications or custom synchronization primitives to use +_exactly_ the same version of `lilos-list` as `lilos` does internally. Having +multiple versions linked into a single binary will work fine. (However, it will +take somewhat less space in flash if you can arrange to use the same version.) + +`lilos-list` is versioned separately from the OS API and will likely go through +major versions faster than the rest of the OS. + +[`lilos`]: https://docs.rs/lilos/latest/lilos/ diff --git a/list/src/lib.rs b/list/src/lib.rs new file mode 100644 index 0000000..f974024 --- /dev/null +++ b/list/src/lib.rs @@ -0,0 +1,891 @@ +//! A no-allocation wait-list and associated utility code. +//! +//! This crates provides a doubly-linked intrusive list for tracking timers or +//! wait queues without using dynamic memory allocation. To achieve a safe Rust +//! API, the interface is a bit unusual. +//! +//! See the [`List`] type for details. +//! +//! This list is used internally by [`lilos`] but may also be useful for +//! applications or crates implementing new synchronization primitives. Because +//! `lilos`'s internal use of this list is not exposed, it's not important to +//! use exactly the same version, except to conserve flash space. +//! +//! [`lilos`]: https://docs.rs/lilos/latest/lilos/ + +// IMPLEMENTATION NOTE: for shorthand and to avoid repeating ourselves, the +// safety comments below refer to something called the Link Valid Invariant. +// This invariant is: +// +// Once a List has been pinned, all nodes reachable from it -- its direct head +// and tail pointers, plus nodes connected to the next/prev pointers of other +// nodes reachable transitively -- must also be pinned and valid. +// +// In addition, a node's list pointer must also be valid or set to `None`. +// +// The operations below are careful to maintain this so that the pointer +// dereferencing is sound. + +#![cfg_attr(not(test), no_std)] +#![warn( + elided_lifetimes_in_paths, + explicit_outlives_requirements, + missing_debug_implementations, + missing_docs, + semicolon_in_expressions_from_macros, + single_use_lifetimes, + trivial_casts, + trivial_numeric_casts, + unreachable_pub, + unsafe_op_in_unsafe_fn, + unused_qualifications +)] +#![warn(clippy::undocumented_unsafe_blocks)] + +use core::future::Future; +use core::{ + cell::Cell, + marker::PhantomPinned, + pin::Pin, + ptr::NonNull, + task::{Poll, Waker}, +}; + +use pin_project::{pin_project, pinned_drop}; + +/// A list that records concurrent processes waiting for events of some sort. +/// +/// This is a very specialized doubly-linked list, designed to implement timer +/// lists and wait queues without dynamic memory allocation, while also passing +/// soundness checks. +/// +/// The list only supports the following operations: +/// +/// - `wait`, which produces a `Future` that will register a new node on the +/// list and resolve when it has been triggered by `wake_*`. +/// +/// - `wake_head_if`, which looks at the head node of the list and triggers its +/// owning `Future` if it passes a predicate, +/// +/// - And a handful of other wake-related operations that can be expressed in +/// terms of `wake_head_if`: `wake_while`, `wake_all`, `wake_one`. 
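+///
+/// For example, a plain wait queue (a sketch, not a complete program; the
+/// future returned by `join` still has to be polled by an executor):
+///
+/// ```
+/// use core::pin::pin;
+/// use lilos_list::List;
+///
+/// // Create and pin the queue, then drop mutability so it can be shared.
+/// let queue = pin!(List::<()>::new());
+/// let queue = queue.into_ref();
+///
+/// // A waiter's future; the node is linked in when this is first polled,
+/// // and the future resolves once the node has been woken.
+/// let wait = queue.join(());
+///
+/// // Waking the head returns `false` here: `wait` has not been polled yet,
+/// // so nothing is linked into the queue.
+/// assert!(!queue.wake_one());
+/// ```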
+/// +/// In particular, there is no way for the owner of the list to get direct +/// access to any of the nodes in the list -- only act upon them by waking them. +/// The wake operation _does not_ return a reference to the node that is woken. +/// +/// This property is critical to ensuring the soundness of an otherwise tricky +/// data structure in Rust. +/// +/// +/// # Operation complexity +/// +/// This is a doubly-linked list and its operations scale as you'd expect for +/// that data structure: +/// +/// - Inserting in a sorted list takes time linear in the size of the list +/// (`O(n)`). +/// +/// - Inserting in an unsorted list takes constant time (`O(1)`). +/// +/// - Waking the head node takes constant time (`O(1)`). +/// +/// - Waking a series of nodes takes time linear in the length of that sequence +/// (`O(m)`). +/// +/// - Detaching an arbitrary node (by dropping its owning `Future`) takes +/// constant time (`O(1)`). +/// +/// +/// # The contents type `T` +/// +/// Each node in the list has associated with it a value of type `T`. `T` must +/// implement `PartialOrd`, because nodes are kept sorted according to the order +/// defined by `T`'s `PartialOrd` impl, from the smallest at the head to the +/// largest at the tail. +/// +/// Using a timestamp type allows the list to act as a timer queue, efficiently +/// waking all nodes waiting for a certain time or lower. +/// +/// By using `()` for `T`, you can turn the list into one that simply keeps +/// track of insertion order. This is useful for wait queues for things like +/// mutexes. +/// +/// In some niche cases, it can be useful to associate additional information +/// with a node, without having it considered for sorting purposes. You can do +/// this by using a type that contains information but _does not_ use that +/// information in its `PartialOrd` implementation. See [`Meta`] for a utility +/// type that wraps some data and stops it from being sorted, or +/// [`OrderAndMeta`] for a type that lets you have some data used for +/// sorting, and some data simply stored. +#[derive(Default)] +pub struct List { + /// Links to the head and tail of the list, if the list is non-empty, or + /// `None` if it's empty. + root: Cell>>, + _marker: PhantomPinned, +} + +impl List { + /// Creates a new empty `List`. + /// + /// The `List` needs to be pinned before use, so this call often appears as + /// `pin!(List::new())`. + pub const fn new() -> Self { + Self { + root: Cell::new(None), + _marker: PhantomPinned, + } + } + + /// Checks if the list is empty (i.e. contains zero nodes). + pub fn is_empty(&self) -> bool { + self.root.get().is_none() + } + + /// Starting at the head, wakes nodes with contents that that satisfy `pred` + /// (that is, where `pred(n.contents)` returns `true`) until it finds one + /// that doesn't. + /// + /// Returns `true` if any nodes were awoken, `false` if none were (either + /// because the list is empty, or because the first node failed the + /// predicate). + pub fn wake_while( + self: Pin<&Self>, + mut pred: impl FnMut(&T) -> bool, + ) -> bool { + let mut any = false; + loop { + if !self.wake_head_if(&mut pred) { + break; + } + any = true; + } + any + } + + /// Wakes all nodes in the list in order from head to tail. + /// + /// Returns `true` if any nodes were awoken, `false` if the list was empty. + pub fn wake_all(self: Pin<&Self>) -> bool { + self.wake_while(|_| true) + } + + /// Wakes the head of the list, but only if it satisfies `pred` (that is, if + /// `pred(head.contents)` returns `true`). 
+ /// + /// Returns `true` if there was a head node and it passed, `false` if it + /// didn't pass or didn't exist. + pub fn wake_head_if( + self: Pin<&Self>, + pred: impl FnOnce(&T) -> bool, + ) -> bool { + let Some(root) = self.root.get() else { + return false; + }; + + let node_ptr = root.head; + // Safety: dereferencing and pinning the head pointer is safe per the + // Link Valid Invariant. + let node = unsafe { Pin::new_unchecked(&*node_ptr.as_ptr()) }; + + debug_assert_eq!(node.list.get(), Some(NonNull::from(&*self))); + + // This node is the head, it should not have a previous node. + debug_assert!(node.prev.get().is_none()); + + if !pred(&node.contents) { + return false; + } + + if node_ptr == root.tail { + // This node is the _only_ node in the list. + debug_assert_eq!(node.next.get(), None, + "list thinks node @{node_ptr:?} is tail, \ + node thinks it has a next"); + self.root.set(None); + } else { + // Unlink this node from its neighbor. + let Some(next_ptr) = node.next.take() else { + panic!() + }; + // Safety: dereferencing and pinning the neighbor pointer is safe + // per the Link Valid Invariant. + let next = unsafe { Pin::new_unchecked(next_ptr.as_ref()) }; + next.prev.set(None); + self.root.set(Some(Root { + head: next_ptr, + ..root + })); + } + + node.list.take(); + if let Some(waker) = node.waker.take() { + waker.wake(); + } else { + panic!(); + } + + true + } + + /// Wakes the head of the list unconditionally. + /// + /// Returns `true` if the list was non-empty, `false` if it was empty. + pub fn wake_one(self: Pin<&Self>) -> bool { + self.wake_head_if(|_| true) + } + + /// Returns a future that will create a node containing `contents` and + /// insert it into this list at the appropriate position, and then resolve + /// only when the node is awoken by a `wake` operation. + /// + /// This waiter will appear in the list _after_ all nodes `n` where + /// `n.contents <= contents`. If `T` is effectively unordered (such as `()`) + /// this means the waiter just gets tacked onto the end of the list. + /// + /// This operation does nothing until the future is polled. This is + /// important: we need proof that the returned future has been pinned before + /// it's safe to link it into the list, and polling the future provides that + /// proof. + /// + /// `contents` is consumed by this operation, because it's generally `Copy` + /// like a timestamp or a `()`. + /// + /// # Cancellation + /// + /// Dropping the future before it resolves loses its place in line. If there + /// are other nodes that are equal to `contents`, dropping and recreating + /// the future will move this waiter after those other nodes. + pub fn join( + self: Pin<&Self>, + contents: T, + ) -> impl Future + Captures<&'_ Self> + where + T: PartialOrd, + { + WaitForDetach { + list: Some(self), + node: Node { + prev: Cell::new(None), + next: Cell::new(None), + waker: Cell::new(None), + list: Cell::new(None), + contents, + _marker: PhantomPinned, + }, + } + } + + /// Returns a future that does the same thing as [`List::join`], but with + /// additional behavior if it's dropped in a specific time window. + /// + /// Specifically: if the future has inserted its node into the list, then + /// been awoken, and then gets dropped before being polled again. In that + /// very narrow circumstance, the future will execute `cleanup` on its way + /// toward oblivion. 
+ /// + /// This is important for synchronization primitives like mutexes, where + /// ownership gets passed to the next waiter in the queue without + /// unlocking/relocking (since that could allow races). The cleanup action + /// gives a waiter on such a queue an opportunity to wake the _next_ node on + /// the list. + pub fn join_with_cleanup( + self: Pin<&Self>, + contents: T, + cleanup: impl FnOnce(), + ) -> impl Future + Captures> + where + T: PartialOrd, + { + let inner = WaitForDetach { + list: Some(self), + node: Node { + prev: Cell::new(None), + next: Cell::new(None), + waker: Cell::new(None), + list: Cell::new(None), + contents, + _marker: PhantomPinned, + }, + }; + WaitWithCleanup { + inner, + cleanup: Some(cleanup), + } + } +} + +/// Internal future produced by the `join` operation. +#[pin_project] +struct WaitForDetach<'list, T> { + /// A pointer to the list we're attempting to attach to. Initialized to + /// `Some` on creation, then changed to `None` when we attach successfully. + list: Option>>, + /// The actual node. + #[pin] + node: Node, +} + +impl Future for WaitForDetach<'_, T> { + type Output = (); + + fn poll( + self: Pin<&mut Self>, + cx: &mut core::task::Context<'_>, + ) -> Poll { + let p = self.project(); + let node = p.node.into_ref(); + let node_ptr = NonNull::from(&*node); + + if let Some(list) = p.list.take() { + // We haven't yet attached to a list. We need to complete attachment + // now because we've taken the list field. + if let Some(mut root) = list.root.get() { + // The list is not empty. + // + // Traverse from the tail to the head looking for the + // right insertion position. + let mut maybe_cand = Some(root.tail); + while let Some(cand_ptr) = maybe_cand { + // Safety: dereferencing and pinning the node pointer is + // safe per the Link Valid Invariant. + let candidate = + unsafe { Pin::new_unchecked(cand_ptr.as_ref()) }; + + if candidate.contents <= node.contents { + // This is the right place for it. `candidate` will + // become `node`'s new `prev`. + let old_next = candidate.next.replace(Some(node_ptr)); + if let Some(next_ptr) = old_next { + // Safety: dereferencing and pinning the node + // pointer is safe per the Link Valid Invariant. + let next = unsafe { + Pin::new_unchecked(next_ptr.as_ref()) + }; + next.prev.set(Some(node_ptr)); + } + node.next.set(old_next); + node.prev.set(Some(cand_ptr)); + if cand_ptr == root.tail { + root.tail = node_ptr; + list.root.set(Some(root)); + } + break; + } + + maybe_cand = candidate.prev.get(); + } + + if maybe_cand.is_none() { + // node is becoming the new head of the list! + // Safety: dereferencing and pinning the head pointer is + // safe per the Link Valid Invariant. + let old_head = + unsafe { Pin::new_unchecked(root.head.as_ref()) }; + old_head.prev.set(Some(node_ptr)); + node.next.set(Some(root.head)); + root.head = node_ptr; + list.root.set(Some(root)); + } + } else { + // The list is currently empty. This node will become the only + // node in the list. + list.root.set(Some(Root { + head: node_ptr, + tail: node_ptr, + })); + } + + // Update the node to reflect that it's in a list now. + node.list.set(Some(NonNull::from(&*list))); + node.waker.set(Some(cx.waker().clone())); + + Poll::Pending + } else { + // We've already attached + // See if we've detached. + if node.list.get().is_none() { + // The node is not attached to any list, so we can resolve. Note + // that this means the future is "fused" -- it will keep + // returning ready if repeatedly polled. 
+ Poll::Ready(()) + } else { + // The node remains attached to the list. While unlikely, + // it's possible that the waker has changed. Update it. + node.waker.set(Some(cx.waker().clone())); + Poll::Pending + } + } + } +} + +/// Internal future returned by `join_with_cleanup`. +#[pin_project(PinnedDrop)] +struct WaitWithCleanup<'list, T, F: FnOnce()> { + #[pin] + inner: WaitForDetach<'list, T>, + cleanup: Option, +} + +#[pinned_drop] +impl PinnedDrop for WaitWithCleanup<'_, T, F> { + fn drop(self: Pin<&mut Self>) { + let p = self.project(); + let pi = p.inner.project(); + let node = pi.node.into_ref(); + + if pi.list.is_none() && node.list.get().is_none() { + // Process cleanup if it hasn't already been done. + if let Some(cleanup) = p.cleanup.take() { + cleanup(); + } + } + } +} + +impl Future for WaitWithCleanup<'_, T, F> { + type Output = (); + + fn poll( + self: Pin<&mut Self>, + cx: &mut core::task::Context<'_>, + ) -> Poll { + let p = self.project(); + if p.inner.poll(cx).is_ready() { + // The inner future has resolved -- we no longer need the cleanup + // action to hang around. + p.cleanup.take(); + Poll::Ready(()) + } else { + Poll::Pending + } + } +} + +impl core::fmt::Debug for List { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("List").field("root", &self.root).finish() + } +} + +#[cfg(debug_assertions)] +impl Drop for List { + fn drop(&mut self) { + debug_assert!(self.root.get().is_none()); + } +} + +struct Root { + head: NonNull>, + tail: NonNull>, +} + +impl Copy for Root {} +impl Clone for Root { + fn clone(&self) -> Self { + *self + } +} + +impl core::fmt::Debug for Root { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("Root") + .field("head", &self.head) + .field("tail", &self.tail) + .finish() + } +} + +struct Node { + /// Previous node in the list, or `None` if this is the head. + /// + /// This field may be changed by neighboring nodes, and thus through a + /// shared reference, so it's in a `Cell`. + prev: Cell>>, + /// Next node in the list, or `None` if this is the tail. + /// + /// This field may be changed by neighboring nodes, and thus through a + /// shared reference, so it's in a `Cell`. + next: Cell>>, + /// Waker that should be poked if this node is detached from a list. This + /// does double-duty as the "attached" flag. + /// + /// The waker gets triggered by the list, rather than the node's owner, when + /// the node leaves the list. This means we act on it through a shared + /// reference. Because Waker is not Copy, and because Clone requires a + /// second virtual dispatch, this is in an `Option>` so we can `take` + /// it even through a shared reference. + waker: Cell>, + /// Data borne by the node to assist in ordering of newly inserted nodes. To + /// associate data that does not participate in ordering, use a type with a + /// partialeq impl that ignores part of its contents. + contents: T, + /// Pointer to the list this node is associated with. + /// + /// This field is filled in when the node joins a list, so it can be used as + /// an "attached" indicator: `Some` if attached, `None` if not. + list: Cell>>>, + + _marker: PhantomPinned, +} + +impl Drop for Node { + fn drop(&mut self) { + if let Some(list_ptr) = self.list.take() { + // We are still attached to the list. Detach. + // Safety: dereferencing and pinning our pointer to the list is safe + // per the Link Valid Invariant. 
+ let list = unsafe { Pin::new_unchecked(list_ptr.as_ref()) }; + // Patch up previous node, if any. + if let Some(prev_ptr) = self.prev.get() { + // Safety: dereferencing and pinning our pointer to our neighbor + // is safe per the Link Valid Invariant. + let prev = unsafe { Pin::new_unchecked(prev_ptr.as_ref()) }; + prev.next.set(self.next.get()); + } + // Patch up next self, if any. + if let Some(next_ptr) = self.next.get() { + // Safety: dereferencing and pinning our pointer to our neighbor + // is safe per the Link Valid Invariant. + let next = unsafe { Pin::new_unchecked(next_ptr.as_ref()) }; + next.prev.set(self.prev.get()); + } + + match (self.prev.get(), self.next.get()) { + (None, None) => { + // This was the only node in the list. + list.root.set(None); + } + (Some(prev_ptr), None) => { + // This was the tail of the list. + list.root.set(Some(Root { + tail: prev_ptr, + ..list.root.get().unwrap() + })); + } + (None, Some(next_ptr)) => { + // This was the head of the list. + list.root.set(Some(Root { + head: next_ptr, + ..list.root.get().unwrap() + })); + } + (Some(_), Some(_)) => { + // boring middle of list + } + } + } + debug_assert_eq!(self.list.get(), None); + } +} + +/// Combines an ordering type `T` with an arbitrary metadata type `M` for use in +/// a [`List`]. +/// +/// A [`List`] keeps its contents ordered by a type, which must implement +/// `PartialOrd`. That's all well and good, but sometimes you need to store +/// additional metadata in the list node that should not affect ordering -- such +/// as requested access level in a read-write lock, for instance. +/// +/// `OrderAndMeta` combines some piece of data `T` that implements `PartialOrd`, +/// with another piece of data `M` that is ignored for comparisons, so that you +/// can have a list that stays sorted but also carries arbitrary additional +/// data. +/// +/// If you don't care about the sorting part, have a look at [`Meta`] instead. +#[derive(Copy, Clone, Debug)] +pub struct OrderAndMeta(pub T, pub M); + +impl PartialEq for OrderAndMeta { + fn eq(&self, other: &Self) -> bool { + self.0.eq(&other.0) + } +} + +impl PartialOrd for OrderAndMeta { + fn partial_cmp(&self, other: &Self) -> Option { + self.0.partial_cmp(&other.0) + } +} + +/// A wrapper type for including some arbitrary metadata `M` in a [`List`]. +/// +/// A [`List`] keeps its contents ordered by a type, which must implement +/// `PartialOrd`. Many lists don't need special sorting, and just want to record +/// the order of insertion of the nodes. In those cases, it's common to use +/// `List<()>` to disable the sorting. +/// +/// But what if you want the insertion-only-ordering of `()` but also need to +/// include some data in each list node? `Meta` wraps the data of your choice +/// and _blocks access to its `PartialOrd` impl,_ if it even has one to begin +/// with. That is, all `Meta`s look the same to the list, even if the `M` +/// they contain are different. +/// +/// If you'd like to store metadata that isn't used for sorting, but _do_ want +/// to sort the list by some other data, see [`OrderAndMeta`]. +#[derive(Copy, Clone, Debug)] +pub struct Meta(pub M); + +impl PartialEq for Meta { + fn eq(&self, _other: &Self) -> bool { + true + } +} + +impl PartialOrd for Meta { + fn partial_cmp(&self, _other: &Self) -> Option { + Some(core::cmp::Ordering::Equal) + } +} + +/// Marker trait implementing the "Captures Trick" from Rust RFC 3498, ensuring +/// that we do lifetime capturing right in the 2021 edition. 
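+///
+/// For example, [`List::join`] returns
+/// `impl Future<Output = ()> + Captures<&Self>`, which records that the
+/// opaque future borrows from the list it was created from.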
+/// +/// TODO: revisit this when we can switch to the 2024 edition, where the default +/// behavior makes this less necessary. +pub trait Captures {} + +impl Captures for U {} + +#[cfg(test)] +mod tests { + #![allow(clippy::bool_assert_comparison)] + #![allow(clippy::undocumented_unsafe_blocks)] + + use core::{ + cell::Cell, + future::Future, + mem::forget, + pin::pin, + sync::atomic::{AtomicUsize, Ordering}, + task::{Context, RawWaker, RawWakerVTable, Waker}, + }; + use std::sync::Arc; + + use crate::List; + + /// VTable for the spy waker. + static VTABLE: RawWakerVTable = RawWakerVTable::new( + // clone + |p| { + let arc = unsafe { Arc::from_raw(p as *const AtomicUsize) }; + let second_arc = Arc::clone(&arc); + forget(arc); + RawWaker::new(Arc::into_raw(second_arc) as *const (), &VTABLE) + }, + // wake + |p| { + let arc = unsafe { Arc::from_raw(p as *const AtomicUsize) }; + arc.fetch_add(1, Ordering::Relaxed); + // drop it to release our count. + }, + // wake_by_ref + |p| { + let arc = unsafe { Arc::from_raw(p as *const AtomicUsize) }; + arc.fetch_add(1, Ordering::Relaxed); + forget(arc); + }, + // drop + |p| { + let _arc = unsafe { Arc::from_raw(p as *const AtomicUsize) }; + }, + ); + + /// Produces a `Waker` and also a shared `AtomicUsize` that counts how many + /// times the waker is actually used. + fn spy_waker() -> (Arc, Waker) { + let count = Arc::new(AtomicUsize::new(0)); + let second_count = Arc::clone(&count); + (count, unsafe { + Waker::from_raw(RawWaker::new( + Arc::into_raw(second_count) as *const (), + &VTABLE, + )) + }) + } + + #[test] + fn test_create_drop() { + // It'd be real bad if this didn't work, eh? + List::<()>::new(); + } + + #[test] + fn test_single_node_wait_resume() { + let list = pin!(List::<()>::new()); + let list = list.as_ref(); + + let (wake_count, waker) = spy_waker(); + let mut ctx = Context::from_waker(&waker); + + let mut wait_fut = pin!(list.join(())); + + assert!(list.is_empty()); + + assert!(wait_fut.as_mut().poll(&mut ctx).is_pending()); + assert_eq!(wake_count.load(Ordering::Relaxed), 0); + + assert!(!list.is_empty()); + + assert!(wait_fut.as_mut().poll(&mut ctx).is_pending()); + assert!(wait_fut.as_mut().poll(&mut ctx).is_pending()); + + assert!(list.wake_head_if(|_| true)); + assert_eq!(wake_count.load(Ordering::Relaxed), 1); + assert!(list.is_empty()); + + assert!(wait_fut.poll(&mut ctx).is_ready()); + assert_eq!(wake_count.load(Ordering::Relaxed), 1); + } + + #[test] + fn test_single_node_drop_while_in_list() { + let list = pin!(List::<()>::new()); + let list = list.as_ref(); + + let (wake_count, waker) = spy_waker(); + let mut ctx = Context::from_waker(&waker); + + { + let mut wait_fut = pin!(list.join(())); + assert!(wait_fut.as_mut().poll(&mut ctx).is_pending()); + assert!(!list.is_empty()); + } + assert!(list.is_empty()); + assert_eq!(wake_count.load(Ordering::Relaxed), 0); + } + + #[test] + fn test_wake_while_insert_order() { + let list = pin!(List::new()); + let list = list.into_ref(); + + let (wake_count, waker) = spy_waker(); + let mut ctx = Context::from_waker(&waker); + + // Insert a series of nodes: + let mut fut_a = pin!(list.join(())); + assert!(fut_a.as_mut().poll(&mut ctx).is_pending()); + + let mut fut_b = pin!(list.join(())); + assert!(fut_b.as_mut().poll(&mut ctx).is_pending()); + + let mut fut_c = pin!(list.join(())); + assert!(fut_c.as_mut().poll(&mut ctx).is_pending()); + + let mut fut_d = pin!(list.join(())); + assert!(fut_d.as_mut().poll(&mut ctx).is_pending()); + + // (Ab)use wake_while to only wake up two of them. 
+ let mut check_count = 0; + let changes = list.wake_while(|_n| { + check_count += 1; + check_count <= 2 + }); + assert!(changes); + assert_eq!(check_count, 3); + assert_eq!(wake_count.load(Ordering::Relaxed), 2); + + // We must have woken nodes A and B: + assert!(fut_a.as_mut().poll(&mut ctx).is_ready()); + assert!(fut_b.as_mut().poll(&mut ctx).is_ready()); + // ...and not nodes C and D: + assert!(fut_c.as_mut().poll(&mut ctx).is_pending()); + assert!(fut_d.as_mut().poll(&mut ctx).is_pending()); + } + + #[test] + fn test_insert_and_wait_cancel_behavior() { + let list = pin!(List::new()); + let list = list.into_ref(); + + // Let's check my assertion about cancel behavior, shall we? + let fut = list.join(()); + drop(fut); // never polled, never woken + + // And just for funsies + { + let (_, waker) = spy_waker(); + let mut ctx = Context::from_waker(&waker); + let fut = pin!(list.join(())); + let _ = fut.poll(&mut ctx); // polled exactly once but not woken + } + } + + #[test] + fn test_iawwc_no_fire_if_never_polled() { + let list = pin!(List::new()); + let list = list.into_ref(); + + let cleanup_called = Cell::new(false); + + let fut = list.join_with_cleanup((), || cleanup_called.set(true)); + assert!(!cleanup_called.get()); + drop(fut); + // Should not be called if the node was detached at drop. + assert!(!cleanup_called.get()); + } + + #[test] + fn test_iawwc_no_fire_if_polled_after_detach() { + let list = pin!(List::new()); + let list = list.into_ref(); + + let (_, waker) = spy_waker(); + let mut ctx = Context::from_waker(&waker); + + let cleanup_called = Cell::new(false); + + { + let mut fut = + pin!(list.join_with_cleanup((), || cleanup_called.set(true),)); + assert!(!cleanup_called.get()); + // Poll so that the node attaches itself. + let _ = fut.as_mut().poll(&mut ctx); + // Cause the node to become detached... + assert!(list.wake_one()); + // ...and make it aware of this. + let _ = fut.poll(&mut ctx); + // dropped + } + // Should not be called if the node was polled after detach. + assert!(!cleanup_called.get()); + } + + #[test] + fn test_iawwc_fire() { + let list = pin!(List::new()); + let list = list.into_ref(); + + let (_, waker) = spy_waker(); + let mut ctx = Context::from_waker(&waker); + + let cleanup_called = Cell::new(false); + + // Testing the cleanup behavior is slightly subtle: we need to activate + // the logic for the case where... + // - The node has successfully been inserted + // - And then detached + // - But hasn't been polled to discover this. + // + // Once the node has been pinned it becomes hard to drop explicitly, but + // we can do it with a scope: + { + let mut fut = + pin!(list.join_with_cleanup((), || cleanup_called.set(true),)); + + // Poll the insert future to cause the node to link up. + let _ = fut.as_mut().poll(&mut ctx); + assert_eq!(cleanup_called.get(), false); // not yet + + // Detach it from the list. 
+ assert!(list.wake_one()); + + assert_eq!(cleanup_called.get(), false); // not yet + + // dropped without poll + } + assert_eq!(cleanup_called.get(), true); // now + } +} diff --git a/msrv-all.sh b/msrv-all.sh index c1b3cbd..d54cfdc 100755 --- a/msrv-all.sh +++ b/msrv-all.sh @@ -4,7 +4,7 @@ set -euo pipefail jq --version -DIRS="os handoff semaphore rwlock testsuite/stm32f4 testsuite/stm32g0 testsuite/stm32f3 examples/*/*/" +DIRS="os list handoff semaphore rwlock testsuite/stm32f4 testsuite/stm32g0 testsuite/stm32f3 examples/*/*/" for d in $DIRS; do pushd $d > /dev/null diff --git a/os/Cargo.toml b/os/Cargo.toml index 0bf920a..c62a7cd 100644 --- a/os/Cargo.toml +++ b/os/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lilos" -version = "1.1.0" +version = "1.2.0" authors = ["Cliff L. Biffle "] description = "A tiny embedded OS based around Futures and async." keywords = ["async", "embedded", "os"] @@ -26,6 +26,7 @@ systick = [] cfg-if.workspace = true cortex-m.workspace = true cortex-m-rt.workspace = true +lilos-list.workspace = true pin-project.workspace = true [lib] diff --git a/os/src/exec.rs b/os/src/exec.rs index 631601b..74a0673 100644 --- a/os/src/exec.rs +++ b/os/src/exec.rs @@ -150,9 +150,8 @@ cfg_if::cfg_if! { use core::sync::atomic::AtomicBool; use core::ptr::{addr_of, addr_of_mut}; - use crate::cheap_assert; - use crate::list::List; - use core::mem::{MaybeUninit, ManuallyDrop}; + use lilos_list::List; + use core::mem::MaybeUninit; use crate::time::TickTime; } } @@ -254,6 +253,10 @@ static NOOP_VTABLE: RawWakerVTable = RawWakerVTable::new( /// Returns a [`Waker`] that doesn't do anything and costs nothing to `clone`. /// This is useful as a placeholder before a *real* `Waker` becomes available. /// You probably don't need this unless you're building your own wake lists. +#[deprecated( + since = "1.2.0", + note = "was only ever used with List which is now deprecated", +)] pub fn noop_waker() -> Waker { // Safety: Waker::from_raw is unsafe because the Waker can do weird and // destructive stuff if either the pointer, or the vtable functions, don't @@ -524,26 +527,12 @@ pub unsafe fn run_tasks_with_preemption_and_idle( let timer_list = unsafe { &mut *addr_of_mut!(TIMER_LIST) }; + // Initialize the list node itself. - // - // Safety: we discharge the obligations of `new` here by continuing, - // below, to pin and finish initialization of the list. - timer_list.write(ManuallyDrop::into_inner(unsafe { - List::new() - })); - // Safety: we're not going to overwrite or drop this static list, so we - // have trivially fulfilled the pin requirements. - let mut timer_list = unsafe { - Pin::new_unchecked(timer_list.assume_init_mut()) - }; - // Safety: finish_init requires that we haven't done anything to the - // List since `new` except for pinning it, which is the case here. - unsafe { - List::finish_init(timer_list.as_mut()); - } + timer_list.write(List::new()); // The list is initialized; we can now produce _shared_ references to - // it. Our one and only Pin<&mut List> ends here. + // it. Because it won't move again, we can pin those references. } #[cfg(feature = "systick")] @@ -553,7 +542,8 @@ pub unsafe fn run_tasks_with_preemption_and_idle( #[cfg(feature = "systick")] { // Scan for any expired timers. - tl.wake_thru(TickTime::now()); + let now = TickTime::now(); + tl.wake_while(|&t| t <= now); } // Capture and reset wake bits, then process any 1s. 
diff --git a/os/src/lib.rs b/os/src/lib.rs index 691300d..3906496 100644 --- a/os/src/lib.rs +++ b/os/src/lib.rs @@ -148,20 +148,20 @@ /// fancy messages. This means failures must be diagnosed by file:line only, so, /// don't use this more than once on the same line. In exchange, this makes /// asserts significantly smaller in terms of text size. -#[cfg_attr(not(debug_assertions), allow(unused_macros))] +#[cfg(any(debug_assertions, feature = "systick"))] macro_rules! cheap_assert { ($x:expr) => { if !$x { panic!(); }; } } -#[allow(unused_imports)] -pub(crate) use cheap_assert; -#[macro_use] -pub mod list; pub mod exec; -pub mod util; pub mod atomic; +pub mod util; + +#[macro_use] +#[deprecated(since = "1.2.0", note = "please move to the lilos-list crate")] +pub mod list; #[cfg(feature = "systick")] pub mod time; diff --git a/os/src/list.rs b/os/src/list.rs index 645d006..c6c3831 100644 --- a/os/src/list.rs +++ b/os/src/list.rs @@ -239,9 +239,9 @@ impl WakerCell { /// /// - The `waker` is a `core::task::Waker`, an abstract reference to a task that /// wishes to be woken up at some point. You'll generally provide -/// [`noop_waker`] and the OS will replace it with an appropriate one when the -/// node is inserted into a list. (The `create_node!` macro will provide -/// `noop_waker` automatically if not overridden.) +/// [`noop_waker`][crate::exec::noop_waker] and the OS will replace it with an +/// appropriate one when the node is inserted into a list. (The `create_node!` +/// macro will provide `noop_waker` automatically if not overridden.) /// /// - The `contents` is some `T`, and is typically a timestamp. Inserting a node /// into a list requires that `T` be `PartialOrd`, and the list will be @@ -270,7 +270,7 @@ impl Node { /// If you need to attach metadata to the node, see [`Node::new_with_meta`]. /// /// Note that you probably don't need to use this directly. See - /// [`create_node!`] for a more convenient option. + /// [`create_node!`][crate::create_node] for a more convenient option. /// /// # Safety /// @@ -290,7 +290,8 @@ impl Node { /// metadata. If your metadata is `()`, please use [`Node::new`] instead. /// /// Note that you probably don't need to use this directly. See - /// [`create_node_with_meta!`] for a more convenient option. + /// [`create_node_with_meta!`][crate::create_node_with_meta] for a more + /// convenient option. /// /// # Safety /// @@ -483,6 +484,7 @@ impl List { Node::new_with_meta( T::default(), meta, + #[allow(deprecated)] crate::exec::noop_waker(), ) }; @@ -657,7 +659,6 @@ impl List { /// threshold` is now detached, and its waker has been called. /// /// - All `Node`s remaining in this list have `contents > threshold`. - #[deprecated = "this function's name is misleading, please use wake_thru instead"] pub fn wake_less_than(self: Pin<&Self>, threshold: T) { self.wake_thru(threshold) } @@ -977,6 +978,7 @@ macro_rules! create_node { } }; ($var:ident, $dl:expr) => { + #[allow(deprecated)] $crate::create_node!($var, $dl, $crate::exec::noop_waker()) }; diff --git a/os/src/mutex.rs b/os/src/mutex.rs index c012a37..bcf5e06 100644 --- a/os/src/mutex.rs +++ b/os/src/mutex.rs @@ -1,11 +1,21 @@ //! Fair mutex that must be pinned. //! -//! This implements a mutex (a kind of lock) guarding a value of type `T`. -//! Creating a `Mutex` by hand is somewhat involved (see [`Mutex::new`] for -//! details), so there's a convenience macro, -//! [`create_mutex!`][crate::create_mutex]. +//! This implements a mutex (a kind of lock) guarding a value of type `T`. This +//! 
mutex must be pinned before it can be used, so the process of creating a +//! mutex generally looks like this: +//! +//! ``` +//! let my_mutex = pin!(Mutex::create(contents)); +//! // drop mutability to share with other code: +//! let my_mutex = my_mutex.into_ref(); +//! ``` +//! +//! There's also a convenience macro, [`create_mutex!`][crate::create_mutex]. +//! +//! If you don't want to store a value inside the mutex, use a `Mutex<()>`, +//! though note that this is a weird use case that might be better served by a +//! semaphore. //! -//! If you don't want to store a value inside the mutex, use a `Mutex<()>`. //! //! # `lock` vs `lock_assuming_cancel_safe` //! @@ -63,22 +73,16 @@ //! to unlock the mutex. (An OS task may contain many processes.) This makes //! unlocking more expensive, but means that the unlock operation is *fair*, //! preventing starvation of contending tasks. -//! -//! However, in exchange for this property, mutexes must be pinned, which makes -//! using them slightly more awkward. See the macros -//! [`create_mutex!`][crate::create_mutex] and -//! [`create_static_mutex!`][crate::create_static_mutex] for convenient -//! shorthand (or as examples of how to do it yourself). use core::cell::UnsafeCell; use core::mem::ManuallyDrop; use core::pin::Pin; use core::sync::atomic::{AtomicUsize, Ordering}; +use lilos_list::List; use pin_project::pin_project; use crate::atomic::AtomicArithExt; -use crate::list::List; pub use crate::util::CancelSafe; /// Holds a `T` that can be accessed from multiple concurrent futures/tasks, @@ -105,35 +109,13 @@ pub struct Mutex { } impl Mutex { - /// Returns an initialized but invalid mutex. - /// - /// # Safety - /// - /// The result is not safe to use or drop yet. You must move it to its final - /// resting place, pin it, and call `finish_init`. - pub unsafe fn new(contents: T) -> ManuallyDrop { - // Safety: List::new is unsafe because it produces a value that cannot - // yet be dropped. We discharge this obligation by unwrapping it and - // moving it into a _new_ ManuallyDrop, kicking the can down the road. - let list = unsafe { List::new() }; - ManuallyDrop::new(Mutex { + /// Returns a mutex containing `contents`. The result must be pinned before + /// it's good for much. + pub const fn create(contents: T) -> Self { + Self { state: AtomicUsize::new(0), - waiters: ManuallyDrop::into_inner(list), + waiters: List::new(), value: UnsafeCell::new(contents), - }) - } - - /// Finishes initializing a mutex, discharging obligations from `new`. - /// - /// # Safety - /// - /// This is safe to call exactly once on the result of `new`, after it has - /// been moved to its final position and pinned. - pub unsafe fn finish_init(this: Pin<&mut Self>) { - // Safety: List::finish_init is safe if our _own_ safety contract is - // upheld. - unsafe { - List::finish_init(this.project().waiters); } } @@ -221,12 +203,9 @@ impl Mutex { return perm; } - // We'd like to put our name on the wait list, please. - create_node!(wait_node, ()); - let p = self.project_ref(); - p.waiters.insert_and_wait_with_cleanup( - wait_node.as_mut(), + p.waiters.join_with_cleanup( + (), || { // Safety: if we are evicted from the wait list, which is // the only time this cleanup routine is called, then we own @@ -265,6 +244,29 @@ impl Mutex { } } +#[deprecated(since = "1.2.0", note = "old-style initialization is complicated, see Mutex::create")] +impl Mutex { + /// Returns an initialized but invalid mutex. + /// + /// # Safety + /// + /// The result is not safe to use or drop yet. 
You must move it to its final + /// resting place, pin it, and call `finish_init`. + pub unsafe fn new(contents: T) -> ManuallyDrop { + ManuallyDrop::new(Self::create(contents)) + } + + /// Finishes initializing a mutex, discharging obligations from `new`. + /// + /// # Safety + /// + /// This is safe to call exactly once on the result of `new`, after it has + /// been moved to its final position and pinned. + pub unsafe fn finish_init(_this: Pin<&mut Self>) { + // This operation no longer does anything. + } +} + /// A token that grants the ability to run one closure against the data guarded /// by a [`Mutex`]. /// @@ -375,11 +377,9 @@ impl Mutex> { } // We'd like to put our name on the wait list, please. - create_node!(wait_node, ()); - let p = self.project_ref(); - p.waiters.insert_and_wait_with_cleanup( - wait_node.as_mut(), + p.waiters.join_with_cleanup( + (), || { // Safety: if we are evicted from the wait list, which is // the only time this cleanup routine is called, then we own @@ -418,19 +418,8 @@ impl Mutex> { #[macro_export] macro_rules! create_mutex { ($var:ident, $contents:expr) => { - let $var = $contents; - // Safety: we discharge the obligations of `new` by pinning and - // finishing the value, below, before it can be dropped. - let mut $var = core::pin::pin!(unsafe { - core::mem::ManuallyDrop::into_inner($crate::mutex::Mutex::new($var)) - }); - // Safety: the value has not been operated on since `new` except for - // being pinned, so this operation causes it to become valid and safe. - unsafe { - $crate::mutex::Mutex::finish_init($var.as_mut()); - } - // Drop mutability. - let $var = $var.as_ref(); + let $var = core::pin::pin!($crate::mutex::Mutex::create($contents)); + let $var = $var.into_ref(); }; } @@ -479,25 +468,17 @@ macro_rules! create_static_mutex { // (which we'll do in a sec) unsafe { __m.write( - ManuallyDrop::into_inner($crate::mutex::Mutex::new($contents)) + $crate::mutex::Mutex::create($contents) ); } // Safety: this is the only mutable reference to M that will ever exist // in the program, so we can pin it as long as we don't touch M again // below (which we do not). - let mut m: Pin<&'static mut _> = unsafe { - Pin::new_unchecked(__m.assume_init_mut()) + let m: Pin<&'static _> = unsafe { + Pin::new_unchecked(__m.assume_init_ref()) }; - - // Safety: the value has not been operated on since `new` except for - // being pinned, so this operation causes it to become valid and safe. - unsafe { - $crate::mutex::Mutex::finish_init(m.as_mut()); - } - - // Drop mutability and return value. - m.into_ref() + m }}; } diff --git a/os/src/time.rs b/os/src/time.rs index ffbcc67..964247c 100644 --- a/os/src/time.rs +++ b/os/src/time.rs @@ -282,11 +282,9 @@ pub async fn sleep_until(deadline: TickTime) { return; } - crate::create_node!(node, deadline); - // Insert our node into the pending timer list. If we get cancelled, the // node will detach itself as it's being dropped. - crate::exec::get_timer_list().insert_and_wait(node.as_mut()).await + crate::exec::get_timer_list().join(deadline).await } /// Sleeps until the system time has increased by `d`. diff --git a/rwlock/Cargo.toml b/rwlock/Cargo.toml index e51063d..e53ea8e 100644 --- a/rwlock/Cargo.toml +++ b/rwlock/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lilos-rwlock" -version = "0.1.0" +version = "0.2.0" authors = ["Cliff L. Biffle "] description = "A read-write / writer-preferred lock for use with lilos." 
keywords = ["async", "embedded", "os"] @@ -17,6 +17,7 @@ default-target = "thumbv7em-none-eabihf" [dependencies] lilos.workspace = true +lilos-list.workspace = true pin-project.workspace = true scopeguard.workspace = true diff --git a/rwlock/src/lib.rs b/rwlock/src/lib.rs index 5b9fd84..f6e0f05 100644 --- a/rwlock/src/lib.rs +++ b/rwlock/src/lib.rs @@ -36,12 +36,9 @@ )] use core::cell::{Cell, UnsafeCell}; -use core::mem::ManuallyDrop; use core::ops::{Deref, DerefMut}; use core::pin::Pin; -use lilos::create_node_with_meta; -use lilos::exec::noop_waker; -use lilos::list::List; +use lilos_list::{List, Meta}; use lilos::util::CancelSafe; use pin_project::pin_project; use scopeguard::ScopeGuard; @@ -231,31 +228,14 @@ impl RwLock { /// /// The result is not safe to use or drop yet. You must move it to its final /// resting place, pin it, and call [`RwLock::finish_init`]. - pub unsafe fn new(contents: T) -> ManuallyDrop { + pub const fn new(contents: T) -> Self { // The Access value chosen here doesn't matter. - let list = unsafe { List::new_with_meta(Access::Exclusive) }; - ManuallyDrop::new(Self { + Self { lock: LockImpl { readers: Cell::new(0), - waiters: ManuallyDrop::into_inner(list), + waiters: List::new(), }, contents: UnsafeCell::new(contents), - }) - } - - /// Finishes initializing a read-write lock, discharging obligations from - /// [`RwLock::new`]. - /// - /// You'll rarely use this function directly. For a more convenient way of - /// creating a `RwLock`, see [`create_rwlock!`]. - /// - /// # Safety - /// - /// This is safe to call exactly once on the result of `new`, after it has - /// been moved to its final position and pinned. - pub unsafe fn finish_init(this: Pin<&mut Self>) { - unsafe { - List::finish_init(this.project().lock.project().waiters); } } } @@ -265,7 +245,7 @@ impl RwLock { struct LockImpl { readers: Cell, #[pin] - waiters: List<(), Access>, + waiters: List>, } impl LockImpl { @@ -279,10 +259,9 @@ impl LockImpl { // primitives, we have no need to register a cleanup action here, // because simply getting evicted from the wait list doesn't grant us // any access we'd need to give up -- that's handled below. - create_node_with_meta!(node, (), Access::Shared, noop_waker()); self.project_ref() .waiters - .insert_and_wait_with_cleanup(node, || { + .join_with_cleanup(Meta(Access::Shared), || { // The release routine advances the reader count _for us_ so // that our access is assured even if we're not promptly polled. // This means we have to reverse that change if we're cancelled. @@ -321,8 +300,8 @@ impl LockImpl { // Wake any number of pending _shared_ users and record their // count, to keep them from getting scooped before they're // polled. - self.project_ref().waiters.wake_while(|n| { - if n.meta() == &Access::Shared { + self.project_ref().waiters.wake_while(|Meta(access)| { + if access == &Access::Shared { let r = self.readers.get(); // We do not want to overflow the reader count during this // wake-frenzy. @@ -360,11 +339,9 @@ impl LockImpl { self.process_exclusive_cancellation(); })); - create_node_with_meta!(node, (), Access::Exclusive, noop_waker()); - self.project_ref() .waiters - .insert_and_wait_with_cleanup(node, || { + .join_with_cleanup(Meta(Access::Exclusive), || { // Disarm the trap. ScopeGuard::into_inner(trap.take().unwrap()); // The release routine decrements the reader count _for us_ so @@ -417,15 +394,15 @@ impl LockImpl { // Wake a _single_ exclusive lock attempt if one exists at the head // of the queue. 
- if p.waiters.wake_one_if(|n| n.meta() == &Access::Exclusive) { + if p.waiters.wake_head_if(|Meta(access)| access == &Access::Exclusive) { // Record it, to keep it from getting scooped. self.readers.set(-1); } else { // Wake any number of pending _shared_ users and record their // count, to keep them from getting scooped before they're // polled. - p.waiters.wake_while(|n| { - if n.meta() == &Access::Shared { + p.waiters.wake_while(|Meta(access)| { + if access == &Access::Shared { let r = self.readers.get(); // We do not want to overflow the reader count during this // wake-frenzy. @@ -461,7 +438,7 @@ impl LockImpl { if self .project_ref() .waiters - .wake_one_if(|n| n.meta() == &Access::Exclusive) + .wake_head_if(|Meta(access)| access == &Access::Exclusive) { // Found one. Record its count to ensure that nobody scoops it // before it gets polled. @@ -474,7 +451,7 @@ impl LockImpl { if self .project_ref() .waiters - .wake_one_if(|n| n.meta() == &Access::Shared) + .wake_head_if(|Meta(access)| access == &Access::Shared) { // Set the count back to saturated. self.readers.set(isize::MAX); @@ -843,27 +820,7 @@ impl DerefMut for ExclusiveGuard<'_, T> { #[macro_export] macro_rules! create_rwlock { ($var:ident, $contents:expr) => { - // Safety: we discharge the obligations of `new` by pinning and - // finishing the value, below, before it can be dropped. - let mut $var = core::pin::pin!({ - // Evaluate $contents eagerly, before defining any locals, and - // outside of any unsafe block. This ensures that the caller will - // get a warning if they do something unsafe in the $contents - // expression, while also ensuring that any existing variable called - // __contents is available for use here. - let __contents = $contents; - unsafe { - core::mem::ManuallyDrop::into_inner($crate::RwLock::new( - __contents, - )) - } - }); - // Safety: the value has not been operated on since `new` except for - // being pinned, so this operation causes it to become valid and safe. - unsafe { - $crate::RwLock::finish_init($var.as_mut()); - } - // Drop mutability. - let $var = $var.as_ref(); + let $var = core::pin::pin!($crate::RwLock::new($contents)); + let $var = $var.into_ref(); }; } diff --git a/semaphore/Cargo.toml b/semaphore/Cargo.toml index 40b7a26..e39c8d6 100644 --- a/semaphore/Cargo.toml +++ b/semaphore/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lilos-semaphore" -version = "0.1.0" +version = "0.2.0" authors = ["Cliff L. Biffle "] description = "A counting semaphore for use with lilos." keywords = ["async", "embedded", "realtime", "os"] @@ -16,6 +16,7 @@ rust-version.workspace = true default-target = "thumbv7em-none-eabihf" [dependencies] +lilos-list.workspace = true lilos.workspace = true pin-project.workspace = true diff --git a/semaphore/src/lib.rs b/semaphore/src/lib.rs index d7ecbe1..ebd6a27 100644 --- a/semaphore/src/lib.rs +++ b/semaphore/src/lib.rs @@ -20,12 +20,10 @@ unused_qualifications )] -use core::mem::ManuallyDrop; use core::pin::Pin; use core::sync::atomic::{AtomicUsize, Ordering}; +use lilos_list::List; use lilos::atomic::AtomicExt; -use lilos::create_node; -use lilos::list::List; use pin_project::pin_project; /// A [counting semaphore]. @@ -124,10 +122,9 @@ impl Semaphore { } // Add ourselves to the wait list... 
- create_node!(node, ()); self.project_ref() .waiters - .insert_and_wait_with_cleanup(node, || { + .join_with_cleanup((), || { // This is called when we've been detached from the wait // list, which means a permit was transferred to us, but // we haven't been polled -- and won't ever be polled, @@ -206,36 +203,11 @@ impl Semaphore { } } - /// Returns an initialized but invalid `Semaphore`. - /// - /// You'll rarely use this function directly. For a more convenient way of - /// creating a `Semaphore`, see [`create_semaphore!`]. - /// - /// # Safety - /// - /// The result is not safe to use or drop yet. You must move it to its final - /// resting place, pin it, and call [`Semaphore::finish_init`]. - pub unsafe fn new(permits: usize) -> ManuallyDrop { - let list = unsafe { List::new() }; - ManuallyDrop::new(Semaphore { + /// Returns an `Semaphore` initialized with `permits` permits. + pub const fn new(permits: usize) -> Self { + Semaphore { available: AtomicUsize::new(permits), - waiters: ManuallyDrop::into_inner(list), - }) - } - - /// Finishes initializing a semaphore, discharging obligations from - /// [`Semaphore::new`]. - /// - /// You'll rarely use this function directly. For a more convenient way of - /// creating a `Semaphore`, see [`create_semaphore!`]. - /// - /// # Safety - /// - /// This is safe to call exactly once on the result of `new`, after it has - /// been moved to its final position and pinned. - pub unsafe fn finish_init(this: Pin<&mut Self>) { - unsafe { - List::finish_init(this.project().waiters); + waiters: List::new(), } } } @@ -257,28 +229,8 @@ pub struct NoPermits; #[macro_export] macro_rules! create_semaphore { ($var:ident, $permits:expr) => { - // Safety: we discharge the obligations of `new` by pinning and - // finishing the value, below, before it can be dropped. - let mut $var = core::pin::pin!({ - // Evaluate $permits eagerly, before defining any locals, and - // outside of any unsafe block. This ensures that the caller will - // get a warning if they do something unsafe in the $permits - // expression, while also ensuring that any existing variable called - // __permits is available for use here. - let __permits = $permits; - unsafe { - core::mem::ManuallyDrop::into_inner($crate::Semaphore::new( - __permits, - )) - } - }); - // Safety: the value has not been operated on since `new` except for - // being pinned, so this operation causes it to become valid and safe. - unsafe { - $crate::Semaphore::finish_init($var.as_mut()); - } - // Drop mutability. - let $var = $var.as_ref(); + let $var = core::pin::pin!($crate::Semaphore::new($permits)); + let $var = $var.into_ref(); }; } @@ -367,25 +319,9 @@ impl ScopedSemaphore { /// /// The result is not safe to use or drop yet. You must move it to its final /// resting place, pin it, and call [`ScopedSemaphore::finish_init`]. - pub unsafe fn new(permits: usize) -> ManuallyDrop { - ManuallyDrop::new(Self { - inner: ManuallyDrop::into_inner(unsafe { Semaphore::new(permits) }), - }) - } - - /// Finishes initializing a semaphore, discharging obligations from - /// [`ScopedSemaphore::new`]. - /// - /// You'll rarely use this function directly. For a more convenient way of - /// creating a `ScopedSemaphore`, see [`create_scoped_semaphore!`]. - /// - /// # Safety - /// - /// This is safe to call exactly once on the result of `new`, after it has - /// been moved to its final position and pinned. 
-    pub unsafe fn finish_init(this: Pin<&mut Self>) {
-        unsafe {
-            Semaphore::finish_init(this.project().inner);
+    pub const fn new(permits: usize) -> Self {
+        Self {
+            inner: Semaphore::new(permits),
         }
     }
 }
@@ -419,27 +355,7 @@ impl Drop for Permit<'_> {
 #[macro_export]
 macro_rules! create_scoped_semaphore {
     ($var:ident, $permits:expr) => {
-        // Safety: we discharge the obligations of `new` by pinning and
-        // finishing the value, below, before it can be dropped.
-        let mut $var = core::pin::pin!({
-            // Evaluate $permits eagerly, before defining any locals, and
-            // outside of any unsafe block. This ensures that the caller will
-            // get a warning if they do something unsafe in the $permits
-            // expression, while also ensuring that any existing variable called
-            // __permits is available for use here.
-            let __permits = $permits;
-            unsafe {
-                core::mem::ManuallyDrop::into_inner($crate::ScopedSemaphore::new(
-                    __permits,
-                ))
-            }
-        });
-        // Safety: the value has not been operated on since `new` except for
-        // being pinned, so this operation causes it to become valid and safe.
-        unsafe {
-            $crate::ScopedSemaphore::finish_init($var.as_mut());
-        }
-        // Drop mutability.
-        let $var = $var.as_ref();
+        let $var = core::pin::pin!($crate::ScopedSemaphore::new($permits));
+        let $var = $var.into_ref();
     };
 }
diff --git a/testsuite/Cargo.toml b/testsuite/Cargo.toml
index 52f6b74..d6fb8e8 100644
--- a/testsuite/Cargo.toml
+++ b/testsuite/Cargo.toml
@@ -29,6 +29,7 @@ lilos-handoff = { workspace = true, optional = true }
 lilos-semaphore = { workspace = true, optional = true }
 lilos-rwlock = { workspace = true, optional = true }
 panic-semihosting.workspace = true
+lilos-list.workspace = true
 
 [lib]
 test = false
diff --git a/testsuite/src/lib.rs b/testsuite/src/lib.rs
index 92b8f7f..4d84fbc 100644
--- a/testsuite/src/lib.rs
+++ b/testsuite/src/lib.rs
@@ -86,7 +86,6 @@ async fn task_coordinator() -> Infallible {
         test_with_deadline_blocking,
         test_notify,
 
-        list::test_node_basics,
         list::test_list_basics,
         list::test_insert_and_wait_not_eager,
         list::test_insert_and_wait_wake_one,
diff --git a/testsuite/src/list.rs b/testsuite/src/list.rs
index 2e58b7b..359e853 100644
--- a/testsuite/src/list.rs
+++ b/testsuite/src/list.rs
@@ -5,72 +5,46 @@
 use core::pin::{Pin, pin};
 use core::cell::Cell;
 
-use lilos::{create_list, create_node};
-use lilos::list::{List, Node};
+use lilos_list::List;
 
 use crate::{poll_and_assert_not_ready, poll_and_assert_ready};
 
-pub async fn test_node_basics() {
-    create_node!(node, ());
-    // Node type is what we expect?
-    let node: Pin<&mut Node<()>> = node;
-
-    assert!(node.is_detached(), "node must begin as detached");
-
-    // Detach must be safe to call on an already-detached node (idempotent).
-    node.as_ref().detach();
-
-    assert!(node.is_detached(), "node must remain detached if not inserted");
-
-    // And, detached node drop -- mostly just checks to see if it inadvertently
-    // panics!
-}
-
 pub async fn test_list_basics() {
-    create_list!(list);
-    // List type is what we expect?
-    let list: Pin<&mut List<()>> = list;
+    let list: Pin<&mut List<()>> = pin!(List::new());
 
     // Make sure these don't, like, assert on an empty list or anything
-    list.as_ref().wake_all();
-    assert_eq!(list.as_ref().wake_one(), false,
+    list.as_ref().wake_while(|_| true);
+    assert_eq!(list.as_ref().wake_head_if(|_| true), false,
         "wake_one on an empty list must return false");
 
     // And, detached list drop.
 }
 
 pub async fn test_insert_and_wait_not_eager() {
-    create_list!(list);
-    // Drop list mutability. TODO: should create_list even return a Pin<&mut>?
+    let list = pin!(List::new());
     let list = list.into_ref();
 
-    create_node!(node, ());
-
     // Insertion must not happen eagerly, it must wait for the insert future to
     // be pinned and polled.
     {
-        let _fut = list.insert_and_wait(node.as_mut());
+        let _fut = list.join(());
         // Should not be able to wake it!
         assert_eq!(list.wake_one(), false);
     }
-
-    assert_eq!(node.is_detached(), true);
 }
 
 pub async fn test_insert_and_wait_wake_one() {
-    create_list!(list);
+    let list = pin!(List::new());
     // Drop list mutability. TODO: should create_list even return a Pin<&mut>?
     let list = list.into_ref();
 
     // Check that we can insert a node, A:
-    create_node!(node_a, ());
-    let mut fut_a = pin!(list.insert_and_wait(node_a.as_mut()));
+    let mut fut_a = pin!(list.join(()));
     poll_and_assert_not_ready!(fut_a);
 
     // Also insert a second node, B:
-    create_node!(node_b, ());
-    let mut fut_b = pin!(list.insert_and_wait(node_b.as_mut()));
+    let mut fut_b = pin!(list.join(()));
 
     // We should be able to wake a node.
     assert!(list.wake_one());
@@ -81,25 +55,21 @@ pub async fn test_insert_and_wait_wake_one() {
 }
 
 pub async fn test_wake_while_insert_order() {
-    create_list!(list);
+    let list = pin!(List::new());
     // Drop list mutability. TODO: should create_list even return a Pin<&mut>?
     let list = list.into_ref();
 
     // Insert a series of nodes:
-    create_node!(node_a, ());
-    let mut fut_a = pin!(list.insert_and_wait(node_a.as_mut()));
+    let mut fut_a = pin!(list.join(()));
     poll_and_assert_not_ready!(fut_a);
 
-    create_node!(node_b, ());
-    let mut fut_b = pin!(list.insert_and_wait(node_b.as_mut()));
+    let mut fut_b = pin!(list.join(()));
     poll_and_assert_not_ready!(fut_b);
 
-    create_node!(node_c, ());
-    let mut fut_c = pin!(list.insert_and_wait(node_c.as_mut()));
+    let mut fut_c = pin!(list.join(()));
     poll_and_assert_not_ready!(fut_c);
 
-    create_node!(node_d, ());
-    let mut fut_d = pin!(list.insert_and_wait(node_d.as_mut()));
+    let mut fut_d = pin!(list.join(()));
     poll_and_assert_not_ready!(fut_d);
 
     // (Ab)use wake_while to only wake up two of them.
@@ -120,35 +90,30 @@ pub async fn test_wake_while_insert_order() {
 }
 
 pub async fn test_insert_and_wait_cancel_behavior() {
-    create_list!(list);
+    let list = pin!(List::new());
     // Drop list mutability. TODO: should create_list even return a Pin<&mut>?
     let list = list.into_ref();
 
-    create_node!(node, ());
-
     // Let's check my assertion about cancel behavior, shall we?
-    let fut = list.insert_and_wait(node.as_mut());
+    let fut = list.join(());
     drop(fut); // never polled, never woken
-    assert!(node.is_detached());
 
     // And just for funsies
     {
-        let fut = pin!(list.insert_and_wait(node.as_mut()));
+        let fut = pin!(list.join(()));
         let _ = futures::poll!(fut); // polled exactly once but not woken
     }
-    assert!(node.is_detached()); // still works?
 }
 
 pub async fn test_iawwc_no_fire_if_never_polled() {
-    create_list!(list);
+    let list = pin!(List::new());
     // Drop list mutability. TODO: should create_list even return a Pin<&mut>?
     let list = list.into_ref();
 
-    create_node!(node, ());
    let cleanup_called = Cell::new(false);
 
-    let fut = list.insert_and_wait_with_cleanup(
-        node.as_mut(),
+    let fut = list.join_with_cleanup(
+        (),
         || cleanup_called.set(true),
     );
     assert!(!cleanup_called.get());
@@ -158,16 +123,15 @@
 }
 
 pub async fn test_iawwc_no_fire_if_polled_after_detach() {
-    create_list!(list);
+    let list = pin!(List::new());
     // Drop list mutability. TODO: should create_list even return a Pin<&mut>?
     let list = list.into_ref();
 
-    create_node!(node, ());
     let cleanup_called = Cell::new(false);
 
     {
-        let mut fut = pin!(list.insert_and_wait_with_cleanup(
-            node.as_mut(),
+        let mut fut = pin!(list.join_with_cleanup(
+            (),
             || cleanup_called.set(true),
         ));
         assert!(!cleanup_called.get());
@@ -184,11 +148,10 @@ pub async fn test_iawwc_no_fire_if_polled_after_detach() {
 }
 
 pub async fn test_iawwc_fire() {
-    create_list!(list);
+    let list = pin!(List::new());
     // Drop list mutability. TODO: should create_list even return a Pin<&mut>?
     let list = list.into_ref();
 
-    create_node!(node, ());
     let cleanup_called = Cell::new(false);
 
     // Testing the cleanup behavior is slightly subtle: we need to activate the
@@ -200,8 +163,8 @@
     // Once the node has been pinned it becomes hard to drop explicitly, but we
     // can do it with a scope:
     {
-        let mut fut = pin!(list.insert_and_wait_with_cleanup(
-            node.as_mut(),
+        let mut fut = pin!(list.join_with_cleanup(
+            (),
             || cleanup_called.set(true),
         ));
         // Poll the insert future to cause the node to link up.
diff --git a/testsuite/src/rwlock.rs b/testsuite/src/rwlock.rs
index bcb37c5..598e56c 100644
--- a/testsuite/src/rwlock.rs
+++ b/testsuite/src/rwlock.rs
@@ -1,15 +1,16 @@
 use core::pin::pin;
-use lilos_rwlock::create_rwlock;
+use lilos_rwlock::RwLock;
 
 use crate::{poll_and_assert_not_ready, poll_and_assert_ready};
 
 pub async fn test_create_drop() {
-    create_rwlock!(_a_lock, 0u32);
+    let _a_lock = pin!(RwLock::new(0u32));
 }
 
 pub async fn test_uncontended_try_lock_shared() {
-    create_rwlock!(a_lock, 0u32);
+    let a_lock = pin!(RwLock::new(0u32));
+    let a_lock = a_lock.into_ref();
 
     let guard1 = a_lock.try_lock_shared()
         .expect("uncontended lock should succeed");
     assert!(a_lock.try_lock_exclusive().is_err());
@@ -24,7 +25,8 @@
 }
 
 pub async fn test_uncontended_try_lock_exclusive() {
-    create_rwlock!(a_lock, 0u32);
+    let a_lock = pin!(RwLock::new(0u32));
+    let a_lock = a_lock.into_ref();
 
     let guard1 = a_lock.try_lock_exclusive()
         .expect("uncontended lock-exclusive should succeed");
@@ -39,7 +41,8 @@
 }
 
 pub async fn test_blocking() {
-    create_rwlock!(a_lock, 0u32);
+    let a_lock = pin!(RwLock::new(0u32));
+    let a_lock = a_lock.into_ref();
 
     let guard1 = a_lock.lock_shared().await;
     let guard2 = a_lock.lock_shared().await;
@@ -90,7 +93,8 @@
 /// shared claims are always coalesced even if they were once separated by an
 /// exclusive claim, since-cancelled.
 pub async fn test_fairness() {
-    create_rwlock!(a_lock, 0u32);
+    let a_lock = pin!(RwLock::new(0u32));
+    let a_lock = a_lock.into_ref();
 
     // E1 S1 S2 E2 S3 S4 E3 S5
     //     ^--- will be removed
diff --git a/testsuite/src/semaphore.rs b/testsuite/src/semaphore.rs
index 337f1fd..c61ba38 100644
--- a/testsuite/src/semaphore.rs
+++ b/testsuite/src/semaphore.rs
@@ -1,26 +1,30 @@
 use core::pin::pin;
 
-use lilos_semaphore::{create_semaphore, create_scoped_semaphore};
+use lilos_semaphore::{Semaphore, ScopedSemaphore};
 
 pub async fn test_create_drop() {
-    create_semaphore!(_a_semaphore, 10);
+    let _a_semaphore = pin!(Semaphore::new(10));
+    let _a_semaphore = _a_semaphore.into_ref();
 }
 
 pub async fn test_acquire() {
-    create_semaphore!(a_semaphore, 10);
+    let a_semaphore = pin!(Semaphore::new(10));
+    let a_semaphore = a_semaphore.into_ref();
     a_semaphore.acquire().await;
     assert_eq!(a_semaphore.permits_available(), 9);
 }
 
 pub async fn test_release() {
-    create_semaphore!(a_semaphore, 10);
+    let a_semaphore = pin!(Semaphore::new(10));
+    let a_semaphore = a_semaphore.into_ref();
     a_semaphore.release();
     assert_eq!(a_semaphore.permits_available(), 11);
 }
 
 pub async fn test_exhaustion() {
     // Start out with a permit.
-    create_semaphore!(a_semaphore, 1);
+    let a_semaphore = pin!(Semaphore::new(1));
+    let a_semaphore = a_semaphore.into_ref();
 
     // Take it.
     a_semaphore.acquire().await;
@@ -39,7 +43,8 @@ pub async fn test_fairness() {
 
     // Start out with a permit.
-    create_semaphore!(a_semaphore, 1);
+    let a_semaphore = pin!(Semaphore::new(1));
+    let a_semaphore = a_semaphore.into_ref();
 
     // Take it.
     a_semaphore.acquire().await;
 
@@ -85,7 +90,8 @@ pub async fn test_cancellation() {
 
     // Start out with a permit.
-    create_semaphore!(a_semaphore, 1);
+    let a_semaphore = pin!(Semaphore::new(1));
+    let a_semaphore = a_semaphore.into_ref();
 
     // Take it.
     a_semaphore.acquire().await;
 
@@ -117,24 +123,28 @@
 // which should Just Work. So, just gonna do basics:
 
 pub async fn test_scoped_create_drop() {
-    create_scoped_semaphore!(_a_semaphore, 10);
+    let _a_semaphore = pin!(ScopedSemaphore::new(10));
+    let _a_semaphore = _a_semaphore.into_ref();
 }
 
 pub async fn test_scoped_acquire() {
-    create_scoped_semaphore!(a_semaphore, 10);
+    let a_semaphore = pin!(ScopedSemaphore::new(10));
+    let a_semaphore = a_semaphore.into_ref();
     let _permit = a_semaphore.acquire().await;
     assert_eq!(a_semaphore.permits_available(), 9);
 }
 
 pub async fn test_scoped_release() {
-    create_scoped_semaphore!(a_semaphore, 10);
+    let a_semaphore = pin!(ScopedSemaphore::new(10));
+    let a_semaphore = a_semaphore.into_ref();
     a_semaphore.out_of_band_release(1);
     assert_eq!(a_semaphore.permits_available(), 11);
 }
 
 pub async fn test_scoped_exhaustion() {
     // Start out with a permit.
-    create_scoped_semaphore!(a_semaphore, 1);
+    let a_semaphore = pin!(ScopedSemaphore::new(1));
+    let a_semaphore = a_semaphore.into_ref();
 
     // Take it.
     let _permit = a_semaphore.acquire().await;