diff --git a/examples/tests/task/context_switch/fp_registers.rs b/examples/tests/task/context_switch/fp_registers.rs
index a684328..fd2195b 100644
--- a/examples/tests/task/context_switch/fp_registers.rs
+++ b/examples/tests/task/context_switch/fp_registers.rs
@@ -13,10 +13,10 @@ use hopter::{
     task::main,
 };

-/// Whether the verifier task is running.
-static TEST_STARTED: AtomicBool = AtomicBool::new(false);
+/// Whether the clobbering task should run.
+static RUN_CLOBBER: AtomicBool = AtomicBool::new(false);

-/// Whether the cloberring task has executed.
+/// Whether the clobbering task has run.
 static CLOBBERED: AtomicBool = AtomicBool::new(false);

 static mut KNOWN_VALUE: [f32; 32] = [
@@ -48,44 +48,75 @@ fn main(_: cortex_m::Peripherals) {
 extern "C" fn verify_registers() -> ! {
     unsafe {
         asm!(
-            // Set `TEST_STARTED` to true.
-            "ldr r0, ={test_started}",
+            // Set `RUN_CLOBBER` to true.
+            "0:",
+            "ldr r0, ={run_clobber}",
             "mov r1, #1",
             "strb r1, [r0]",
-            "0:",
             // Set register `s0-s15` to known values.
             "ldr r0, ={known_value}",
             "vldmia r0, {{s0-s15}}",
             // Trigger context switch.
-            "svc #1",
+            "mov r0, #0xe0",
+            "msr basepri, r0",
+            "mov r1, #0x10000000",
+            "movw r0, #0xed04",
+            "movt r0, #0xe000",
+            "str r1, [r0]",
+            "mov r0, #0",
+            "msr basepri, r0",
+            // See if the clobbering task has run.
+            "ldr r0, ={clobber}",
+            "ldrb r0, [r0]",
+            // If the clobbering task has not run yet, we loop back and do
+            // everything another time.
+            "cmp r0, #0",
+            "beq 0b",
             // Examine the values of registers `s0-s15`. They should remain the
             // same as before the context switch.
             "ldr r0, ={known_value}",
             "vldmia r0, {{s16-s31}}",
             "bl {compare_fp_regs}",
+            "1:",
+            // Set `CLOBBERED` to false.
+            "ldr r0, ={cloberred}",
+            "mov r1, #0",
+            "strb r1, [r0]",
+            // Set `RUN_CLOBBER` to true.
+            "ldr r0, ={run_clobber}",
+            "mov r1, #1",
+            "strb r1, [r0]",
             // Set register `s16-s31` to known values.
             "ldr r0, ={known_value} + 64",
             "vldmia r0, {{s16-s31}}",
             // Trigger context switch.
-            "svc #1",
-            // Examine the values of registers `s16-s31`. They should remain the
-            // same as before the context switch.
-            "ldr r0, ={known_value} + 64",
-            "vldmia r0, {{s0-s15}}",
-            "bl {compare_fp_regs}",
+            "mov r0, #0xe0",
+            "msr basepri, r0",
+            "mov r1, #0x10000000",
+            "movw r0, #0xed04",
+            "movt r0, #0xe000",
+            "str r1, [r0]",
+            "mov r0, #0",
+            "msr basepri, r0",
             // See if the clobbering task has run.
             "ldr r0, ={clobber}",
             "ldrb r0, [r0]",
             // If the clobbering task has not run yet, we loop back and do
             // everything another time.
             "cmp r0, #0",
-            "beq 0b",
+            "beq 1b",
+            // Examine the values of registers `s16-s31`. They should remain the
+            // same as before the context switch.
+            "ldr r0, ={known_value} + 64",
+            "vldmia r0, {{s0-s15}}",
+            "bl {compare_fp_regs}",
             // If the clobbering task has run, then we have verified that the
             // registers in this task's context were not affected. Declare
             // success.
             "b {success}",
-            test_started = sym TEST_STARTED,
+            run_clobber = sym RUN_CLOBBER,
             known_value = sym KNOWN_VALUE,
+            cloberred = sym CLOBBERED,
             compare_fp_regs = sym compare_fp_regs,
             clobber = sym clobber_all_fp_regs,
             success = sym success,
@@ -100,15 +131,22 @@ extern "C" fn verify_registers() -> ! {
 extern "C" fn clobber_all_fp_regs() -> ! {
     unsafe {
         asm!(
-            "ldr r0, ={test_started}",
             "0:",
-            // Load the current value of `TEST_STARTED`.
+            "ldr r0, ={run_clobber}",
+            // Load the current value of `RUN_CLOBBER`.
             "ldrb r1, [r0]",
             "cmp r1, #0",
             // Goto cloberring the register if has started.
"bne 1f", // Otherwise, perform a context switch and try again. - "svc #1", + "mov r0, #0xe0", + "msr basepri, r0", + "mov r1, #0x10000000", + "movw r0, #0xed04", + "movt r0, #0xe000", + "str r1, [r0]", + "mov r0, #0", + "msr basepri, r0", "b 0b", // The verify task is running now. Clobber all registers. This // should not affect the registers in the verify task's context. @@ -120,12 +158,22 @@ extern "C" fn clobber_all_fp_regs() -> ! { // Clobber registers. "ldr r0, ={clobbered_value}", "vldmia r0, {{s0-s31}}", + // Set `RUN_CLOBBER` to false. + "ldr r0, ={run_clobber}", + "mov r1, #1", + "strb r1, [r0]", // Perform context switch so that the verifier task can perform // the check. - "2:", - "svc #1", - "b 2b", - test_started = sym TEST_STARTED, + "mov r0, #0xe0", + "msr basepri, r0", + "mov r1, #0x10000000", + "movw r0, #0xed04", + "movt r0, #0xe000", + "str r1, [r0]", + "mov r0, #0", + "msr basepri, r0", + "b 0b", + run_clobber = sym RUN_CLOBBER, clobbered_value = sym CLOBBERED_VALUE, cloberred = sym CLOBBERED, options(noreturn) diff --git a/examples/tests/task/context_switch/gp_registers.rs b/examples/tests/task/context_switch/gp_registers.rs index 9474c72..49042fc 100644 --- a/examples/tests/task/context_switch/gp_registers.rs +++ b/examples/tests/task/context_switch/gp_registers.rs @@ -13,10 +13,10 @@ use hopter::{ task::main, }; -/// Whether the verifier task is running. -static TEST_STARTED: AtomicBool = AtomicBool::new(false); +/// Whether the clobbering task should run. +static RUN_CLOBBER: AtomicBool = AtomicBool::new(false); -/// Whether the cloberring task has executed. +/// Whether the cloberring task has run. static CLOBBERED: AtomicBool = AtomicBool::new(false); #[main] @@ -41,15 +41,12 @@ fn main(_: cortex_m::Peripherals) { extern "C" fn verify_registers() -> ! { unsafe { asm!( - // Set `TEST_STARTED` to true. - "ldr r0, ={test_started}", + // Set `RUN_CLOBBER` to true. + "0:", + "ldr r0, ={run_clobber}", "mov r1, #1", "strb r1, [r0]", - "0:", - // Preserve the current stack pointer value in the stack. - "mov r0, sp", - "push {{r0}}", - // Write some known values to the registers. + // Write some known values to the low registers. "mov r0, #1", "mov r1, #2", "mov r2, #3", @@ -58,16 +55,24 @@ extern "C" fn verify_registers() -> ! { "mov r5, #6", "mov r6, #7", "mov r7, #8", - "mov r8, #9", - "mov r9, #10", - "mov r10, #11", - "mov r11, #12", - "mov r12, #13", - "mov lr, #14", // Trigger context switch. - "svc #1", - // Examine the values of registers. They should remain the same as - // before the context switch. + "mov r8, #0xe0", + "msr basepri, r8", + "mov r9, #0x10000000", + "movw r8, #0xed04", + "movt r8, #0xe000", + "str r9, [r8]", + "mov r8, #0", + "msr basepri, r8", + // See if the clobbering task has run. + "ldr r8, ={clobber}", + "ldrb r8, [r8]", + // If the clobbering task has not run yet, we loop back and do + // everything another time. + "cmp r8, #0", + "beq 0b", + // Examine the values of low registers. They should remain the same + // as before the context switch. "cmp r0, #1", "bne {error}", "cmp r1, #2", @@ -84,6 +89,43 @@ extern "C" fn verify_registers() -> ! { "bne {error}", "cmp r7, #8", "bne {error}", + "1:", + // Set `CLOBERRED` to false. + "ldr r0, ={cloberred}", + "mov r1, #1", + "strb r1, [r0]", + // Set `RUN_CLOBBER` to true. + "ldr r0, ={run_clobber}", + "mov r1, #1", + "strb r1, [r0]", + // Preserve the current stack pointer value in the stack. + "mov r0, sp", + "push {{r0}}", + // Write some known values to the high registers. 
+            "mov r8, #9",
+            "mov r9, #10",
+            "mov r10, #11",
+            "mov r11, #12",
+            "mov r12, #13",
+            "mov lr, #14",
+            // Trigger context switch.
+            "mov r0, #0xe0",
+            "msr basepri, r0",
+            "mov r1, #0x10000000",
+            "movw r0, #0xed04",
+            "movt r0, #0xe000",
+            "str r1, [r0]",
+            "mov r0, #0",
+            "msr basepri, r0",
+            // See if the clobbering task has run.
+            "ldr r0, ={clobber}",
+            "ldrb r0, [r0]",
+            // If the clobbering task has not run yet, we loop back and do
+            // everything another time.
+            "cmp r0, #0",
+            "beq 1b",
+            // Examine the values of high registers. They should remain the same
+            // as before the context switch.
             "cmp r8, #9",
             "bne {error}",
             "cmp r9, #10",
@@ -101,18 +143,12 @@ extern "C" fn verify_registers() -> ! {
             "pop {{r0}}",
             "cmp r0, sp",
             "bne {error}",
-            // See if the clobbering task has run.
-            "ldr r0, ={clobber}",
-            "ldrb r0, [r0]",
-            // If the clobbering task has not run yet, we loop back and do
-            // everything another time.
-            "cmp r0, #0",
-            "beq 0b",
             // If the clobbering task has run, then we have verified that the
             // registers in this task's context were not affected. Declare
             // success.
             "b {success}",
-            test_started = sym TEST_STARTED,
+            run_clobber = sym RUN_CLOBBER,
+            cloberred = sym CLOBBERED,
             clobber = sym clobber_all_gp_regs,
             error = sym error,
             success = sym success,
@@ -127,15 +163,22 @@ extern "C" fn verify_registers() -> ! {
 extern "C" fn clobber_all_gp_regs() -> ! {
     unsafe {
         asm!(
-            "ldr r0, ={test_started}",
             "0:",
-            // Load the current value of `TEST_STARTED`.
+            "ldr r0, ={run_clobber}",
+            // Load the current value of `RUN_CLOBBER`.
             "ldrb r1, [r0]",
             "cmp r1, #0",
             // Goto cloberring the register if has started.
             "bne 1f",
             // Otherwise, perform a context switch and try again.
-            "svc #1",
+            "mov r0, #0xe0",
+            "msr basepri, r0",
+            "mov r1, #0x10000000",
+            "movw r0, #0xed04",
+            "movt r0, #0xe000",
+            "str r1, [r0]",
+            "mov r0, #0",
+            "msr basepri, r0",
             "b 0b",
             // The verify task is running now. Clobber all registers. This
             // should not affect the registers in the verify task's context.
@@ -159,12 +202,22 @@ extern "C" fn clobber_all_gp_regs() -> ! {
             "mov r11, #0xffffffff",
             "mov r12, #0xffffffff",
             "mov lr, #0xffffffff",
+            // Set `RUN_CLOBBER` to false.
+            "ldr r0, ={run_clobber}",
+            "mov r1, #0",
+            "strb r1, [r0]",
             // Perform context switch so that the verifier task can perform
             // the check.
-            "2:",
-            "svc #1",
-            "b 2b",
-            test_started = sym TEST_STARTED,
+            "mov r0, #0xe0",
+            "msr basepri, r0",
+            "mov r1, #0x10000000",
+            "movw r0, #0xed04",
+            "movt r0, #0xe000",
+            "str r1, [r0]",
+            "mov r0, #0",
+            "msr basepri, r0",
+            "b 0b",
+            run_clobber = sym RUN_CLOBBER,
             cloberred = sym CLOBBERED,
             options(noreturn)
         )
diff --git a/src/interrupt/context_switch.rs b/src/interrupt/context_switch.rs
index f4e4018..9cd8131 100644
--- a/src/interrupt/context_switch.rs
+++ b/src/interrupt/context_switch.rs
@@ -112,3 +112,18 @@ extern "C" fn pendsv_handler(ex_ret_lr: u32) {
     // chosen task to run.
     Scheduler::pick_next();
 }
+
+/// Invoke the scheduler to choose a new task to run.
+///
+/// A task may voluntarily yield the CPU or it may be forced to yield, e.g.,
+/// when becoming blocked on a synchronization primitive.
+///
+/// This function should be called in a task's context and never in an ISR's
+/// context.
+pub(crate) fn yield_current_task() {
+    unsafe {
+        cortex_m::register::basepri::write(config::PENDSV_PRIORITY);
+        cortex_m::peripheral::SCB::set_pendsv();
+        cortex_m::register::basepri::write(config::IRQ_ENABLE_BASEPRI_PRIORITY);
+    }
+}
diff --git a/src/interrupt/svc.rs b/src/interrupt/svc.rs
index 12621b5..9b6c912 100644
--- a/src/interrupt/svc.rs
+++ b/src/interrupt/svc.rs
@@ -41,21 +41,6 @@ pub(crate) unsafe extern "C" fn svc_free(ptr: *mut u8) {
     )
 }

-/// Yield the current task. Let the scheduler choose the next task to run.
-/// A task may voluntarily yield the CPU or it may be forced to yield when
-/// becoming blocked on a synchronization primitive.
-#[naked]
-pub(crate) extern "C" fn svc_yield_current_task() {
-    unsafe {
-        asm!(
-            "svc {task_yield}",
-            "bx lr",
-            task_yield = const(SVCNum::TaskYield as u8),
-            options(noreturn)
-        )
-    }
-}
-
 /// Terminate the current task and free its task struct.
 #[naked]
 pub(crate) unsafe extern "C" fn svc_destroy_current_task() {
diff --git a/src/interrupt/svc_handler.rs b/src/interrupt/svc_handler.rs
index 0a1d5a1..ddf67f8 100644
--- a/src/interrupt/svc_handler.rs
+++ b/src/interrupt/svc_handler.rs
@@ -1,17 +1,12 @@
 //! A task invokes SVC when the kernel operation requested by the task needs
 //! to run with the kernel contiguous stack. An SVC always returns back to the
-//! calling task, i.e., SVC itself does not *directly* perform any context
-//! switch. Based on this invariant, the SVC entry instruction sequence is
-//! optimized so that only minimal context is saved. Specifically, caller-saved
-//! registers are pushed to the user segmented stack, forming the trap frame,
-//! while callee-saved registers are preserved by the handler functions
-//! following the function call ABI. So, there is no need to make a copy of
-//! callee-saved registers, unlike in [`TaskCtxt`](crate::task::TaskCtxt).
-//!
-//! To clarify, a task indeed invokes SVC to yield, but the actual context
-//! switch is done by chaining PendSV after the SVC. Logically, the SVC still
-//! returns to the yielding task, but PendSV then immediately causes the task
-//! to be switched out of the CPU.
+//! calling task, i.e., SVC does not perform any context switch. Based on this
+//! invariant, the SVC entry instruction sequence is optimized so that only
+//! minimal context is saved. Specifically, caller-saved registers are pushed
+//! to the user segmented stack, forming the trap frame, while callee-saved
+//! registers are preserved by the handler functions following the function
+//! call ABI. So, there is no need to make a copy of callee-saved registers,
+//! unlike in [`TaskCtxt`](crate::task::TaskCtxt).

 use super::trap_frame::TrapFrame;
 use crate::{
@@ -26,21 +21,14 @@ use int_enum::IntEnum;
 #[repr(u8)]
 #[derive(IntEnum)]
 pub(crate) enum SVCNum {
-    /// The calling task wants to get off from CPU. A task may voluntarily
-    /// yield the CPU or it may be forced to yield when becoming blocked on a
-    /// synchronization primitive.
-    ///
-    /// Note that the SVC handler does not directly perform a context switch.
-    /// Instead, a PendSV will be tail chained to perform it.
-    TaskYield = 1,
     /// The task wants to terminate and release its task struct.
-    TaskDestroy = 2,
+    TaskDestroy = 0,
     /// The calling task wants to release its top stacklet.
-    TaskLessStack = 3,
+    TaskLessStack = 1,
     /// The task wants to allocate dynamic memory.
-    MemAlloc = 4,
+    MemAlloc = 2,
     /// The task wants to free dynamic memory.
-    MemFree = 5,
+    MemFree = 3,
     /// The task wants to allocate a stacklet to run the stack unwinder.
     TaskUnwindPrepare = 252,
     /// The task wants to release the stacklet used to run the unwinder and
@@ -147,7 +135,6 @@ fn get_svc_num(tf: &TrapFrame) -> SVCNum {
 /// the assembly code.
 extern "C" fn svc_handler(tf: &mut TrapFrame, ctxt: &mut TaskSVCCtxt) {
     match get_svc_num(tf) {
-        SVCNum::TaskYield => Scheduler::yield_current_task_from_svc(),
         SVCNum::TaskDestroy => Scheduler::drop_current_task_from_svc(),
         SVCNum::TaskLessStack => task::less_stack(tf, ctxt),
         SVCNum::TaskMoreStack => task::more_stack(tf, ctxt, MoreStackReason::Normal),
diff --git a/src/schedule/idle.rs b/src/schedule/idle.rs
index e7ff8e8..0b2b9e3 100644
--- a/src/schedule/idle.rs
+++ b/src/schedule/idle.rs
@@ -1,5 +1,5 @@
 use crate::{
-    interrupt::svc,
+    interrupt::context_switch,
     sync::{SpinSchedSafe, SpinSchedSafeGuard},
 };
 use alloc::{sync::Arc, vec::Vec};
@@ -28,7 +28,7 @@ pub(super) unsafe extern "C" fn idle_task() -> ! {
     // The idle task is always executed first when the scheduler is just
     // started. A main task should always be present awaiting to run. Perform
    // a context switch to let the main task run.
-    svc::svc_yield_current_task();
+    context_switch::yield_current_task();

     // If nothing to do, enter low power state.
     loop {
diff --git a/src/schedule/scheduler.rs b/src/schedule/scheduler.rs
index 68bc920..b6cf9c0 100644
--- a/src/schedule/scheduler.rs
+++ b/src/schedule/scheduler.rs
@@ -1,7 +1,7 @@
 use super::{current, idle};
 use crate::{
     config,
-    interrupt::svc,
+    interrupt::context_switch,
     sync::{Access, AllowPendOp, Holdable, RefCellSchedSafe, RunPendedOp, SoftLock, Spin},
     task::{Task, TaskListAdapter, TaskListInterfaces, TaskState},
     unrecoverable::{self, Lethal},
@@ -330,7 +330,7 @@ impl Scheduler {
         // Go through an SVC to perform context switch if currently is in
         // task context.
         if current::is_in_task_context() {
-            svc::svc_yield_current_task();
+            context_switch::yield_current_task();
         // Tail chain a PendSV to directly perform a context switch if
         // currently is in an ISR context. But if the code is already
         // *performing* context switch, i.e., called by PendSV, then we
@@ -356,12 +356,6 @@ impl Scheduler {
         // Tail chain a PendSV to perform a context switch.
         cortex_m::peripheral::SCB::set_pendsv()
     }
-
-    /// Switch to another ready task.
-    pub(crate) fn yield_current_task_from_svc() {
-        // Tail chain a PendSV to perform a context switch.
-        cortex_m::peripheral::SCB::set_pendsv()
-    }
 }

 /// The guard type returned when suspending the scheduler. The scheduler will
diff --git a/src/sync/mailbox.rs b/src/sync/mailbox.rs
index fe41b56..394483c 100644
--- a/src/sync/mailbox.rs
+++ b/src/sync/mailbox.rs
@@ -1,6 +1,6 @@
 use super::{Access, AllowPendOp, RefCellSchedSafe, RunPendedOp, SoftLock, Spin};
 use crate::{
-    interrupt::svc,
+    interrupt::context_switch,
     schedule::{current, scheduler::Scheduler},
     task::{Task, TaskState},
     time, unrecoverable,
@@ -209,7 +209,7 @@ impl Mailbox {
         });

         if should_block {
-            svc::svc_yield_current_task();
+            context_switch::yield_current_task();
         }
     }

@@ -270,7 +270,7 @@ impl Mailbox {

         if should_block {
             // If the task should block, request a context switch.
-            svc::svc_yield_current_task();
+            context_switch::yield_current_task();

             // We reach here if either the waiting task is notified or the
             // waiting time reaches timeout.
diff --git a/src/sync/wait_queue.rs b/src/sync/wait_queue.rs
index 2f796b2..40c10a4 100644
--- a/src/sync/wait_queue.rs
+++ b/src/sync/wait_queue.rs
@@ -2,7 +2,7 @@ use super::{
     Access, AllowPendOp, Lockable, RefCellSchedSafe, RunPendedOp, SoftLock, Spin, UnlockableGuard,
 };
 use crate::{
-    interrupt::svc,
+    interrupt::context_switch,
     schedule::{current, scheduler::Scheduler},
     task::{TaskListAdapter, TaskListInterfaces, TaskState},
     unrecoverable,
@@ -103,7 +103,7 @@ impl WaitQueue {

         // We have put the current task to the wait queue.
         // Tell the scheduler to run another task.
-        svc::svc_yield_current_task();
+        context_switch::yield_current_task();

         // Outline the logic to reduce the stack frame size of `.wait()`.
         #[inline(never)]
@@ -153,7 +153,7 @@ impl WaitQueue {

         // Otherwise, we have put the current task to the wait queue.
         // Tell the scheduler to run another task.
-        svc::svc_yield_current_task();
+        context_switch::yield_current_task();
     }

     // Outline the logic to reduce the stack frame size of `.wait_until()`.
@@ -230,7 +230,7 @@ impl WaitQueue {
             // the scheduler to run another task. After this task is scheduled
             // again, take back the lock and try again.
             Ok(mutex) => {
-                svc::svc_yield_current_task();
+                context_switch::yield_current_task();
                 guard = mutex.lock_and_get_guard();
             }
         }
diff --git a/src/task/current.rs b/src/task/current.rs
index f2de6d5..559e7e1 100644
--- a/src/task/current.rs
+++ b/src/task/current.rs
@@ -1,6 +1,6 @@
 use crate::{
     config,
-    interrupt::svc,
+    interrupt::context_switch,
     schedule::{current, scheduler::Scheduler},
 };

@@ -9,7 +9,7 @@ use crate::{
 pub fn yield_current() {
     // Yield only if the scheduler is not suspended.
     if !Scheduler::is_suspended() {
-        svc::svc_yield_current_task();
+        context_switch::yield_current_task();
     }
 }

@@ -25,7 +25,7 @@ pub fn change_current_priority(prio: u8) -> Result<(), ()> {
         return Err(());
     }
     current::with_cur_task(|cur_task| cur_task.change_intrinsic_priority(prio));
-    svc::svc_yield_current_task();
+    context_switch::yield_current_task();
     Ok(())
 }

diff --git a/src/time/mod.rs b/src/time/mod.rs
index 8d03979..1c9aa30 100644
--- a/src/time/mod.rs
+++ b/src/time/mod.rs
@@ -1,6 +1,6 @@
 use crate::{
     config,
-    interrupt::svc,
+    interrupt::context_switch,
     schedule::{current, scheduler::Scheduler},
     sync::{Access, AllowPendOp, RefCellSchedSafe, RunPendedOp, SoftLock, Spin},
     task::{Task, TaskListAdapter, TaskListInterfaces, TaskState},
@@ -150,7 +150,7 @@ fn sleep_ms_unchecked(ms: u32) {

     // Yield from the current task. Even if the current task has already
     // been woken up, yielding from it will not introduce deadlock.
-    svc::svc_yield_current_task();
+    context_switch::yield_current_task();
 }

 // Outline the logic to reduce the stack frame size of `sleep_ms`.
diff --git a/src/unwind/unwind.rs b/src/unwind/unwind.rs
index 91c03aa..c9e6cf8 100644
--- a/src/unwind/unwind.rs
+++ b/src/unwind/unwind.rs
@@ -39,7 +39,7 @@ use super::{
 use crate::{
     config,
     interrupt::{
-        svc,
+        context_switch, svc,
         svc_handler::SVCNum,
         trap_frame::{self, TrapFrame},
     },
@@ -524,7 +524,7 @@ impl UnwindState<'static> {
         // Let the scheduler re-schedule so the above priority reduction
         // will take effect.
         if !Scheduler::is_suspended() {
-            svc::svc_yield_current_task();
+            context_switch::yield_current_task();
         }
     }

@@ -1029,7 +1029,7 @@ unsafe extern "C" fn resume_unwind<'a>(
     if !config::ALLOW_TASK_PREEMPTION {
         if !current::is_in_isr_context() {
             if !Scheduler::is_suspended() {
-                svc::svc_yield_current_task();
+                context_switch::yield_current_task();
             }
         }
     }
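
Note on the recurring assembly sequence: the eight instructions that replace `svc #1` throughout the tests are an open-coded copy of the new `context_switch::yield_current_task()`. Below is a minimal annotated sketch of the same sequence, assuming `PENDSV_PRIORITY == 0xe0` and `IRQ_ENABLE_BASEPRI_PRIORITY == 0` as implied by the assembly; the function name `yield_via_pendsv` is illustrative and not part of the patch.

```rust
use cortex_m::{peripheral::SCB, register::basepri};

/// Pend a PendSV and take it at a controlled point. Mirrors the
/// mov/msr/movw/movt/str sequence in the test assembly.
fn yield_via_pendsv() {
    unsafe {
        // Raise BASEPRI to the PendSV priority so that the exception,
        // once pended, cannot preempt immediately ("msr basepri, 0xe0").
        basepri::write(0xe0);
        // Set the PENDSVSET bit (bit 28, 0x1000_0000) in SCB->ICSR at
        // 0xE000_ED04; this is the "str r1, [r0]" in the assembly.
        SCB::set_pendsv();
        // Drop BASEPRI back to 0 so nothing is masked. The pended PendSV
        // is taken right here, and its handler performs the context switch.
        basepri::write(0);
    }
}
```

Yielding this way removes the SVC round-trip entirely, which is why `SVCNum::TaskYield` disappears and the remaining SVC numbers are renumbered: per the updated module comment in `src/interrupt/svc_handler.rs`, an SVC now always returns to the calling task without performing any context switch.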