Skip to content

Commit

Permalink
Refactor scheduler structure.
Browse files Browse the repository at this point in the history
  • Loading branch information
zyma98 committed Aug 15, 2024
1 parent 30085c1 commit 12891b9
Show file tree
Hide file tree
Showing 24 changed files with 364 additions and 333 deletions.
7 changes: 4 additions & 3 deletions src/allocator/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,8 @@ use core::{
use super::{
boot,
interrupt::{svc, trap_frame::TrapFrame},
schedule, task,
schedule::scheduler,
task,
unrecoverable::{self, Lethal},
};

Expand Down Expand Up @@ -165,7 +166,7 @@ pub fn init_allocator() {
fn die_if_not_in_svc() {
// Only perform sanity check after the scheduler has started, otherwise
// we may still be running with the bootstrap stack with MSP.
if !schedule::is_scheduler_started() {
if !scheduler::has_started() {
return;
}

Expand All @@ -187,7 +188,7 @@ fn die_if_not_in_svc() {
fn die_if_not_in_svc_or_pendsv() {
// Only perform sanity check after the scheduler has started, otherwise
// we may still be running with the bootstrap stack with MSP.
if !schedule::is_scheduler_started() {
if !scheduler::has_started() {
return;
}

Expand Down
4 changes: 2 additions & 2 deletions src/boot/entry.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
use crate::{allocator, config, schedule, task, unrecoverable::Lethal};
use crate::{allocator, config, schedule::scheduler, task, unrecoverable::Lethal};
use alloc::boxed::Box;
use core::sync::atomic::AtomicPtr;
use cortex_m::peripheral::scb::SystemHandler;
Expand Down Expand Up @@ -54,6 +54,6 @@ pub extern "C" fn entry() -> ! {
.set_priority(config::MAIN_TASK_PRIORITY)
.spawn()
.unwrap_or_die();
schedule::start_scheduler();
scheduler::start();
}
}
41 changes: 26 additions & 15 deletions src/interrupt/context_switch.rs
Original file line number Diff line number Diff line change
@@ -1,8 +1,12 @@
use crate::{config, schedule, task::TaskCtxt};
use crate::{
config,
schedule::{current, scheduler},
unrecoverable,
};
use core::arch::asm;

/// The interrupt entry function for PendSV. It preserves the registers and segmented
/// stack status of the previously running task.
/// stack status of the previously running task. PendSV is used for context switch.
///
/// The PendSV handling is slower than other IRQ, because it saves the full context of
/// the task for the purpose of doing context switch. On the contrary, other IRQ handlers
Expand All @@ -26,10 +30,10 @@ unsafe extern "C" fn pendsv_entry() {
"ldmia r12, {{r0-r2}}",
// Let `r3` hold the previously running task's stack pointer.
"mrs r3, psp",
// Let `r12` hold the address of `CUR_TASK_REGS`.
// Let `r12` hold the address of `CUR_TASK_CTXT_PTR`.
"movw r12, :lower16:{cur_task}",
"movt r12, :upper16:{cur_task}",
// Let `r12` hold `CUR_TASK_REGS`, which is a pointer.
// Let `r12` hold `CUR_TASK_CTXT_PTR`, which is a pointer.
"ldr r12, [r12]",
// Preserve the TLS, stacklet boundary, and register `r4-r11`.
// Register `r0-r3` and `r12` are pushed by hardware onto the task's stack.
Expand All @@ -49,9 +53,12 @@ unsafe extern "C" fn pendsv_entry() {
"mov r0, lr",
// Call the handler function.
"bl {pendsv_handler}",
// Move the return value, which is holding the content of `CUR_TASK_REGS`,
// to `r12`.
"mov r12, r0",
// Let `r12` hold the address of `CUR_TASK_CTXT_PTR`.
"movw r12, :lower16:{cur_task}",
"movt r12, :upper16:{cur_task}",
// Let `r12` hold `CUR_TASK_CTXT_PTR`, which is a pointer.
// The pointer content should have been updated by the scheduler.
"ldr r12, [r12]",
// Let `r0-r2` hold the TLS fields and `r3` hold the task's stack pointer.
// Restore the value in r4-r11.
"ldmia r12!, {{r0-r3, r4-r11}}",
Expand All @@ -66,22 +73,24 @@ unsafe extern "C" fn pendsv_entry() {
"mrs r3, msp",
"ldr r2, ={kern_stk_bottom}",
"cmp r2, r3",
// Infinite loop if the check fails.
// Call `unrecoverable::die` if the check fails.
"bne 0f",
// Perform exception return, assuming that the task has floating
// point context. Register r0-r3, r12, lr, s0-s15, and fpscr will
// be restored from the trap frame on the task's stack.
"ldr lr, ={ex_ret_to_psp_with_fp}",
"bx lr",
// Infinite loop.
// Call `unrecoverable::die`.
"0:",
"b 0b",
cur_task = sym schedule::CUR_TASK_REGS,
"bl {die}",
"udf #254",
cur_task = sym current::CUR_TASK_CTXT_PTR,
tls_mem_addr = const config::TLS_MEM_ADDR,
kern_stk_boundary = const config::CONTIGUOUS_STACK_BOUNDARY,
pendsv_handler = sym pendsv_handler,
kern_stk_bottom = const config::CONTIGUOUS_STACK_BOTTOM,
ex_ret_to_psp_with_fp = const 0xffffffedu32,
die = sym unrecoverable::die,
options(noreturn)
)
}
Expand All @@ -90,14 +99,16 @@ unsafe extern "C" fn pendsv_entry() {
/// and the floating point registers s0-s15 were pushed in the trap frame.
fn die_if_unexpected_pendsv(ex_ret_lr: u32) {
if ex_ret_lr != 0xffffffed {
loop {}
unrecoverable::die();
}
}

/// The PendSV handler. Decleared as `extern "C"` because it's called from
/// The PendSV handler. Decleared as `extern "C"` because it is called from
/// the assembly code.
extern "C" fn pendsv_handler(ex_ret_lr: u32) -> *mut TaskCtxt {
extern "C" fn pendsv_handler(ex_ret_lr: u32) {
die_if_unexpected_pendsv(ex_ret_lr);

schedule::schedule()
// The `CUR_TASK_CTXT_PTR` pointer will be updated to reflect the next
// chosen task to run.
scheduler::pick_next();
}
4 changes: 1 addition & 3 deletions src/interrupt/mod.rs
Original file line number Diff line number Diff line change
@@ -1,10 +1,8 @@
mod context_switch;
pub(super) mod context_switch;
pub mod default;
pub(super) mod svc;
mod svc_handler;
mod systick;
#[cfg(feature = "exti1_panic")]
mod test;
pub mod trap_frame;

pub(super) use svc_handler::{SVCNum, TaskSVCCtxt};
Expand Down
7 changes: 4 additions & 3 deletions src/interrupt/svc_handler.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,8 @@
use super::trap_frame::TrapFrame;
use crate::{
allocator, config, schedule,
allocator, config,
schedule::scheduler,
task::{self, MoreStackReason, TaskLocalStorage},
unrecoverable::{self, Lethal},
};
Expand Down Expand Up @@ -145,8 +146,8 @@ extern "C" fn svc_handler(tf: &mut TrapFrame, ctxt: &mut TaskSVCCtxt) {
match get_svc_num(tf) {
// Task wants to yield. Mark its state as ready so that the
// scheduler can schedule it later.
SVCNum::TaskYield => schedule::yield_cur_task_from_isr(),
SVCNum::TaskDestroy => schedule::destroy_current_task_and_schedule(),
SVCNum::TaskYield => scheduler::yield_cur_task_from_isr(),
SVCNum::TaskDestroy => scheduler::destroy_current_task_and_schedule(),
SVCNum::TaskLessStack => task::less_stack(tf, ctxt),
SVCNum::TaskMoreStack => task::more_stack(tf, ctxt, MoreStackReason::Normal),
SVCNum::TaskMoreStackFromDrop => task::more_stack(tf, ctxt, MoreStackReason::Drop),
Expand Down
8 changes: 0 additions & 8 deletions src/interrupt/test.rs

This file was deleted.

4 changes: 2 additions & 2 deletions src/schedule/cpu_usage.rs
Original file line number Diff line number Diff line change
Expand Up @@ -132,13 +132,13 @@ impl<Clock> IdleCallback for CpuUsage<Clock>
where
Clock: MicrosecPrecision + Send + Sync,
{
fn idle_begin_callback(&self) {
fn idle_begin(&self) {
// Save the timestamp that the idle task resumes.
let cur_ts = self.clock.read_clock_us();
self.swap_idle_start_timestamp(cur_ts);
}

fn idle_end_callback(&self) {
fn idle_end(&self) {
// If the idle task was running but it is now switched out, update
// the slots used to calculate CPU load.
let cur_ts = self.clock.read_clock_us();
Expand Down
133 changes: 133 additions & 0 deletions src/schedule/current.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,133 @@
use crate::{
sync::{self, RwLock},
task::{Task, TaskCtxt},
unrecoverable,
};
use alloc::sync::Arc;
use core::{
arch::asm,
sync::atomic::{AtomicPtr, Ordering},
};

/// The `Arc` of the currently running task. After the scheduler is started,
/// it should always be `Some`. When no other user task is ready, the current
/// task should be the idle task.
///
/// The [`RwLock`] around it is only for sanity check. It is taken in writer
/// mode only inside [`update_cur_task`] and in reader mode by the
/// `with_current_task*` helpers below.
///
/// NOTE: We must use a [`RwLock`] instead of a
/// [`SpinSchedSafe`](crate::sync::SpinSchedSafe) to protect the data. This is
/// because the [`more_stack`](crate::task::more_stack) and
/// [`less_stack`](crate::task::less_stack) function need to access the current
/// task struct. However, `more_stack` or `less_stack` may be invoked when the
/// current task struct is being accessed. Thus, using a spin lock will cause
/// deadlock, and `RwLock` is necessary.
static CUR_TASK: RwLock<Option<Arc<Task>>> = RwLock::new(None);

/// Set another task to be the current task. The current task will lock its
/// context field. See [`Task`] for the context lock invariant.
///
/// This updates three pieces of state together while holding `CUR_TASK` in
/// writer mode: the outgoing task's context lock is released, the incoming
/// task's context lock is acquired, and [`CUR_TASK_CTXT_PTR`] is pointed at
/// the incoming task's context so the context switch assembly finds it.
pub(super) fn update_cur_task(task: Arc<Task>) {
    let mut write_guard = CUR_TASK.write();

    // Unlock the context struct for the task being context switched out of
    // the CPU. The only case where the current task is `None` is upon system
    // boot.
    //
    // Safety: The lock was acquired by the next statement when the task was
    // being context switched on to the CPU.
    if let Some(ref cur_task) = *write_guard {
        unsafe {
            cur_task.force_unlock_ctxt();
        }
    }

    // Acquire the context lock for the task going to be executed on the CPU.
    let task_ctxt_ptr = task.lock_ctxt();

    // Update the global pointer so that the context switch assembly sequence
    // will find the new task context through the pointer.
    CUR_TASK_CTXT_PTR.store(task_ctxt_ptr, Ordering::SeqCst);

    // Update the global `Arc` current task reference. `replace` drops the
    // previous task's `Arc`, releasing this module's reference to it.
    write_guard.replace(task);
}

/// Point to the struct that preserves the task's callee-saved registers upon
/// context switch. This pointer is used by the context switch assembly
/// sequence in [`context_switch`](crate::interrupt::context_switch), which
/// loads it via a `sym` operand. Updated by [`update_cur_task`] each time a
/// new task is chosen to run; null only before the scheduler starts.
#[no_mangle]
pub(crate) static CUR_TASK_CTXT_PTR: AtomicPtr<TaskCtxt> = AtomicPtr::new(core::ptr::null_mut());

/// Do things with the current task struct. When the given closure is being
/// executed, the current task `Arc` will be locked in reader mode and no
/// context switch will happen during this period.
///
/// [`with_current_task_arc`] has slightly better performance than this
/// function. Use that function if `&Task` suffices.
pub(crate) fn with_current_task_arc<F, R>(closure: F) -> R
where
F: FnOnce(Arc<Task>) -> R,
{
// Suspend the scheduler and lock the current task `Arc` in reader mode.
let _sched_suspend_guard = sync::suspend_scheduler();
let read_guard = CUR_TASK.read();

// Run the closure.
if let Some(cur_task) = &*read_guard {
closure(cur_task.clone())
} else {
unrecoverable::die();
}
}

/// Do things with the current task struct. When the given closure is being
/// executed, the current task `Arc` will be locked in reader mode and no
/// context switch will happen during this period.
///
/// This function has slightly better performance than [`with_current_task_arc`].
pub(crate) fn with_current_task<F, R>(closure: F) -> R
where
F: FnOnce(&Task) -> R,
{
// Suspend the scheduler and lock the current task `Arc` in reader mode.
let _sched_suspend_guard = sync::suspend_scheduler();
let read_guard = CUR_TASK.read();

// Run the closure.
if let Some(cur_task) = &*read_guard {
closure(cur_task)
} else {
unrecoverable::die();
}
}

/// Return if the code is currently executing in an interrupt service routine
/// (ISR) context.
///
/// The IPSR special register holds the number of the currently active
/// exception, and reads as zero in thread mode.
pub(crate) fn is_in_isr_context() -> bool {
    let exception_num: u32;

    // SAFETY: Reading the IPSR special register has no side effects and
    // accesses neither memory nor the stack.
    unsafe {
        asm!(
            "mrs {}, ipsr",
            out(reg) exception_num,
            options(nomem, nostack)
        );
    }

    // A non-zero exception number means some exception handler is active.
    exception_num != 0
}

/// Return if the code is currently executing in the PendSV exception context.
pub(crate) fn is_in_pendsv_context() -> bool {
    // Exception number assigned to PendSV by the ARMv7-M architecture.
    const PENDSV_EXCEPTION_NUM: u32 = 14;

    let exception_num: u32;

    // SAFETY: Reading the IPSR special register has no side effects and
    // accesses neither memory nor the stack.
    unsafe {
        asm!(
            "mrs {}, ipsr",
            out(reg) exception_num,
            options(nomem, nostack)
        );
    }

    exception_num == PENDSV_EXCEPTION_NUM
}
Loading

0 comments on commit 12891b9

Please sign in to comment.