Commit: Temp checkpoint.

zyma98 committed Sep 23, 2024
1 parent 7225322 commit 53c67d6
Showing 11 changed files with 229 additions and 211 deletions.
40 changes: 25 additions & 15 deletions src/schedule/current.rs
@@ -1,4 +1,4 @@
-use super::scheduler::Scheduler;
+use super::scheduler::{SchedSuspendGuard, Scheduler};
use crate::{
    sync::{RwSpin, RwSpinReadGuard, RwSpinWriteGuard},
    task::{Task, TaskCtxt},
@@ -65,38 +65,48 @@ pub(crate) static CUR_TASK_CTXT_PTR: AtomicPtr<TaskCtxt> = AtomicPtr::new(core::
///
/// [`with_current_task`] has slightly better performance than this
/// function. Use that function if `&Task` suffices.
-pub(crate) fn with_current_task_arc<F, R>(closure: F) -> R
+pub(crate) fn with_cur_task_arc<F, R>(op: F) -> R
where
    F: FnOnce(Arc<Task>) -> R,
{
-    // Suspend the scheduler and lock the current task `Arc` in reader mode.
-    let _sched_suspend_guard = Scheduler::suspend();
-    let read_guard = CUR_TASK.read();
-
-    // Run the closure.
-    if let Some(cur_task) = &*read_guard {
-        closure(cur_task.clone())
-    } else {
-        unrecoverable::die();
-    }
+    let guard = Scheduler::suspend();
+    with_cur_task_arc_and_suspended_sched(guard, op)
}

/// Do things with the current task struct. When the given closure is being
/// executed, the current task `Arc` will be locked in reader mode and no
/// context switch will happen during this period.
///
/// This function has slightly better performance than [`with_current_task_arc`].
-pub(crate) fn with_current_task<F, R>(closure: F) -> R
+pub(crate) fn with_cur_task<F, R>(op: F) -> R
where
    F: FnOnce(&Task) -> R,
{
-    // Suspend the scheduler and lock the current task `Arc` in reader mode.
-    let _sched_suspend_guard = Scheduler::suspend();
-    let read_guard = CUR_TASK.read();
-
-    // Run the closure.
+    let guard = Scheduler::suspend();
+    with_cur_task_and_suspended_sched(guard, op)
+}
+
+pub(crate) fn with_cur_task_arc_and_suspended_sched<F, R>(_guard: SchedSuspendGuard, op: F) -> R
+where
+    F: FnOnce(Arc<Task>) -> R,
+{
+    let read_guard: RwSpinReadGuard<_> = CUR_TASK.read();
+    if let Some(cur_task) = &*read_guard {
+        op(cur_task.clone())
+    } else {
+        unrecoverable::die();
+    }
+}
+
+pub(crate) fn with_cur_task_and_suspended_sched<F, R>(_guard: SchedSuspendGuard, op: F) -> R
+where
+    F: FnOnce(&Task) -> R,
+{
+    let read_guard: RwSpinReadGuard<_> = CUR_TASK.read();
    if let Some(cur_task) = &*read_guard {
-        closure(cur_task)
+        op(cur_task)
    } else {
        unrecoverable::die();
    }
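The new `*_and_suspended_sched` variants split scheduler suspension from task access: a caller that already holds a `SchedSuspendGuard` passes it down instead of suspending twice, and the old `with_current_task{_arc}` bodies become thin wrappers. A minimal sketch of a hypothetical call site (not part of this commit; the function name and the state transition are illustrative only):

```rust
// Hypothetical caller, shown only to illustrate the calling convention.
fn example_cleanup() {
    // Suspend once; the guard is proof that scheduling is off.
    let guard = Scheduler::suspend();

    // The helper takes the guard by value, so the scheduler provably
    // stays suspended while the closure runs and resumes only when the
    // guard drops inside the helper.
    with_cur_task_and_suspended_sched(guard, |cur_task| {
        cur_task.set_state(TaskState::Ready);
    });
}
```

Because the guard is consumed, one suspension backs exactly one helper call; that is why the scheduler and mailbox changes below thread their `sched_guard` into these helpers rather than re-suspending.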
68 changes: 35 additions & 33 deletions src/schedule/scheduler.rs
@@ -59,7 +59,7 @@ struct InnerPendAccessor<'a> {
/// them linked.
impl<'a> RunPendedOp for InnerFullAccessor<'a> {
    fn run_pended_op(&mut self) {
-        current::with_current_task(|cur_task| {
+        current::with_cur_task(|cur_task| {
            let mut locked_list = self.ready_linked_list.lock_now_or_die();
            while let Some(task) = self.insert_buffer.dequeue() {
                if task.should_preempt(cur_task) {
@@ -182,13 +182,12 @@ impl Scheduler {
            unrecoverable::die();
        }

-        READY_TASK_QUEUE
-            .lock()
-            .must_with_full_access(|full_access| {
+        READY_TASK_QUEUE.with_suspended_scheduler(|queue, sched_guard| {
+            queue.must_with_full_access(|full_access| {
                let mut locked_list = full_access.ready_linked_list.lock_now_or_die();

                // Clean up for the current task.
-                current::with_current_task_arc(|cur_task| {
+                current::with_cur_task_arc_and_suspended_sched(sched_guard, |cur_task| {
                    match cur_task.get_state() {
                        // Put the current task back to the ready queue only if the
                        // task is in `Running` state.
@@ -260,6 +259,7 @@
                // performed one.
                PENDING_CTXT_SWITCH.store(false, Ordering::SeqCst);
            })
+        })
    }

    /// Return if the scheduler has been started.
@@ -281,34 +281,36 @@

    /// Internal implementation to insert a task to the ready queue.
    fn insert_task_to_ready_queue(task: Arc<Task>) {
-        READY_TASK_QUEUE.lock().with_access(|access| match access {
-            // The queue is not under contention. Directly put the task to the
-            // linked list.
-            Access::Full { full_access } => {
-                // Request a context switch if the incoming ready task has a
-                // higher priority than the current task. Check it only when
-                // the scheduler has started otherwise there will be no current
-                // task.
-                if Scheduler::has_started() {
-                    current::with_current_task(|cur_task| {
-                        if task.should_preempt(cur_task) {
-                            PENDING_CTXT_SWITCH.store(true, Ordering::SeqCst);
-                        }
-                    });
-                }
-
-                // Put the ready task to the linked list.
-                task.set_state(TaskState::Ready);
-                let mut locked_list = full_access.ready_linked_list.lock_now_or_die();
-                locked_list.push_back(task);
-            }
-            // The queue is under contention. The current execution context, which
-            // must be an ISR, preempted another context that is holding the full
-            // access. Place the task in the lock-free buffer. The full access
-            // holder will later put it back to the linked list.
-            Access::PendOnly { pend_access } => {
-                pend_access.insert_buffer.enqueue(task).unwrap_or_die();
-            }
+        READY_TASK_QUEUE.with_suspended_scheduler(|queue, sched_guard| {
+            queue.with_access(|access| match access {
+                // The queue is not under contention. Directly put the task to the
+                // linked list.
+                Access::Full { full_access } => {
+                    // Request a context switch if the incoming ready task has a
+                    // higher priority than the current task. Check it only when
+                    // the scheduler has started otherwise there will be no current
+                    // task.
+                    if Scheduler::has_started() {
+                        current::with_cur_task_and_suspended_sched(sched_guard, |cur_task| {
+                            if task.should_preempt(cur_task) {
+                                PENDING_CTXT_SWITCH.store(true, Ordering::SeqCst);
+                            }
+                        });
+                    }
+
+                    // Put the ready task to the linked list.
+                    task.set_state(TaskState::Ready);
+                    let mut locked_list = full_access.ready_linked_list.lock_now_or_die();
+                    locked_list.push_back(task);
+                }
+                // The queue is under contention. The current execution context, which
+                // must be an ISR, preempted another context that is holding the full
+                // access. Place the task in the lock-free buffer. The full access
+                // holder will later put it back to the linked list.
+                Access::PendOnly { pend_access } => {
+                    pend_access.insert_buffer.enqueue(task).unwrap_or_die();
+                }
+            })
        });
    }

@@ -349,7 +351,7 @@ impl Scheduler {
    pub(crate) fn drop_current_task_from_svc() {
        // Mark the task state as `Destructing` so that the scheduler will drop
        // the task struct upon a later context switch.
-        current::with_current_task(|cur_task| cur_task.set_state(TaskState::Destructing));
+        current::with_cur_task(|cur_task| cur_task.set_state(TaskState::Destructing));

        // Tail chain a PendSV to perform a context switch.
        cortex_m::peripheral::SCB::set_pendsv()
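Both branches of `insert_task_to_ready_queue` hinge on `task.should_preempt(cur_task)`, whose definition is not part of this diff. A plausible sketch of such a predicate, assuming fixed numeric priorities where a smaller value means a more urgent task (an assumption, not the crate's confirmed convention):

```rust
// Hypothetical stand-in for `Task::should_preempt`. Preempt only when
// strictly more urgent, so equal-priority tasks never preempt each
// other and instead round-robin through the ready queue.
fn should_preempt(incoming_prio: u8, current_prio: u8) -> bool {
    incoming_prio < current_prio
}
```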
118 changes: 63 additions & 55 deletions src/sync/mailbox.rs
@@ -166,35 +166,37 @@ impl Mailbox {
        let mut should_block = true;

        // Suspend scheduling and acquire full access to the mailbox fields.
-        self.inner.lock().must_with_full_access(|full_access| {
-            let mut locked_wait_task = full_access.wait_task.lock_now_or_die();
-
-            // A sanity check to prevent more than one task to try to wait on
-            // the same mailbox.
-            assert!(locked_wait_task.is_none());
-
-            // If the counter is currently positive, decrement the counter and
-            // do not block.
-            if full_access.count.load(Ordering::SeqCst) > 0 {
-                full_access.count.fetch_sub(1, Ordering::SeqCst);
-                should_block = false;
-                return;
-            }
-
-            // Otherwise the task is going to be blocked. Reset the flag.
-            full_access.task_notified.store(false, Ordering::SeqCst);
-
-            current::with_current_task_arc(|cur_task| {
-                cur_task.set_state(TaskState::Blocked);
-
-                // Record the waiting task on this mailbox.
-                *locked_wait_task = Some(Arc::clone(&cur_task));
-
-                // Add the waiting task to the sleeping queue.
-                // FIXME: This assumes 1ms tick interval.
-                let wake_at_tick = time::get_tick() + timeout_ms;
-                time::add_task_to_sleep_queue(cur_task, wake_at_tick);
-            });
+        self.inner.with_suspended_scheduler(|queue, sched_guard| {
+            queue.must_with_full_access(|full_access| {
+                let mut locked_wait_task = full_access.wait_task.lock_now_or_die();
+
+                // A sanity check to prevent more than one task to try to wait on
+                // the same mailbox.
+                assert!(locked_wait_task.is_none());
+
+                // If the counter is currently positive, decrement the counter and
+                // do not block.
+                if full_access.count.load(Ordering::SeqCst) > 0 {
+                    full_access.count.fetch_sub(1, Ordering::SeqCst);
+                    should_block = false;
+                    return;
+                }
+
+                // Otherwise the task is going to be blocked. Reset the flag.
+                full_access.task_notified.store(false, Ordering::SeqCst);
+
+                current::with_cur_task_arc_and_suspended_sched(sched_guard, |cur_task| {
+                    cur_task.set_state(TaskState::Blocked);
+
+                    // Record the waiting task on this mailbox.
+                    *locked_wait_task = Some(Arc::clone(&cur_task));
+
+                    // Add the waiting task to the sleeping queue.
+                    // FIXME: This assumes 1ms tick interval.
+                    let wake_at_tick = time::get_tick() + timeout_ms;
+                    time::add_task_to_sleep_queue(cur_task, wake_at_tick);
+                });
+            })
        });

        if should_block {
@@ -205,13 +207,15 @@
            // waiting time reaches timeout.

            // Suspend scheduling and acquire full access to the mailbox fields.
-            self.inner.lock().must_with_full_access(|full_access| {
-                // Clear the waiting task field. This field was not cleared if
-                // the task wakes up because of the timeout.
-                full_access.wait_task.lock_now_or_die().take();
-
-                // Return whether the task wakes up because of notification.
-                full_access.task_notified.load(Ordering::SeqCst)
-            })
+            self.inner.with_suspended_scheduler(|queue, _guard| {
+                queue.must_with_full_access(|full_access| {
+                    // Clear the waiting task field. This field was not cleared if
+                    // the task wakes up because of the timeout.
+                    full_access.wait_task.lock_now_or_die().take();
+
+                    // Return whether the task wakes up because of notification.
+                    full_access.task_notified.load(Ordering::SeqCst)
+                })
+            })
        } else {
            // If the task need not block, it consumed a notification count and
@@ -227,28 +231,32 @@
    /// This method is allowed in ISR context.
    pub fn notify_allow_isr(&self) {
        // Suspend scheduling and get access to the mailbox fields.
-        self.inner.lock().with_access(|access| match access {
-            // If we have full access to the inner fields, we directly wake up
-            // the waiting task or increment the counter.
-            Access::Full { full_access } => match full_access.wait_task.lock_now_or_die().take() {
-                // If there is a waiting task, wake it up.
-                Some(wait_task) => {
-                    time::remove_task_from_sleep_queue_allow_isr(wait_task);
-                    full_access.task_notified.store(true, Ordering::SeqCst);
-                }
-                // If there is not a waiting task, increment the counter.
-                None => {
-                    full_access.count.fetch_add(1, Ordering::SeqCst);
-                    full_access.task_notified.store(true, Ordering::SeqCst);
-                }
-            },
-            // If other context is running with the full access and we preempt
-            // it, we get pend-only access. We increment the `pending_count` so
-            // that the full access owner can later help us update the counter
-            // or notify the waiting task on behalf.
-            Access::PendOnly { pend_access } => {
-                pend_access.pending_count.fetch_add(1, Ordering::SeqCst);
-            }
+        self.inner.with_suspended_scheduler(|queue, _guard| {
+            queue.with_access(|access| match access {
+                // If we have full access to the inner fields, we directly wake up
+                // the waiting task or increment the counter.
+                Access::Full { full_access } => {
+                    match full_access.wait_task.lock_now_or_die().take() {
+                        // If there is a waiting task, wake it up.
+                        Some(wait_task) => {
+                            time::remove_task_from_sleep_queue_allow_isr(wait_task);
+                            full_access.task_notified.store(true, Ordering::SeqCst);
+                        }
+                        // If there is not a waiting task, increment the counter.
+                        None => {
+                            full_access.count.fetch_add(1, Ordering::SeqCst);
+                            full_access.task_notified.store(true, Ordering::SeqCst);
+                        }
+                    }
+                }
+                // If other context is running with the full access and we preempt
+                // it, we get pend-only access. We increment the `pending_count` so
+                // that the full access owner can later help us update the counter
+                // or notify the waiting task on behalf.
+                Access::PendOnly { pend_access } => {
+                    pend_access.pending_count.fetch_add(1, Ordering::SeqCst);
+                }
+            })
        });
    }
}
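Together the two paths form a counting notification primitive: a waiter consumes a banked count or blocks with a timeout, and a notifier wakes the waiter or banks a count. A usage sketch under stated assumptions: `wait_until_timeout_ms` stands in for the timed-wait method whose body is diffed above (its public name is not shown here), and both call sites are hypothetical.

```rust
// Hypothetical call sites; only `notify_allow_isr` is named in the diff.
fn consumer_task(mailbox: &Mailbox) {
    // Block for at most 100 ticks (per the FIXME above, the code
    // currently assumes a 1 ms tick). The return value mirrors the
    // `task_notified` flag: true means notified, false means timed out.
    if mailbox.wait_until_timeout_ms(100) {
        // Handle the event.
    } else {
        // Handle the timeout.
    }
}

fn timer_isr(mailbox: &Mailbox) {
    // Allowed in ISR context: with full access it wakes the waiter
    // directly; if the ISR preempted the full-access holder it only
    // bumps `pending_count` for the holder to process afterwards.
    mailbox.notify_allow_isr();
}
```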
4 changes: 2 additions & 2 deletions src/sync/mutex.rs
@@ -122,7 +122,7 @@ where
            // task pointer.
            .and_then(|guard| {
                if !current::is_in_isr_context() {
-                    current::with_current_task_arc(|cur_task| {
+                    current::with_cur_task_arc(|cur_task| {
                        self.owner.lock_now_or_die().replace(cur_task)
                    });
                }
@@ -153,7 +153,7 @@
        }

        // Priority inheritance.
-        current::with_current_task(|cur_task| {
+        current::with_cur_task(|cur_task| {
            let locked_owner = self.owner.lock_now_or_die();
            if let Some(owner) = locked_owner.as_ref() {
                owner.ceil_priority_from(cur_task);
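The `ceil_priority_from` call is the priority-inheritance step: a task blocking on a held mutex raises the owner's effective priority so a middle-priority task cannot starve it. The diff does not show its body; the arithmetic plausibly reduces to the following, again assuming numeric priorities where smaller means more urgent:

```rust
// Hypothetical reduction of `owner.ceil_priority_from(cur_task)`: the
// owner runs at the more urgent (numerically smaller) of its own
// priority and the waiter's priority.
fn inherited_priority(owner_prio: u8, waiter_prio: u8) -> u8 {
    owner_prio.min(waiter_prio)
}
```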
28 changes: 7 additions & 21 deletions src/sync/refcell_sched_safe.rs
@@ -1,5 +1,4 @@
use crate::schedule::scheduler::{SchedSuspendGuard, Scheduler};
-use core::ops::Deref;

pub(crate) struct RefCellSchedSafe<T>
where
@@ -9,28 +8,15 @@ where
}

impl<T> RefCellSchedSafe<T> {
-    pub const fn new(val: T) -> Self {
+    pub(crate) const fn new(val: T) -> Self {
        Self { val }
    }

-    pub fn lock(&self) -> RefSchedSafe<T> {
-        let _guard = Scheduler::suspend();
-        RefSchedSafe {
-            val_ref: &self.val,
-            _guard,
-        }
-    }
-}
-
-pub(crate) struct RefSchedSafe<'a, T> {
-    val_ref: &'a T,
-    _guard: SchedSuspendGuard,
-}
-
-impl<'a, T> Deref for RefSchedSafe<'a, T> {
-    type Target = T;
-
-    fn deref(&self) -> &Self::Target {
-        &self.val_ref
+    pub(crate) fn with_suspended_scheduler<F, R>(&self, op: F) -> R
+    where
+        F: FnOnce(&T, SchedSuspendGuard) -> R,
+    {
+        let guard = Scheduler::suspend();
+        op(&self.val, guard)
    }
}
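This file carries the core API change: the guard-object style (`lock()` returning a `Deref` wrapper that kept the scheduler suspended until dropped) becomes a closure-scoped accessor that hands the `SchedSuspendGuard` to the caller, so it can be forwarded to the new `*_and_suspended_sched` helpers. A minimal sketch of a call site (hypothetical; generic over whatever `T` the cell's hidden `where` clause accepts):

```rust
// Hypothetical caller showing the closure-scoped access pattern.
fn with_cell<T>(cell: &RefCellSchedSafe<T>, inspect: impl FnOnce(&T)) {
    cell.with_suspended_scheduler(|val, _guard| {
        // The scheduler is suspended for exactly this scope. The guard
        // could instead be moved into a helper such as
        // `with_cur_task_and_suspended_sched` to prove suspension there.
        inspect(val);
    });
}
```

Compared with the old `RefSchedSafe` wrapper, the closure form makes the suspension window syntactically explicit and lets the guard participate in APIs that require proof of suspension.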