diff --git a/src/schedule/current.rs b/src/schedule/current.rs
index 50c39d0..e32ea45 100644
--- a/src/schedule/current.rs
+++ b/src/schedule/current.rs
@@ -1,4 +1,4 @@
-use super::scheduler::Scheduler;
+use super::scheduler::{SchedSuspendGuard, Scheduler};
 use crate::{
     sync::{RwSpin, RwSpinReadGuard, RwSpinWriteGuard},
     task::{Task, TaskCtxt},
@@ -65,20 +65,13 @@ pub(crate) static CUR_TASK_CTXT_PTR: AtomicPtr<TaskCtxt> = AtomicPtr::new(core::
 ///
 /// [`with_current_task_arc`] has slightly better performance than this
 /// function. Use that function if `&Task` suffices.
-pub(crate) fn with_current_task_arc<F, R>(closure: F) -> R
+pub(crate) fn with_cur_task_arc<F, R>(op: F) -> R
 where
     F: FnOnce(Arc<Task>) -> R,
 {
     // Suspend the scheduler and lock the current task `Arc` in reader mode.
-    let _sched_suspend_guard = Scheduler::suspend();
-    let read_guard = CUR_TASK.read();
-
-    // Run the closure.
-    if let Some(cur_task) = &*read_guard {
-        closure(cur_task.clone())
-    } else {
-        unrecoverable::die();
-    }
+    let guard = Scheduler::suspend();
+    with_cur_task_arc_and_suspended_sched(guard, op)
 }
 
 /// Do things with the current task struct. When the given closure is being
@@ -86,17 +79,34 @@ where
 /// context switch will happen during this period.
 ///
 /// This function has slightly better performance than [`with_current_task_arc`].
-pub(crate) fn with_current_task<F, R>(closure: F) -> R
+pub(crate) fn with_cur_task<F, R>(op: F) -> R
 where
     F: FnOnce(&Task) -> R,
 {
     // Suspend the scheduler and lock the current task `Arc` in reader mode.
-    let _sched_suspend_guard = Scheduler::suspend();
+    let guard = Scheduler::suspend();
+    with_cur_task_and_suspended_sched(guard, op)
+}
+
+pub(crate) fn with_cur_task_arc_and_suspended_sched<F, R>(_guard: SchedSuspendGuard, op: F) -> R
+where
+    F: FnOnce(Arc<Task>) -> R,
+{
     let read_guard: RwSpinReadGuard<_> = CUR_TASK.read();
+    if let Some(cur_task) = &*read_guard {
+        op(cur_task.clone())
+    } else {
+        unrecoverable::die();
+    }
+}
 
-    // Run the closure.
+pub(crate) fn with_cur_task_and_suspended_sched<F, R>(_guard: SchedSuspendGuard, op: F) -> R
+where
+    F: FnOnce(&Task) -> R,
+{
+    let read_guard: RwSpinReadGuard<_> = CUR_TASK.read();
     if let Some(cur_task) = &*read_guard {
-        closure(cur_task)
+        op(cur_task)
     } else {
         unrecoverable::die();
     }
diff --git a/src/schedule/scheduler.rs b/src/schedule/scheduler.rs
index 7ed59aa..373dfba 100644
--- a/src/schedule/scheduler.rs
+++ b/src/schedule/scheduler.rs
@@ -59,7 +59,7 @@ struct InnerPendAccessor<'a> {
 /// them linked.
 impl<'a> RunPendedOp for InnerFullAccessor<'a> {
     fn run_pended_op(&mut self) {
-        current::with_current_task(|cur_task| {
+        current::with_cur_task(|cur_task| {
             let mut locked_list = self.ready_linked_list.lock_now_or_die();
             while let Some(task) = self.insert_buffer.dequeue() {
                 if task.should_preempt(cur_task) {
@@ -182,13 +182,12 @@ impl Scheduler {
             unrecoverable::die();
         }
 
-        READY_TASK_QUEUE
-            .lock()
-            .must_with_full_access(|full_access| {
+        READY_TASK_QUEUE.with_suspended_scheduler(|queue, sched_guard| {
+            queue.must_with_full_access(|full_access| {
                 let mut locked_list = full_access.ready_linked_list.lock_now_or_die();
 
                 // Clean up for the current task.
-                current::with_current_task_arc(|cur_task| {
+                current::with_cur_task_arc_and_suspended_sched(sched_guard, |cur_task| {
                     match cur_task.get_state() {
                         // Put the current task back to the ready queue only if the
                         // task is in `Running` state.
@@ -260,6 +259,7 @@ impl Scheduler {
                 // performed one.
                 PENDING_CTXT_SWITCH.store(false, Ordering::SeqCst);
             })
+        })
     }
 
     /// Return if the scheduler has been started.
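The scheduler hunk above shows the calling convention that the rest of this diff applies everywhere: `with_suspended_scheduler` hands its closure both the protected value and the `SchedSuspendGuard`, and the `*_and_suspended_sched` helpers accept that guard as a witness instead of calling `Scheduler::suspend()` a second time. Below is a minimal, self-contained sketch of the pattern; every type and function in it is a simplified stand-in for illustration, not the crate's real definitions, but it compiles on its own.

// Hedged sketch of the guard-as-witness pattern introduced by this diff.
struct SchedSuspendGuard;               // stand-in for the guard returned by Scheduler::suspend()
struct RefCellSchedSafe<T> { val: T }   // stand-in for the type in refcell_sched_safe.rs

impl<T> RefCellSchedSafe<T> {
    // Closure-scoped access: the caller receives the inner value together with
    // the suspend guard, so the guard can be forwarded to helpers that expect
    // scheduling to be suspended already.
    fn with_suspended_scheduler<F, R>(&self, op: F) -> R
    where
        F: FnOnce(&T, SchedSuspendGuard) -> R,
    {
        let guard = SchedSuspendGuard; // real code: Scheduler::suspend()
        op(&self.val, guard)
    }
}

// Stand-in for with_cur_task_and_suspended_sched: it consumes the guard as
// proof of suspension instead of suspending the scheduler again.
fn with_value_and_suspended_sched<F, R>(_guard: SchedSuspendGuard, v: &u32, op: F) -> R
where
    F: FnOnce(&u32) -> R,
{
    op(v)
}

fn main() {
    let cell = RefCellSchedSafe { val: 42u32 };
    let doubled = cell.with_suspended_scheduler(|v, guard| {
        with_value_and_suspended_sched(guard, v, |x| x * 2)
    });
    assert_eq!(doubled, 84);
}

The point of the shape is that a `SchedSuspendGuard` must be in hand to reach the helpers that skip suspension, so a call site cannot accidentally use them while scheduling is still active.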
@@ -281,34 +281,36 @@ impl Scheduler { /// Internal implementation to insert a task to the ready queue. fn insert_task_to_ready_queue(task: Arc) { - READY_TASK_QUEUE.lock().with_access(|access| match access { - // The queue is not under contention. Directly put the task to the - // linked list. - Access::Full { full_access } => { - // Request a context switch if the incoming ready task has a - // higher priority than the current task. Check it only when - // the scheduler has started otherwise there will be no current - // task. - if Scheduler::has_started() { - current::with_current_task(|cur_task| { - if task.should_preempt(cur_task) { - PENDING_CTXT_SWITCH.store(true, Ordering::SeqCst); - } - }); - } + READY_TASK_QUEUE.with_suspended_scheduler(|queue, sched_guard| { + queue.with_access(|access| match access { + // The queue is not under contention. Directly put the task to the + // linked list. + Access::Full { full_access } => { + // Request a context switch if the incoming ready task has a + // higher priority than the current task. Check it only when + // the scheduler has started otherwise there will be no current + // task. + if Scheduler::has_started() { + current::with_cur_task_and_suspended_sched(sched_guard, |cur_task| { + if task.should_preempt(cur_task) { + PENDING_CTXT_SWITCH.store(true, Ordering::SeqCst); + } + }); + } - // Put the ready task to the linked list. - task.set_state(TaskState::Ready); - let mut locked_list = full_access.ready_linked_list.lock_now_or_die(); - locked_list.push_back(task); - } - // The queue is under contention. The current execution context, which - // must be an ISR, preempted another context that is holding the full - // access. Place the task in the lock-free buffer. The full access - // holder will later put it back to the linked list. - Access::PendOnly { pend_access } => { - pend_access.insert_buffer.enqueue(task).unwrap_or_die(); - } + // Put the ready task to the linked list. + task.set_state(TaskState::Ready); + let mut locked_list = full_access.ready_linked_list.lock_now_or_die(); + locked_list.push_back(task); + } + // The queue is under contention. The current execution context, which + // must be an ISR, preempted another context that is holding the full + // access. Place the task in the lock-free buffer. The full access + // holder will later put it back to the linked list. + Access::PendOnly { pend_access } => { + pend_access.insert_buffer.enqueue(task).unwrap_or_die(); + } + }) }); } @@ -349,7 +351,7 @@ impl Scheduler { pub(crate) fn drop_current_task_from_svc() { // Mark the task state as `Destructing` so that the scheduler will drop // the task struct upon a later context switch. - current::with_current_task(|cur_task| cur_task.set_state(TaskState::Destructing)); + current::with_cur_task(|cur_task| cur_task.set_state(TaskState::Destructing)); // Tail chain a PendSV to perform a context switch. cortex_m::peripheral::SCB::set_pendsv() diff --git a/src/sync/mailbox.rs b/src/sync/mailbox.rs index ed7fee9..eab5f7b 100644 --- a/src/sync/mailbox.rs +++ b/src/sync/mailbox.rs @@ -166,35 +166,37 @@ impl Mailbox { let mut should_block = true; // Suspend scheduling and acquire full access to the mailbox fields. - self.inner.lock().must_with_full_access(|full_access| { - let mut locked_wait_task = full_access.wait_task.lock_now_or_die(); - - // A sanity check to prevent more than one task to try to wait on - // the same mailbox. 
- assert!(locked_wait_task.is_none()); - - // If the counter is currently positive, decrement the counter and - // do not block. - if full_access.count.load(Ordering::SeqCst) > 0 { - full_access.count.fetch_sub(1, Ordering::SeqCst); - should_block = false; - return; - } - - // Otherwise the task is going to be blocked. Reset the flag. - full_access.task_notified.store(false, Ordering::SeqCst); - - current::with_current_task_arc(|cur_task| { - cur_task.set_state(TaskState::Blocked); - - // Record the waiting task on this mailbox. - *locked_wait_task = Some(Arc::clone(&cur_task)); - - // Add the waiting task to the sleeping queue. - // FIXME: This assumes 1ms tick interval. - let wake_at_tick = time::get_tick() + timeout_ms; - time::add_task_to_sleep_queue(cur_task, wake_at_tick); - }); + self.inner.with_suspended_scheduler(|queue, sched_guard| { + queue.must_with_full_access(|full_access| { + let mut locked_wait_task = full_access.wait_task.lock_now_or_die(); + + // A sanity check to prevent more than one task to try to wait on + // the same mailbox. + assert!(locked_wait_task.is_none()); + + // If the counter is currently positive, decrement the counter and + // do not block. + if full_access.count.load(Ordering::SeqCst) > 0 { + full_access.count.fetch_sub(1, Ordering::SeqCst); + should_block = false; + return; + } + + // Otherwise the task is going to be blocked. Reset the flag. + full_access.task_notified.store(false, Ordering::SeqCst); + + current::with_cur_task_arc_and_suspended_sched(sched_guard, |cur_task| { + cur_task.set_state(TaskState::Blocked); + + // Record the waiting task on this mailbox. + *locked_wait_task = Some(Arc::clone(&cur_task)); + + // Add the waiting task to the sleeping queue. + // FIXME: This assumes 1ms tick interval. + let wake_at_tick = time::get_tick() + timeout_ms; + time::add_task_to_sleep_queue(cur_task, wake_at_tick); + }); + }) }); if should_block { @@ -205,13 +207,15 @@ impl Mailbox { // waiting time reaches timeout. // Suspend scheduling and acquire full access to the mailbox fields. - self.inner.lock().must_with_full_access(|full_access| { - // Clear the waiting task field. This field was not cleared if - // the task wakes up because of the timeout. - full_access.wait_task.lock_now_or_die().take(); + self.inner.with_suspended_scheduler(|queue, _guard| { + queue.must_with_full_access(|full_access| { + // Clear the waiting task field. This field was not cleared if + // the task wakes up because of the timeout. + full_access.wait_task.lock_now_or_die().take(); - // Return whether the task wakes up because of notification. - full_access.task_notified.load(Ordering::SeqCst) + // Return whether the task wakes up because of notification. + full_access.task_notified.load(Ordering::SeqCst) + }) }) } else { // If the task need not block, it consumed a notification count and @@ -227,28 +231,32 @@ impl Mailbox { /// This method is allowed in ISR context. pub fn notify_allow_isr(&self) { // Suspend scheduling and get access to the mailbox fields. - self.inner.lock().with_access(|access| match access { - // If we have full access to the inner fields, we directly wake up - // the waiting task or increment the counter. - Access::Full { full_access } => match full_access.wait_task.lock_now_or_die().take() { - // If there is a waiting task, wake it up. 
- Some(wait_task) => { - time::remove_task_from_sleep_queue_allow_isr(wait_task); - full_access.task_notified.store(true, Ordering::SeqCst); + self.inner.with_suspended_scheduler(|queue, _guard| { + queue.with_access(|access| match access { + // If we have full access to the inner fields, we directly wake up + // the waiting task or increment the counter. + Access::Full { full_access } => { + match full_access.wait_task.lock_now_or_die().take() { + // If there is a waiting task, wake it up. + Some(wait_task) => { + time::remove_task_from_sleep_queue_allow_isr(wait_task); + full_access.task_notified.store(true, Ordering::SeqCst); + } + // If there is not a waiting task, increment the counter. + None => { + full_access.count.fetch_add(1, Ordering::SeqCst); + full_access.task_notified.store(true, Ordering::SeqCst); + } + } } - // If there is not a waiting task, increment the counter. - None => { - full_access.count.fetch_add(1, Ordering::SeqCst); - full_access.task_notified.store(true, Ordering::SeqCst); + // If other context is running with the full access and we preempt + // it, we get pend-only access. We increment the `pending_count` so + // that the full access owner can later help us update the counter + // or notify the waiting task on behalf. + Access::PendOnly { pend_access } => { + pend_access.pending_count.fetch_add(1, Ordering::SeqCst); } - }, - // If other context is running with the full access and we preempt - // it, we get pend-only access. We increment the `pending_count` so - // that the full access owner can later help us update the counter - // or notify the waiting task on behalf. - Access::PendOnly { pend_access } => { - pend_access.pending_count.fetch_add(1, Ordering::SeqCst); - } + }) }); } } diff --git a/src/sync/mutex.rs b/src/sync/mutex.rs index a55dfd0..9da43e8 100644 --- a/src/sync/mutex.rs +++ b/src/sync/mutex.rs @@ -122,7 +122,7 @@ where // task pointer. .and_then(|guard| { if !current::is_in_isr_context() { - current::with_current_task_arc(|cur_task| { + current::with_cur_task_arc(|cur_task| { self.owner.lock_now_or_die().replace(cur_task) }); } @@ -153,7 +153,7 @@ where } // Priority inheritance. 
- current::with_current_task(|cur_task| { + current::with_cur_task(|cur_task| { let locked_owner = self.owner.lock_now_or_die(); if let Some(owner) = locked_owner.as_ref() { owner.ceil_priority_from(cur_task); diff --git a/src/sync/refcell_sched_safe.rs b/src/sync/refcell_sched_safe.rs index dcd63cb..a909453 100644 --- a/src/sync/refcell_sched_safe.rs +++ b/src/sync/refcell_sched_safe.rs @@ -1,5 +1,4 @@ use crate::schedule::scheduler::{SchedSuspendGuard, Scheduler}; -use core::ops::Deref; pub(crate) struct RefCellSchedSafe where @@ -9,28 +8,15 @@ where } impl RefCellSchedSafe { - pub const fn new(val: T) -> Self { + pub(crate) const fn new(val: T) -> Self { Self { val } } - pub fn lock(&self) -> RefSchedSafe { - let _guard = Scheduler::suspend(); - RefSchedSafe { - val_ref: &self.val, - _guard, - } - } -} - -pub(crate) struct RefSchedSafe<'a, T> { - val_ref: &'a T, - _guard: SchedSuspendGuard, -} - -impl<'a, T> Deref for RefSchedSafe<'a, T> { - type Target = T; - - fn deref(&self) -> &Self::Target { - &self.val_ref + pub(crate) fn with_suspended_scheduler(&self, op: F) -> R + where + F: FnOnce(&T, SchedSuspendGuard) -> R, + { + let guard = Scheduler::suspend(); + op(&self.val, guard) } } diff --git a/src/sync/wait_queue.rs b/src/sync/wait_queue.rs index 526e170..63d2342 100644 --- a/src/sync/wait_queue.rs +++ b/src/sync/wait_queue.rs @@ -109,13 +109,15 @@ impl WaitQueue { #[inline(never)] fn add_cur_task_to_block_queue(wq: &WaitQueue) { // Should always grant full access to a task. - wq.inner.lock().must_with_full_access(|full_access| { - // Put the current task into the queue. - current::with_current_task_arc(|cur_task| { - cur_task.set_state(TaskState::Blocked); - let mut locked_queue = full_access.queue.lock_now_or_die(); - locked_queue.push_back(cur_task); - }); + wq.inner.with_suspended_scheduler(|queue, sched_guard| { + queue.must_with_full_access(|full_access| { + // Put the current task into the queue. + current::with_cur_task_arc_and_suspended_sched(sched_guard, |cur_task| { + cur_task.set_state(TaskState::Blocked); + let mut locked_queue = full_access.queue.lock_now_or_die(); + locked_queue.push_back(cur_task); + }); + }) }); } } @@ -164,24 +166,26 @@ impl WaitQueue { F: FnMut() -> Option, { // Should always grant full access to a task. - wq.inner.lock().must_with_full_access(|full_access| { - // Must lock the queue here before evaluating the condition to - // prevent deadlock. - let mut locked_queue = full_access.queue.lock_now_or_die(); + wq.inner.with_suspended_scheduler(|queue, sched_guard| { + queue.must_with_full_access(|full_access| { + // Must lock the queue here before evaluating the condition to + // prevent deadlock. + let mut locked_queue = full_access.queue.lock_now_or_die(); - // Check if the predicate is satisfied and if yes return the value - // contained in `Some`. - if let Some(ret) = condition() { - return Some(ret); - } + // Check if the predicate is satisfied and if yes return the value + // contained in `Some`. + if let Some(ret) = condition() { + return Some(ret); + } - // Otherwise, put the current task into the queue. - current::with_current_task_arc(|cur_task| { - cur_task.set_state(TaskState::Blocked); - locked_queue.push_back(cur_task); - }); + // Otherwise, put the current task into the queue. 
+ current::with_cur_task_arc_and_suspended_sched(sched_guard, |cur_task| { + cur_task.set_state(TaskState::Blocked); + locked_queue.push_back(cur_task); + }); - None + None + }) }) } } @@ -245,27 +249,29 @@ impl WaitQueue { L: Lockable = G> + 'a, { // Should always grant full access to a task. - wq.inner.lock().must_with_full_access(|full_access| { - // Must lock the queue here before evaluating the condition and - // releasing the `guard` passed in argument to prevent deadlock. - let mut locked_queue = full_access.queue.lock_now_or_die(); + wq.inner.with_suspended_scheduler(|queue, sched_guard| { + queue.must_with_full_access(|full_access| { + // Must lock the queue here before evaluating the condition and + // releasing the `guard` passed in argument to prevent deadlock. + let mut locked_queue = full_access.queue.lock_now_or_die(); - // Check if the predicate is satisfied and if yes return the value - // contained in `Some` with the lock guard. - if let Some(ret) = condition(&mut guard) { - return Err((guard, ret)); - } + // Check if the predicate is satisfied and if yes return the value + // contained in `Some` with the lock guard. + if let Some(ret) = condition(&mut guard) { + return Err((guard, ret)); + } - // Otherwise, release the lock guard and get the lock itself. - let mutex = guard.unlock_and_into_lock_ref(); + // Otherwise, release the lock guard and get the lock itself. + let mutex = guard.unlock_and_into_lock_ref(); - // Put the current task into the queue. - current::with_current_task_arc(|cur_task| { - cur_task.set_state(TaskState::Blocked); - locked_queue.push_back(cur_task); - }); + // Put the current task into the queue. + current::with_cur_task_arc_and_suspended_sched(sched_guard, |cur_task| { + cur_task.set_state(TaskState::Blocked); + locked_queue.push_back(cur_task); + }); - Ok(mutex) + Ok(mutex) + }) }) } } @@ -280,21 +286,23 @@ impl WaitQueue { /// will be in turn notified, i.e., the notification is treated as spurious and /// is discarded. pub(super) fn notify_one_allow_isr(&self) { - self.inner.lock().with_access(|access| match access { - // If we have full access to the inner components, we directly operate - // on the queue to make the popped task ready. - Access::Full { full_access } => { - let mut locked_queue = full_access.queue.lock_now_or_die(); - if let Some(task) = locked_queue.pop_highest_priority() { - Scheduler::accept_task(task); + self.inner.with_suspended_scheduler(|queue, _guard| { + queue.with_access(|access| match access { + // If we have full access to the inner components, we directly operate + // on the queue to make the popped task ready. + Access::Full { full_access } => { + let mut locked_queue = full_access.queue.lock_now_or_die(); + if let Some(task) = locked_queue.pop_highest_priority() { + Scheduler::accept_task(task); + } } - } - // If other context is running with the full access and we preempt it, - // we get pend-only access. We increment the counter so that the full - // access owner can later pop out the task on our behalf. - Access::PendOnly { pend_access } => { - pend_access.notify_cnt.fetch_add(1, Ordering::SeqCst); - } + // If other context is running with the full access and we preempt it, + // we get pend-only access. We increment the counter so that the full + // access owner can later pop out the task on our behalf. 
+ Access::PendOnly { pend_access } => { + pend_access.notify_cnt.fetch_add(1, Ordering::SeqCst); + } + }) }); } } diff --git a/src/task/current.rs b/src/task/current.rs index 175efaa..f2de6d5 100644 --- a/src/task/current.rs +++ b/src/task/current.rs @@ -24,7 +24,7 @@ pub fn change_current_priority(prio: u8) -> Result<(), ()> { if prio >= config::TASK_PRIORITY_LEVELS - 1 { return Err(()); } - current::with_current_task(|cur_task| cur_task.change_intrinsic_priority(prio)); + current::with_cur_task(|cur_task| cur_task.change_intrinsic_priority(prio)); svc::svc_yield_current_task(); Ok(()) } @@ -32,5 +32,5 @@ pub fn change_current_priority(prio: u8) -> Result<(), ()> { /// Return the ID of the current task. The ID is only for diagnostic purpose /// and does not have any functional purpose. pub fn get_current_id() -> u8 { - current::with_current_task(|cur_task| cur_task.get_id()) + current::with_cur_task(|cur_task| cur_task.get_id()) } diff --git a/src/task/segmented_stack.rs b/src/task/segmented_stack.rs index b5a3a90..b8f3b02 100644 --- a/src/task/segmented_stack.rs +++ b/src/task/segmented_stack.rs @@ -265,7 +265,7 @@ pub(crate) fn more_stack(tf: &mut TrapFrame, ctxt: &mut TaskSVCCtxt, reason: Mor #[cfg(not(feature = "unwind"))] let abort = false; - current::with_current_task(|cur_task| { + current::with_cur_task(|cur_task| { // Define a closure to be invoked when the stack size limit is // exceeded. #[cfg(feature = "unwind")] @@ -456,7 +456,7 @@ pub(crate) fn less_stack(tf: &TrapFrame, ctxt: &mut TaskSVCCtxt) { ctxt.tls.stklet_bound = meta.prev_stklet_bound; ctxt.sp = meta.prev_sp; - current::with_current_task(|cur_task| { + current::with_cur_task(|cur_task| { cur_task.with_stack_ctrl_block(|scb| { // Update hot split alleviation information. svc_less_stack_anti_hot_split(prev_tf, scb); diff --git a/src/task/trampoline.rs b/src/task/trampoline.rs index 3149acc..434e24b 100644 --- a/src/task/trampoline.rs +++ b/src/task/trampoline.rs @@ -65,7 +65,7 @@ where // If the task panicked, check if it has already been restarted with // another task struct. If yes, we break the loop to let the current // task struct terminates. - if current::with_current_task(|cur_task| cur_task.has_restarted()) { + if current::with_cur_task(|cur_task| cur_task.has_restarted()) { break; } @@ -86,7 +86,7 @@ where // The following catch is to detect such pathological case. If it // happens, give up restarting the task. if let Err(_) = unw_catch::catch_unwind(|| { - current::with_current_task(|cur_task| cur_task.set_unwind_flag(false)) + current::with_cur_task(|cur_task| cur_task.set_unwind_flag(false)) }) { break; } diff --git a/src/time/mod.rs b/src/time/mod.rs index f12641a..9abd91a 100644 --- a/src/time/mod.rs +++ b/src/time/mod.rs @@ -116,13 +116,15 @@ pub fn get_tick() -> u32 { /// Wake up those sleeping tasks that have their sleeping time expired. 
pub(crate) fn wake_sleeping_tasks() { - SLEEP_TASK_QUEUE.lock().with_access(|access| match access { - Access::Full { full_access } => { - full_access.wake_expired_tasks(); - } - Access::PendOnly { pend_access } => { - pend_access.time_to_wakeup.store(true, Ordering::SeqCst) - } + SLEEP_TASK_QUEUE.with_suspended_scheduler(|queue, _guard| { + queue.with_access(|access| match access { + Access::Full { full_access } => { + full_access.wake_expired_tasks(); + } + Access::PendOnly { pend_access } => { + pend_access.time_to_wakeup.store(true, Ordering::SeqCst) + } + }) }); } @@ -154,7 +156,7 @@ fn sleep_ms_unchecked(ms: u32) { // Outline the logic to reduce the stack frame size of `sleep_ms`. #[inline(never)] fn add_cur_task_to_sleep_queue(wake_at_tick: u32) { - current::with_current_task_arc(|cur_task| { + current::with_cur_task_arc(|cur_task| { cur_task.set_state(TaskState::Blocked); add_task_to_sleep_queue(cur_task, wake_at_tick); }) @@ -162,26 +164,28 @@ fn sleep_ms_unchecked(ms: u32) { } pub(crate) fn add_task_to_sleep_queue(task: Arc, wake_at_tick: u32) { - SLEEP_TASK_QUEUE - .lock() - .must_with_full_access(|full_access| { + SLEEP_TASK_QUEUE.with_suspended_scheduler(|queue, _guard| { + queue.must_with_full_access(|full_access| { task.set_wake_tick(wake_at_tick); let mut locked_queue = full_access.time_sorted_queue.lock_now_or_die(); locked_queue.push_back_tick_sorted(task); - }); + }) + }); } pub(crate) fn remove_task_from_sleep_queue_allow_isr(task: Arc) { - SLEEP_TASK_QUEUE.lock().with_access(|access| match access { - Access::Full { full_access } => { - let mut locked_queue = full_access.time_sorted_queue.lock_now_or_die(); - if let Some(task) = locked_queue.remove_task(&task) { - Scheduler::accept_task(task); + SLEEP_TASK_QUEUE.with_suspended_scheduler(|queue, _guard| { + queue.with_access(|access| match access { + Access::Full { full_access } => { + let mut locked_queue = full_access.time_sorted_queue.lock_now_or_die(); + if let Some(task) = locked_queue.remove_task(&task) { + Scheduler::accept_task(task); + } } - } - Access::PendOnly { pend_access } => { - pend_access.delete_buffer.enqueue(task).unwrap_or_die(); - } + Access::PendOnly { pend_access } => { + pend_access.delete_buffer.enqueue(task).unwrap_or_die(); + } + }) }); } diff --git a/src/unwind/unwind.rs b/src/unwind/unwind.rs index 3744f15..91c03aa 100644 --- a/src/unwind/unwind.rs +++ b/src/unwind/unwind.rs @@ -395,7 +395,7 @@ impl<'a> Debug for UnwindState<'a> { #[inline(never)] fn try_concurrent_restart() { - current::with_current_task_arc(|cur_task| { + current::with_cur_task_arc(|cur_task| { // We will limit the concurrent restart rate to at most one concurrent // instance. If this task is a restarted instance, and also if the original // instance has not finished unwinding, i.e. the task struct reference @@ -510,7 +510,7 @@ impl UnwindState<'static> { // with the current task. There was something wrong with the IRQ // handler but do not touch the task. if !current::is_in_isr_context() { - current::with_current_task(|cur_task| { + current::with_cur_task(|cur_task| { if cur_task.is_restartable() { try_concurrent_restart(); } @@ -719,7 +719,7 @@ impl<'a> UnwindState<'a> { self.stklet_boundary = stklet_meta.prev_stklet_bound as u32; // Update the stack usage. 
- current::with_current_task(|cur_task| { + current::with_cur_task(|cur_task| { cur_task.with_stack_ctrl_block(|scb| { scb.cumulated_size .fetch_sub(stklet_meta.count_size, Ordering::SeqCst) @@ -905,11 +905,11 @@ pub fn is_isr_unwinding() -> bool { } pub fn set_cur_task_unwinding(val: bool) { - current::with_current_task(|cur_task| cur_task.set_unwind_flag(val)); + current::with_cur_task(|cur_task| cur_task.set_unwind_flag(val)); } pub fn is_cur_task_unwinding() -> bool { - current::with_current_task(|cur_task| cur_task.is_unwinding()) + current::with_cur_task(|cur_task| cur_task.is_unwinding()) } pub fn is_unwinding() -> bool {
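The remaining hunks are mechanical renames. As the doc comments in `src/schedule/current.rs` note, `with_cur_task` only borrows the task and is slightly cheaper, while `with_cur_task_arc` clones the `Arc<Task>` for closures that must hand the task to another owner. A short sketch of that choice, loosely modeled on the call sites above; the surrounding context and the `wake_at_tick` value are illustrative, not taken from the tree.

// Borrowing suffices when nothing outlives the closure, e.g. reading the ID.
let id = current::with_cur_task(|t| t.get_id());

// Cloning the Arc is needed when the task is handed to another owner, as in
// time/mod.rs above, where the blocked task is pushed onto the sleep queue.
current::with_cur_task_arc(|cur_task| {
    cur_task.set_state(TaskState::Blocked);
    add_task_to_sleep_queue(cur_task, wake_at_tick);
});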