Add test cases for forced unwinding.
zyma98 committed Aug 10, 2024
1 parent 3d1832a commit ca57bae
Showing 10 changed files with 385 additions and 3 deletions.
16 changes: 16 additions & 0 deletions Cargo.toml
@@ -117,3 +117,19 @@ path = "examples/tests/sync/semaphore/initialization.rs"
[[example]]
name = "test-task-priority-reduce_priority"
path = "examples/tests/task/priority/reduce_priority.rs"

[[example]]
name = "test-task-unwind-diverted"
path = "examples/tests/task/unwind/diverted.rs"

[[example]]
name = "test-task-unwind-deferred_direct_drop"
path = "examples/tests/task/unwind/deferred_direct_drop.rs"

[[example]]
name = "test-task-unwind-deferred_indirect_drop"
path = "examples/tests/task/unwind/deferred_indirect_drop.rs"

[[example]]
name = "test-task-unwind-deferred_nested_drop"
path = "examples/tests/task/unwind/deferred_nested_drop.rs"
88 changes: 88 additions & 0 deletions examples/tests/task/unwind/deferred_direct_drop.rs
@@ -0,0 +1,88 @@
//! A deferred forced unwinding should occur when a drop handler function
//! overflows the call stack of a task that does not enable dynamic stack
//! extension.
#![no_std]
#![no_main]

extern crate alloc;
use core::{
    mem::MaybeUninit,
    sync::atomic::{AtomicUsize, Ordering},
};
use hopter::{boot::main, debug::semihosting, hprintln, task};

#[main]
fn main(_: cortex_m::Peripherals) {
    task::build()
        .set_entry(test_task)
        .deny_dynamic_stack()
        .set_stack_size(512)
        .spawn_restartable()
        .unwrap();
}

fn test_task() {
    // A persistent counter.
    static CNT: AtomicUsize = AtomicUsize::new(0);

    // Increment the counter every time the task runs.
    let cnt = CNT.fetch_add(1, Ordering::SeqCst);

    // When the task is executed for the first time, run the drop handler.
    // Even though the drop handler uses a large stack frame and will overflow
    // the task's stack while dynamic stack extension is disabled, the
    // segmented stack runtime should still allow the drop handler to proceed,
    // because an unwinding cannot be initiated inside a drop handler. A
    // deferred forced unwinding is executed after the drop handler finishes.
    if cnt == 0 {
        core::mem::drop(HasDrop);
    }

    if cnt == 0 {
        // The task should have been unwound, so this print should not be
        // reachable.
        hprintln!("Should not print this.");
    }

    if cnt > 0 {
        hprintln!("Task successfully restarted after a deferred forced unwinding.");
        semihosting::terminate(true);
    } else {
        semihosting::terminate(false);
    }
}

struct HasDrop;

// A drop handler that uses a large stack frame.
impl Drop for HasDrop {
    #[inline(never)]
    fn drop(&mut self) {
        let _padding = StackFramePadding::new();
        hprintln!("Drop executed.");
    }
}

/// A padding that causes a large stack frame.
struct StackFramePadding {
    _padding: [u8; 1024],
}

impl StackFramePadding {
    /// Use volatile writes to prevent the compiler from optimizing away the
    /// padding.
    fn new() -> Self {
        let mut padding = MaybeUninit::<[u8; 1024]>::uninit();
        let mut ptr = unsafe { (*padding.as_mut_ptr()).as_mut_ptr() };
        for _ in 0..1024 {
            unsafe {
                ptr.write_volatile(0);
                ptr = ptr.offset(1);
            }
        }
        let padding = unsafe { padding.assume_init() };
        Self { _padding: padding }
    }
}
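For context, the explicit core::mem::drop(HasDrop) call above is just one way to run the drop handler; letting a value fall out of scope triggers the same Drop::drop call. A minimal host-side sketch (plain Rust with std, hypothetical names, independent of Hopter) illustrating the equivalence:

struct Guard;

impl Drop for Guard {
    fn drop(&mut self) {
        println!("Guard dropped.");
    }
}

fn main() {
    // Explicit drop, as in the test above.
    core::mem::drop(Guard);

    // Implicit drop when the value goes out of scope.
    {
        let _guard = Guard;
    } // Guard::drop runs here as well.
}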
2 changes: 2 additions & 0 deletions examples/tests/task/unwind/deferred_direct_drop.txt
@@ -0,0 +1,2 @@
Drop executed.
Task successfully restarted after a deferred forced unwinding.
94 changes: 94 additions & 0 deletions examples/tests/task/unwind/deferred_indirect_drop.rs
@@ -0,0 +1,94 @@
//! A deferred forced unwinding should occur when a drop handler is active on
//! the call stack and calls a function that overflows the stack of a task
//! that does not enable dynamic stack extension.
#![no_std]
#![no_main]

extern crate alloc;
use core::{
    mem::MaybeUninit,
    sync::atomic::{AtomicUsize, Ordering},
};
use hopter::{boot::main, debug::semihosting, hprintln, task};

#[main]
fn main(_: cortex_m::Peripherals) {
    task::build()
        .set_entry(test_task)
        .deny_dynamic_stack()
        .set_stack_size(512)
        .spawn_restartable()
        .unwrap();
}

fn test_task() {
    // A persistent counter.
    static CNT: AtomicUsize = AtomicUsize::new(0);

    // Increment the counter every time the task runs.
    let cnt = CNT.fetch_add(1, Ordering::SeqCst);

    // When the task is executed for the first time, run the drop handler.
    // Even though the drop handler calls a function that uses a large stack
    // frame and will overflow the task's stack while dynamic stack extension
    // is disabled, the segmented stack runtime should still allow the call to
    // proceed, because an unwinding cannot be initiated inside a drop handler.
    // A deferred forced unwinding is executed after the drop handler finishes.
    if cnt == 0 {
        core::mem::drop(HasDrop);
    }

    if cnt == 0 {
        // The task should have been unwound, so this print should not be
        // reachable.
        hprintln!("Should not print this.");
    }

    if cnt > 0 {
        hprintln!("Task successfully restarted after a deferred forced unwinding.");
        semihosting::terminate(true);
    } else {
        semihosting::terminate(false);
    }
}

/// A function that allocates a large stack frame.
#[inline(never)]
fn large_func() {
    let _padding = StackFramePadding::new();
    hprintln!("Large function executed.");
}

struct HasDrop;

// A drop handler that calls a function using a large stack frame.
impl Drop for HasDrop {
    #[inline(never)]
    fn drop(&mut self) {
        large_func();
        hprintln!("Drop executed.");
    }
}

/// A padding that causes a large stack frame.
struct StackFramePadding {
    _padding: [u8; 1024],
}

impl StackFramePadding {
    /// Use volatile writes to prevent the compiler from optimizing away the
    /// padding.
    fn new() -> Self {
        let mut padding = MaybeUninit::<[u8; 1024]>::uninit();
        let mut ptr = unsafe { (*padding.as_mut_ptr()).as_mut_ptr() };
        for _ in 0..1024 {
            unsafe {
                ptr.write_volatile(0);
                ptr = ptr.offset(1);
            }
        }
        let padding = unsafe { padding.assume_init() };
        Self { _padding: padding }
    }
}
3 changes: 3 additions & 0 deletions examples/tests/task/unwind/deferred_indirect_drop.txt
@@ -0,0 +1,3 @@
Large function executed.
Drop executed.
Task successfully restarted after a deferred forced unwinding.
100 changes: 100 additions & 0 deletions examples/tests/task/unwind/deferred_nested_drop.rs
@@ -0,0 +1,100 @@
//! A deferred forced unwinding should occur when a drop handler function
//! overflows the call stack of a task that does not enable dynamic stack
//! extension. This test further covers the case where a drop handler is
//! nested inside another drop handler. The forced unwinding should begin
//! only after the outermost drop handler returns.
#![no_std]
#![no_main]

extern crate alloc;
use core::{
    mem::MaybeUninit,
    sync::atomic::{AtomicUsize, Ordering},
};
use hopter::{boot::main, debug::semihosting, hprintln, task};

#[main]
fn main(_: cortex_m::Peripherals) {
    task::build()
        .set_entry(test_task)
        .deny_dynamic_stack()
        .set_stack_size(512)
        .spawn_restartable()
        .unwrap();
}

fn test_task() {
    // A persistent counter.
    static CNT: AtomicUsize = AtomicUsize::new(0);

    // Increment the counter every time the task runs.
    let cnt = CNT.fetch_add(1, Ordering::SeqCst);

    // When the task is executed for the first time, run the drop handlers.
    // Even though the inner drop handler uses a large stack frame and will
    // overflow the task's stack while dynamic stack extension is disabled,
    // the segmented stack runtime should still allow it to proceed, because
    // an unwinding cannot be initiated inside a drop handler. A deferred
    // forced unwinding is executed after the outermost drop handler finishes.
    if cnt == 0 {
        core::mem::drop(OuterDrop(InnerDrop));
    }

    if cnt == 0 {
        // The task should have been unwound, so this print should not be
        // reachable.
        hprintln!("Should not print this.");
    }

    if cnt > 0 {
        hprintln!("Task successfully restarted after a deferred forced unwinding.");
        semihosting::terminate(true);
    } else {
        semihosting::terminate(false);
    }
}

struct OuterDrop(InnerDrop);

// Dropping the outer struct implicitly also invokes the inner drop handler.
impl Drop for OuterDrop {
    #[inline(never)]
    fn drop(&mut self) {
        hprintln!("Outer drop executed.");
    }
}

struct InnerDrop;

// A drop handler that uses a large stack frame.
impl Drop for InnerDrop {
    #[inline(never)]
    fn drop(&mut self) {
        let _padding = StackFramePadding::new();
        hprintln!("Inner drop executed.");
    }
}

/// A padding that causes a large stack frame.
struct StackFramePadding {
    _padding: [u8; 1024],
}

impl StackFramePadding {
    /// Use volatile writes to prevent the compiler from optimizing away the
    /// padding.
    fn new() -> Self {
        let mut padding = MaybeUninit::<[u8; 1024]>::uninit();
        let mut ptr = unsafe { (*padding.as_mut_ptr()).as_mut_ptr() };
        for _ in 0..1024 {
            unsafe {
                ptr.write_volatile(0);
                ptr = ptr.offset(1);
            }
        }
        let padding = unsafe { padding.assume_init() };
        Self { _padding: padding }
    }
}
3 changes: 3 additions & 0 deletions examples/tests/task/unwind/deferred_nested_drop.txt
@@ -0,0 +1,3 @@
Outer drop executed.
Inner drop executed.
Task successfully restarted after a deferred forced unwinding.
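The order of the two drop messages above follows from Rust's drop order: a value's own Drop::drop runs before the Drop implementations of its fields. A minimal host-side sketch (plain Rust with std, hypothetical names, independent of Hopter) showing the same order:

struct Inner;
struct Outer(Inner);

impl Drop for Outer {
    fn drop(&mut self) {
        // Runs first: the containing value's own drop handler.
        println!("Outer dropped.");
    }
}

impl Drop for Inner {
    fn drop(&mut self) {
        // Runs second: fields are dropped after the containing value's
        // drop handler returns.
        println!("Inner dropped.");
    }
}

fn main() {
    drop(Outer(Inner)); // Prints "Outer dropped." then "Inner dropped.".
}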
75 changes: 75 additions & 0 deletions examples/tests/task/unwind/diverted.rs
@@ -0,0 +1,75 @@
//! A forced unwinding should occur when a task without dynamic stack extension
//! is about to overflow its stack.
#![no_std]
#![no_main]

extern crate alloc;
use core::{
    mem::MaybeUninit,
    sync::atomic::{AtomicUsize, Ordering},
};
use hopter::{boot::main, debug::semihosting, hprintln, task};

#[main]
fn main(_: cortex_m::Peripherals) {
    task::build()
        .set_entry(test_task)
        .deny_dynamic_stack()
        .set_stack_size(512)
        .spawn_restartable()
        .unwrap();
}

fn test_task() {
    // A persistent counter.
    static CNT: AtomicUsize = AtomicUsize::new(0);

    // Increment the counter every time the task is started.
    let cnt = CNT.fetch_add(1, Ordering::SeqCst);

    // Call the large function when the task is executed for the first time.
    // The function requires a large stack frame, causing a stack overflow.
    // The function call should be diverted into a forced stack unwinding.
    if cnt == 0 {
        large_func();
    }

    if cnt == 0 {
        // The task should have been unwound, so this print should not be
        // reachable.
        hprintln!("Should not print this.");
    }

    if cnt > 0 {
        hprintln!("Task successfully restarted after a diverted forced unwinding.");
        semihosting::terminate(true);
    } else {
        semihosting::terminate(false);
    }
}

/// A function that allocates a large stack frame.
#[inline(never)]
fn large_func() {
    let _padding = StackFramePadding::new();
}

/// A padding that causes a large stack frame.
struct StackFramePadding {
    _padding: [u8; 1024],
}

impl StackFramePadding {
    /// Use volatile writes to prevent the compiler from optimizing away the
    /// padding.
    fn new() -> Self {
        let mut padding = MaybeUninit::<[u8; 1024]>::uninit();
        let mut ptr = unsafe { (*padding.as_mut_ptr()).as_mut_ptr() };
        for _ in 0..1024 {
            unsafe {
                ptr.write_volatile(0);
                ptr = ptr.offset(1);
            }
        }
        let padding = unsafe { padding.assume_init() };
        Self { _padding: padding }
    }
}
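The StackFramePadding helper uses MaybeUninit plus volatile writes to guarantee that the 1 KiB buffer is materialized on the stack. A shorter sketch aiming for the same effect with core::hint::black_box (available in core since Rust 1.66) is shown below; whether black_box reliably prevents the allocation from being optimized out is an assumption, which is presumably why the test relies on volatile writes instead.

/// Hypothetical alternative to StackFramePadding::new(); not part of the test.
#[inline(never)]
fn large_func_with_black_box() {
    // black_box is an opaque identity function, so the optimizer must assume
    // the 1 KiB array is observed and keep it live on the stack.
    let padding = core::hint::black_box([0u8; 1024]);
    core::hint::black_box(&padding);
}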
1 change: 1 addition & 0 deletions examples/tests/task/unwind/diverted.txt
@@ -0,0 +1 @@
Task successfully restarted after a diverted forced unwinding.