Rehabilitate _current usage #83674

Merged · 3 commits · Jan 10, 2025
2 changes: 1 addition & 1 deletion arch/arc/Kconfig
@@ -262,7 +262,7 @@ config ARC_CURRENT_THREAD_USE_NO_TLS
RGF_NUM_BANKS the parameter is disabled by-default because banks syncronization
requires significant time, and it slows down performance.
ARCMWDT works with tls pointer in different way then GCC. Optimized access to
-TLS pointer via arch_current_thread() does not provide significant advantages
+TLS pointer via the _current symbol does not provide significant advantages
in case of MetaWare.

config GEN_ISR_TABLES
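
As background for the trade-off this help text describes, here is a minimal sketch (hypothetical names and layout, not the actual Zephyr sources) contrasting the two ways a port can resolve the running thread: a load from per-CPU bookkeeping on every access versus a pointer cached in thread-local storage.

```c
/* Hypothetical illustration only -- not the real Zephyr definitions. */
struct k_thread;

struct cpu_bookkeeping {
	struct k_thread *current;	/* maintained by the scheduler */
};

extern struct cpu_bookkeeping cpu0;	/* assumed per-CPU record */

/* Option A: read the per-CPU structure on every access. */
static inline struct k_thread *current_via_cpu_struct(void)
{
	return cpu0.current;
}

/* Option B: cache the pointer in a __thread (TLS) variable so the compiler
 * can fold repeated reads -- the optimization the help text says buys
 * little when building with MWDT.
 */
extern __thread struct k_thread *tls_current;

static inline struct k_thread *current_via_tls(void)
{
	return tls_current;
}
```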
2 changes: 1 addition & 1 deletion arch/arc/core/fault.c
@@ -55,7 +55,7 @@ static bool z_check_thread_stack_fail(const uint32_t fault_addr, uint32_t sp)
{
#if defined(CONFIG_MULTITHREADING)
uint32_t guard_end, guard_start;
-const struct k_thread *thread = arch_current_thread();
+const struct k_thread *thread = _current;

if (!thread) {
/* TODO: Under what circumstances could we get here ? */
4 changes: 2 additions & 2 deletions arch/arc/core/irq_offload.c
@@ -49,8 +49,8 @@ void arch_irq_offload(irq_offload_routine_t routine, const void *parameter)

__asm__ volatile("sync");

-/* If arch_current_thread() was aborted in the offload routine, we shouldn't be here */
-__ASSERT_NO_MSG((arch_current_thread()->base.thread_state & _THREAD_DEAD) == 0);
+/* If _current was aborted in the offload routine, we shouldn't be here */
+__ASSERT_NO_MSG((_current->base.thread_state & _THREAD_DEAD) == 0);
}

/* need to be executed on every core in the system */
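
For context, a hedged usage sketch of the offload API exercised here: irq_offload() runs a routine synchronously in interrupt context on the calling CPU, so the calling thread is still the current thread when it returns, which is what the assertion above relies on. Only irq_offload() itself is a real API; the surrounding names are made up.

```c
#include <zephyr/kernel.h>
#include <zephyr/irq_offload.h>

static void offload_routine(const void *param)
{
	/* runs in interrupt (handler) context */
	ARG_UNUSED(param);
}

void run_in_irq_context(void)
{
	irq_offload(offload_routine, NULL);
	/* back in thread context here, same current thread */
}
```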
16 changes: 8 additions & 8 deletions arch/arc/core/thread.c
@@ -210,7 +210,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
#ifdef CONFIG_MULTITHREADING
void *z_arch_get_next_switch_handle(struct k_thread **old_thread)
{
-*old_thread = arch_current_thread();
+*old_thread = _current;

return z_get_next_switch_handle(NULL);
}
@@ -227,16 +227,16 @@ void *z_arch_get_next_switch_handle(struct k_thread **old_thread)
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
void *p1, void *p2, void *p3)
{
-setup_stack_vars(arch_current_thread());
+setup_stack_vars(_current);

/* possible optimizaiton: no need to load mem domain anymore */
/* need to lock cpu here ? */
-configure_mpu_thread(arch_current_thread());
+configure_mpu_thread(_current);

z_arc_userspace_enter(user_entry, p1, p2, p3,
-(uint32_t)arch_current_thread()->stack_info.start,
-(arch_current_thread()->stack_info.size -
-arch_current_thread()->stack_info.delta), arch_current_thread());
+(uint32_t)_current->stack_info.start,
+(_current->stack_info.size -
+_current->stack_info.delta), _current);
CODE_UNREACHABLE;
}
#endif
@@ -336,7 +336,7 @@ int arc_vpx_lock(k_timeout_t timeout)

id = _current_cpu->id;
#if (CONFIG_MP_MAX_NUM_CPUS > 1) && defined(CONFIG_SCHED_CPU_MASK)
-__ASSERT(!arch_is_in_isr() && (arch_current_thread()->base.cpu_mask == BIT(id)), "");
+__ASSERT(!arch_is_in_isr() && (_current->base.cpu_mask == BIT(id)), "");
#endif
k_spin_unlock(&lock, key);

@@ -355,7 +355,7 @@ void arc_vpx_unlock(void)

key = k_spin_lock(&lock);
#if (CONFIG_MP_MAX_NUM_CPUS > 1) && defined(CONFIG_SCHED_CPU_MASK)
-__ASSERT(!arch_is_in_isr() && (arch_current_thread()->base.cpu_mask == BIT(id)), "");
+__ASSERT(!arch_is_in_isr() && (_current->base.cpu_mask == BIT(id)), "");
#endif
id = _current_cpu->id;
k_spin_unlock(&lock, key);
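
A hedged usage sketch of what these assertions expect from callers: with CONFIG_SCHED_CPU_MASK enabled, the thread must be pinned to a single CPU before taking the VPX lock. k_thread_cpu_pin(), k_current_get() and the arc_vpx_*() calls shown in this hunk are real APIs; the wrapper function and timeout are illustrative.

```c
#include <zephyr/kernel.h>

/* assumed to come from the ARC VPX header, which this diff does not show */
extern int arc_vpx_lock(k_timeout_t timeout);
extern void arc_vpx_unlock(void);

void do_vector_work(void)
{
	/* Pin the calling thread to CPU 0 so the cpu_mask check holds. */
	k_thread_cpu_pin(k_current_get(), 0);

	if (arc_vpx_lock(K_MSEC(100)) == 0) {
		/* ... use the VPX vector unit ... */
		arc_vpx_unlock();
	}
}
```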
2 changes: 1 addition & 1 deletion arch/arc/core/tls.c
@@ -29,7 +29,7 @@ size_t arch_tls_stack_setup(struct k_thread *new_thread, char *stack_ptr)

void *_Preserve_flags _mwget_tls(void)
{
-return (void *)(arch_current_thread()->tls);
+return (void *)(_current->tls);
}

#else
4 changes: 2 additions & 2 deletions arch/arm/core/cortex_a_r/fault.c
@@ -178,7 +178,7 @@ bool z_arm_fault_undef_instruction_fp(void)
* context because it is about to be overwritten.
*/
if (((_current_cpu->nested == 2)
-&& (arch_current_thread()->base.user_options & K_FP_REGS))
+&& (_current->base.user_options & K_FP_REGS))
|| ((_current_cpu->nested > 2)
&& (spill_esf->undefined & FPEXC_EN))) {
/*
@@ -196,7 +196,7 @@ bool z_arm_fault_undef_instruction_fp(void)
* means that a thread that uses the VFP does not have to,
* but should, set K_FP_REGS on thread creation.
*/
-arch_current_thread()->base.user_options |= K_FP_REGS;
+_current->base.user_options |= K_FP_REGS;
}

return false;
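
A hedged example of the alternative the comment mentions: the handler above tags a thread with K_FP_REGS lazily after its first VFP trap, but declaring the option at creation time avoids the trap altogether (assumes CONFIG_FPU_SHARING; stack size and priority below are arbitrary).

```c
#include <zephyr/kernel.h>

#define FP_STACK_SIZE 1024

K_THREAD_STACK_DEFINE(fp_stack, FP_STACK_SIZE);
static struct k_thread fp_thread;

static void fp_entry(void *p1, void *p2, void *p3)
{
	volatile float f = 1.5f;

	f *= 2.0f;	/* touches the VFP register bank */
}

void start_fp_thread(void)
{
	k_thread_create(&fp_thread, fp_stack, K_THREAD_STACK_SIZEOF(fp_stack),
			fp_entry, NULL, NULL, NULL,
			5, K_FP_REGS, K_NO_WAIT);
}
```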
4 changes: 2 additions & 2 deletions arch/arm/core/cortex_a_r/swap_helper.S
@@ -70,7 +70,7 @@ SECTION_FUNC(TEXT, z_arm_do_swap)

#if defined(CONFIG_FPU_SHARING)
ldrb r0, [r2, #_thread_offset_to_user_options]
-tst r0, #K_FP_REGS /* arch_current_thread()->base.user_options & K_FP_REGS */
+tst r0, #K_FP_REGS /* _current->base.user_options & K_FP_REGS */
beq out_fp_inactive

mov ip, #FPEXC_EN
@@ -152,7 +152,7 @@ out_fp_inactive:

#if defined(CONFIG_FPU_SHARING)
ldrb r0, [r2, #_thread_offset_to_user_options]
-tst r0, #K_FP_REGS /* arch_current_thread()->base.user_options & K_FP_REGS */
+tst r0, #K_FP_REGS /* _current->base.user_options & K_FP_REGS */
beq in_fp_inactive

mov r3, #FPEXC_EN
36 changes: 18 additions & 18 deletions arch/arm/core/cortex_a_r/thread.c
@@ -198,8 +198,8 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
{

/* Set up privileged stack before entering user mode */
-arch_current_thread()->arch.priv_stack_start =
-(uint32_t)z_priv_stack_find(arch_current_thread()->stack_obj);
+_current->arch.priv_stack_start =
+(uint32_t)z_priv_stack_find(_current->stack_obj);
#if defined(CONFIG_MPU_STACK_GUARD)
#if defined(CONFIG_THREAD_STACK_INFO)
/* We're dropping to user mode which means the guard area is no
@@ -208,37 +208,37 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
* which accounted for memory borrowed from the thread stack.
*/
#if FP_GUARD_EXTRA_SIZE > 0
-if ((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
-arch_current_thread()->stack_info.start -= FP_GUARD_EXTRA_SIZE;
-arch_current_thread()->stack_info.size += FP_GUARD_EXTRA_SIZE;
+if ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
+_current->stack_info.start -= FP_GUARD_EXTRA_SIZE;
+_current->stack_info.size += FP_GUARD_EXTRA_SIZE;
}
#endif /* FP_GUARD_EXTRA_SIZE */
-arch_current_thread()->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
-arch_current_thread()->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
+_current->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
+_current->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_THREAD_STACK_INFO */

/* Stack guard area reserved at the bottom of the thread's
* privileged stack. Adjust the available (writable) stack
* buffer area accordingly.
*/
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
-arch_current_thread()->arch.priv_stack_start +=
-((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
+_current->arch.priv_stack_start +=
+((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
#else
-arch_current_thread()->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
+_current->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
#endif /* CONFIG_MPU_STACK_GUARD */

#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
-arch_current_thread()->arch.priv_stack_end =
-arch_current_thread()->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE;
+_current->arch.priv_stack_end =
+_current->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE;
#endif

z_arm_userspace_enter(user_entry, p1, p2, p3,
-(uint32_t)arch_current_thread()->stack_info.start,
-arch_current_thread()->stack_info.size -
-arch_current_thread()->stack_info.delta);
+(uint32_t)_current->stack_info.start,
+_current->stack_info.size -
+_current->stack_info.delta);
CODE_UNREACHABLE;
}
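
For orientation, a hedged sketch of how this function is normally reached: a privileged thread calls the public k_thread_user_mode_enter() once its privileged setup is done, and arch_user_mode_enter() performs the architecture-specific drop. The entry functions below are illustrative only.

```c
#include <zephyr/kernel.h>

static void user_entry(void *p1, void *p2, void *p3)
{
	/* runs unprivileged; kernel data is reachable only via syscalls */
}

void supervisor_entry(void *p1, void *p2, void *p3)
{
	/* grant kernel objects, join memory domains, etc. here */
	k_thread_user_mode_enter(user_entry, p1, p2, p3);
	/* never returns */
}
```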

@@ -304,7 +304,7 @@ EXPORT_SYMBOL(z_arm_thread_is_in_user_mode);
uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp)
{
#if defined(CONFIG_MULTITHREADING)
-const struct k_thread *thread = arch_current_thread();
+const struct k_thread *thread = _current;

if (thread == NULL) {
return 0;
@@ -314,7 +314,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp
#if (defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)) && \
defined(CONFIG_MPU_STACK_GUARD)
uint32_t guard_len =
-((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
+((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
#else
/* If MPU_STACK_GUARD is not enabled, the guard length is
@@ -377,7 +377,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
int arch_float_disable(struct k_thread *thread)
{
-if (thread != arch_current_thread()) {
+if (thread != _current) {
return -EINVAL;
}
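
A hedged usage note: arch_float_disable() backs the public k_float_disable() API, and the check above means that on this architecture it only succeeds when a thread disables FP sharing for itself. The wrapper below is illustrative.

```c
#include <errno.h>
#include <zephyr/kernel.h>

void stop_using_fpu(void)
{
	int ret = k_float_disable(k_current_get());

	if (ret == -EINVAL) {
		/* rejected: called on a thread other than the running one */
	}
}
```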

2 changes: 1 addition & 1 deletion arch/arm/core/cortex_m/swap_helper.S
@@ -288,7 +288,7 @@ in_fp_endif:
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
/* Re-program dynamic memory map */
push {r2,lr}
-mov r0, r2 /* arch_current_thread() thread */
+mov r0, r2 /* _current thread */
bl z_arm_configure_dynamic_mpu_regions
pop {r2,lr}
#endif
34 changes: 17 additions & 17 deletions arch/arm/core/cortex_m/thread.c
@@ -231,8 +231,8 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
{

/* Set up privileged stack before entering user mode */
-arch_current_thread()->arch.priv_stack_start =
-(uint32_t)z_priv_stack_find(arch_current_thread()->stack_obj);
+_current->arch.priv_stack_start =
+(uint32_t)z_priv_stack_find(_current->stack_obj);
#if defined(CONFIG_MPU_STACK_GUARD)
#if defined(CONFIG_THREAD_STACK_INFO)
/* We're dropping to user mode which means the guard area is no
@@ -241,32 +241,32 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
* which accounted for memory borrowed from the thread stack.
*/
#if FP_GUARD_EXTRA_SIZE > 0
-if ((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
-arch_current_thread()->stack_info.start -= FP_GUARD_EXTRA_SIZE;
-arch_current_thread()->stack_info.size += FP_GUARD_EXTRA_SIZE;
+if ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
+_current->stack_info.start -= FP_GUARD_EXTRA_SIZE;
+_current->stack_info.size += FP_GUARD_EXTRA_SIZE;
}
#endif /* FP_GUARD_EXTRA_SIZE */
-arch_current_thread()->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
-arch_current_thread()->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
+_current->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
+_current->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_THREAD_STACK_INFO */

/* Stack guard area reserved at the bottom of the thread's
* privileged stack. Adjust the available (writable) stack
* buffer area accordingly.
*/
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
-arch_current_thread()->arch.priv_stack_start +=
-((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
+_current->arch.priv_stack_start +=
+((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
#else
-arch_current_thread()->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
+_current->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
#endif /* CONFIG_MPU_STACK_GUARD */

z_arm_userspace_enter(user_entry, p1, p2, p3,
-(uint32_t)arch_current_thread()->stack_info.start,
-arch_current_thread()->stack_info.size -
-arch_current_thread()->stack_info.delta);
+(uint32_t)_current->stack_info.start,
+_current->stack_info.size -
+_current->stack_info.delta);
CODE_UNREACHABLE;
}

@@ -379,7 +379,7 @@ void configure_builtin_stack_guard(struct k_thread *thread)
uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp)
{
#if defined(CONFIG_MULTITHREADING)
-const struct k_thread *thread = arch_current_thread();
+const struct k_thread *thread = _current;

if (thread == NULL) {
return 0;
@@ -389,7 +389,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp
#if (defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)) && \
defined(CONFIG_MPU_STACK_GUARD)
uint32_t guard_len =
-((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
+((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
#else
/* If MPU_STACK_GUARD is not enabled, the guard length is
@@ -452,7 +452,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
int arch_float_disable(struct k_thread *thread)
{
-if (thread != arch_current_thread()) {
+if (thread != _current) {
return -EINVAL;
}

@@ -522,7 +522,7 @@ void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr,
{
z_arm_prepare_switch_to_main();

-arch_current_thread_set(main_thread);
+z_current_thread_set(main_thread);

#if defined(CONFIG_THREAD_LOCAL_STORAGE)
/* On Cortex-M, TLS uses a global variable as pointer to
2 changes: 1 addition & 1 deletion arch/arm/core/cortex_m/thread_abort.c
@@ -27,7 +27,7 @@ void z_impl_k_thread_abort(k_tid_t thread)
{
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, abort, thread);

-if (arch_current_thread() == thread) {
+if (_current == thread) {
if (arch_is_in_isr()) {
/* ARM is unlike most arches in that this is true
* even for non-peripheral interrupts, even though
6 changes: 3 additions & 3 deletions arch/arm/include/cortex_a_r/kernel_arch_func.h
@@ -40,16 +40,16 @@ static ALWAYS_INLINE void arch_kernel_init(void)
static ALWAYS_INLINE int arch_swap(unsigned int key)
{
/* store off key and return value */
-arch_current_thread()->arch.basepri = key;
-arch_current_thread()->arch.swap_return_value = -EAGAIN;
+_current->arch.basepri = key;
+_current->arch.swap_return_value = -EAGAIN;

z_arm_cortex_r_svc();
irq_unlock(key);

/* Context switch is performed here. Returning implies the
* thread has been context-switched-in again.
*/
-return arch_current_thread()->arch.swap_return_value;
+return _current->arch.swap_return_value;
}

static ALWAYS_INLINE void
6 changes: 3 additions & 3 deletions arch/arm/include/cortex_m/kernel_arch_func.h
@@ -87,8 +87,8 @@ extern void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf);
static ALWAYS_INLINE int arch_swap(unsigned int key)
{
/* store off key and return value */
-arch_current_thread()->arch.basepri = key;
-arch_current_thread()->arch.swap_return_value = -EAGAIN;
+_current->arch.basepri = key;
+_current->arch.swap_return_value = -EAGAIN;

/* set pending bit to make sure we will take a PendSV exception */
SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;
@@ -99,7 +99,7 @@ static ALWAYS_INLINE int arch_swap(unsigned int key)
/* Context switch is performed here. Returning implies the
* thread has been context-switched-in again.
*/
-return arch_current_thread()->arch.swap_return_value;
+return _current->arch.swap_return_value;
}
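
To make the return-value handshake in these comments concrete, a toy model (hypothetical types and functions, not the kernel's actual plumbing): the blocking side records -EAGAIN as its default result before switching out, and whoever wakes it may overwrite that with a success code before it resumes, which is the value arch_swap() then returns.

```c
#include <errno.h>

struct toy_thread {
	int swap_return_value;
};

/* blocking side, before the context switch */
static void toy_block(struct toy_thread *self)
{
	self->swap_return_value = -EAGAIN;	/* result if nobody wakes us */
	/* ... context switch happens here ... */
}

/* waker side, while the blocked thread is switched out */
static void toy_wake(struct toy_thread *waiter)
{
	waiter->swap_return_value = 0;		/* hand back "success" */
	/* ... make the waiter runnable again ... */
}
```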


6 changes: 3 additions & 3 deletions arch/arm64/core/cortex_r/arm_mpu.c
@@ -727,7 +727,7 @@ static int configure_dynamic_mpu_regions(struct k_thread *thread)
*/
thread->arch.region_num = (uint8_t)region_num;

-if (thread == arch_current_thread()) {
+if (thread == _current) {
ret = flush_dynamic_regions_to_mpu(dyn_regions, region_num);
}

@@ -795,7 +795,7 @@ int arch_mem_domain_thread_add(struct k_thread *thread)

ret = configure_dynamic_mpu_regions(thread);
#ifdef CONFIG_SMP
-if (ret == 0 && thread != arch_current_thread()) {
+if (ret == 0 && thread != _current) {
/* the thread could be running on another CPU right now */
z_arm64_mem_cfg_ipi();
}
@@ -810,7 +810,7 @@ int arch_mem_domain_thread_remove(struct k_thread *thread)

ret = configure_dynamic_mpu_regions(thread);
#ifdef CONFIG_SMP
-if (ret == 0 && thread != arch_current_thread()) {
+if (ret == 0 && thread != _current) {
/* the thread could be running on another CPU right now */
z_arm64_mem_cfg_ipi();
}
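
A hedged example of how these hooks are reached from application code: the public memory-domain API ends up in arch_mem_domain_thread_add()/_remove(), and on SMP the IPI above covers a target thread that is running on another CPU. The partition, domain and function names below are made up for illustration.

```c
#include <zephyr/kernel.h>
#include <zephyr/app_memory/app_memdomain.h>

K_APPMEM_PARTITION_DEFINE(app_part);	/* hypothetical partition */

static struct k_mem_domain app_domain;

void attach_to_domain(k_tid_t tid)
{
	struct k_mem_partition *parts[] = { &app_part };

	k_mem_domain_init(&app_domain, ARRAY_SIZE(parts), parts);
	k_mem_domain_add_thread(&app_domain, tid);
}
```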