
Revert "arch: deprecate `_current`"

Mostly a revert of commit b1def7145f ("arch: deprecate `_current`").

This commit was part of PR #80716, whose initial purpose was to provide
an architecture-specific optimization for _current. The actual deprecation
was sneaked in later on without proper discussion.

The Zephyr core has always used _current and that was fine. It is quite
prevalent as well, and the alternative is proving rather verbose.
Furthermore, as a concept, the "current thread" is not something that is
necessarily architecture-specific, so the primary abstraction should not
carry the arch_ prefix.

Hence this revert.

Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
Branch: pull/81536/head
Nicolas Pitre authored 6 months ago; committed by Benjamin Cabé
Commit: 46aa6717ff
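For readers skimming the diff below, the substance of the change is simply which spelling in-tree code uses to name the currently running thread. Here is a minimal sketch of the two forms, assuming the post-revert definitions from include/zephyr/kernel_structs.h shown further down; the helper function itself is hypothetical and for illustration only.

#include <zephyr/kernel.h>  /* pulls in kernel_structs.h, where _current is defined */

/* Hypothetical helper, illustration only. */
static bool is_running_thread(const struct k_thread *thread)
{
	/* Short form restored by this revert: */
	return thread == _current;

	/* Equivalent, more verbose form that the deprecation pushed:
	 *
	 *     return thread == arch_current_thread();
	 */
}

Both forms resolve to the same thread: on SMP builds _current expands to arch_current_thread(), and on uniprocessor builds to _kernel.cpus[0].current, as the include/zephyr/kernel_structs.h hunk below shows.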
Changed files:

  1. arch/arc/Kconfig (2)
  2. arch/arc/core/fault.c (2)
  3. arch/arc/core/irq_offload.c (4)
  4. arch/arc/core/thread.c (16)
  5. arch/arc/core/tls.c (2)
  6. arch/arm/core/cortex_a_r/fault.c (4)
  7. arch/arm/core/cortex_a_r/swap_helper.S (4)
  8. arch/arm/core/cortex_a_r/thread.c (36)
  9. arch/arm/core/cortex_m/swap_helper.S (2)
  10. arch/arm/core/cortex_m/thread.c (32)
  11. arch/arm/core/cortex_m/thread_abort.c (2)
  12. arch/arm/include/cortex_a_r/kernel_arch_func.h (6)
  13. arch/arm/include/cortex_m/kernel_arch_func.h (6)
  14. arch/arm64/core/cortex_r/arm_mpu.c (6)
  15. arch/arm64/core/fatal.c (7)
  16. arch/arm64/core/fpu.c (14)
  17. arch/arm64/core/mmu.c (2)
  18. arch/arm64/core/smp.c (2)
  19. arch/arm64/core/thread.c (10)
  20. arch/posix/core/swap.c (12)
  21. arch/posix/core/thread.c (2)
  22. arch/riscv/Kconfig (2)
  23. arch/riscv/core/fatal.c (28)
  24. arch/riscv/core/fpu.c (24)
  25. arch/riscv/core/isr.S (4)
  26. arch/riscv/core/pmp.c (6)
  27. arch/riscv/core/stacktrace.c (13)
  28. arch/riscv/core/thread.c (27)
  29. arch/sparc/core/thread.c (2)
  30. arch/x86/core/fatal.c (18)
  31. arch/x86/core/ia32/float.c (8)
  32. arch/x86/core/userspace.c (14)
  33. arch/x86/core/x86_mmu.c (15)
  34. arch/xtensa/core/fatal.c (2)
  35. arch/xtensa/core/ptables.c (2)
  36. arch/xtensa/core/thread.c (2)
  37. arch/xtensa/core/vector_handlers.c (2)
  38. boards/native/native_posix/irq_handler.c (2)
  39. boards/native/native_sim/irq_handler.c (2)
  40. boards/native/nrf_bsim/irq_handler.c (2)
  41. doc/kernel/services/smp/smp.rst (2)
  42. doc/releases/migration-guide-4.1.rst (4)
  43. doc/releases/release-notes-4.1.rst (4)
  44. drivers/wifi/eswifi/eswifi.h (4)
  45. include/zephyr/arch/arch_interface.h (2)
  46. include/zephyr/arch/common/arch_inlines.h (2)
  47. include/zephyr/arch/x86/ia32/arch.h (2)
  48. include/zephyr/internal/syscall_handler.h (4)
  49. include/zephyr/kernel_structs.h (8)
  50. kernel/Kconfig (2)
  51. kernel/errno.c (4)
  52. kernel/fatal.c (2)
  53. kernel/idle.c (4)
  54. kernel/include/kernel_internal.h (2)
  55. kernel/include/ksched.h (4)
  56. kernel/include/kswap.h (8)
  57. kernel/include/kthread.h (6)
  58. kernel/include/priority_q.h (20)
  59. kernel/ipi.c (2)
  60. kernel/mailbox.c (10)
  61. kernel/mem_domain.c (2)
  62. kernel/mem_slab.c (2)
  63. kernel/mempool.c (2)
  64. kernel/mmu.c (2)
  65. kernel/msg_q.c (4)
  66. kernel/mutex.c (16)
  67. kernel/pipes.c (12)
  68. kernel/poll.c (2)
  69. kernel/queue.c (4)
  70. kernel/sched.c (138)
  71. kernel/smp.c (10)
  72. kernel/spinlock_validate.c (8)
  73. kernel/stack.c (2)
  74. kernel/thread.c (53)
  75. kernel/timeslicing.c (6)
  76. kernel/userspace.c (12)
  77. kernel/userspace_handler.c (2)
  78. kernel/work.c (2)
  79. lib/libc/armstdc/src/libc-hooks.c (2)
  80. lib/os/p4wq.c (12)
  81. scripts/build/gen_syscalls.py (8)
  82. soc/espressif/esp32/soc.c (2)
  83. soc/espressif/esp32/soc_appcpu.c (2)
  84. soc/espressif/esp32s2/soc.c (2)
  85. soc/espressif/esp32s3/soc.c (2)
  86. soc/espressif/esp32s3/soc_appcpu.c (2)
  87. subsys/net/lib/sockets/sockets.c (2)
  88. subsys/portability/cmsis_rtos_v2/kernel.c (6)
  89. subsys/profiling/perf/backends/perf_riscv.c (4)
  90. subsys/profiling/perf/backends/perf_x86.c (2)
  91. subsys/profiling/perf/backends/perf_x86_64.c (8)
  92. subsys/shell/modules/kernel_service/thread/unwind.c (2)
  93. tests/arch/arm/arm_interrupt/src/arm_interrupt.c (6)
  94. tests/arch/arm/arm_thread_swap/src/arm_syscalls.c (28)
  95. tests/arch/arm/arm_thread_swap/src/arm_thread_arch.c (54)
  96. tests/arch/riscv/userspace/riscv_gp/src/main.c (2)
  97. tests/benchmarks/footprints/src/system_thread.c (4)
  98. tests/kernel/context/src/main.c (8)
  99. tests/kernel/fatal/exception/src/main.c (2)
  100. tests/kernel/fatal/message_capture/src/main.c (2)

Some files were not shown because too many files have changed in this diff.

2
arch/arc/Kconfig

@@ -262,7 +262,7 @@ config ARC_CURRENT_THREAD_USE_NO_TLS
RGF_NUM_BANKS the parameter is disabled by-default because banks syncronization
requires significant time, and it slows down performance.
ARCMWDT works with tls pointer in different way then GCC. Optimized access to
TLS pointer via arch_current_thread() does not provide significant advantages
TLS pointer via the _current symbol does not provide significant advantages
in case of MetaWare.
config GEN_ISR_TABLES

2
arch/arc/core/fault.c

@@ -55,7 +55,7 @@ static bool z_check_thread_stack_fail(const uint32_t fault_addr, uint32_t sp)
{
#if defined(CONFIG_MULTITHREADING)
uint32_t guard_end, guard_start;
const struct k_thread *thread = arch_current_thread();
const struct k_thread *thread = _current;
if (!thread) {
/* TODO: Under what circumstances could we get here ? */

4
arch/arc/core/irq_offload.c

@@ -49,8 +49,8 @@ void arch_irq_offload(irq_offload_routine_t routine, const void *parameter)
__asm__ volatile("sync");
/* If arch_current_thread() was aborted in the offload routine, we shouldn't be here */
__ASSERT_NO_MSG((arch_current_thread()->base.thread_state & _THREAD_DEAD) == 0);
/* If _current was aborted in the offload routine, we shouldn't be here */
__ASSERT_NO_MSG((_current->base.thread_state & _THREAD_DEAD) == 0);
}
/* need to be executed on every core in the system */

16
arch/arc/core/thread.c

@@ -210,7 +210,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
#ifdef CONFIG_MULTITHREADING
void *z_arch_get_next_switch_handle(struct k_thread **old_thread)
{
*old_thread = arch_current_thread();
*old_thread = _current;
return z_get_next_switch_handle(NULL);
}
@@ -227,16 +227,16 @@ void *z_arch_get_next_switch_handle(struct k_thread **old_thread)
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
void *p1, void *p2, void *p3)
{
setup_stack_vars(arch_current_thread());
setup_stack_vars(_current);
/* possible optimizaiton: no need to load mem domain anymore */
/* need to lock cpu here ? */
configure_mpu_thread(arch_current_thread());
configure_mpu_thread(_current);
z_arc_userspace_enter(user_entry, p1, p2, p3,
(uint32_t)arch_current_thread()->stack_info.start,
(arch_current_thread()->stack_info.size -
arch_current_thread()->stack_info.delta), arch_current_thread());
(uint32_t)_current->stack_info.start,
(_current->stack_info.size -
_current->stack_info.delta), _current);
CODE_UNREACHABLE;
}
#endif
@@ -336,7 +336,7 @@ int arc_vpx_lock(k_timeout_t timeout)
id = _current_cpu->id;
#if (CONFIG_MP_MAX_NUM_CPUS > 1) && defined(CONFIG_SCHED_CPU_MASK)
__ASSERT(!arch_is_in_isr() && (arch_current_thread()->base.cpu_mask == BIT(id)), "");
__ASSERT(!arch_is_in_isr() && (_current->base.cpu_mask == BIT(id)), "");
#endif
k_spin_unlock(&lock, key);
@@ -355,7 +355,7 @@ void arc_vpx_unlock(void)
key = k_spin_lock(&lock);
#if (CONFIG_MP_MAX_NUM_CPUS > 1) && defined(CONFIG_SCHED_CPU_MASK)
__ASSERT(!arch_is_in_isr() && (arch_current_thread()->base.cpu_mask == BIT(id)), "");
__ASSERT(!arch_is_in_isr() && (_current->base.cpu_mask == BIT(id)), "");
#endif
id = _current_cpu->id;
k_spin_unlock(&lock, key);

2
arch/arc/core/tls.c

@@ -29,7 +29,7 @@ size_t arch_tls_stack_setup(struct k_thread *new_thread, char *stack_ptr)
void *_Preserve_flags _mwget_tls(void)
{
return (void *)(arch_current_thread()->tls);
return (void *)(_current->tls);
}
#else

4
arch/arm/core/cortex_a_r/fault.c

@@ -178,7 +178,7 @@ bool z_arm_fault_undef_instruction_fp(void)
* context because it is about to be overwritten.
*/
if (((_current_cpu->nested == 2)
&& (arch_current_thread()->base.user_options & K_FP_REGS))
&& (_current->base.user_options & K_FP_REGS))
|| ((_current_cpu->nested > 2)
&& (spill_esf->undefined & FPEXC_EN))) {
/*
@@ -196,7 +196,7 @@ bool z_arm_fault_undef_instruction_fp(void)
* means that a thread that uses the VFP does not have to,
* but should, set K_FP_REGS on thread creation.
*/
arch_current_thread()->base.user_options |= K_FP_REGS;
_current->base.user_options |= K_FP_REGS;
}
return false;

4
arch/arm/core/cortex_a_r/swap_helper.S

@@ -70,7 +70,7 @@ SECTION_FUNC(TEXT, z_arm_do_swap)
#if defined(CONFIG_FPU_SHARING)
ldrb r0, [r2, #_thread_offset_to_user_options]
tst r0, #K_FP_REGS /* arch_current_thread()->base.user_options & K_FP_REGS */
tst r0, #K_FP_REGS /* _current->base.user_options & K_FP_REGS */
beq out_fp_inactive
mov ip, #FPEXC_EN
@@ -152,7 +152,7 @@ out_fp_inactive:
#if defined(CONFIG_FPU_SHARING)
ldrb r0, [r2, #_thread_offset_to_user_options]
tst r0, #K_FP_REGS /* arch_current_thread()->base.user_options & K_FP_REGS */
tst r0, #K_FP_REGS /* _current->base.user_options & K_FP_REGS */
beq in_fp_inactive
mov r3, #FPEXC_EN

36
arch/arm/core/cortex_a_r/thread.c

@@ -198,8 +198,8 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
{
/* Set up privileged stack before entering user mode */
arch_current_thread()->arch.priv_stack_start =
(uint32_t)z_priv_stack_find(arch_current_thread()->stack_obj);
_current->arch.priv_stack_start =
(uint32_t)z_priv_stack_find(_current->stack_obj);
#if defined(CONFIG_MPU_STACK_GUARD)
#if defined(CONFIG_THREAD_STACK_INFO)
/* We're dropping to user mode which means the guard area is no
@@ -208,13 +208,13 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
* which accounted for memory borrowed from the thread stack.
*/
#if FP_GUARD_EXTRA_SIZE > 0
if ((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
arch_current_thread()->stack_info.start -= FP_GUARD_EXTRA_SIZE;
arch_current_thread()->stack_info.size += FP_GUARD_EXTRA_SIZE;
if ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
_current->stack_info.start -= FP_GUARD_EXTRA_SIZE;
_current->stack_info.size += FP_GUARD_EXTRA_SIZE;
}
#endif /* FP_GUARD_EXTRA_SIZE */
arch_current_thread()->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
arch_current_thread()->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
_current->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
_current->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_THREAD_STACK_INFO */
/* Stack guard area reserved at the bottom of the thread's
@@ -222,23 +222,23 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
* buffer area accordingly.
*/
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
arch_current_thread()->arch.priv_stack_start +=
((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
_current->arch.priv_stack_start +=
((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
#else
arch_current_thread()->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
_current->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
#endif /* CONFIG_MPU_STACK_GUARD */
#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
arch_current_thread()->arch.priv_stack_end =
arch_current_thread()->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE;
_current->arch.priv_stack_end =
_current->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE;
#endif
z_arm_userspace_enter(user_entry, p1, p2, p3,
(uint32_t)arch_current_thread()->stack_info.start,
arch_current_thread()->stack_info.size -
arch_current_thread()->stack_info.delta);
(uint32_t)_current->stack_info.start,
_current->stack_info.size -
_current->stack_info.delta);
CODE_UNREACHABLE;
}
@@ -304,7 +304,7 @@ EXPORT_SYMBOL(z_arm_thread_is_in_user_mode);
uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp)
{
#if defined(CONFIG_MULTITHREADING)
const struct k_thread *thread = arch_current_thread();
const struct k_thread *thread = _current;
if (thread == NULL) {
return 0;
@@ -314,7 +314,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp
#if (defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)) && \
defined(CONFIG_MPU_STACK_GUARD)
uint32_t guard_len =
((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
#else
/* If MPU_STACK_GUARD is not enabled, the guard length is
@@ -377,7 +377,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
int arch_float_disable(struct k_thread *thread)
{
if (thread != arch_current_thread()) {
if (thread != _current) {
return -EINVAL;
}

2
arch/arm/core/cortex_m/swap_helper.S

@@ -288,7 +288,7 @@ in_fp_endif:
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
/* Re-program dynamic memory map */
push {r2,lr}
mov r0, r2 /* arch_current_thread() thread */
mov r0, r2 /* _current thread */
bl z_arm_configure_dynamic_mpu_regions
pop {r2,lr}
#endif

32
arch/arm/core/cortex_m/thread.c

@@ -231,8 +231,8 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
{
/* Set up privileged stack before entering user mode */
arch_current_thread()->arch.priv_stack_start =
(uint32_t)z_priv_stack_find(arch_current_thread()->stack_obj);
_current->arch.priv_stack_start =
(uint32_t)z_priv_stack_find(_current->stack_obj);
#if defined(CONFIG_MPU_STACK_GUARD)
#if defined(CONFIG_THREAD_STACK_INFO)
/* We're dropping to user mode which means the guard area is no
@@ -241,13 +241,13 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
* which accounted for memory borrowed from the thread stack.
*/
#if FP_GUARD_EXTRA_SIZE > 0
if ((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
arch_current_thread()->stack_info.start -= FP_GUARD_EXTRA_SIZE;
arch_current_thread()->stack_info.size += FP_GUARD_EXTRA_SIZE;
if ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
_current->stack_info.start -= FP_GUARD_EXTRA_SIZE;
_current->stack_info.size += FP_GUARD_EXTRA_SIZE;
}
#endif /* FP_GUARD_EXTRA_SIZE */
arch_current_thread()->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
arch_current_thread()->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
_current->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
_current->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_THREAD_STACK_INFO */
/* Stack guard area reserved at the bottom of the thread's
@@ -255,18 +255,18 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
* buffer area accordingly.
*/
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
arch_current_thread()->arch.priv_stack_start +=
((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
_current->arch.priv_stack_start +=
((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
#else
arch_current_thread()->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
_current->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
#endif /* CONFIG_MPU_STACK_GUARD */
z_arm_userspace_enter(user_entry, p1, p2, p3,
(uint32_t)arch_current_thread()->stack_info.start,
arch_current_thread()->stack_info.size -
arch_current_thread()->stack_info.delta);
(uint32_t)_current->stack_info.start,
_current->stack_info.size -
_current->stack_info.delta);
CODE_UNREACHABLE;
}
@@ -379,7 +379,7 @@ void configure_builtin_stack_guard(struct k_thread *thread)
uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp)
{
#if defined(CONFIG_MULTITHREADING)
const struct k_thread *thread = arch_current_thread();
const struct k_thread *thread = _current;
if (thread == NULL) {
return 0;
@@ -389,7 +389,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp
#if (defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)) && \
defined(CONFIG_MPU_STACK_GUARD)
uint32_t guard_len =
((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
#else
/* If MPU_STACK_GUARD is not enabled, the guard length is
@@ -452,7 +452,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
int arch_float_disable(struct k_thread *thread)
{
if (thread != arch_current_thread()) {
if (thread != _current) {
return -EINVAL;
}

2
arch/arm/core/cortex_m/thread_abort.c

@@ -27,7 +27,7 @@ void z_impl_k_thread_abort(k_tid_t thread)
{
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, abort, thread);
if (arch_current_thread() == thread) {
if (_current == thread) {
if (arch_is_in_isr()) {
/* ARM is unlike most arches in that this is true
* even for non-peripheral interrupts, even though

6
arch/arm/include/cortex_a_r/kernel_arch_func.h

@@ -40,8 +40,8 @@ static ALWAYS_INLINE void arch_kernel_init(void)
static ALWAYS_INLINE int arch_swap(unsigned int key)
{
/* store off key and return value */
arch_current_thread()->arch.basepri = key;
arch_current_thread()->arch.swap_return_value = -EAGAIN;
_current->arch.basepri = key;
_current->arch.swap_return_value = -EAGAIN;
z_arm_cortex_r_svc();
irq_unlock(key);
@@ -49,7 +49,7 @@ static ALWAYS_INLINE int arch_swap(unsigned int key)
/* Context switch is performed here. Returning implies the
* thread has been context-switched-in again.
*/
return arch_current_thread()->arch.swap_return_value;
return _current->arch.swap_return_value;
}
static ALWAYS_INLINE void

6
arch/arm/include/cortex_m/kernel_arch_func.h

@@ -87,8 +87,8 @@ extern void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf);
static ALWAYS_INLINE int arch_swap(unsigned int key)
{
/* store off key and return value */
arch_current_thread()->arch.basepri = key;
arch_current_thread()->arch.swap_return_value = -EAGAIN;
_current->arch.basepri = key;
_current->arch.swap_return_value = -EAGAIN;
/* set pending bit to make sure we will take a PendSV exception */
SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;
@@ -99,7 +99,7 @@ static ALWAYS_INLINE int arch_swap(unsigned int key)
/* Context switch is performed here. Returning implies the
* thread has been context-switched-in again.
*/
return arch_current_thread()->arch.swap_return_value;
return _current->arch.swap_return_value;
}

6
arch/arm64/core/cortex_r/arm_mpu.c

@@ -727,7 +727,7 @@ static int configure_dynamic_mpu_regions(struct k_thread *thread)
*/
thread->arch.region_num = (uint8_t)region_num;
if (thread == arch_current_thread()) {
if (thread == _current) {
ret = flush_dynamic_regions_to_mpu(dyn_regions, region_num);
}
@@ -795,7 +795,7 @@ int arch_mem_domain_thread_add(struct k_thread *thread)
ret = configure_dynamic_mpu_regions(thread);
#ifdef CONFIG_SMP
if (ret == 0 && thread != arch_current_thread()) {
if (ret == 0 && thread != _current) {
/* the thread could be running on another CPU right now */
z_arm64_mem_cfg_ipi();
}
@@ -810,7 +810,7 @@ int arch_mem_domain_thread_remove(struct k_thread *thread)
ret = configure_dynamic_mpu_regions(thread);
#ifdef CONFIG_SMP
if (ret == 0 && thread != arch_current_thread()) {
if (ret == 0 && thread != _current) {
/* the thread could be running on another CPU right now */
z_arm64_mem_cfg_ipi();
}

7
arch/arm64/core/fatal.c

@@ -306,9 +306,8 @@ static bool z_arm64_stack_corruption_check(struct arch_esf *esf, uint64_t esr, u
}
}
#ifdef CONFIG_USERSPACE
else if ((arch_current_thread()->base.user_options & K_USER) != 0 &&
GET_ESR_EC(esr) == 0x24) {
sp_limit = (uint64_t)arch_current_thread()->stack_info.start;
else if ((_current->base.user_options & K_USER) != 0 && GET_ESR_EC(esr) == 0x24) {
sp_limit = (uint64_t)_current->stack_info.start;
guard_start = sp_limit - Z_ARM64_STACK_GUARD_SIZE;
sp = esf->sp;
if (sp <= sp_limit || (guard_start <= far && far <= sp_limit)) {
@@ -435,7 +434,7 @@ void z_arm64_do_kernel_oops(struct arch_esf *esf)
* User mode is only allowed to induce oopses and stack check
* failures via software-triggered system fatal exceptions.
*/
if (((arch_current_thread()->base.user_options & K_USER) != 0) &&
if (((_current->base.user_options & K_USER) != 0) &&
reason != K_ERR_STACK_CHK_FAIL) {
reason = K_ERR_KERNEL_OOPS;
}

14
arch/arm64/core/fpu.c

@@ -36,7 +36,7 @@ static void DBG(char *msg, struct k_thread *th)
strcpy(buf, "CPU# exc# ");
buf[3] = '0' + _current_cpu->id;
buf[8] = '0' + arch_exception_depth();
strcat(buf, arch_current_thread()->name);
strcat(buf, _current->name);
strcat(buf, ": ");
strcat(buf, msg);
strcat(buf, " ");
@@ -125,7 +125,7 @@ static void flush_owned_fpu(struct k_thread *thread)
* replace it, and this avoids a deadlock where
* two CPUs want to pull each other's FPU context.
*/
if (thread == arch_current_thread()) {
if (thread == _current) {
arch_flush_local_fpu();
while (atomic_ptr_get(&_kernel.cpus[i].arch.fpu_owner) == thread) {
barrier_dsync_fence_full();
@@ -260,15 +260,15 @@ void z_arm64_fpu_trap(struct arch_esf *esf)
* Make sure the FPU context we need isn't live on another CPU.
* The current CPU's FPU context is NULL at this point.
*/
flush_owned_fpu(arch_current_thread());
flush_owned_fpu(_current);
#endif
/* become new owner */
atomic_ptr_set(&_current_cpu->arch.fpu_owner, arch_current_thread());
atomic_ptr_set(&_current_cpu->arch.fpu_owner, _current);
/* restore our content */
z_arm64_fpu_restore(&arch_current_thread()->arch.saved_fp_context);
DBG("restore", arch_current_thread());
z_arm64_fpu_restore(&_current->arch.saved_fp_context);
DBG("restore", _current);
}
/*
@@ -287,7 +287,7 @@ static void fpu_access_update(unsigned int exc_update_level)
if (arch_exception_depth() == exc_update_level) {
/* We're about to execute non-exception code */
if (atomic_ptr_get(&_current_cpu->arch.fpu_owner) == arch_current_thread()) {
if (atomic_ptr_get(&_current_cpu->arch.fpu_owner) == _current) {
/* turn on FPU access */
write_cpacr_el1(cpacr | CPACR_EL1_FPEN_NOTRAP);
} else {

2
arch/arm64/core/mmu.c

@@ -1309,7 +1309,7 @@ int arch_mem_domain_thread_add(struct k_thread *thread)
}
thread->arch.ptables = domain_ptables;
if (thread == arch_current_thread()) {
if (thread == _current) {
z_arm64_swap_ptables(thread);
} else {
#ifdef CONFIG_SMP

2
arch/arm64/core/smp.c

@@ -240,7 +240,7 @@ void mem_cfg_ipi_handler(const void *unused)
* This is a no-op if the page table is already the right one.
* Lock irq to prevent the interrupt during mem region switch.
*/
z_arm64_swap_mem_domains(arch_current_thread());
z_arm64_swap_mem_domains(_current);
arch_irq_unlock(key);
}

10
arch/arm64/core/thread.c

@@ -159,15 +159,15 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
uint64_t tmpreg;
/* Map the thread stack */
z_arm64_thread_mem_domains_init(arch_current_thread());
z_arm64_thread_mem_domains_init(_current);
/* Top of the user stack area */
stack_el0 = Z_STACK_PTR_ALIGN(arch_current_thread()->stack_info.start +
arch_current_thread()->stack_info.size -
arch_current_thread()->stack_info.delta);
stack_el0 = Z_STACK_PTR_ALIGN(_current->stack_info.start +
_current->stack_info.size -
_current->stack_info.delta);
/* Top of the privileged non-user-accessible part of the stack */
stack_el1 = (uintptr_t)(arch_current_thread()->stack_obj + ARCH_THREAD_STACK_RESERVED);
stack_el1 = (uintptr_t)(_current->stack_obj + ARCH_THREAD_STACK_RESERVED);
register void *x0 __asm__("x0") = user_entry;
register void *x1 __asm__("x1") = p1;

12
arch/posix/core/swap.c

@@ -23,7 +23,7 @@
int arch_swap(unsigned int key)
{
/*
* struct k_thread * arch_current_thread() is the currently running thread
* struct k_thread * _current is the currently running thread
* struct k_thread * _kernel.ready_q.cache contains the next thread to
* run (cannot be NULL)
*
@@ -34,8 +34,8 @@ int arch_swap(unsigned int key)
#if CONFIG_INSTRUMENT_THREAD_SWITCHING
z_thread_mark_switched_out();
#endif
arch_current_thread()->callee_saved.key = key;
arch_current_thread()->callee_saved.retval = -EAGAIN;
_current->callee_saved.key = key;
_current->callee_saved.retval = -EAGAIN;
/* retval may be modified with a call to
* arch_thread_return_value_set()
@@ -47,7 +47,7 @@ int arch_swap(unsigned int key)
posix_thread_status_t *this_thread_ptr =
(posix_thread_status_t *)
arch_current_thread()->callee_saved.thread_status;
_current->callee_saved.thread_status;
arch_current_thread_set(_kernel.ready_q.cache);
@@ -66,9 +66,9 @@ int arch_swap(unsigned int key)
/* When we continue, _kernel->current points back to this thread */
irq_unlock(arch_current_thread()->callee_saved.key);
irq_unlock(_current->callee_saved.key);
return arch_current_thread()->callee_saved.retval;
return _current->callee_saved.retval;
}

2
arch/posix/core/thread.c

@@ -131,7 +131,7 @@ void z_impl_k_thread_abort(k_tid_t thread)
key = irq_lock();
if (arch_current_thread() == thread) {
if (_current == thread) {
if (tstatus->aborted == 0) { /* LCOV_EXCL_BR_LINE */
tstatus->aborted = 1;
} else {

2
arch/riscv/Kconfig

@@ -38,7 +38,7 @@ config RISCV_CURRENT_VIA_GP
select ARCH_HAS_CUSTOM_CURRENT_IMPL
help
Store the current thread's pointer into the global pointer (GP) register.
When is enabled, calls to `arch_current_thread()` & `k_sched_current_thread_query()` will
When is enabled, calls to `_current` & `k_sched_current_thread_query()` will
be reduced to a single register read.
endchoice # RISCV_GP_PURPOSE

28
arch/riscv/core/fatal.c

@@ -158,23 +158,23 @@ static bool bad_stack_pointer(struct arch_esf *esf)
uintptr_t sp = (uintptr_t)esf + sizeof(struct arch_esf);
#ifdef CONFIG_USERSPACE
if (arch_current_thread()->arch.priv_stack_start != 0 &&
sp >= arch_current_thread()->arch.priv_stack_start &&
sp < arch_current_thread()->arch.priv_stack_start + Z_RISCV_STACK_GUARD_SIZE) {
if (_current->arch.priv_stack_start != 0 &&
sp >= _current->arch.priv_stack_start &&
sp < _current->arch.priv_stack_start + Z_RISCV_STACK_GUARD_SIZE) {
return true;
}
if (z_stack_is_user_capable(arch_current_thread()->stack_obj) &&
sp >= arch_current_thread()->stack_info.start - K_THREAD_STACK_RESERVED &&
sp < arch_current_thread()->stack_info.start - K_THREAD_STACK_RESERVED
if (z_stack_is_user_capable(_current->stack_obj) &&
sp >= _current->stack_info.start - K_THREAD_STACK_RESERVED &&
sp < _current->stack_info.start - K_THREAD_STACK_RESERVED
+ Z_RISCV_STACK_GUARD_SIZE) {
return true;
}
#endif /* CONFIG_USERSPACE */
#if CONFIG_MULTITHREADING
if (sp >= arch_current_thread()->stack_info.start - K_KERNEL_STACK_RESERVED &&
sp < arch_current_thread()->stack_info.start - K_KERNEL_STACK_RESERVED
if (sp >= _current->stack_info.start - K_KERNEL_STACK_RESERVED &&
sp < _current->stack_info.start - K_KERNEL_STACK_RESERVED
+ Z_RISCV_STACK_GUARD_SIZE) {
return true;
}
@@ -191,10 +191,10 @@ static bool bad_stack_pointer(struct arch_esf *esf)
#ifdef CONFIG_USERSPACE
if ((esf->mstatus & MSTATUS_MPP) == 0 &&
(esf->sp < arch_current_thread()->stack_info.start ||
esf->sp > arch_current_thread()->stack_info.start +
arch_current_thread()->stack_info.size -
arch_current_thread()->stack_info.delta)) {
(esf->sp < _current->stack_info.start ||
esf->sp > _current->stack_info.start +
_current->stack_info.size -
_current->stack_info.delta)) {
/* user stack pointer moved outside of its allowed stack */
return true;
}
@@ -246,9 +246,9 @@ FUNC_NORETURN void arch_syscall_oops(void *ssf_ptr)
void z_impl_user_fault(unsigned int reason)
{
struct arch_esf *oops_esf = arch_current_thread()->syscall_frame;
struct arch_esf *oops_esf = _current->syscall_frame;
if (((arch_current_thread()->base.user_options & K_USER) != 0) &&
if (((_current->base.user_options & K_USER) != 0) &&
reason != K_ERR_STACK_CHK_FAIL) {
reason = K_ERR_KERNEL_OOPS;
}

24
arch/riscv/core/fpu.c

@@ -36,8 +36,8 @@ static void DBG(char *msg, struct k_thread *th)
strcpy(buf, "CPU# exc# ");
buf[3] = '0' + _current_cpu->id;
buf[8] = '0' + arch_current_thread()->arch.exception_depth;
strcat(buf, arch_current_thread()->name);
buf[8] = '0' + _current->arch.exception_depth;
strcat(buf, _current->name);
strcat(buf, ": ");
strcat(buf, msg);
strcat(buf, " ");
@@ -82,12 +82,12 @@ static void z_riscv_fpu_load(void)
"must be called with FPU access disabled");
/* become new owner */
atomic_ptr_set(&_current_cpu->arch.fpu_owner, arch_current_thread());
atomic_ptr_set(&_current_cpu->arch.fpu_owner, _current);
/* restore our content */
csr_set(mstatus, MSTATUS_FS_INIT);
z_riscv_fpu_restore(&arch_current_thread()->arch.saved_fp_context);
DBG("restore", arch_current_thread());
z_riscv_fpu_restore(&_current->arch.saved_fp_context);
DBG("restore", _current);
}
/*
@@ -168,7 +168,7 @@ static void flush_owned_fpu(struct k_thread *thread)
* replace it, and this avoids a deadlock where
* two CPUs want to pull each other's FPU context.
*/
if (thread == arch_current_thread()) {
if (thread == _current) {
z_riscv_fpu_disable();
arch_flush_local_fpu();
do {
@@ -213,7 +213,7 @@ void z_riscv_fpu_trap(struct arch_esf *esf)
/* save current owner's content if any */
arch_flush_local_fpu();
if (arch_current_thread()->arch.exception_depth > 0) {
if (_current->arch.exception_depth > 0) {
/*
* We were already in exception when the FPU access trapped.
* We give it access and prevent any further IRQ recursion
@@ -233,7 +233,7 @@ void z_riscv_fpu_trap(struct arch_esf *esf)
* Make sure the FPU context we need isn't live on another CPU.
* The current CPU's FPU context is NULL at this point.
*/
flush_owned_fpu(arch_current_thread());
flush_owned_fpu(_current);
#endif
/* make it accessible and clean to the returning context */
@@ -256,13 +256,13 @@ static bool fpu_access_allowed(unsigned int exc_update_level)
__ASSERT((csr_read(mstatus) & MSTATUS_IEN) == 0,
"must be called with IRQs disabled");
if (arch_current_thread()->arch.exception_depth == exc_update_level) {
if (_current->arch.exception_depth == exc_update_level) {
/* We're about to execute non-exception code */
if (_current_cpu->arch.fpu_owner == arch_current_thread()) {
if (_current_cpu->arch.fpu_owner == _current) {
/* everything is already in place */
return true;
}
if (arch_current_thread()->arch.fpu_recently_used) {
if (_current->arch.fpu_recently_used) {
/*
* Before this thread was context-switched out,
* it made active use of the FPU, but someone else
@@ -273,7 +273,7 @@ static bool fpu_access_allowed(unsigned int exc_update_level)
z_riscv_fpu_disable();
arch_flush_local_fpu();
#ifdef CONFIG_SMP
flush_owned_fpu(arch_current_thread());
flush_owned_fpu(_current);
#endif
z_riscv_fpu_load();
_current_cpu->arch.fpu_state = MSTATUS_FS_CLEAN;

4
arch/riscv/core/isr.S

@@ -299,7 +299,7 @@ is_fp: /* Process the FP trap and quickly return from exception */
mv a0, sp
tail z_riscv_fpu_trap
2:
no_fp: /* increment arch_current_thread()->arch.exception_depth */
no_fp: /* increment _current->arch.exception_depth */
lr t0, ___cpu_t_current_OFFSET(s0)
lb t1, _thread_offset_to_exception_depth(t0)
add t1, t1, 1
@@ -726,7 +726,7 @@ no_reschedule:
mv a0, sp
call z_riscv_fpu_exit_exc
/* decrement arch_current_thread()->arch.exception_depth */
/* decrement _current->arch.exception_depth */
lr t0, ___cpu_t_current_OFFSET(s0)
lb t1, _thread_offset_to_exception_depth(t0)
add t1, t1, -1

6
arch/riscv/core/pmp.c

@@ -752,8 +752,8 @@ int arch_buffer_validate(const void *addr, size_t size, int write)
int ret = -1;
/* Check if this is on the stack */
if (IS_WITHIN(start, size, arch_current_thread()->stack_info.start,
arch_current_thread()->stack_info.size)) {
if (IS_WITHIN(start, size,
_current->stack_info.start, _current->stack_info.size)) {
return 0;
}
@@ -768,7 +768,7 @@ int arch_buffer_validate(const void *addr, size_t size, int write)
}
/* Look for a matching partition in our memory domain */
struct k_mem_domain *domain = arch_current_thread()->mem_domain_info.mem_domain;
struct k_mem_domain *domain = _current->mem_domain_info.mem_domain;
int p_idx, remaining_partitions;
k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);

13
arch/riscv/core/stacktrace.c

@@ -108,7 +108,7 @@ static void walk_stackframe(riscv_stacktrace_cb cb, void *cookie, const struct k
/* Unwind the provided exception stack frame */
fp = esf->s0;
ra = esf->mepc;
} else if ((csf == NULL) || (csf == &arch_current_thread()->callee_saved)) {
} else if ((csf == NULL) || (csf == &_current->callee_saved)) {
/* Unwind current thread (default case when nothing is provided ) */
fp = (uintptr_t)__builtin_frame_address(0);
ra = (uintptr_t)walk_stackframe;
@@ -181,7 +181,7 @@ static void walk_stackframe(riscv_stacktrace_cb cb, void *cookie, const struct k
/* Unwind the provided exception stack frame */
sp = z_riscv_get_sp_before_exc(esf);
ra = esf->mepc;
} else if ((csf == NULL) || (csf == &arch_current_thread()->callee_saved)) {
} else if ((csf == NULL) || (csf == &_current->callee_saved)) {
/* Unwind current thread (default case when nothing is provided ) */
sp = current_stack_pointer;
ra = (uintptr_t)walk_stackframe;
@@ -215,10 +215,8 @@ void arch_stack_walk(stack_trace_callback_fn callback_fn, void *cookie,
const struct k_thread *thread, const struct arch_esf *esf)
{
if (thread == NULL) {
/* In case `thread` is NULL, default that to `arch_current_thread()`
* and try to unwind
*/
thread = arch_current_thread();
/* In case `thread` is NULL, default that to `_current` and try to unwind */
thread = _current;
}
walk_stackframe((riscv_stacktrace_cb)callback_fn, cookie, thread, esf, in_stack_bound,
@@ -282,8 +280,7 @@ void z_riscv_unwind_stack(const struct arch_esf *esf, const _callee_saved_t *csf
int i = 0;
LOG_ERR("call trace:");
walk_stackframe(print_trace_address, &i, arch_current_thread(), esf, in_fatal_stack_bound,
csf);
walk_stackframe(print_trace_address, &i, _current, esf, in_fatal_stack_bound, csf);
LOG_ERR("");
}
#endif /* CONFIG_EXCEPTION_STACK_TRACE */

27
arch/riscv/core/thread.c

@@ -132,29 +132,28 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
/* Set up privileged stack */
#ifdef CONFIG_GEN_PRIV_STACKS
arch_current_thread()->arch.priv_stack_start =
(unsigned long)z_priv_stack_find(arch_current_thread()->stack_obj);
_current->arch.priv_stack_start =
(unsigned long)z_priv_stack_find(_current->stack_obj);
/* remove the stack guard from the main stack */
arch_current_thread()->stack_info.start -= K_THREAD_STACK_RESERVED;
arch_current_thread()->stack_info.size += K_THREAD_STACK_RESERVED;
_current->stack_info.start -= K_THREAD_STACK_RESERVED;
_current->stack_info.size += K_THREAD_STACK_RESERVED;
#else
arch_current_thread()->arch.priv_stack_start =
(unsigned long)arch_current_thread()->stack_obj;
_current->arch.priv_stack_start = (unsigned long)_current->stack_obj;
#endif /* CONFIG_GEN_PRIV_STACKS */
top_of_priv_stack = Z_STACK_PTR_ALIGN(arch_current_thread()->arch.priv_stack_start +
top_of_priv_stack = Z_STACK_PTR_ALIGN(_current->arch.priv_stack_start +
K_KERNEL_STACK_RESERVED +
CONFIG_PRIVILEGED_STACK_SIZE);
#ifdef CONFIG_INIT_STACKS
/* Initialize the privileged stack */
(void)memset((void *)arch_current_thread()->arch.priv_stack_start, 0xaa,
(void)memset((void *)_current->arch.priv_stack_start, 0xaa,
Z_STACK_PTR_ALIGN(K_KERNEL_STACK_RESERVED + CONFIG_PRIVILEGED_STACK_SIZE));
#endif /* CONFIG_INIT_STACKS */
top_of_user_stack = Z_STACK_PTR_ALIGN(
arch_current_thread()->stack_info.start +
arch_current_thread()->stack_info.size -
arch_current_thread()->stack_info.delta);
_current->stack_info.start +
_current->stack_info.size -
_current->stack_info.delta);
status = csr_read(mstatus);
@@ -170,12 +169,12 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
#ifdef CONFIG_PMP_STACK_GUARD
/* reconfigure as the kernel mode stack will be different */
z_riscv_pmp_stackguard_prepare(arch_current_thread());
z_riscv_pmp_stackguard_prepare(_current);
#endif
/* Set up Physical Memory Protection */
z_riscv_pmp_usermode_prepare(arch_current_thread());
z_riscv_pmp_usermode_enable(arch_current_thread());
z_riscv_pmp_usermode_prepare(_current);
z_riscv_pmp_usermode_enable(_current);
/* preserve stack pointer for next exception entry */
arch_curr_cpu()->arch.user_exc_sp = top_of_priv_stack;

2
arch/sparc/core/thread.c

@@ -61,7 +61,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
void *z_arch_get_next_switch_handle(struct k_thread **old_thread)
{
*old_thread = arch_current_thread();
*old_thread = _current;
return z_get_next_switch_handle(*old_thread);
}

18
arch/x86/core/fatal.c

@@ -49,7 +49,7 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs)
{
uintptr_t start, end;
if (arch_current_thread() == NULL || arch_is_in_isr()) {
if (_current == NULL || arch_is_in_isr()) {
/* We were servicing an interrupt or in early boot environment
* and are supposed to be on the interrupt stack */
int cpu_id;
@@ -64,7 +64,7 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs)
end = start + CONFIG_ISR_STACK_SIZE;
#ifdef CONFIG_USERSPACE
} else if ((cs & 0x3U) == 0U &&
(arch_current_thread()->base.user_options & K_USER) != 0) {
(_current->base.user_options & K_USER) != 0) {
/* The low two bits of the CS register is the privilege
* level. It will be 0 in supervisor mode and 3 in user mode
* corresponding to ring 0 / ring 3.
@@ -72,14 +72,14 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs)
* If we get here, we must have been doing a syscall, check
* privilege elevation stack bounds
*/
start = arch_current_thread()->stack_info.start - CONFIG_PRIVILEGED_STACK_SIZE;
end = arch_current_thread()->stack_info.start;
start = _current->stack_info.start - CONFIG_PRIVILEGED_STACK_SIZE;
end = _current->stack_info.start;
#endif /* CONFIG_USERSPACE */
} else {
/* Normal thread operation, check its stack buffer */
start = arch_current_thread()->stack_info.start;
end = Z_STACK_PTR_ALIGN(arch_current_thread()->stack_info.start +
arch_current_thread()->stack_info.size);
start = _current->stack_info.start;
end = Z_STACK_PTR_ALIGN(_current->stack_info.start +
_current->stack_info.size);
}
return (addr <= start) || (addr + size > end);
@@ -97,7 +97,7 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs)
__pinned_func
bool z_x86_check_guard_page(uintptr_t addr)
{
struct k_thread *thread = arch_current_thread();
struct k_thread *thread = _current;
uintptr_t start, end;
/* Front guard size - before thread stack area */
@@ -233,7 +233,7 @@ static inline uintptr_t get_cr3(const struct arch_esf *esf)
* switch when we took the exception via z_x86_trampoline_to_kernel
*/
if ((esf->cs & 0x3) != 0) {
return arch_current_thread()->arch.ptables;
return _current->arch.ptables;
}
#else
ARG_UNUSED(esf);

8
arch/x86/core/ia32/float.c

@@ -207,7 +207,7 @@ void z_float_enable(struct k_thread *thread, unsigned int options)
/* Associate the new FP context with the specified thread */
if (thread == arch_current_thread()) {
if (thread == _current) {
/*
* When enabling FP support for the current thread, just claim
* ownership of the FPU and leave CR0[TS] unset.
@@ -222,7 +222,7 @@ void z_float_enable(struct k_thread *thread, unsigned int options)
* of the FPU to them (unless we need it ourselves).
*/
if ((arch_current_thread()->base.user_options & _FP_USER_MASK) == 0) {
if ((_current->base.user_options & _FP_USER_MASK) == 0) {
/*
* We are not FP-capable, so mark FPU as owned by the
* thread we've just enabled FP support for, then
@@ -278,7 +278,7 @@ int z_float_disable(struct k_thread *thread)
thread->base.user_options &= ~_FP_USER_MASK;
if (thread == arch_current_thread()) {
if (thread == _current) {
z_FpAccessDisable();
_kernel.current_fp = (struct k_thread *)0;
} else {
@@ -314,7 +314,7 @@ void _FpNotAvailableExcHandler(struct arch_esf *pEsf)
/* Enable highest level of FP capability configured into the kernel */
k_float_enable(arch_current_thread(), _FP_USER_MASK);
k_float_enable(_current, _FP_USER_MASK);
}
_EXCEPTION_CONNECT_NOCODE(_FpNotAvailableExcHandler,
IV_DEVICE_NOT_AVAILABLE, 0);

14
arch/x86/core/userspace.c

@@ -132,9 +132,9 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
/* Transition will reset stack pointer to initial, discarding
* any old context since this is a one-way operation
*/
stack_end = Z_STACK_PTR_ALIGN(arch_current_thread()->stack_info.start +
arch_current_thread()->stack_info.size -
arch_current_thread()->stack_info.delta);
stack_end = Z_STACK_PTR_ALIGN(_current->stack_info.start +
_current->stack_info.size -
_current->stack_info.delta);
#ifdef CONFIG_X86_64
/* x86_64 SysV ABI requires 16 byte stack alignment, which
@@ -156,15 +156,15 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
* Note that this also needs to page in the reserved
* portion of the stack (which is usually the page just
* before the beginning of stack in
* arch_current_thread()->stack_info.start.
* _current->stack_info.start.
*/
uintptr_t stack_start;
size_t stack_size;
uintptr_t stack_aligned_start;
size_t stack_aligned_size;
stack_start = POINTER_TO_UINT(arch_current_thread()->stack_obj);
stack_size = K_THREAD_STACK_LEN(arch_current_thread()->stack_info.size);
stack_start = POINTER_TO_UINT(_current->stack_obj);
stack_size = K_THREAD_STACK_LEN(_current->stack_info.size);
#if defined(CONFIG_X86_STACK_PROTECTION)
/* With hardware stack protection, the first page of stack
@@ -182,7 +182,7 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
#endif
z_x86_userspace_enter(user_entry, p1, p2, p3, stack_end,
arch_current_thread()->stack_info.start);
_current->stack_info.start);
CODE_UNREACHABLE;
}

15
arch/x86/core/x86_mmu.c

@@ -421,7 +421,7 @@ void z_x86_tlb_ipi(const void *arg)
/* We might have been moved to another memory domain, so always invoke
* z_x86_thread_page_tables_get() instead of using current CR3 value.
*/
ptables_phys = k_mem_phys_addr(z_x86_thread_page_tables_get(arch_current_thread()));
ptables_phys = k_mem_phys_addr(z_x86_thread_page_tables_get(_current));
#endif
/*
* In the future, we can consider making this smarter, such as
@@ -1440,7 +1440,7 @@ static inline void bcb_fence(void)
__pinned_func
int arch_buffer_validate(const void *addr, size_t size, int write)
{
pentry_t *ptables = z_x86_thread_page_tables_get(arch_current_thread());
pentry_t *ptables = z_x86_thread_page_tables_get(_current);
uint8_t *virt;
size_t aligned_size;
int ret = 0;
@@ -1958,7 +1958,7 @@ int arch_mem_domain_thread_add(struct k_thread *thread)
* IPI takes care of this if the thread is currently running on some
* other CPU.
*/
if (thread == arch_current_thread() && thread->arch.ptables != z_x86_cr3_get()) {
if (thread == _current && thread->arch.ptables != z_x86_cr3_get()) {
z_x86_cr3_set(thread->arch.ptables);
}
#endif /* CONFIG_X86_KPTI */
@@ -1980,9 +1980,8 @@ void z_x86_current_stack_perms(void)
/* Clear any previous context in the stack buffer to prevent
* unintentional data leakage.
*/
(void)memset((void *)arch_current_thread()->stack_info.start, 0xAA,
arch_current_thread()->stack_info.size -
arch_current_thread()->stack_info.delta);
(void)memset((void *)_current->stack_info.start, 0xAA,
_current->stack_info.size - _current->stack_info.delta);
/* Only now is it safe to grant access to the stack buffer since any
* previous context has been erased.
@@ -1992,13 +1991,13 @@ void z_x86_current_stack_perms(void)
* This will grant stack and memory domain access if it wasn't set
* already (in which case this returns very quickly).
*/
z_x86_swap_update_common_page_table(arch_current_thread());
z_x86_swap_update_common_page_table(_current);
#else
/* Memory domain access is already programmed into the page tables.
* Need to enable access to this new user thread's stack buffer in
* its domain-specific page tables.
*/
set_stack_perms(arch_current_thread(), z_x86_thread_page_tables_get(arch_current_thread()));
set_stack_perms(_current, z_x86_thread_page_tables_get(_current));
#endif
}
#endif /* CONFIG_USERSPACE */

2
arch/xtensa/core/fatal.c

@@ -140,7 +140,7 @@ FUNC_NORETURN void arch_syscall_oops(void *ssf)
#ifdef CONFIG_USERSPACE
void z_impl_xtensa_user_fault(unsigned int reason)
{
if ((arch_current_thread()->base.user_options & K_USER) != 0) {
if ((_current->base.user_options & K_USER) != 0) {
if ((reason != K_ERR_KERNEL_OOPS) &&
(reason != K_ERR_STACK_CHK_FAIL)) {
reason = K_ERR_KERNEL_OOPS;

2
arch/xtensa/core/ptables.c

@@ -1086,7 +1086,7 @@ static int mem_buffer_validate(const void *addr, size_t size, int write, int rin
int ret = 0;
uint8_t *virt;
size_t aligned_size;
const struct k_thread *thread = arch_current_thread();
const struct k_thread *thread = _current;
uint32_t *ptables = thread_page_tables_get(thread);
/* addr/size arbitrary, fix this up into an aligned region */

2
arch/xtensa/core/thread.c

@@ -156,7 +156,7 @@ int arch_float_enable(struct k_thread *thread, unsigned int options)
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
void *p1, void *p2, void *p3)
{
struct k_thread *current = arch_current_thread();
struct k_thread *current = _current;
size_t stack_end;
/* Transition will reset stack pointer to initial, discarding

2
arch/xtensa/core/vector_handlers.c

@@ -34,7 +34,7 @@ extern char xtensa_arch_kernel_oops_epc[];
bool xtensa_is_outside_stack_bounds(uintptr_t addr, size_t sz, uint32_t ps)
{
uintptr_t start, end;
struct k_thread *thread = arch_current_thread();
struct k_thread *thread = _current;
bool was_in_isr, invalid;
/* Without userspace, there is no privileged stack so the thread stack

2
boards/native/native_posix/irq_handler.c

@@ -105,7 +105,7 @@ void posix_irq_handler(void)
*/
if (may_swap
&& (hw_irq_ctrl_get_cur_prio() == 256)
&& (_kernel.ready_q.cache) && (_kernel.ready_q.cache != arch_current_thread())) {
&& (_kernel.ready_q.cache) && (_kernel.ready_q.cache != _current)) {
(void)z_swap_irqlock(irq_lock);
}

2
boards/native/native_sim/irq_handler.c

@@ -113,7 +113,7 @@ void posix_irq_handler(void)
*/
if (may_swap
&& (hw_irq_ctrl_get_cur_prio() == 256)
&& (_kernel.ready_q.cache) && (_kernel.ready_q.cache != arch_current_thread())) {
&& (_kernel.ready_q.cache) && (_kernel.ready_q.cache != _current)) {
(void)z_swap_irqlock(irq_lock);
}

2
boards/native/nrf_bsim/irq_handler.c

@@ -135,7 +135,7 @@ void posix_irq_handler(void)
if (may_swap
&& (hw_irq_ctrl_get_cur_prio(cpu_n) == 256)
&& (CPU_will_be_awaken_from_WFE == false)
&& (_kernel.ready_q.cache) && (_kernel.ready_q.cache != arch_current_thread())) {
&& (_kernel.ready_q.cache) && (_kernel.ready_q.cache != _current)) {
z_swap_irqlock(irq_lock);
}

2
doc/kernel/services/smp/smp.rst

@@ -276,7 +276,7 @@ Per-CPU data
============
Many elements of the core kernel data need to be implemented for each
CPU in SMP mode. For example, the ``arch_current_thread()`` thread pointer obviously
CPU in SMP mode. For example, the ``_current`` thread pointer obviously
needs to reflect what is running locally, there are many threads
running concurrently. Likewise a kernel-provided interrupt stack
needs to be created and assigned for each physical CPU, as does the

4
doc/releases/migration-guide-4.1.rst

@@ -409,10 +409,6 @@ Stream Flash
Architectures
*************
* Common
* ``_current`` is deprecated, used :c:func:`arch_current_thread` instead.
* native/POSIX
* :kconfig:option:`CONFIG_NATIVE_APPLICATION` has been deprecated. Out-of-tree boards using this

4
doc/releases/release-notes-4.1.rst

@@ -59,9 +59,9 @@ Architectures
* Common
* Introduced :kconfig:option:`CONFIG_ARCH_HAS_CUSTOM_CURRENT_IMPL`, which can be selected when
an architecture implemented and enabled its own :c:func:`arch_current_thread` and
an architecture implements :c:func:`arch_current_thread` and
:c:func:`arch_current_thread_set` functions for faster retrieval of the current CPU's thread
pointer. When enabled, ``_current`` variable will be routed to the
pointer. When enabled, the ``_current`` symbol will be routed to
:c:func:`arch_current_thread` (:github:`80716`).
* ARC

4
drivers/wifi/eswifi/eswifi.h

@@ -92,9 +92,9 @@ static inline int eswifi_request(struct eswifi_dev *eswifi, char *cmd,
static inline void eswifi_lock(struct eswifi_dev *eswifi)
{
/* Nested locking */
if (atomic_get(&eswifi->mutex_owner) != (atomic_t)(uintptr_t)arch_current_thread()) {
if (atomic_get(&eswifi->mutex_owner) != (atomic_t)(uintptr_t)_current) {
k_mutex_lock(&eswifi->mutex, K_FOREVER);
atomic_set(&eswifi->mutex_owner, (atomic_t)(uintptr_t)arch_current_thread());
atomic_set(&eswifi->mutex_owner, (atomic_t)(uintptr_t)_current);
eswifi->mutex_depth = 1;
} else {
eswifi->mutex_depth++;

2
include/zephyr/arch/arch_interface.h

@@ -1289,7 +1289,7 @@ typedef bool (*stack_trace_callback_fn)(void *cookie, unsigned long addr);
* ============ ======= ============================================
* thread esf
* ============ ======= ============================================
* thread NULL Stack trace from thread (can be arch_current_thread())
* thread NULL Stack trace from thread (can be _current)
* thread esf Stack trace starting on esf
* ============ ======= ============================================
*/

2
include/zephyr/arch/common/arch_inlines.h

@@ -19,7 +19,7 @@
static ALWAYS_INLINE struct k_thread *arch_current_thread(void)
{
#ifdef CONFIG_SMP
/* In SMP, arch_current_thread() is a field read from _current_cpu, which
/* In SMP, _current is a field read from _current_cpu, which
* can race with preemption before it is read. We must lock
* local interrupts when reading it.
*/
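Spelled out, the interrupt-locked read that the comment above describes looks roughly like this. The helper is illustrative and assumes the generic _current_cpu accessor; it is not the actual header contents.

static struct k_thread *smp_safe_current(void)
{
	unsigned int key = arch_irq_lock(); /* no CPU migration mid-read */
	struct k_thread *thread = _current_cpu->current;

	arch_irq_unlock(key);
	return thread;
}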

2
include/zephyr/arch/x86/ia32/arch.h

@@ -305,7 +305,7 @@ static inline void arch_isr_direct_footer(int swap)
* 3) Next thread to run in the ready queue is not this thread
*/
if (swap != 0 && _kernel.cpus[0].nested == 0 &&
_kernel.ready_q.cache != arch_current_thread()) {
_kernel.ready_q.cache != _current) {
unsigned int flags;
/* Fetch EFLAGS argument to z_swap() */

4
include/zephyr/internal/syscall_handler.h

@@ -62,7 +62,7 @@ static inline bool k_is_in_user_syscall(void)
* calls from supervisor mode bypass everything directly to
* the implementation function.
*/
return !k_is_in_isr() && (arch_current_thread()->syscall_frame != NULL);
return !k_is_in_isr() && (_current->syscall_frame != NULL);
}
/**
@@ -350,7 +350,7 @@ int k_usermode_string_copy(char *dst, const char *src, size_t maxlen);
#define K_OOPS(expr) \
do { \
if (expr) { \
arch_syscall_oops(arch_current_thread()->syscall_frame); \
arch_syscall_oops(_current->syscall_frame); \
} \
} while (false)
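A hedged illustration of how K_OOPS() is typically used by a verification handler: a failed check never returns, it aborts the calling user thread through the syscall frame saved in _current. The syscall name "my_read" and z_impl_my_read() are hypothetical.

#include <zephyr/internal/syscall_handler.h>

extern int z_impl_my_read(void *buf, size_t len);

static inline int z_vrfy_my_read(void *buf, size_t len)
{
	/* Oops the caller if the buffer is not writable by that thread */
	K_OOPS(K_SYSCALL_MEMORY_WRITE(buf, len));
	return z_impl_my_read(buf, len);
}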

8
include/zephyr/kernel_structs.h

@@ -174,7 +174,7 @@ struct _cpu {
#endif
#ifdef CONFIG_SMP
/* True when arch_current_thread() is allowed to context switch */
/* True when _current is allowed to context switch */
uint8_t swap_ok;
#endif
@@ -263,12 +263,12 @@ bool z_smp_cpu_mobile(void);
#define _current_cpu ({ __ASSERT_NO_MSG(!z_smp_cpu_mobile()); \
arch_curr_cpu(); })
#define _current arch_current_thread()
#else
#define _current_cpu (&_kernel.cpus[0])
#endif /* CONFIG_SMP */
#define _current arch_current_thread() __DEPRECATED_MACRO
#define _current _kernel.cpus[0].current
#endif
/* kernel wait queue record */
#ifdef CONFIG_WAITQ_SCALABLE
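Net effect of the hunk above: in every configuration `_current` is once again the primary way to name the running thread. A trivial usage sketch follows; the helper is illustrative, not in the tree.

#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

void print_current_thread(void)
{
	printk("running thread %p, prio %d\n", _current, _current->base.prio);
}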

2
kernel/Kconfig

@@ -211,7 +211,7 @@ config THREAD_ABORT_NEED_CLEANUP
bool
help
This option enables the bits to clean up the current thread if
k_thread_abort(arch_current_thread()) is called, as the cleanup cannot be
k_thread_abort(_current) is called, as the cleanup cannot be
running in the current thread stack.
config THREAD_CUSTOM_DATA

4
kernel/errno.c

@@ -36,7 +36,7 @@ int *z_impl_z_errno(void)
/* Initialized to the lowest address in the stack so the thread can
* directly read/write it
*/
return &arch_current_thread()->userspace_local_data->errno_var;
return &_current->userspace_local_data->errno_var;
}
static inline int *z_vrfy_z_errno(void)
@@ -48,7 +48,7 @@ static inline int *z_vrfy_z_errno(void)
#else
int *z_impl_z_errno(void)
{
return &arch_current_thread()->errno_var;
return &_current->errno_var;
}
#endif /* CONFIG_USERSPACE */

2
kernel/fatal.c

@@ -90,7 +90,7 @@ void z_fatal_error(unsigned int reason, const struct arch_esf *esf)
*/
unsigned int key = arch_irq_lock();
struct k_thread *thread = IS_ENABLED(CONFIG_MULTITHREADING) ?
arch_current_thread() : NULL;
_current : NULL;
/* twister looks for the "ZEPHYR FATAL ERROR" string, don't
* change it without also updating twister

4
kernel/idle.c

@@ -24,7 +24,7 @@ void idle(void *unused1, void *unused2, void *unused3)
ARG_UNUSED(unused2);
ARG_UNUSED(unused3);
__ASSERT_NO_MSG(arch_current_thread()->base.prio >= 0);
__ASSERT_NO_MSG(_current->base.prio >= 0);
while (true) {
/* SMP systems without a working IPI can't actual
@@ -85,7 +85,7 @@ void idle(void *unused1, void *unused2, void *unused3)
* explicitly yield in the idle thread otherwise
* nothing else will run once it starts.
*/
if (_kernel.ready_q.cache != arch_current_thread()) {
if (_kernel.ready_q.cache != _current) {
z_swap_unlocked();
}
# endif /* !defined(CONFIG_USE_SWITCH) || defined(CONFIG_SPARC) */

2
kernel/include/kernel_internal.h

@@ -286,7 +286,7 @@ int z_kernel_stats_query(struct k_obj_core *obj_core, void *stats);
* where these steps require that the thread is no longer running.
* If the target thread is not the current running thread, the cleanup
* steps will be performed immediately. However, if the target thread is
* the current running thread (e.g. k_thread_abort(arch_current_thread())), it defers
* the current running thread (e.g. k_thread_abort(_current)), it defers
* the cleanup steps to later when the work will be finished in another
* context.
*

4
kernel/include/ksched.h

@@ -141,9 +141,9 @@ static inline bool _is_valid_prio(int prio, void *entry_point)
static inline void z_sched_lock(void)
{
__ASSERT(!arch_is_in_isr(), "");
__ASSERT(arch_current_thread()->base.sched_locked != 1U, "");
__ASSERT(_current->base.sched_locked != 1U, "");
--arch_current_thread()->base.sched_locked;
--_current->base.sched_locked;
compiler_barrier();
}

8
kernel/include/kswap.h

@@ -96,12 +96,12 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
*/
# ifndef CONFIG_ARM64
__ASSERT(arch_irq_unlocked(key) ||
arch_current_thread()->base.thread_state & (_THREAD_DUMMY | _THREAD_DEAD),
_current->base.thread_state & (_THREAD_DUMMY | _THREAD_DEAD),
"Context switching while holding lock!");
# endif /* CONFIG_ARM64 */
#endif /* CONFIG_SPIN_VALIDATE */
old_thread = arch_current_thread();
old_thread = _current;
z_check_stack_sentinel();
@@ -146,7 +146,7 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
arch_cohere_stacks(old_thread, NULL, new_thread);
#ifdef CONFIG_SMP
/* Now add arch_current_thread() back to the run queue, once we are
/* Now add _current back to the run queue, once we are
* guaranteed to reach the context switch in finite
* time. See z_sched_switch_spin().
*/
@@ -174,7 +174,7 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
irq_unlock(key);
}
return arch_current_thread()->swap_retval;
return _current->swap_retval;
}
static inline int z_swap_irqlock(unsigned int key)

6
kernel/include/kthread.h

@@ -211,17 +211,17 @@ static ALWAYS_INLINE bool should_preempt(struct k_thread *thread,
return true;
}
__ASSERT(arch_current_thread() != NULL, "");
__ASSERT(_current != NULL, "");
/* Or if we're pended/suspended/dummy (duh) */
if (z_is_thread_prevented_from_running(arch_current_thread())) {
if (z_is_thread_prevented_from_running(_current)) {
return true;
}
/* Otherwise we have to be running a preemptible thread or
* switching to a metairq
*/
if (thread_is_preemptible(arch_current_thread()) || thread_is_metairq(thread)) {
if (thread_is_preemptible(_current) || thread_is_metairq(thread)) {
return true;
}

20
kernel/include/priority_q.h

@@ -131,9 +131,9 @@ static ALWAYS_INLINE void z_priq_dumb_yield(sys_dlist_t *pq)
#ifndef CONFIG_SMP
sys_dnode_t *n;
n = sys_dlist_peek_next_no_check(pq, &arch_current_thread()->base.qnode_dlist);
n = sys_dlist_peek_next_no_check(pq, &_current->base.qnode_dlist);
sys_dlist_dequeue(&arch_current_thread()->base.qnode_dlist);
sys_dlist_dequeue(&_current->base.qnode_dlist);
struct k_thread *t;
@@ -145,15 +145,15 @@ static ALWAYS_INLINE void z_priq_dumb_yield(sys_dlist_t *pq)
while (n != NULL) {
t = CONTAINER_OF(n, struct k_thread, base.qnode_dlist);
if (z_sched_prio_cmp(arch_current_thread(), t) > 0) {
if (z_sched_prio_cmp(_current, t) > 0) {
sys_dlist_insert(&t->base.qnode_dlist,
&arch_current_thread()->base.qnode_dlist);
&_current->base.qnode_dlist);
return;
}
n = sys_dlist_peek_next_no_check(pq, n);
}
sys_dlist_append(pq, &arch_current_thread()->base.qnode_dlist);
sys_dlist_append(pq, &_current->base.qnode_dlist);
#endif
}
@@ -229,8 +229,8 @@ static ALWAYS_INLINE void z_priq_rb_remove(struct _priq_rb *pq, struct k_thread
static ALWAYS_INLINE void z_priq_rb_yield(struct _priq_rb *pq)
{
#ifndef CONFIG_SMP
z_priq_rb_remove(pq, arch_current_thread());
z_priq_rb_add(pq, arch_current_thread());
z_priq_rb_remove(pq, _current);
z_priq_rb_add(pq, _current);
#endif
}
@@ -319,11 +319,11 @@ static ALWAYS_INLINE void z_priq_mq_remove(struct _priq_mq *pq,
static ALWAYS_INLINE void z_priq_mq_yield(struct _priq_mq *pq)
{
#ifndef CONFIG_SMP
struct prio_info pos = get_prio_info(arch_current_thread()->base.prio);
struct prio_info pos = get_prio_info(_current->base.prio);
sys_dlist_dequeue(&arch_current_thread()->base.qnode_dlist);
sys_dlist_dequeue(&_current->base.qnode_dlist);
sys_dlist_append(&pq->queues[pos.offset_prio],
&arch_current_thread()->base.qnode_dlist);
&_current->base.qnode_dlist);
#endif
}

2
kernel/ipi.c

@@ -101,7 +101,7 @@ void z_sched_ipi(void)
#endif /* CONFIG_TRACE_SCHED_IPI */
#ifdef CONFIG_TIMESLICING
if (thread_is_sliceable(arch_current_thread())) {
if (thread_is_sliceable(_current)) {
z_time_slice();
}
#endif /* CONFIG_TIMESLICING */

10
kernel/mailbox.c

@@ -216,7 +216,7 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
k_spinlock_key_t key;
/* save sender id so it can be used during message matching */
tx_msg->rx_source_thread = arch_current_thread();
tx_msg->rx_source_thread = _current;
/* finish readying sending thread (actual or dummy) for send */
sending_thread = tx_msg->_syncing_thread;
@@ -296,7 +296,7 @@ int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
k_timeout_t timeout)
{
/* configure things for a synchronous send, then send the message */
tx_msg->_syncing_thread = arch_current_thread();
tx_msg->_syncing_thread = _current;
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mbox, put, mbox, timeout);
@@ -321,7 +321,7 @@ void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
*/
mbox_async_alloc(&async);
async->thread.prio = arch_current_thread()->base.prio;
async->thread.prio = _current->base.prio;
async->tx_msg = *tx_msg;
async->tx_msg._syncing_thread = (struct k_thread *)&async->thread;
@@ -388,7 +388,7 @@ int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
int result;
/* save receiver id so it can be used during message matching */
rx_msg->tx_target_thread = arch_current_thread();
rx_msg->tx_target_thread = _current;
/* search mailbox's tx queue for a compatible sender */
key = k_spin_lock(&mbox->lock);
@@ -425,7 +425,7 @@ int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mbox, get, mbox, timeout);
/* wait until a matching sender appears or a timeout occurs */
arch_current_thread()->base.swap_data = rx_msg;
_current->base.swap_data = rx_msg;
result = z_pend_curr(&mbox->lock, key, &mbox->rx_msg_queue, timeout);
/* consume message data immediately, if needed */

2
kernel/mem_domain.c

@@ -299,7 +299,7 @@ void z_mem_domain_init_thread(struct k_thread *thread)
k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);
/* New threads inherit memory domain configuration from parent */
ret = add_thread_locked(arch_current_thread()->mem_domain_info.mem_domain, thread);
ret = add_thread_locked(_current->mem_domain_info.mem_domain, thread);
__ASSERT_NO_MSG(ret == 0);
ARG_UNUSED(ret);

2
kernel/mem_slab.c

@@ -252,7 +252,7 @@ int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
/* wait for a free block or timeout */
result = z_pend_curr(&slab->lock, key, &slab->wait_q, timeout);
if (result == 0) {
*mem = arch_current_thread()->base.swap_data;
*mem = _current->base.swap_data;
}
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mem_slab, alloc, slab, timeout, result);

2
kernel/mempool.c

@@ -165,7 +165,7 @@ void *z_thread_aligned_alloc(size_t align, size_t size)
if (k_is_in_isr()) {
heap = _SYSTEM_HEAP;
} else {
heap = arch_current_thread()->resource_pool;
heap = _current->resource_pool;
}
if (heap != NULL) {

2
kernel/mmu.c

@@ -1674,7 +1674,7 @@ static bool do_page_fault(void *addr, bool pin)
#endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */
key = k_spin_lock(&z_mm_lock);
faulting_thread = arch_current_thread();
faulting_thread = _current;
status = arch_page_location_get(addr, &page_in_location);
if (status == ARCH_PAGE_LOCATION_BAD) {

4
kernel/msg_q.c

@@ -169,7 +169,7 @@ int z_impl_k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout
SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_msgq, put, msgq, timeout);
/* wait for put message success, failure, or timeout */
arch_current_thread()->base.swap_data = (void *) data;
_current->base.swap_data = (void *) data;
result = z_pend_curr(&msgq->lock, key, &msgq->wait_q, timeout);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_msgq, put, msgq, timeout, result);
@@ -267,7 +267,7 @@ int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout)
SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_msgq, get, msgq, timeout);
/* wait for get message success or timeout */
arch_current_thread()->base.swap_data = data;
_current->base.swap_data = data;
result = z_pend_curr(&msgq->lock, key, &msgq->wait_q, timeout);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_msgq, get, msgq, timeout, result);

16
kernel/mutex.c

@@ -114,17 +114,17 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout)
key = k_spin_lock(&lock);
if (likely((mutex->lock_count == 0U) || (mutex->owner == arch_current_thread()))) {
if (likely((mutex->lock_count == 0U) || (mutex->owner == _current))) {
mutex->owner_orig_prio = (mutex->lock_count == 0U) ?
arch_current_thread()->base.prio :
_current->base.prio :
mutex->owner_orig_prio;
mutex->lock_count++;
mutex->owner = arch_current_thread();
mutex->owner = _current;
LOG_DBG("%p took mutex %p, count: %d, orig prio: %d",
arch_current_thread(), mutex, mutex->lock_count,
_current, mutex, mutex->lock_count,
mutex->owner_orig_prio);
k_spin_unlock(&lock, key);
@@ -144,7 +144,7 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout)
SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mutex, lock, mutex, timeout);
new_prio = new_prio_for_inheritance(arch_current_thread()->base.prio,
new_prio = new_prio_for_inheritance(_current->base.prio,
mutex->owner->base.prio);
LOG_DBG("adjusting prio up on mutex %p", mutex);
@@ -157,7 +157,7 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout)
LOG_DBG("on mutex %p got_mutex value: %d", mutex, got_mutex);
LOG_DBG("%p got mutex %p (y/n): %c", arch_current_thread(), mutex,
LOG_DBG("%p got mutex %p (y/n): %c", _current, mutex,
got_mutex ? 'y' : 'n');
if (got_mutex == 0) {
@@ -167,7 +167,7 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout)
/* timed out */
LOG_DBG("%p timeout on mutex %p", arch_current_thread(), mutex);
LOG_DBG("%p timeout on mutex %p", _current, mutex);
key = k_spin_lock(&lock);
@@ -224,7 +224,7 @@ int z_impl_k_mutex_unlock(struct k_mutex *mutex)
/*
* The current thread does not own the mutex.
*/
CHECKIF(mutex->owner != arch_current_thread()) {
CHECKIF(mutex->owner != _current) {
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, unlock, mutex, -EPERM);
return -EPERM;

12
kernel/pipes.c

@@ -443,11 +443,11 @@ int z_impl_k_pipe_put(struct k_pipe *pipe, const void *data,
* invoked from within an ISR as that is not safe to do.
*/
src_desc = k_is_in_isr() ? &isr_desc : &arch_current_thread()->pipe_desc;
src_desc = k_is_in_isr() ? &isr_desc : &_current->pipe_desc;
src_desc->buffer = (unsigned char *)data;
src_desc->bytes_to_xfer = bytes_to_write;
src_desc->thread = arch_current_thread();
src_desc->thread = _current;
sys_dlist_append(&src_list, &src_desc->node);
*bytes_written = pipe_write(pipe, &src_list,
@@ -488,7 +488,7 @@ int z_impl_k_pipe_put(struct k_pipe *pipe, const void *data,
SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_pipe, put, pipe, timeout);
arch_current_thread()->base.swap_data = src_desc;
_current->base.swap_data = src_desc;
z_sched_wait(&pipe->lock, key, &pipe->wait_q.writers, timeout, NULL);
@@ -581,11 +581,11 @@ static int pipe_get_internal(k_spinlock_key_t key, struct k_pipe *pipe,
* invoked from within an ISR as that is not safe to do.
*/
dest_desc = k_is_in_isr() ? &isr_desc : &arch_current_thread()->pipe_desc;
dest_desc = k_is_in_isr() ? &isr_desc : &_current->pipe_desc;
dest_desc->buffer = data;
dest_desc->bytes_to_xfer = bytes_to_read;
dest_desc->thread = arch_current_thread();
dest_desc->thread = _current;
src_desc = (struct _pipe_desc *)sys_dlist_get(&src_list);
while (src_desc != NULL) {
@@ -674,7 +674,7 @@ static int pipe_get_internal(k_spinlock_key_t key, struct k_pipe *pipe,
SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_pipe, get, pipe, timeout);
arch_current_thread()->base.swap_data = dest_desc;
_current->base.swap_data = dest_desc;
z_sched_wait(&pipe->lock, key, &pipe->wait_q.readers, timeout, NULL);

2
kernel/poll.c

@@ -290,7 +290,7 @@ int z_impl_k_poll(struct k_poll_event *events, int num_events,
{
int events_registered;
k_spinlock_key_t key;
struct z_poller *poller = &arch_current_thread()->poller;
struct z_poller *poller = &_current->poller;
poller->is_polling = true;
poller->mode = MODE_POLL;

4
kernel/queue.c

@@ -346,9 +346,9 @@ void *z_impl_k_queue_get(struct k_queue *queue, k_timeout_t timeout)
int ret = z_pend_curr(&queue->lock, key, &queue->wait_q, timeout);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, get, queue, timeout,
(ret != 0) ? NULL : arch_current_thread()->base.swap_data);
(ret != 0) ? NULL : _current->base.swap_data);
return (ret != 0) ? NULL : arch_current_thread()->base.swap_data;
return (ret != 0) ? NULL : _current->base.swap_data;
}
bool k_queue_remove(struct k_queue *queue, void *data)

138
kernel/sched.c

@@ -31,7 +31,7 @@ extern struct k_thread *pending_current;
struct k_spinlock _sched_spinlock;
/* Storage to "complete" the context switch from an invalid/incomplete thread
* context (ex: exiting an ISR that aborted arch_current_thread())
* context (ex: exiting an ISR that aborted _current)
*/
__incoherent struct k_thread _thread_dummy;
@@ -98,12 +98,12 @@ static ALWAYS_INLINE struct k_thread *runq_best(void)
return _priq_run_best(curr_cpu_runq());
}
/* arch_current_thread() is never in the run queue until context switch on
/* _current is never in the run queue until context switch on
* SMP configurations, see z_requeue_current()
*/
static inline bool should_queue_thread(struct k_thread *thread)
{
return !IS_ENABLED(CONFIG_SMP) || (thread != arch_current_thread());
return !IS_ENABLED(CONFIG_SMP) || (thread != _current);
}
static ALWAYS_INLINE void queue_thread(struct k_thread *thread)
@@ -113,7 +113,7 @@ static ALWAYS_INLINE void queue_thread(struct k_thread *thread)
runq_add(thread);
}
#ifdef CONFIG_SMP
if (thread == arch_current_thread()) {
if (thread == _current) {
/* add current to end of queue means "yield" */
_current_cpu->swap_ok = true;
}
@@ -167,8 +167,8 @@ static inline void clear_halting(struct k_thread *thread)
static ALWAYS_INLINE struct k_thread *next_up(void)
{
#ifdef CONFIG_SMP
if (is_halting(arch_current_thread())) {
halt_thread(arch_current_thread(), is_aborting(arch_current_thread()) ?
if (is_halting(_current)) {
halt_thread(_current, is_aborting(_current) ?
_THREAD_DEAD : _THREAD_SUSPENDED);
}
#endif /* CONFIG_SMP */
@@ -207,42 +207,42 @@ static ALWAYS_INLINE struct k_thread *next_up(void)
#else
/* Under SMP, the "cache" mechanism for selecting the next
* thread doesn't work, so we have more work to do to test
* arch_current_thread() against the best choice from the queue. Here, the
* _current against the best choice from the queue. Here, the
* thread selected above represents "the best thread that is
* not current".
*
* Subtle note on "queued": in SMP mode, arch_current_thread() does not
* Subtle note on "queued": in SMP mode, _current does not
* live in the queue, so this isn't exactly the same thing as
* "ready", it means "is arch_current_thread() already added back to the
* "ready", it means "is _current already added back to the
* queue such that we don't want to re-add it".
*/
bool queued = z_is_thread_queued(arch_current_thread());
bool active = !z_is_thread_prevented_from_running(arch_current_thread());
bool queued = z_is_thread_queued(_current);
bool active = !z_is_thread_prevented_from_running(_current);
if (thread == NULL) {
thread = _current_cpu->idle_thread;
}
if (active) {
int32_t cmp = z_sched_prio_cmp(arch_current_thread(), thread);
int32_t cmp = z_sched_prio_cmp(_current, thread);
/* Ties only switch if state says we yielded */
if ((cmp > 0) || ((cmp == 0) && !_current_cpu->swap_ok)) {
thread = arch_current_thread();
thread = _current;
}
if (!should_preempt(thread, _current_cpu->swap_ok)) {
thread = arch_current_thread();
thread = _current;
}
}
/* Put arch_current_thread() back into the queue */
if ((thread != arch_current_thread()) && active &&
!z_is_idle_thread_object(arch_current_thread()) && !queued) {
queue_thread(arch_current_thread());
/* Put _current back into the queue */
if ((thread != _current) && active &&
!z_is_idle_thread_object(_current) && !queued) {
queue_thread(_current);
}
/* Take the new arch_current_thread() out of the queue */
/* Take the new _current out of the queue */
if (z_is_thread_queued(thread)) {
dequeue_thread(thread);
}
@@ -258,7 +258,7 @@ void move_thread_to_end_of_prio_q(struct k_thread *thread)
dequeue_thread(thread);
}
queue_thread(thread);
update_cache(thread == arch_current_thread());
update_cache(thread == _current);
}
/* Track cooperative threads preempted by metairqs so we can return to
@@ -269,10 +269,10 @@ static void update_metairq_preempt(struct k_thread *thread)
{
#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && \
(CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
if (thread_is_metairq(thread) && !thread_is_metairq(arch_current_thread()) &&
!thread_is_preemptible(arch_current_thread())) {
if (thread_is_metairq(thread) && !thread_is_metairq(_current) &&
!thread_is_preemptible(_current)) {
/* Record new preemption */
_current_cpu->metairq_preempted = arch_current_thread();
_current_cpu->metairq_preempted = _current;
} else if (!thread_is_metairq(thread) && !z_is_idle_thread_object(thread)) {
/* Returning from existing preemption */
_current_cpu->metairq_preempted = NULL;
@@ -292,14 +292,14 @@ static ALWAYS_INLINE void update_cache(int preempt_ok)
if (should_preempt(thread, preempt_ok)) {
#ifdef CONFIG_TIMESLICING
if (thread != arch_current_thread()) {
if (thread != _current) {
z_reset_time_slice(thread);
}
#endif /* CONFIG_TIMESLICING */
update_metairq_preempt(thread);
_kernel.ready_q.cache = thread;
} else {
_kernel.ready_q.cache = arch_current_thread();
_kernel.ready_q.cache = _current;
}
#else
@@ -378,9 +378,9 @@ void z_move_thread_to_end_of_prio_q(struct k_thread *thread)
*/
static void thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key)
{
if (is_halting(arch_current_thread())) {
halt_thread(arch_current_thread(),
is_aborting(arch_current_thread()) ? _THREAD_DEAD : _THREAD_SUSPENDED);
if (is_halting(_current)) {
halt_thread(_current,
is_aborting(_current) ? _THREAD_DEAD : _THREAD_SUSPENDED);
}
k_spin_unlock(&_sched_spinlock, key);
while (is_halting(thread)) {
@@ -394,7 +394,7 @@ static void thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key)
/* Shared handler for k_thread_{suspend,abort}(). Called with the
* scheduler lock held and the key passed (which it may
* release/reacquire!) which will be released before a possible return
* (aborting arch_current_thread() will not return, obviously), which may be after
* (aborting _current will not return, obviously), which may be after
* a context switch.
*/
static ALWAYS_INLINE void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key,
@@ -427,14 +427,14 @@ static ALWAYS_INLINE void z_thread_halt(struct k_thread *thread, k_spinlock_key_
if (arch_is_in_isr()) {
thread_halt_spin(thread, key);
} else {
add_to_waitq_locked(arch_current_thread(), wq);
add_to_waitq_locked(_current, wq);
z_swap(&_sched_spinlock, key);
}
} else {
halt_thread(thread, terminate ? _THREAD_DEAD : _THREAD_SUSPENDED);
if ((thread == arch_current_thread()) && !arch_is_in_isr()) {
if ((thread == _current) && !arch_is_in_isr()) {
z_swap(&_sched_spinlock, key);
__ASSERT(!terminate, "aborted arch_current_thread() back from dead");
__ASSERT(!terminate, "aborted _current back from dead");
} else {
k_spin_unlock(&_sched_spinlock, key);
}
@@ -453,7 +453,7 @@ void z_impl_k_thread_suspend(k_tid_t thread)
/* Special case "suspend the current thread" as it doesn't
* need the async complexity below.
*/
if (thread == arch_current_thread() && !arch_is_in_isr() && !IS_ENABLED(CONFIG_SMP)) {
if (thread == _current && !arch_is_in_isr() && !IS_ENABLED(CONFIG_SMP)) {
k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
z_mark_thread_as_suspended(thread);
@@ -521,7 +521,7 @@ static void unready_thread(struct k_thread *thread)
if (z_is_thread_queued(thread)) {
dequeue_thread(thread);
}
update_cache(thread == arch_current_thread());
update_cache(thread == _current);
}
/* _sched_spinlock must be held */
@@ -558,7 +558,7 @@ static void pend_locked(struct k_thread *thread, _wait_q_t *wait_q,
void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
k_timeout_t timeout)
{
__ASSERT_NO_MSG(thread == arch_current_thread() || is_thread_dummy(thread));
__ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread));
K_SPINLOCK(&_sched_spinlock) {
pend_locked(thread, wait_q, timeout);
}
@@ -616,7 +616,7 @@ int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
_wait_q_t *wait_q, k_timeout_t timeout)
{
#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
pending_current = arch_current_thread();
pending_current = _current;
#endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
__ASSERT_NO_MSG(sizeof(_sched_spinlock) == 0 || lock != &_sched_spinlock);
@@ -629,7 +629,7 @@ int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
* held.
*/
(void) k_spin_lock(&_sched_spinlock);
pend_locked(arch_current_thread(), wait_q, timeout);
pend_locked(_current, wait_q, timeout);
k_spin_release(lock);
return z_swap(&_sched_spinlock, key);
}
@@ -727,7 +727,7 @@ static inline bool need_swap(void)
/* Check if the next ready thread is the same as the current thread */
new_thread = _kernel.ready_q.cache;
return new_thread != arch_current_thread();
return new_thread != _current;
#endif /* CONFIG_SMP */
}
@@ -763,15 +763,15 @@ void k_sched_lock(void)
void k_sched_unlock(void)
{
K_SPINLOCK(&_sched_spinlock) {
__ASSERT(arch_current_thread()->base.sched_locked != 0U, "");
__ASSERT(_current->base.sched_locked != 0U, "");
__ASSERT(!arch_is_in_isr(), "");
++arch_current_thread()->base.sched_locked;
++_current->base.sched_locked;
update_cache(0);
}
LOG_DBG("scheduler unlocked (%p:%d)",
arch_current_thread(), arch_current_thread()->base.sched_locked);
_current, _current->base.sched_locked);
SYS_PORT_TRACING_FUNC(k_thread, sched_unlock);
@@ -783,10 +783,10 @@ struct k_thread *z_swap_next_thread(void)
#ifdef CONFIG_SMP
struct k_thread *ret = next_up();
if (ret == arch_current_thread()) {
if (ret == _current) {
/* When not swapping, have to signal IPIs here. In
* the context switch case it must happen later, after
* arch_current_thread() gets requeued.
* _current gets requeued.
*/
signal_pending_ipi();
}
@@ -827,7 +827,7 @@ static inline void set_current(struct k_thread *new_thread)
* function.
*
* @warning
* The arch_current_thread() value may have changed after this call and not refer
* The _current value may have changed after this call and not refer
* to the interrupted thread anymore. It might be necessary to make a local
* copy before calling this function.
*
@@ -843,7 +843,7 @@ void *z_get_next_switch_handle(void *interrupted)
void *ret = NULL;
K_SPINLOCK(&_sched_spinlock) {
struct k_thread *old_thread = arch_current_thread(), *new_thread;
struct k_thread *old_thread = _current, *new_thread;
if (IS_ENABLED(CONFIG_SMP)) {
old_thread->switch_handle = NULL;
@@ -869,7 +869,7 @@ void *z_get_next_switch_handle(void *interrupted)
#endif /* CONFIG_TIMESLICING */
#ifdef CONFIG_SPIN_VALIDATE
/* Changed arch_current_thread()! Update the spinlock
/* Changed _current! Update the spinlock
* bookkeeping so the validation doesn't get
* confused when the "wrong" thread tries to
* release the lock.
@@ -904,9 +904,9 @@ void *z_get_next_switch_handle(void *interrupted)
return ret;
#else
z_sched_usage_switch(_kernel.ready_q.cache);
arch_current_thread()->switch_handle = interrupted;
_current->switch_handle = interrupted;
set_current(_kernel.ready_q.cache);
return arch_current_thread()->switch_handle;
return _current->switch_handle;
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_USE_SWITCH */
@@ -952,7 +952,7 @@ void z_impl_k_thread_priority_set(k_tid_t thread, int prio)
bool need_sched = z_thread_prio_set((struct k_thread *)thread, prio);
if ((need_sched) && (IS_ENABLED(CONFIG_SMP) ||
(arch_current_thread()->base.sched_locked == 0U))) {
(_current->base.sched_locked == 0U))) {
z_reschedule_unlocked();
}
}
@@ -1036,7 +1036,7 @@ static inline void z_vrfy_k_reschedule(void)
bool k_can_yield(void)
{
return !(k_is_pre_kernel() || k_is_in_isr() ||
z_is_idle_thread_object(arch_current_thread()));
z_is_idle_thread_object(_current));
}
void z_impl_k_yield(void)
@@ -1048,7 +1048,7 @@ void z_impl_k_yield(void)
k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
#ifdef CONFIG_SMP
z_mark_thread_as_queued(arch_current_thread());
z_mark_thread_as_queued(_current);
#endif
runq_yield();
@@ -1070,7 +1070,7 @@ static int32_t z_tick_sleep(k_ticks_t ticks)
__ASSERT(!arch_is_in_isr(), "");
LOG_DBG("thread %p for %lu ticks", arch_current_thread(), (unsigned long)ticks);
LOG_DBG("thread %p for %lu ticks", _current, (unsigned long)ticks);
/* wait of 0 ms is treated as a 'yield' */
if (ticks == 0) {
@@ -1088,11 +1088,11 @@ static int32_t z_tick_sleep(k_ticks_t ticks)
k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
pending_current = arch_current_thread();
pending_current = _current;
#endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
unready_thread(arch_current_thread());
z_add_thread_timeout(arch_current_thread(), timeout);
z_mark_thread_as_sleeping(arch_current_thread());
unready_thread(_current);
z_add_thread_timeout(_current, timeout);
z_mark_thread_as_sleeping(_current);
(void)z_swap(&_sched_spinlock, key);
@@ -1195,7 +1195,7 @@ static inline void z_vrfy_k_wakeup(k_tid_t thread)
k_tid_t z_impl_k_sched_current_thread_query(void)
{
return arch_current_thread();
return _current;
}
#ifdef CONFIG_USERSPACE
@@ -1250,13 +1250,13 @@ static ALWAYS_INLINE void halt_thread(struct k_thread *thread, uint8_t new_state
z_abort_thread_timeout(thread);
unpend_all(&thread->join_queue);
/* Edge case: aborting arch_current_thread() from within an
/* Edge case: aborting _current from within an
* ISR that preempted it requires clearing the
* arch_current_thread() pointer so the upcoming context
* _current pointer so the upcoming context
* switch doesn't clobber the now-freed
* memory
*/
if (thread == arch_current_thread() && arch_is_in_isr()) {
if (thread == _current && arch_is_in_isr()) {
dummify = true;
}
}
@@ -1299,10 +1299,10 @@ static ALWAYS_INLINE void halt_thread(struct k_thread *thread, uint8_t new_state
k_thread_abort_cleanup(thread);
#endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */
/* Do this "set arch_current_thread() to dummy" step last so that
* subsystems above can rely on arch_current_thread() being
/* Do this "set _current to dummy" step last so that
* subsystems above can rely on _current being
* unchanged. Disabled for posix as that arch
* continues to use the arch_current_thread() pointer in its swap
* continues to use the _current pointer in its swap
* code. Note that we must leave a non-null switch
* handle for any threads spinning in join() (this can
* never be used, as our thread is flagged dead, but
@@ -1310,7 +1310,7 @@ static ALWAYS_INLINE void halt_thread(struct k_thread *thread, uint8_t new_state
*/
if (dummify && !IS_ENABLED(CONFIG_ARCH_POSIX)) {
#ifdef CONFIG_USE_SWITCH
arch_current_thread()->switch_handle = arch_current_thread();
_current->switch_handle = _current;
#endif
z_dummy_thread_init(&_thread_dummy);
@@ -1368,13 +1368,13 @@ int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout)
ret = 0;
} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
ret = -EBUSY;
} else if ((thread == arch_current_thread()) ||
(thread->base.pended_on == &arch_current_thread()->join_queue)) {
} else if ((thread == _current) ||
(thread->base.pended_on == &_current->join_queue)) {
ret = -EDEADLK;
} else {
__ASSERT(!arch_is_in_isr(), "cannot join in ISR");
add_to_waitq_locked(arch_current_thread(), &thread->join_queue);
add_thread_timeout(arch_current_thread(), timeout);
add_to_waitq_locked(_current, &thread->join_queue);
add_thread_timeout(_current, timeout);
SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_thread, join, thread, timeout);
ret = z_swap(&_sched_spinlock, key);
@@ -1473,7 +1473,7 @@ int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key,
int ret = z_pend_curr(lock, key, wait_q, timeout);
if (data != NULL) {
*data = arch_current_thread()->base.swap_data;
*data = _current->base.swap_data;
}
return ret;
}

10
kernel/smp.c

@@ -58,23 +58,23 @@ unsigned int z_smp_global_lock(void)
{
unsigned int key = arch_irq_lock();
if (!arch_current_thread()->base.global_lock_count) {
if (!_current->base.global_lock_count) {
while (!atomic_cas(&global_lock, 0, 1)) {
arch_spin_relax();
}
}
arch_current_thread()->base.global_lock_count++;
_current->base.global_lock_count++;
return key;
}
void z_smp_global_unlock(unsigned int key)
{
if (arch_current_thread()->base.global_lock_count != 0U) {
arch_current_thread()->base.global_lock_count--;
if (_current->base.global_lock_count != 0U) {
_current->base.global_lock_count--;
if (!arch_current_thread()->base.global_lock_count) {
if (!_current->base.global_lock_count) {
(void)atomic_clear(&global_lock);
}
}

8
kernel/spinlock_validate.c

@@ -24,11 +24,11 @@ bool z_spin_unlock_valid(struct k_spinlock *l)
l->thread_cpu = 0;
if (arch_is_in_isr() && arch_current_thread()->base.thread_state & _THREAD_DUMMY) {
/* Edge case where an ISR aborted arch_current_thread() */
if (arch_is_in_isr() && _current->base.thread_state & _THREAD_DUMMY) {
/* Edge case where an ISR aborted _current */
return true;
}
if (tcpu != (_current_cpu->id | (uintptr_t)arch_current_thread())) {
if (tcpu != (_current_cpu->id | (uintptr_t)_current)) {
return false;
}
return true;
@@ -36,7 +36,7 @@ bool z_spin_unlock_valid(struct k_spinlock *l)
void z_spin_lock_set_owner(struct k_spinlock *l)
{
l->thread_cpu = _current_cpu->id | (uintptr_t)arch_current_thread();
l->thread_cpu = _current_cpu->id | (uintptr_t)_current;
}
#ifdef CONFIG_KERNEL_COHERENCE

2
kernel/stack.c

@@ -182,7 +182,7 @@ int z_impl_k_stack_pop(struct k_stack *stack, stack_data_t *data,
return -EAGAIN;
}
*data = (stack_data_t)arch_current_thread()->base.swap_data;
*data = (stack_data_t)_current->base.swap_data;
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_stack, pop, stack, timeout, 0);

53
kernel/thread.c

@@ -82,7 +82,7 @@ EXPORT_SYMBOL(k_is_in_isr);
#ifdef CONFIG_THREAD_CUSTOM_DATA
void z_impl_k_thread_custom_data_set(void *value)
{
arch_current_thread()->custom_data = value;
_current->custom_data = value;
}
#ifdef CONFIG_USERSPACE
@@ -95,7 +95,7 @@ static inline void z_vrfy_k_thread_custom_data_set(void *data)
void *z_impl_k_thread_custom_data_get(void)
{
return arch_current_thread()->custom_data;
return _current->custom_data;
}
#ifdef CONFIG_USERSPACE
@@ -110,7 +110,7 @@ static inline void *z_vrfy_k_thread_custom_data_get(void)
int z_impl_k_is_preempt_thread(void)
{
return !arch_is_in_isr() && thread_is_preemptible(arch_current_thread());
return !arch_is_in_isr() && thread_is_preemptible(_current);
}
#ifdef CONFIG_USERSPACE
@@ -139,7 +139,7 @@ int z_impl_k_thread_name_set(k_tid_t thread, const char *str)
{
#ifdef CONFIG_THREAD_NAME
if (thread == NULL) {
thread = arch_current_thread();
thread = _current;
}
strncpy(thread->name, str, CONFIG_THREAD_MAX_NAME_LEN - 1);
@@ -334,11 +334,11 @@ void z_check_stack_sentinel(void)
{
uint32_t *stack;
if ((arch_current_thread()->base.thread_state & _THREAD_DUMMY) != 0) {
if ((_current->base.thread_state & _THREAD_DUMMY) != 0) {
return;
}
stack = (uint32_t *)arch_current_thread()->stack_info.start;
stack = (uint32_t *)_current->stack_info.start;
if (*stack != STACK_SENTINEL) {
/* Restore it so further checks don't trigger this same error */
*stack = STACK_SENTINEL;
@@ -614,8 +614,8 @@ char *z_setup_new_thread(struct k_thread *new_thread,
}
#endif /* CONFIG_SCHED_CPU_MASK */
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
/* arch_current_thread() may be null if the dummy thread is not used */
if (!arch_current_thread()) {
/* _current may be null if the dummy thread is not used */
if (!_current) {
new_thread->resource_pool = NULL;
return stack_ptr;
}
@@ -624,13 +624,13 @@ char *z_setup_new_thread(struct k_thread *new_thread,
z_mem_domain_init_thread(new_thread);
if ((options & K_INHERIT_PERMS) != 0U) {
k_thread_perms_inherit(arch_current_thread(), new_thread);
k_thread_perms_inherit(_current, new_thread);
}
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_SCHED_DEADLINE
new_thread->base.prio_deadline = 0;
#endif /* CONFIG_SCHED_DEADLINE */
new_thread->resource_pool = arch_current_thread()->resource_pool;
new_thread->resource_pool = _current->resource_pool;
#ifdef CONFIG_SMP
z_waitq_init(&new_thread->halt_queue);
@@ -725,7 +725,7 @@ k_tid_t z_vrfy_k_thread_create(struct k_thread *new_thread,
*/
K_OOPS(K_SYSCALL_VERIFY(_is_valid_prio(prio, NULL)));
K_OOPS(K_SYSCALL_VERIFY(z_is_prio_lower_or_equal(prio,
arch_current_thread()->base.prio)));
_current->base.prio)));
z_setup_new_thread(new_thread, stack, stack_size,
entry, p1, p2, p3, prio, options, NULL);
@@ -770,25 +770,25 @@ FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
{
SYS_PORT_TRACING_FUNC(k_thread, user_mode_enter);
arch_current_thread()->base.user_options |= K_USER;
z_thread_essential_clear(arch_current_thread());
_current->base.user_options |= K_USER;
z_thread_essential_clear(_current);
#ifdef CONFIG_THREAD_MONITOR
arch_current_thread()->entry.pEntry = entry;
arch_current_thread()->entry.parameter1 = p1;
arch_current_thread()->entry.parameter2 = p2;
arch_current_thread()->entry.parameter3 = p3;
_current->entry.pEntry = entry;
_current->entry.parameter1 = p1;
_current->entry.parameter2 = p2;
_current->entry.parameter3 = p3;
#endif /* CONFIG_THREAD_MONITOR */
#ifdef CONFIG_USERSPACE
__ASSERT(z_stack_is_user_capable(arch_current_thread()->stack_obj),
__ASSERT(z_stack_is_user_capable(_current->stack_obj),
"dropping to user mode with kernel-only stack object");
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
memset(arch_current_thread()->userspace_local_data, 0,
memset(_current->userspace_local_data, 0,
sizeof(struct _thread_userspace_local_data));
#endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */
#ifdef CONFIG_THREAD_LOCAL_STORAGE
arch_tls_stack_setup(arch_current_thread(),
(char *)(arch_current_thread()->stack_info.start +
arch_current_thread()->stack_info.size));
arch_tls_stack_setup(_current,
(char *)(_current->stack_info.start +
_current->stack_info.size));
#endif /* CONFIG_THREAD_LOCAL_STORAGE */
arch_user_mode_enter(entry, p1, p2, p3);
#else
@@ -916,7 +916,7 @@ static inline k_ticks_t z_vrfy_k_thread_timeout_expires_ticks(
void z_thread_mark_switched_in(void)
{
#if defined(CONFIG_SCHED_THREAD_USAGE) && !defined(CONFIG_USE_SWITCH)
z_sched_usage_start(arch_current_thread());
z_sched_usage_start(_current);
#endif /* CONFIG_SCHED_THREAD_USAGE && !CONFIG_USE_SWITCH */
#ifdef CONFIG_TRACING
@@ -933,9 +933,10 @@ void z_thread_mark_switched_out(void)
#ifdef CONFIG_TRACING
#ifdef CONFIG_THREAD_LOCAL_STORAGE
/* Dummy thread won't have TLS set up to run arbitrary code */
if (!arch_current_thread() ||
(arch_current_thread()->base.thread_state & _THREAD_DUMMY) != 0)
if (!_current ||
(_current->base.thread_state & _THREAD_DUMMY) != 0) {
return;
}
#endif /* CONFIG_THREAD_LOCAL_STORAGE */
SYS_PORT_TRACING_FUNC(k_thread, switched_out);
#endif /* CONFIG_TRACING */
@@ -1084,7 +1085,7 @@ void k_thread_abort_cleanup(struct k_thread *thread)
thread_to_cleanup = NULL;
}
if (thread == arch_current_thread()) {
if (thread == _current) {
/* Need to defer for current running thread as the cleanup
* might result in exception. Actual cleanup will be done
* at the next time k_thread_abort() is called, or at thread

6
kernel/timeslicing.c

@@ -15,7 +15,7 @@ static bool slice_expired[CONFIG_MP_MAX_NUM_CPUS];
#ifdef CONFIG_SWAP_NONATOMIC
/* If z_swap() isn't atomic, then it's possible for a timer interrupt
* to try to timeslice away arch_current_thread() after it has already pended
* to try to timeslice away _current after it has already pended
* itself but before the corresponding context switch. Treat that as
* a noop condition in z_time_slice().
*/
@@ -82,7 +82,7 @@ void k_sched_time_slice_set(int32_t slice, int prio)
K_SPINLOCK(&_sched_spinlock) {
slice_ticks = k_ms_to_ticks_ceil32(slice);
slice_max_prio = prio;
z_reset_time_slice(arch_current_thread());
z_reset_time_slice(_current);
}
}
@@ -103,7 +103,7 @@ void k_thread_time_slice_set(struct k_thread *thread, int32_t thread_slice_ticks
void z_time_slice(void)
{
k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
struct k_thread *curr = arch_current_thread();
struct k_thread *curr = _current;
#ifdef CONFIG_SWAP_NONATOMIC
if (pending_current == curr) {

12
kernel/userspace.c

@@ -437,7 +437,7 @@ static void *z_object_alloc(enum k_objects otype, size_t size)
/* The allocating thread implicitly gets permission on kernel objects
* that it allocates
*/
k_thread_perms_set(zo, arch_current_thread());
k_thread_perms_set(zo, _current);
/* Activates reference counting logic for automatic disposal when
* all permissions have been revoked
@@ -654,7 +654,7 @@ static int thread_perms_test(struct k_object *ko)
return 1;
}
index = thread_index_get(arch_current_thread());
index = thread_index_get(_current);
if (index != -1) {
return sys_bitfield_test_bit((mem_addr_t)&ko->perms, index);
}
@@ -663,9 +663,9 @@ static int thread_perms_test(struct k_object *ko)
static void dump_permission_error(struct k_object *ko)
{
int index = thread_index_get(arch_current_thread());
int index = thread_index_get(_current);
LOG_ERR("thread %p (%d) does not have permission on %s %p",
arch_current_thread(), index,
_current, index,
otype_to_str(ko->type), ko->name);
LOG_HEXDUMP_ERR(ko->perms, sizeof(ko->perms), "permission bitmap");
}
@@ -718,7 +718,7 @@ void k_object_access_revoke(const void *object, struct k_thread *thread)
void z_impl_k_object_release(const void *object)
{
k_object_access_revoke(object, arch_current_thread());
k_object_access_revoke(object, _current);
}
void k_object_access_all_grant(const void *object)
@@ -794,7 +794,7 @@ void k_object_recycle(const void *obj)
if (ko != NULL) {
(void)memset(ko->perms, 0, sizeof(ko->perms));
k_thread_perms_set(ko, arch_current_thread());
k_thread_perms_set(ko, _current);
ko->flags |= K_OBJ_FLAG_INITIALIZED;
}
}

2
kernel/userspace_handler.c

@@ -72,7 +72,7 @@ static inline void z_vrfy_k_object_release(const void *object)
ko = validate_any_object(object);
K_OOPS(K_SYSCALL_VERIFY_MSG(ko != NULL, "object %p access denied", object));
k_thread_perms_clear(ko, arch_current_thread());
k_thread_perms_clear(ko, _current);
}
#include <zephyr/syscalls/k_object_release_mrsh.c>

2
kernel/work.c

@@ -262,7 +262,7 @@ static inline int queue_submit_locked(struct k_work_q *queue,
}
int ret;
bool chained = (arch_current_thread() == &queue->thread) && !k_is_in_isr();
bool chained = (_current == &queue->thread) && !k_is_in_isr();
bool draining = flag_test(&queue->flags, K_WORK_QUEUE_DRAIN_BIT);
bool plugged = flag_test(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT);

2
lib/libc/armstdc/src/libc-hooks.c

@@ -23,7 +23,7 @@ void __stdout_hook_install(int (*hook)(int))
volatile int *__aeabi_errno_addr(void)
{
return &arch_current_thread()->errno_var;
return &_current->errno_var;
}
int fputc(int c, FILE *f)

12
lib/os/p4wq.c

@@ -87,10 +87,10 @@ static FUNC_NORETURN void p4wq_loop(void *p0, void *p1, void *p2)
= CONTAINER_OF(r, struct k_p4wq_work, rbnode);
rb_remove(&queue->queue, r);
w->thread = arch_current_thread();
w->thread = _current;
sys_dlist_append(&queue->active, &w->dlnode);
set_prio(arch_current_thread(), w);
thread_clear_requeued(arch_current_thread());
set_prio(_current, w);
thread_clear_requeued(_current);
k_spin_unlock(&queue->lock, k);
@@ -101,7 +101,7 @@ static FUNC_NORETURN void p4wq_loop(void *p0, void *p1, void *p2)
/* Remove from the active list only if it
* wasn't resubmitted already
*/
if (!thread_was_requeued(arch_current_thread())) {
if (!thread_was_requeued(_current)) {
sys_dlist_remove(&w->dlnode);
w->thread = NULL;
k_sem_give(&w->done_sem);
@@ -223,9 +223,9 @@ void k_p4wq_submit(struct k_p4wq *queue, struct k_p4wq_work *item)
item->deadline += k_cycle_get_32();
/* Resubmission from within handler? Remove from active list */
if (item->thread == arch_current_thread()) {
if (item->thread == _current) {
sys_dlist_remove(&item->dlnode);
thread_set_requeued(arch_current_thread());
thread_set_requeued(_current);
item->thread = NULL;
} else {
k_sem_init(&item->done_sem, 0, 1);

8
scripts/build/gen_syscalls.py

@@ -362,7 +362,7 @@ def marshall_defs(func_name, func_type, args):
else:
mrsh += "\t\t" + "uintptr_t arg3, uintptr_t arg4, void *more, void *ssf)\n"
mrsh += "{\n"
mrsh += "\t" + "arch_current_thread()->syscall_frame = ssf;\n"
mrsh += "\t" + "_current->syscall_frame = ssf;\n"
for unused_arg in range(nmrsh, 6):
mrsh += "\t(void) arg%d;\t/* unused */\n" % unused_arg
@@ -388,7 +388,7 @@ def marshall_defs(func_name, func_type, args):
if func_type == "void":
mrsh += "\t" + "%s;\n" % vrfy_call
mrsh += "\t" + "arch_current_thread()->syscall_frame = NULL;\n"
mrsh += "\t" + "_current->syscall_frame = NULL;\n"
mrsh += "\t" + "return 0;\n"
else:
mrsh += "\t" + "%s ret = %s;\n" % (func_type, vrfy_call)
@@ -397,10 +397,10 @@ def marshall_defs(func_name, func_type, args):
ptr = "((uint64_t *)%s)" % mrsh_rval(nmrsh - 1, nmrsh)
mrsh += "\t" + "K_OOPS(K_SYSCALL_MEMORY_WRITE(%s, 8));\n" % ptr
mrsh += "\t" + "*%s = ret;\n" % ptr
mrsh += "\t" + "arch_current_thread()->syscall_frame = NULL;\n"
mrsh += "\t" + "_current->syscall_frame = NULL;\n"
mrsh += "\t" + "return 0;\n"
else:
mrsh += "\t" + "arch_current_thread()->syscall_frame = NULL;\n"
mrsh += "\t" + "_current->syscall_frame = NULL;\n"
mrsh += "\t" + "return (uintptr_t) ret;\n"
mrsh += "}\n"
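For reference, the marshalling stubs emitted by the script fragments above follow roughly this shape. The syscall "k_example" and its z_vrfy_k_example() verifier are hypothetical names used only for illustration.

#include <zephyr/kernel.h>

extern int z_vrfy_k_example(void *buf, size_t len);

uintptr_t z_mrsh_k_example(uintptr_t arg0, uintptr_t arg1, uintptr_t arg2,
			   uintptr_t arg3, uintptr_t arg4, void *more, void *ssf)
{
	_current->syscall_frame = ssf; /* remembered for K_OOPS() */
	(void)arg2;	/* unused */
	(void)arg3;	/* unused */
	(void)arg4;	/* unused */
	(void)more;	/* unused */

	int ret = z_vrfy_k_example((void *)arg0, (size_t)arg1);

	_current->syscall_frame = NULL;
	return (uintptr_t)ret;
}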

2
soc/espressif/esp32/soc.c

@@ -69,7 +69,7 @@ void IRAM_ATTR __esp_platform_start(void)
__asm__ __volatile__ ("wsr %0, PS" : : "r"(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE));
/* Initialize the architecture CPU pointer. Some of the
* initialization code wants a valid arch_current_thread() before
* initialization code wants a valid _current before
* z_prep_c() is invoked.
*/
__asm__ __volatile__("wsr.MISC0 %0; rsync" : : "r"(&_kernel.cpus[0]));

2
soc/espressif/esp32/soc_appcpu.c

@@ -80,7 +80,7 @@ void IRAM_ATTR __appcpu_start(void)
: "r"(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE));
/* Initialize the architecture CPU pointer. Some of the
* initialization code wants a valid arch_current_thread() before
* initialization code wants a valid _current before
* z_prep_c() is invoked.
*/
__asm__ __volatile__("wsr.MISC0 %0; rsync" : : "r"(&_kernel.cpus[1]));

2
soc/espressif/esp32s2/soc.c

@@ -62,7 +62,7 @@ void __attribute__((section(".iram1"))) __esp_platform_start(void)
__asm__ __volatile__("wsr %0, PS" : : "r"(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE));
/* Initialize the architecture CPU pointer. Some of the
* initialization code wants a valid arch_current_thread() before
* initialization code wants a valid _current before
* arch_kernel_init() is invoked.
*/
__asm__ __volatile__("wsr.MISC0 %0; rsync" : : "r"(&_kernel.cpus[0]));

2
soc/espressif/esp32s3/soc.c

@@ -97,7 +97,7 @@ void IRAM_ATTR __esp_platform_start(void)
__asm__ __volatile__("wsr %0, PS" : : "r"(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE));
/* Initialize the architecture CPU pointer. Some of the
* initialization code wants a valid arch_current_thread() before
* initialization code wants a valid _current before
* arch_kernel_init() is invoked.
*/
__asm__ __volatile__("wsr.MISC0 %0; rsync" : : "r"(&_kernel.cpus[0]));

2
soc/espressif/esp32s3/soc_appcpu.c

@@ -65,7 +65,7 @@ void IRAM_ATTR __appcpu_start(void)
__asm__ __volatile__("wsr %0, PS" : : "r"(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE));
/* Initialize the architecture CPU pointer. Some of the
* initialization code wants a valid arch_current_thread() before
* initialization code wants a valid _current before
* arch_kernel_init() is invoked.
*/
__asm__ __volatile__("wsr.MISC0 %0; rsync" : : "r"(&_kernel.cpus[1]));

2
subsys/net/lib/sockets/sockets.c

@@ -68,7 +68,7 @@ static inline void *get_sock_vtable(int sock,
if (ctx == NULL) {
NET_DBG("Invalid access on sock %d by thread %p (%s)", sock,
arch_current_thread(), k_thread_name_get(arch_current_thread()));
_current, k_thread_name_get(_current));
}
return ctx;

6
subsys/portability/cmsis_rtos_v2/kernel.c

@@ -39,7 +39,7 @@ osStatus_t osKernelGetInfo(osVersion_t *version, char *id_buf, uint32_t id_size)
*/
int32_t osKernelLock(void)
{
int temp = arch_current_thread()->base.sched_locked;
int temp = _current->base.sched_locked;
if (k_is_in_isr()) {
return osErrorISR;
@@ -55,7 +55,7 @@ int32_t osKernelLock(void)
*/
int32_t osKernelUnlock(void)
{
int temp = arch_current_thread()->base.sched_locked;
int temp = _current->base.sched_locked;
if (k_is_in_isr()) {
return osErrorISR;
@@ -71,7 +71,7 @@ int32_t osKernelUnlock(void)
*/
int32_t osKernelRestoreLock(int32_t lock)
{
arch_current_thread()->base.sched_locked = lock;
_current->base.sched_locked = lock;
if (k_is_in_isr()) {
return osErrorISR;
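For context, these wrappers translate Zephyr's per-thread scheduler lock nesting counter into CMSIS lock-state return values. A condensed sketch of the lock path, assuming kernel and cmsis_os2 headers; the exact mapping of the counter to the returned previous state is an assumption, so only the shape is meaningful:

    int32_t os_kernel_lock_sketch(void)          /* illustrative name, not the real symbol */
    {
        int temp = _current->base.sched_locked;  /* non-zero: already locked */

        if (k_is_in_isr()) {
            return osErrorISR;                   /* CMSIS forbids locking from ISRs */
        }

        k_sched_lock();                          /* take, or nest, the scheduler lock */

        return (temp == 0) ? 0 : 1;              /* previous lock state, CMSIS-style */
    }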

4
subsys/profiling/perf/backends/perf_riscv.c

@@ -76,10 +76,10 @@ size_t arch_perf_current_stack_trace(uintptr_t *buf, size_t size)
* function prologue or epilogue.
*/
buf[idx++] = (uintptr_t)esf->ra;
if (valid_stack((uintptr_t)new_fp, arch_current_thread())) {
if (valid_stack((uintptr_t)new_fp, _current)) {
fp = new_fp;
}
while (valid_stack((uintptr_t)fp, arch_current_thread())) {
while (valid_stack((uintptr_t)fp, _current)) {
if (idx >= size) {
return 0;
}
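A plausible reading of `valid_stack()` as used in these backends, sketched under the assumption that it is a plain bounds check against the profiled thread's stack region (the real helper may add alignment or guard-area checks):

    static bool valid_stack(uintptr_t addr, struct k_thread *thread)
    {
        /* Only follow a frame pointer that lies inside the stack owned by
         * the thread being sampled (_current in the callers above).
         */
        return addr >= thread->stack_info.start &&
               addr < (thread->stack_info.start + thread->stack_info.size);
    }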

2
subsys/profiling/perf/backends/perf_x86.c

@@ -67,7 +67,7 @@ size_t arch_perf_current_stack_trace(uintptr_t *buf, size_t size)
*/
buf[idx++] = (uintptr_t)isf->eip;
while (valid_stack((uintptr_t)fp, arch_current_thread())) {
while (valid_stack((uintptr_t)fp, _current)) {
if (idx >= size) {
return 0;
}

8
subsys/profiling/perf/backends/perf_x86_64.c

@@ -35,13 +35,13 @@ size_t arch_perf_current_stack_trace(uintptr_t *buf, size_t size)
/*
* In x86_64 (arch/x86/core/intel64/locore.S) %rip and %rbp
* are always saved in arch_current_thread()->callee_saved before calling
* are always saved in _current->callee_saved before calling
* handler function if interrupt is not nested
*
* %rip points to the location where the interrupt occurred
*/
buf[idx++] = (uintptr_t)arch_current_thread()->callee_saved.rip;
void **fp = (void **)arch_current_thread()->callee_saved.rbp;
buf[idx++] = (uintptr_t)_current->callee_saved.rip;
void **fp = (void **)_current->callee_saved.rbp;
/*
* %rbp is frame pointer.
@@ -53,7 +53,7 @@ size_t arch_perf_current_stack_trace(uintptr_t *buf, size_t size)
* %rbp (next) <- %rbp (curr)
* ....
*/
while (valid_stack((uintptr_t)fp, arch_current_thread())) {
while (valid_stack((uintptr_t)fp, _current)) {
if (idx >= size) {
return 0;
}
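The loop described by those comments, sketched with the conventional x86-64 frame layout in mind: `fp[0]` holds the caller's saved %rbp and `fp[1]` the return address. Whether the real backend's loop body matches this exactly is an assumption; it reuses the `valid_stack()` sketch given earlier:

    static size_t unwind_by_rbp_sketch(uintptr_t *buf, size_t size,
                                       void **fp, struct k_thread *thread)
    {
        size_t idx = 0;

        while (valid_stack((uintptr_t)fp, thread)) {
            if (idx >= size) {
                return 0;                      /* output buffer exhausted */
            }
            buf[idx++] = (uintptr_t)fp[1];     /* saved return address */
            fp = (void **)fp[0];               /* hop to the caller's frame */
        }
        return idx;
    }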

2
subsys/shell/modules/kernel_service/thread/unwind.c

@@ -30,7 +30,7 @@ static int cmd_kernel_thread_unwind(const struct shell *sh, size_t argc, char **
int err = 0;
if (argc == 1) {
thread = arch_current_thread();
thread = _current;
} else {
thread = UINT_TO_POINTER(shell_strtoull(argv[1], 16, &err));
if (err != 0) {

6
tests/arch/arm/arm_interrupt/src/arm_interrupt.c

@@ -177,7 +177,7 @@ ZTEST(arm_interrupt, test_arm_esf_collection)
* crashy thread we create below runs to completion before we get
* to the end of this function
*/
k_thread_priority_set(arch_current_thread(), K_PRIO_PREEMPT(MAIN_PRIORITY));
k_thread_priority_set(_current, K_PRIO_PREEMPT(MAIN_PRIORITY));
TC_PRINT("Testing ESF Reporting\n");
k_thread_create(&esf_collection_thread, esf_collection_stack,
@@ -366,9 +366,9 @@ ZTEST(arm_interrupt, test_arm_interrupt)
uint32_t fp_extra_size =
(__get_CONTROL() & CONTROL_FPCA_Msk) ?
FPU_STACK_EXTRA_SIZE : 0;
__set_PSP(arch_current_thread()->stack_info.start + 0x10 + fp_extra_size);
__set_PSP(_current->stack_info.start + 0x10 + fp_extra_size);
#else
__set_PSP(arch_current_thread()->stack_info.start + 0x10);
__set_PSP(_current->stack_info.start + 0x10);
#endif
__enable_irq();

28
tests/arch/arm/arm_thread_swap/src/arm_syscalls.c

@@ -38,20 +38,20 @@ void z_impl_test_arm_user_syscall(void)
* - PSPLIM register guards the privileged stack
* - MSPLIM register still guards the interrupt stack
*/
zassert_true((arch_current_thread()->arch.mode & CONTROL_nPRIV_Msk) == 0,
zassert_true((_current->arch.mode & CONTROL_nPRIV_Msk) == 0,
"mode variable not set to PRIV mode in system call\n");
zassert_false(arch_is_user_context(),
"arch_is_user_context() indicates nPRIV\n");
zassert_true(
((__get_PSP() >= arch_current_thread()->arch.priv_stack_start) &&
(__get_PSP() < (arch_current_thread()->arch.priv_stack_start +
((__get_PSP() >= _current->arch.priv_stack_start) &&
(__get_PSP() < (_current->arch.priv_stack_start +
CONFIG_PRIVILEGED_STACK_SIZE))),
"Process SP outside thread privileged stack limits\n");
#if defined(CONFIG_BUILTIN_STACK_GUARD)
zassert_true(__get_PSPLIM() == arch_current_thread()->arch.priv_stack_start,
zassert_true(__get_PSPLIM() == _current->arch.priv_stack_start,
"PSPLIM not guarding the thread's privileged stack\n");
zassert_true(__get_MSPLIM() == (uint32_t)z_interrupt_stacks,
"MSPLIM not guarding the interrupt stack\n");
@@ -78,16 +78,16 @@ void arm_isr_handler(const void *args)
* - MSPLIM register still guards the interrupt stack
*/
zassert_true((arch_current_thread()->arch.mode & CONTROL_nPRIV_Msk) != 0,
zassert_true((_current->arch.mode & CONTROL_nPRIV_Msk) != 0,
"mode variable not set to nPRIV mode for user thread\n");
zassert_false(arch_is_user_context(),
"arch_is_user_context() indicates nPRIV in ISR\n");
zassert_true(
((__get_PSP() >= arch_current_thread()->stack_info.start) &&
(__get_PSP() < (arch_current_thread()->stack_info.start +
arch_current_thread()->stack_info.size))),
((__get_PSP() >= _current->stack_info.start) &&
(__get_PSP() < (_current->stack_info.start +
_current->stack_info.size))),
"Process SP outside thread stack limits\n");
static int first_call = 1;
@@ -97,7 +97,7 @@ void arm_isr_handler(const void *args)
/* Trigger thread yield() manually */
(void)irq_lock();
z_move_thread_to_end_of_prio_q(arch_current_thread());
z_move_thread_to_end_of_prio_q(_current);
SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;
irq_unlock(0);
@@ -165,20 +165,20 @@ ZTEST(arm_thread_swap, test_arm_syscalls)
* - PSPLIM register guards the default stack
* - MSPLIM register guards the interrupt stack
*/
zassert_true((arch_current_thread()->arch.mode & CONTROL_nPRIV_Msk) == 0,
zassert_true((_current->arch.mode & CONTROL_nPRIV_Msk) == 0,
"mode variable not set to PRIV mode for supervisor thread\n");
zassert_false(arch_is_user_context(),
"arch_is_user_context() indicates nPRIV\n");
zassert_true(
((__get_PSP() >= arch_current_thread()->stack_info.start) &&
(__get_PSP() < (arch_current_thread()->stack_info.start +
arch_current_thread()->stack_info.size))),
((__get_PSP() >= _current->stack_info.start) &&
(__get_PSP() < (_current->stack_info.start +
_current->stack_info.size))),
"Process SP outside thread stack limits\n");
#if defined(CONFIG_BUILTIN_STACK_GUARD)
zassert_true(__get_PSPLIM() == arch_current_thread()->stack_info.start,
zassert_true(__get_PSPLIM() == _current->stack_info.start,
"PSPLIM not guarding the default stack\n");
zassert_true(__get_MSPLIM() == (uint32_t)z_interrupt_stacks,
"MSPLIM not guarding the interrupt stack\n");

54
tests/arch/arm/arm_thread_swap/src/arm_thread_arch.c

@@ -274,16 +274,16 @@ static void alt_thread_entry(void *p1, void *p2, void *p3)
/* Verify that the _current_ (alt) thread is
* initialized with EXC_RETURN.Ftype set
*/
zassert_true((arch_current_thread()->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0,
zassert_true((_current->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0,
"Alt thread FPCA flag not clear at initialization\n");
#if defined(CONFIG_MPU_STACK_GUARD)
/* Alt thread is created with K_FP_REGS set, so we
* expect lazy stacking and long guard to be enabled.
*/
zassert_true((arch_current_thread()->arch.mode &
zassert_true((_current->arch.mode &
Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0,
"Alt thread MPU GUAR DFLOAT flag not set at initialization\n");
zassert_true((arch_current_thread()->base.user_options & K_FP_REGS) != 0,
zassert_true((_current->base.user_options & K_FP_REGS) != 0,
"Alt thread K_FP_REGS not set at initialization\n");
zassert_true((FPU->FPCCR & FPU_FPCCR_LSPEN_Msk) != 0,
"Lazy FP Stacking not set at initialization\n");
@@ -326,7 +326,7 @@ static void alt_thread_entry(void *p1, void *p2, void *p3)
p_ztest_thread->arch.swap_return_value = SWAP_RETVAL;
#endif
z_move_thread_to_end_of_prio_q(arch_current_thread());
z_move_thread_to_end_of_prio_q(_current);
/* Modify the callee-saved registers by zero-ing them.
* The main test thread will, later, assert that they
@@ -451,20 +451,20 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
*/
load_callee_saved_regs(&ztest_thread_callee_saved_regs_init);
k_thread_priority_set(arch_current_thread(), K_PRIO_COOP(PRIORITY));
k_thread_priority_set(_current, K_PRIO_COOP(PRIORITY));
/* Export current thread's callee-saved registers pointer
* and arch.basepri variable pointer, into global pointer
* variables, so they can be easily accessible by other
* (alternative) test thread.
*/
p_ztest_thread = arch_current_thread();
p_ztest_thread = _current;
/* Confirm initial conditions before starting the test. */
test_flag = switch_flag;
zassert_true(test_flag == false,
"Switch flag not initialized properly\n");
zassert_true(arch_current_thread()->arch.basepri == 0,
zassert_true(_current->arch.basepri == 0,
"Thread BASEPRI flag not clear at thread start\n");
/* Verify, also, that the interrupts are unlocked. */
#if defined(CONFIG_CPU_CORTEX_M_HAS_BASEPRI)
@@ -484,16 +484,16 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
"Main test thread does not start in privilege mode\n");
/* Assert that the mode status variable indicates privilege mode */
zassert_true((arch_current_thread()->arch.mode & CONTROL_nPRIV_Msk) == 0,
zassert_true((_current->arch.mode & CONTROL_nPRIV_Msk) == 0,
"Thread nPRIV flag not clear for supervisor thread: 0x%0x\n",
arch_current_thread()->arch.mode);
_current->arch.mode);
#endif /* CONFIG_USERSPACE */
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
/* The main test thread is not (yet) actively using the FP registers */
zassert_true((arch_current_thread()->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0,
zassert_true((_current->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0,
"Thread Ftype flag not set at initialization 0x%0x\n",
arch_current_thread()->arch.mode);
_current->arch.mode);
/* Verify that the main test thread is initialized with FPCA cleared. */
zassert_true((__get_CONTROL() & CONTROL_FPCA_Msk) == 0,
@@ -506,7 +506,7 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
/* Clear the thread's floating-point callee-saved registers' container.
* The container will, later, be populated by the swap mechanism.
*/
memset(&arch_current_thread()->arch.preempt_float, 0,
memset(&_current->arch.preempt_float, 0,
sizeof(struct _preempt_float));
/* Randomize the FP callee-saved registers at test initialization */
@@ -520,13 +520,13 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
/* The main test thread is using the FP registers, but the .mode
* flag is not updated until the next context switch.
*/
zassert_true((arch_current_thread()->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0,
zassert_true((_current->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0,
"Thread Ftype flag not set at initialization\n");
#if defined(CONFIG_MPU_STACK_GUARD)
zassert_true((arch_current_thread()->arch.mode &
zassert_true((_current->arch.mode &
Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) == 0,
"Thread MPU GUAR DFLOAT flag not clear at initialization\n");
zassert_true((arch_current_thread()->base.user_options & K_FP_REGS) == 0,
zassert_true((_current->base.user_options & K_FP_REGS) == 0,
"Thread K_FP_REGS not clear at initialization\n");
zassert_true((FPU->FPCCR & FPU_FPCCR_LSPEN_Msk) == 0,
"Lazy FP Stacking not clear at initialization\n");
@@ -555,13 +555,13 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
* explicitly required by the test.
*/
(void)irq_lock();
z_move_thread_to_end_of_prio_q(arch_current_thread());
z_move_thread_to_end_of_prio_q(_current);
/* Clear the thread's callee-saved registers' container.
* The container will, later, be populated by the swap
* mechanism.
*/
memset(&arch_current_thread()->callee_saved, 0, sizeof(_callee_saved_t));
memset(&_current->callee_saved, 0, sizeof(_callee_saved_t));
/* Verify context-switch has not occurred yet. */
test_flag = switch_flag;
@@ -677,7 +677,7 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
*/
verify_callee_saved(
&ztest_thread_callee_saved_regs_container,
&arch_current_thread()->callee_saved);
&_current->callee_saved);
/* Verify context-switch did occur. */
test_flag = switch_flag;
@@ -693,7 +693,7 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
* the alternative thread modified it, since the thread
* is now switched back in.
*/
zassert_true(arch_current_thread()->arch.basepri == 0,
zassert_true(_current->arch.basepri == 0,
"arch.basepri value not in accordance with the update\n");
#if defined(CONFIG_CPU_CORTEX_M_HAS_BASEPRI)
@@ -714,12 +714,12 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
#if !defined(CONFIG_NO_OPTIMIZATIONS)
/* The thread is now swapped-back in. */
zassert_equal(arch_current_thread()->arch.swap_return_value, SWAP_RETVAL,
zassert_equal(_current->arch.swap_return_value, SWAP_RETVAL,
"Swap value not set as expected: 0x%x (0x%x)\n",
arch_current_thread()->arch.swap_return_value, SWAP_RETVAL);
zassert_equal(arch_current_thread()->arch.swap_return_value, ztest_swap_return_val,
_current->arch.swap_return_value, SWAP_RETVAL);
zassert_equal(_current->arch.swap_return_value, ztest_swap_return_val,
"Swap value not returned as expected 0x%x (0x%x)\n",
arch_current_thread()->arch.swap_return_value, ztest_swap_return_val);
_current->arch.swap_return_value, ztest_swap_return_val);
#endif
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
@@ -737,7 +737,7 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
*/
verify_fp_callee_saved(
&ztest_thread_fp_callee_saved_regs,
&arch_current_thread()->arch.preempt_float);
&_current->arch.preempt_float);
/* Verify that the main test thread restored the FPSCR bit-0. */
zassert_true((__get_FPSCR() & 0x1) == 0x1,
@@ -746,13 +746,13 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
/* The main test thread is using the FP registers, and the .mode
* flag and MPU GUARD flag are now updated.
*/
zassert_true((arch_current_thread()->arch.mode_exc_return & EXC_RETURN_FTYPE) == 0,
zassert_true((_current->arch.mode_exc_return & EXC_RETURN_FTYPE) == 0,
"Thread Ftype flag not cleared after main returned back\n");
#if defined(CONFIG_MPU_STACK_GUARD)
zassert_true((arch_current_thread()->arch.mode &
zassert_true((_current->arch.mode &
Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0,
"Thread MPU GUARD FLOAT flag not set\n");
zassert_true((arch_current_thread()->base.user_options & K_FP_REGS) != 0,
zassert_true((_current->base.user_options & K_FP_REGS) != 0,
"Thread K_FPREGS not set after main returned back\n");
zassert_true((FPU->FPCCR & FPU_FPCCR_LSPEN_Msk) != 0,
"Lazy FP Stacking not set after main returned back\n");

2
tests/arch/riscv/userspace/riscv_gp/src/main.c

@@ -39,7 +39,7 @@ static void rogue_user_fn(void *p1, void *p2, void *p3)
if (IS_ENABLED(CONFIG_RISCV_GP)) {
zassert_equal(reg_read(gp), 0xbad);
} else { /* CONFIG_RISCV_CURRENT_VIA_GP */
zassert_equal((uintptr_t)arch_current_thread(), 0xbad);
zassert_equal((uintptr_t)_current, 0xbad);
}
/* Sleep to force a context switch, which will sanitize `gp` */
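What the branch above distinguishes: with CONFIG_RISCV_GP the `gp` register is the ABI global pointer (so only `reg_read(gp)` observes the corrupted 0xbad), whereas with CONFIG_RISCV_CURRENT_VIA_GP it carries the current thread pointer, so `_current` itself reads back as 0xbad. A minimal sketch of the latter convention, assuming a plain register move is how the pointer is read (helper name invented for this sketch):

    /* Sketch: with the thread pointer kept in `gp`, fetching the current
     * thread is a register move rather than a memory load.
     */
    static inline struct k_thread *current_via_gp_sketch(void)
    {
        struct k_thread *t;

        __asm__ volatile("mv %0, gp" : "=r"(t));
        return t;
    }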

4
tests/benchmarks/footprints/src/system_thread.c

@@ -28,12 +28,12 @@ void test_thread_entry(void *p, void *p1, void *p2)
void thread_swap(void *p1, void *p2, void *p3)
{
k_thread_abort(arch_current_thread());
k_thread_abort(_current);
}
void thread_suspend(void *p1, void *p2, void *p3)
{
k_thread_suspend(arch_current_thread());
k_thread_suspend(_current);
}
void thread_yield0(void *p1, void *p2, void *p3)

8
tests/kernel/context/src/main.c

@@ -135,7 +135,7 @@ static void isr_handler(const void *data)
break;
}
if (arch_current_thread()->base.prio < 0) {
if (_current->base.prio < 0) {
isr_info.value = K_COOP_THREAD;
break;
}
@@ -643,9 +643,9 @@ ZTEST(context, test_ctx_thread)
TC_PRINT("Testing k_is_in_isr() from a preemptible thread\n");
zassert_false(k_is_in_isr(), "Should not be in ISR context");
zassert_false(arch_current_thread()->base.prio < 0,
zassert_false(_current->base.prio < 0,
"Current thread should have preemptible priority: %d",
arch_current_thread()->base.prio);
_current->base.prio);
}
@@ -683,7 +683,7 @@ static void _test_kernel_thread(k_tid_t _thread_id)
zassert_false(k_is_in_isr(), "k_is_in_isr() when called from a thread is true");
zassert_false((arch_current_thread()->base.prio >= 0),
zassert_false((_current->base.prio >= 0),
"thread is not a cooperative thread");
}
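The sign checks above rest on Zephyr's priority convention: cooperative threads have negative priorities, preemptible threads have zero or positive ones. A tiny illustrative helper (the name is invented for this sketch):

    #include <zephyr/kernel.h>

    /* Sketch: in Zephyr, a negative priority marks a cooperative thread. */
    static inline bool is_coop_sketch(const struct k_thread *t)
    {
        return t->base.prio < 0;
    }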

2
tests/kernel/fatal/exception/src/main.c

@@ -314,7 +314,7 @@ ZTEST(fatal_exception, test_fatal)
* priority -1. To run the test smoothly make both main and ztest
* threads run at same priority level.
*/
k_thread_priority_set(arch_current_thread(), K_PRIO_PREEMPT(MAIN_PRIORITY));
k_thread_priority_set(_current, K_PRIO_PREEMPT(MAIN_PRIORITY));
#ifndef CONFIG_ARCH_POSIX
TC_PRINT("test alt thread 1: generic CPU exception\n");

2
tests/kernel/fatal/message_capture/src/main.c

@@ -86,7 +86,7 @@ int main(void)
* panic and not an oops). Set the thread non-essential as a
* workaround.
*/
z_thread_essential_clear(arch_current_thread());
z_thread_essential_clear(_current);
test_message_capture();
return 0;

Some files were not shown because too many files have changed in this diff.