@@ -31,7 +31,7 @@ extern struct k_thread *pending_current;
 struct k_spinlock _sched_spinlock;
 /* Storage to "complete" the context switch from an invalid/incomplete thread
- * context (ex: exiting an ISR that aborted arch_current_thread())
+ * context (ex: exiting an ISR that aborted _current)
  */
 __incoherent struct k_thread _thread_dummy;
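Note (not part of the patch): the rename reads naturally if you keep in mind that both spellings are meant to name the same per-CPU "currently running thread" pointer. A minimal conceptual sketch of that relationship, using hypothetical declarations rather than the real Zephyr headers:

    /* Conceptual sketch only -- the actual definitions live in the Zephyr
     * arch/kernel headers. The patch assumes _current is simply the short,
     * kernel-internal spelling of the architecture's current-thread accessor.
     */
    struct k_thread;                              /* opaque thread object    */
    struct k_thread *arch_current_thread(void);   /* arch-provided accessor  */
    #define _current arch_current_thread()        /* assumed alias           */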
@@ -98,12 +98,12 @@ static ALWAYS_INLINE struct k_thread *runq_best(void)
 	return _priq_run_best(curr_cpu_runq());
 }
-/* arch_current_thread() is never in the run queue until context switch on
+/* _current is never in the run queue until context switch on
  * SMP configurations, see z_requeue_current()
  */
 static inline bool should_queue_thread(struct k_thread *thread)
 {
-	return !IS_ENABLED(CONFIG_SMP) || (thread != arch_current_thread());
+	return !IS_ENABLED(CONFIG_SMP) || (thread != _current);
 }
 static ALWAYS_INLINE void queue_thread(struct k_thread *thread)
@@ -113,7 +113,7 @@ static ALWAYS_INLINE void queue_thread(struct k_thread *thread)
 		runq_add(thread);
 	}
 #ifdef CONFIG_SMP
-	if (thread == arch_current_thread()) {
+	if (thread == _current) {
 		/* add current to end of queue means "yield" */
 		_current_cpu->swap_ok = true;
 	}
@@ -167,8 +167,8 @@ static inline void clear_halting(struct k_thread *thread)
 static ALWAYS_INLINE struct k_thread *next_up(void)
 {
 #ifdef CONFIG_SMP
-	if (is_halting(arch_current_thread())) {
-		halt_thread(arch_current_thread(), is_aborting(arch_current_thread()) ?
+	if (is_halting(_current)) {
+		halt_thread(_current, is_aborting(_current) ?
 			    _THREAD_DEAD : _THREAD_SUSPENDED);
 	}
 #endif /* CONFIG_SMP */
@@ -207,42 +207,42 @@ static ALWAYS_INLINE struct k_thread *next_up(void)
 #else
 	/* Under SMP, the "cache" mechanism for selecting the next
 	 * thread doesn't work, so we have more work to do to test
-	 * arch_current_thread() against the best choice from the queue.  Here, the
+	 * _current against the best choice from the queue.  Here, the
 	 * thread selected above represents "the best thread that is
 	 * not current".
 	 *
-	 * Subtle note on "queued": in SMP mode, arch_current_thread() does not
+	 * Subtle note on "queued": in SMP mode, _current does not
 	 * live in the queue, so this isn't exactly the same thing as
-	 * "ready", it means "is arch_current_thread() already added back to the
+	 * "ready", it means "is _current already added back to the
 	 * queue such that we don't want to re-add it".
 	 */
-	bool queued = z_is_thread_queued(arch_current_thread());
-	bool active = !z_is_thread_prevented_from_running(arch_current_thread());
+	bool queued = z_is_thread_queued(_current);
+	bool active = !z_is_thread_prevented_from_running(_current);
 	if (thread == NULL) {
 		thread = _current_cpu->idle_thread;
 	}
 	if (active) {
-		int32_t cmp = z_sched_prio_cmp(arch_current_thread(), thread);
+		int32_t cmp = z_sched_prio_cmp(_current, thread);
 		/* Ties only switch if state says we yielded */
 		if ((cmp > 0) || ((cmp == 0) && !_current_cpu->swap_ok)) {
-			thread = arch_current_thread();
+			thread = _current;
 		}
 		if (!should_preempt(thread, _current_cpu->swap_ok)) {
-			thread = arch_current_thread();
+			thread = _current;
 		}
 	}
-	/* Put arch_current_thread() back into the queue */
-	if ((thread != arch_current_thread()) && active &&
-	    !z_is_idle_thread_object(arch_current_thread()) && !queued) {
-		queue_thread(arch_current_thread());
+	/* Put _current back into the queue */
+	if ((thread != _current) && active &&
+	    !z_is_idle_thread_object(_current) && !queued) {
+		queue_thread(_current);
 	}
-	/* Take the new arch_current_thread() out of the queue */
+	/* Take the new _current out of the queue */
 	if (z_is_thread_queued(thread)) {
 		dequeue_thread(thread);
 	}
@@ -258,7 +258,7 @@ void move_thread_to_end_of_prio_q(struct k_thread *thread)
 		dequeue_thread(thread);
 	}
 	queue_thread(thread);
-	update_cache(thread == arch_current_thread());
+	update_cache(thread == _current);
 }
 /* Track cooperative threads preempted by metairqs so we can return to
@@ -269,10 +269,10 @@ static void update_metairq_preempt(struct k_thread *thread)
 {
 #if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && \
 	(CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
-	if (thread_is_metairq(thread) && !thread_is_metairq(arch_current_thread()) &&
-	    !thread_is_preemptible(arch_current_thread())) {
+	if (thread_is_metairq(thread) && !thread_is_metairq(_current) &&
+	    !thread_is_preemptible(_current)) {
 		/* Record new preemption */
-		_current_cpu->metairq_preempted = arch_current_thread();
+		_current_cpu->metairq_preempted = _current;
 	} else if (!thread_is_metairq(thread) && !z_is_idle_thread_object(thread)) {
 		/* Returning from existing preemption */
 		_current_cpu->metairq_preempted = NULL;
@@ -292,14 +292,14 @@ static ALWAYS_INLINE void update_cache(int preempt_ok)
 	if (should_preempt(thread, preempt_ok)) {
 #ifdef CONFIG_TIMESLICING
-		if (thread != arch_current_thread()) {
+		if (thread != _current) {
 			z_reset_time_slice(thread);
 		}
 #endif /* CONFIG_TIMESLICING */
 		update_metairq_preempt(thread);
 		_kernel.ready_q.cache = thread;
 	} else {
-		_kernel.ready_q.cache = arch_current_thread();
+		_kernel.ready_q.cache = _current;
 	}
 #else
@@ -378,9 +378,9 @@ void z_move_thread_to_end_of_prio_q(struct k_thread *thread)
  */
 static void thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key)
 {
-	if (is_halting(arch_current_thread())) {
-		halt_thread(arch_current_thread(),
-			    is_aborting(arch_current_thread()) ? _THREAD_DEAD : _THREAD_SUSPENDED);
+	if (is_halting(_current)) {
+		halt_thread(_current,
+			    is_aborting(_current) ? _THREAD_DEAD : _THREAD_SUSPENDED);
 	}
 	k_spin_unlock(&_sched_spinlock, key);
 	while (is_halting(thread)) {
@@ -394,7 +394,7 @@ static void thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key)
 /* Shared handler for k_thread_{suspend,abort}().  Called with the
  * scheduler lock held and the key passed (which it may
  * release/reacquire!) which will be released before a possible return
- * (aborting arch_current_thread() will not return, obviously), which may be after
+ * (aborting _current will not return, obviously), which may be after
  * a context switch.
  */
 static ALWAYS_INLINE void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key,
@@ -427,14 +427,14 @@ static ALWAYS_INLINE void z_thread_halt(struct k_thread *thread, k_spinlock_key_
 		if (arch_is_in_isr()) {
 			thread_halt_spin(thread, key);
 		} else {
-			add_to_waitq_locked(arch_current_thread(), wq);
+			add_to_waitq_locked(_current, wq);
 			z_swap(&_sched_spinlock, key);
 		}
 	} else {
 		halt_thread(thread, terminate ? _THREAD_DEAD : _THREAD_SUSPENDED);
-		if ((thread == arch_current_thread()) && !arch_is_in_isr()) {
+		if ((thread == _current) && !arch_is_in_isr()) {
 			z_swap(&_sched_spinlock, key);
-			__ASSERT(!terminate, "aborted arch_current_thread() back from dead");
+			__ASSERT(!terminate, "aborted _current back from dead");
 		} else {
 			k_spin_unlock(&_sched_spinlock, key);
 		}
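To make the locking contract described in the comment above concrete, here is a hypothetical caller sketch (the function name and the meaning of the third argument are assumptions, not taken from this patch): the caller takes the scheduler spinlock and hands the key to z_thread_halt(), which releases it, possibly across a context switch.

    /* Hypothetical caller sketch -- illustrative only. */
    static void example_abort(struct k_thread *thread)
    {
            k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);

            /* 'true' is assumed to select termination (_THREAD_DEAD) rather
             * than suspension; if 'thread' is the caller itself, this call
             * does not return.
             */
            z_thread_halt(thread, key, true);
    }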
@@ -453,7 +453,7 @@ void z_impl_k_thread_suspend(k_tid_t thread)
 	/* Special case "suspend the current thread" as it doesn't
 	 * need the async complexity below.
 	 */
-	if (thread == arch_current_thread() && !arch_is_in_isr() && !IS_ENABLED(CONFIG_SMP)) {
+	if (thread == _current && !arch_is_in_isr() && !IS_ENABLED(CONFIG_SMP)) {
 		k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
 		z_mark_thread_as_suspended(thread);
@@ -521,7 +521,7 @@ static void unready_thread(struct k_thread *thread)
 	if (z_is_thread_queued(thread)) {
 		dequeue_thread(thread);
 	}
-	update_cache(thread == arch_current_thread());
+	update_cache(thread == _current);
 }
 /* _sched_spinlock must be held */
@@ -558,7 +558,7 @@ static void pend_locked(struct k_thread *thread, _wait_q_t *wait_q,
 void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
 		   k_timeout_t timeout)
 {
-	__ASSERT_NO_MSG(thread == arch_current_thread() || is_thread_dummy(thread));
+	__ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread));
 	K_SPINLOCK(&_sched_spinlock) {
 		pend_locked(thread, wait_q, timeout);
 	}
@@ -616,7 +616,7 @@ int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
 		_wait_q_t *wait_q, k_timeout_t timeout)
 {
 #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
-	pending_current = arch_current_thread();
+	pending_current = _current;
 #endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
 	__ASSERT_NO_MSG(sizeof(_sched_spinlock) == 0 || lock != &_sched_spinlock);
@@ -629,7 +629,7 @@ int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
 	 * held.
 	 */
 	(void) k_spin_lock(&_sched_spinlock);
-	pend_locked(arch_current_thread(), wait_q, timeout);
+	pend_locked(_current, wait_q, timeout);
 	k_spin_release(lock);
 	return z_swap(&_sched_spinlock, key);
 }
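For orientation, a sketch of how a kernel object would typically block its caller through z_pend_curr() (struct example_obj and its fields are hypothetical; only the call pattern is taken from the code above): the object's own lock and key are passed in, z_pend_curr() pends _current on the wait queue under _sched_spinlock, releases the object lock, and returns the z_swap() result.

    /* Hypothetical blocking primitive -- illustrative only. */
    struct example_obj {
            struct k_spinlock lock;
            _wait_q_t wait_q;
            bool available;
    };

    static int example_take(struct example_obj *obj, k_timeout_t timeout)
    {
            k_spinlock_key_t key = k_spin_lock(&obj->lock);

            if (obj->available) {
                    obj->available = false;
                    k_spin_unlock(&obj->lock, key);
                    return 0;
            }

            /* Releases obj->lock and blocks _current on obj->wait_q; returns
             * the swap value (typically 0 on wakeup, -EAGAIN on timeout).
             */
            return z_pend_curr(&obj->lock, key, &obj->wait_q, timeout);
    }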
@@ -727,7 +727,7 @@ static inline bool need_swap(void)
 	/* Check if the next ready thread is the same as the current thread */
 	new_thread = _kernel.ready_q.cache;
-	return new_thread != arch_current_thread();
+	return new_thread != _current;
 #endif /* CONFIG_SMP */
 }
@@ -763,15 +763,15 @@ void k_sched_lock(void)
 void k_sched_unlock(void)
 {
 	K_SPINLOCK(&_sched_spinlock) {
-		__ASSERT(arch_current_thread()->base.sched_locked != 0U, "");
+		__ASSERT(_current->base.sched_locked != 0U, "");
 		__ASSERT(!arch_is_in_isr(), "");
-		++arch_current_thread()->base.sched_locked;
+		++_current->base.sched_locked;
 		update_cache(0);
 	}
 	LOG_DBG("scheduler unlocked (%p:%d)",
-		arch_current_thread(), arch_current_thread()->base.sched_locked);
+		_current, _current->base.sched_locked);
 	SYS_PORT_TRACING_FUNC(k_thread, sched_unlock);
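The ++_current->base.sched_locked above is the unlock half of a depth counter; the matching k_sched_lock() path decrements it. A usage sketch of the public pairing (standard Zephyr API; the function body is illustrative):

    void run_without_preemption(void)
    {
            k_sched_lock();     /* other threads cannot preempt from here on */

            /* ... short critical work ... */

            k_sched_unlock();   /* may reschedule immediately if warranted */
    }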
@@ -783,10 +783,10 @@ struct k_thread *z_swap_next_thread(void)
 #ifdef CONFIG_SMP
 	struct k_thread *ret = next_up();
-	if (ret == arch_current_thread()) {
+	if (ret == _current) {
 		/* When not swapping, have to signal IPIs here.  In
 		 * the context switch case it must happen later, after
-		 * arch_current_thread() gets requeued.
+		 * _current gets requeued.
 		 */
 		signal_pending_ipi();
 	}
@@ -827,7 +827,7 @@ static inline void set_current(struct k_thread *new_thread)
  * function.
  *
  * @warning
- * The arch_current_thread() value may have changed after this call and not refer
+ * The _current value may have changed after this call and not refer
  * to the interrupted thread anymore.  It might be necessary to make a local
  * copy before calling this function.
  *
@@ -843,7 +843,7 @@ void *z_get_next_switch_handle(void *interrupted)
 	void *ret = NULL;
 	K_SPINLOCK(&_sched_spinlock) {
-		struct k_thread *old_thread = arch_current_thread(), *new_thread;
+		struct k_thread *old_thread = _current, *new_thread;
 		if (IS_ENABLED(CONFIG_SMP)) {
 			old_thread->switch_handle = NULL;
@@ -869,7 +869,7 @@ void *z_get_next_switch_handle(void *interrupted)
 #endif /* CONFIG_TIMESLICING */
 #ifdef CONFIG_SPIN_VALIDATE
-			/* Changed arch_current_thread()!  Update the spinlock
+			/* Changed _current!  Update the spinlock
 			 * bookkeeping so the validation doesn't get
 			 * confused when the "wrong" thread tries to
 			 * release the lock.
@@ -904,9 +904,9 @@ void *z_get_next_switch_handle(void *interrupted)
 	return ret;
 #else
 	z_sched_usage_switch(_kernel.ready_q.cache);
-	arch_current_thread()->switch_handle = interrupted;
+	_current->switch_handle = interrupted;
 	set_current(_kernel.ready_q.cache);
-	return arch_current_thread()->switch_handle;
+	return _current->switch_handle;
 #endif /* CONFIG_SMP */
 }
 #endif /* CONFIG_USE_SWITCH */
@@ -952,7 +952,7 @@ void z_impl_k_thread_priority_set(k_tid_t thread, int prio)
 	bool need_sched = z_thread_prio_set((struct k_thread *)thread, prio);
 	if ((need_sched) && (IS_ENABLED(CONFIG_SMP) ||
-			     (arch_current_thread()->base.sched_locked == 0U))) {
+			     (_current->base.sched_locked == 0U))) {
 		z_reschedule_unlocked();
 	}
 }
@@ -1036,7 +1036,7 @@ static inline void z_vrfy_k_reschedule(void)
 bool k_can_yield(void)
 {
 	return !(k_is_pre_kernel() || k_is_in_isr() ||
-		 z_is_idle_thread_object(arch_current_thread()));
+		 z_is_idle_thread_object(_current));
 }
 void z_impl_k_yield(void)
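k_can_yield() above is a public predicate; a minimal usage sketch (illustrative):

    /* Yield the CPU only where yielding is legal: not pre-kernel, not in an
     * ISR, and not from the idle thread.
     */
    if (k_can_yield()) {
            k_yield();
    }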
@@ -1048,7 +1048,7 @@ void z_impl_k_yield(void)
 	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
 #ifdef CONFIG_SMP
-	z_mark_thread_as_queued(arch_current_thread());
+	z_mark_thread_as_queued(_current);
 #endif
 	runq_yield();
@@ -1070,7 +1070,7 @@ static int32_t z_tick_sleep(k_ticks_t ticks)
 	__ASSERT(!arch_is_in_isr(), "");
-	LOG_DBG("thread %p for %lu ticks", arch_current_thread(), (unsigned long)ticks);
+	LOG_DBG("thread %p for %lu ticks", _current, (unsigned long)ticks);
 	/* wait of 0 ms is treated as a 'yield' */
 	if (ticks == 0) {
@@ -1088,11 +1088,11 @@ static int32_t z_tick_sleep(k_ticks_t ticks)
 	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
 #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
-	pending_current = arch_current_thread();
+	pending_current = _current;
 #endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
-	unready_thread(arch_current_thread());
-	z_add_thread_timeout(arch_current_thread(), timeout);
-	z_mark_thread_as_sleeping(arch_current_thread());
+	unready_thread(_current);
+	z_add_thread_timeout(_current, timeout);
+	z_mark_thread_as_sleeping(_current);
 	(void)z_swap(&_sched_spinlock, key);
@@ -1195,7 +1195,7 @@ static inline void z_vrfy_k_wakeup(k_tid_t thread)
 k_tid_t z_impl_k_sched_current_thread_query(void)
 {
-	return arch_current_thread();
+	return _current;
 }
 #ifdef CONFIG_USERSPACE
@@ -1250,13 +1250,13 @@ static ALWAYS_INLINE void halt_thread(struct k_thread *thread, uint8_t new_state
 		z_abort_thread_timeout(thread);
 		unpend_all(&thread->join_queue);
-		/* Edge case: aborting arch_current_thread() from within an
+		/* Edge case: aborting _current from within an
 		 * ISR that preempted it requires clearing the
-		 * arch_current_thread() pointer so the upcoming context
+		 * _current pointer so the upcoming context
 		 * switch doesn't clobber the now-freed
 		 * memory
 		 */
-		if (thread == arch_current_thread() && arch_is_in_isr()) {
+		if (thread == _current && arch_is_in_isr()) {
 			dummify = true;
 		}
 	}
@@ -1299,10 +1299,10 @@ static ALWAYS_INLINE void halt_thread(struct k_thread *thread, uint8_t new_state
 	k_thread_abort_cleanup(thread);
 #endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */
-	/* Do this "set arch_current_thread() to dummy" step last so that
-	 * subsystems above can rely on arch_current_thread() being
+	/* Do this "set _current to dummy" step last so that
+	 * subsystems above can rely on _current being
 	 * unchanged.  Disabled for posix as that arch
-	 * continues to use the arch_current_thread() pointer in its swap
+	 * continues to use the _current pointer in its swap
 	 * code.  Note that we must leave a non-null switch
 	 * handle for any threads spinning in join() (this can
 	 * never be used, as our thread is flagged dead, but
@@ -1310,7 +1310,7 @@ static ALWAYS_INLINE void halt_thread(struct k_thread *thread, uint8_t new_state
 	 */
 	if (dummify && !IS_ENABLED(CONFIG_ARCH_POSIX)) {
 #ifdef CONFIG_USE_SWITCH
-		arch_current_thread()->switch_handle = arch_current_thread();
+		_current->switch_handle = _current;
 #endif
 		z_dummy_thread_init(&_thread_dummy);
@@ -1368,13 +1368,13 @@ int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout)
 			ret = 0;
 		} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
 			ret = -EBUSY;
-		} else if ((thread == arch_current_thread()) ||
-			   (thread->base.pended_on == &arch_current_thread()->join_queue)) {
+		} else if ((thread == _current) ||
+			   (thread->base.pended_on == &_current->join_queue)) {
 			ret = -EDEADLK;
 		} else {
 			__ASSERT(!arch_is_in_isr(), "cannot join in ISR");
-			add_to_waitq_locked(arch_current_thread(), &thread->join_queue);
-			add_thread_timeout(arch_current_thread(), timeout);
+			add_to_waitq_locked(_current, &thread->join_queue);
+			add_thread_timeout(_current, timeout);
 			SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_thread, join, thread, timeout);
 			ret = z_swap(&_sched_spinlock, key);
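The branches above map directly onto the documented k_thread_join() return codes; a usage sketch from the caller's side (the worker thread object is hypothetical):

    extern struct k_thread worker;   /* hypothetical thread object */

    int rc = k_thread_join(&worker, K_MSEC(100));
    if (rc == 0) {
            /* worker exited */
    } else if (rc == -EBUSY) {
            /* only for K_NO_WAIT: worker still running */
    } else if (rc == -EAGAIN) {
            /* timed out waiting */
    } else if (rc == -EDEADLK) {
            /* joining self, or two threads joining each other */
    }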
@@ -1473,7 +1473,7 @@ int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key,
 	int ret = z_pend_curr(lock, key, wait_q, timeout);
 	if (data != NULL) {
-		*data = arch_current_thread()->base.swap_data;
+		*data = _current->base.swap_data;
 	}
 	return ret;
 }