@@ -50,7 +50,7 @@ static inline int is_metairq(struct k_thread *thread)
#else
	ARG_UNUSED(thread);
	return 0;
-#endif
+#endif /* CONFIG_NUM_METAIRQ_PRIORITIES */
}

#if CONFIG_ASSERT
@@ -58,7 +58,7 @@ static inline bool is_thread_dummy(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_DUMMY) != 0U;
}
-#endif
+#endif /* CONFIG_ASSERT */

/*
 * Return value same as e.g. memcmp
@@ -98,7 +98,7 @@ int32_t z_sched_prio_cmp(struct k_thread *thread_1,
		 */
		return (int32_t) (d2 - d1);
	}
-#endif
+#endif /* CONFIG_SCHED_DEADLINE */
	return 0;
}
@@ -154,7 +154,7 @@ static ALWAYS_INLINE struct k_thread *_priq_dumb_mask_best(sys_dlist_t *pq)
	}
	return NULL;
}
-#endif
+#endif /* CONFIG_SCHED_CPU_MASK */

#if defined(CONFIG_SCHED_DUMB) || defined(CONFIG_WAITQ_DUMB)
static ALWAYS_INLINE void z_priq_dumb_add(sys_dlist_t *pq,
@@ -174,7 +174,7 @@ static ALWAYS_INLINE void z_priq_dumb_add(sys_dlist_t *pq,
	sys_dlist_append(pq, &thread->base.qnode_dlist);
}
-#endif
+#endif /* CONFIG_SCHED_DUMB || CONFIG_WAITQ_DUMB */

static ALWAYS_INLINE void *thread_runq(struct k_thread *thread)
{
@@ -193,7 +193,7 @@ static ALWAYS_INLINE void *thread_runq(struct k_thread *thread)
#else
	ARG_UNUSED(thread);
	return &_kernel.ready_q.runq;
-#endif
+#endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
}

static ALWAYS_INLINE void *curr_cpu_runq(void)
@@ -202,7 +202,7 @@ static ALWAYS_INLINE void *curr_cpu_runq(void)
	return &arch_curr_cpu()->ready_q.runq;
#else
	return &_kernel.ready_q.runq;
-#endif
+#endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
}

static ALWAYS_INLINE void runq_add(struct k_thread *thread)
@@ -239,7 +239,7 @@ static ALWAYS_INLINE void queue_thread(struct k_thread *thread)
		/* add current to end of queue means "yield" */
		_current_cpu->swap_ok = true;
	}
-#endif
+#endif /* CONFIG_SMP */
}

static ALWAYS_INLINE void dequeue_thread(struct k_thread *thread)
@@ -266,7 +266,7 @@ static void signal_pending_ipi(void)
			arch_sched_ipi();
		}
	}
-#endif
+#endif /* CONFIG_SMP && CONFIG_SCHED_IPI_SUPPORTED */
}

#ifdef CONFIG_SMP
@@ -296,7 +296,7 @@ static inline bool is_halting(struct k_thread *thread)
	return (thread->base.thread_state &
		(_THREAD_ABORTING | _THREAD_SUSPENDING)) != 0U;
}
-#endif
+#endif /* CONFIG_SMP */

/* Clear the halting bits (_THREAD_ABORTING and _THREAD_SUSPENDING) */
static inline void clear_halting(struct k_thread *thread)
@@ -311,7 +311,7 @@ static ALWAYS_INLINE struct k_thread *next_up(void)
		halt_thread(_current, is_aborting(_current) ?
				      _THREAD_DEAD : _THREAD_SUSPENDED);
	}
-#endif
+#endif /* CONFIG_SMP */

	struct k_thread *thread = runq_best();
@@ -332,6 +332,9 @@ static ALWAYS_INLINE struct k_thread *next_up(void)
		}
	}
#endif
+/* CONFIG_NUM_METAIRQ_PRIORITIES > 0 &&
+ * CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES
+ */

#ifndef CONFIG_SMP
	/* In uniprocessor mode, we can leave the current thread in
@@ -386,7 +389,7 @@ static ALWAYS_INLINE struct k_thread *next_up(void)
	_current_cpu->swap_ok = false;
	return thread;
-#endif
+#endif /* CONFIG_SMP */
}

static void move_thread_to_end_of_prio_q(struct k_thread *thread)
@@ -404,7 +407,7 @@ static void flag_ipi(void)
	if (arch_num_cpus() > 1) {
		_kernel.pending_ipi = true;
	}
-#endif
+#endif /* CONFIG_SMP && CONFIG_SCHED_IPI_SUPPORTED */
}

#ifdef CONFIG_TIMESLICING
@@ -421,7 +424,7 @@ static bool slice_expired[CONFIG_MP_MAX_NUM_CPUS];
 * a noop condition in z_time_slice().
 */
static struct k_thread *pending_current;
-#endif
+#endif /* CONFIG_SWAP_NONATOMIC */

static inline int slice_time(struct k_thread *thread)
{
@@ -433,7 +436,7 @@ static inline int slice_time(struct k_thread *thread)
	}
#else
	ARG_UNUSED(thread);
-#endif
+#endif /* CONFIG_TIMESLICE_PER_THREAD */
	return ret;
}
@@ -447,7 +450,7 @@ static inline bool sliceable(struct k_thread *thread)
#ifdef CONFIG_TIMESLICE_PER_THREAD
	ret |= thread->base.slice_ticks != 0;
-#endif
+#endif /* CONFIG_TIMESLICE_PER_THREAD */
	return ret;
}
@@ -498,7 +501,7 @@ void k_thread_time_slice_set(struct k_thread *thread, int32_t thread_slice_ticks
		thread->base.slice_data = data;
	}
}
-#endif
+#endif /* CONFIG_TIMESLICE_PER_THREAD */

/* Called out of each timer interrupt */
void z_time_slice(void)
@@ -513,7 +516,7 @@ void z_time_slice(void)
		return;
	}
	pending_current = NULL;
-#endif
+#endif /* CONFIG_SWAP_NONATOMIC */
	if (slice_expired[_current_cpu->id] && sliceable(curr)) {
#ifdef CONFIG_TIMESLICE_PER_THREAD
@@ -522,7 +525,7 @@ void z_time_slice(void)
			curr->base.slice_expired(curr, curr->base.slice_data);
			key = k_spin_lock(&_sched_spinlock);
		}
-#endif
+#endif /* CONFIG_TIMESLICE_PER_THREAD */
		if (!z_is_thread_prevented_from_running(curr)) {
			move_thread_to_end_of_prio_q(curr);
		}
@@ -530,7 +533,7 @@ void z_time_slice(void)
	}
	k_spin_unlock(&_sched_spinlock, key);
}
-#endif
+#endif /* CONFIG_TIMESLICING */

/* Track cooperative threads preempted by metairqs so we can return to
 * them specifically. Called at the moment a new thread has been
@@ -551,6 +554,9 @@ static void update_metairq_preempt(struct k_thread *thread)
#else
	ARG_UNUSED(thread);
#endif
+/* CONFIG_NUM_METAIRQ_PRIORITIES > 0 &&
+ * CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES
+ */
}

static void update_cache(int preempt_ok)
@@ -563,7 +569,7 @@ static void update_cache(int preempt_ok)
		if (thread != _current) {
			z_reset_time_slice(thread);
		}
-#endif
+#endif /* CONFIG_TIMESLICING */
		update_metairq_preempt(thread);
		_kernel.ready_q.cache = thread;
	} else {
@@ -578,7 +584,7 @@ static void update_cache(int preempt_ok)
	 * reason the scheduler will make the same decision anyway.
	 */
	_current_cpu->swap_ok = preempt_ok;
-#endif
+#endif /* CONFIG_SMP */
}

static bool thread_active_elsewhere(struct k_thread *thread)
@@ -598,7 +604,7 @@ static bool thread_active_elsewhere(struct k_thread *thread)
			return true;
		}
	}
-#endif
+#endif /* CONFIG_SMP */
	ARG_UNUSED(thread);
	return false;
}
@@ -607,7 +613,7 @@ static void ready_thread(struct k_thread *thread)
{
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(thread));
-#endif
+#endif /* CONFIG_KERNEL_COHERENCE */
	/* If thread is queued already, do not try and add it to the
	 * run queue again
@@ -693,7 +699,7 @@ static void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key,
		 */
#ifdef CONFIG_SCHED_IPI_SUPPORTED
		arch_sched_ipi();
-#endif
+#endif /* CONFIG_SCHED_IPI_SUPPORTED */
	}
	if (is_halting(thread) && (thread != _current)) {
@@ -719,7 +725,7 @@ static void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key,
		}
		return; /* lock has been released */
	}
-#endif
+#endif /* CONFIG_SMP */
	halt_thread(thread, terminate ? _THREAD_DEAD : _THREAD_SUSPENDED);
	if ((thread == _current) && !arch_is_in_isr()) {
		z_swap(&_sched_spinlock, key);
@@ -757,7 +763,7 @@ static inline void z_vrfy_k_thread_suspend(struct k_thread *thread)
	z_impl_k_thread_suspend(thread);
}
#include <syscalls/k_thread_suspend_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */

void z_impl_k_thread_resume(struct k_thread *thread)
{
@@ -786,7 +792,7 @@ static inline void z_vrfy_k_thread_resume(struct k_thread *thread)
	z_impl_k_thread_resume(thread);
}
#include <syscalls/k_thread_resume_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */

static _wait_q_t *pended_on_thread(struct k_thread *thread)
{
@@ -829,7 +835,7 @@ static void pend_locked(struct k_thread *thread, _wait_q_t *wait_q,
{
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(wait_q == NULL || arch_mem_coherent(wait_q));
-#endif
+#endif /* CONFIG_KERNEL_COHERENCE */
	add_to_waitq_locked(thread, wait_q);
	add_thread_timeout(thread, timeout);
}
@@ -873,7 +879,7 @@ void z_sched_wake_thread(struct k_thread *thread, bool is_timeout)
		if (do_nothing) {
			continue;
		}
-#endif
+#endif /* CONFIG_EVENTS */
		if (!killed) {
			/* The thread is not being killed */
@@ -899,14 +905,14 @@ void z_thread_timeout(struct _timeout *timeout)
	z_sched_wake_thread(thread, true);
}
-#endif
+#endif /* CONFIG_SYS_CLOCK_EXISTS */

int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
		_wait_q_t *wait_q, k_timeout_t timeout)
{
#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
	pending_current = _current;
-#endif
+#endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
	__ASSERT_NO_MSG(sizeof(_sched_spinlock) == 0 || lock != &_sched_spinlock);
	/* We do a "lock swap" prior to calling z_swap(), such that
@@ -994,7 +1000,7 @@ static inline bool resched(uint32_t key)
{
#ifdef CONFIG_SMP
	_current_cpu->swap_ok = 0;
-#endif
+#endif /* CONFIG_SMP */
	return arch_irq_unlocked(key) && !arch_is_in_isr();
}
@@ -1014,7 +1020,7 @@ static inline bool need_swap(void)
	/* Check if the next ready thread is the same as the current thread */
	new_thread = _kernel.ready_q.cache;
	return new_thread != _current;
-#endif
+#endif /* CONFIG_SMP */
}

void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key)
@@ -1079,7 +1085,7 @@ struct k_thread *z_swap_next_thread(void)
	return ret;
#else
	return _kernel.ready_q.cache;
-#endif
+#endif /* CONFIG_SMP */
}

#ifdef CONFIG_USE_SWITCH
@@ -1148,7 +1154,7 @@ void *z_get_next_switch_handle(void *interrupted)
#ifdef CONFIG_TIMESLICING
			z_reset_time_slice(new_thread);
-#endif
+#endif /* CONFIG_TIMESLICING */
#ifdef CONFIG_SPIN_VALIDATE
			/* Changed _current! Update the spinlock
@@ -1157,7 +1163,7 @@ void *z_get_next_switch_handle(void *interrupted)
			 * release the lock.
			 */
			z_spin_lock_set_owner(&_sched_spinlock);
-#endif
+#endif /* CONFIG_SPIN_VALIDATE */
			/* A queued (runnable) old/current thread
			 * needs to be added back to the run queue
@@ -1183,9 +1189,9 @@ void *z_get_next_switch_handle(void *interrupted)
	_current->switch_handle = interrupted;
	set_current(_kernel.ready_q.cache);
	return _current->switch_handle;
-#endif
+#endif /* CONFIG_SMP */
}
-#endif
+#endif /* CONFIG_USE_SWITCH */

int z_unpend_all(_wait_q_t *wait_q)
{
@@ -1226,7 +1232,7 @@ void z_sched_init(void)
	}
#else
	init_ready_q(&_kernel.ready_q);
-#endif
+#endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
}

int z_impl_k_thread_priority_get(k_tid_t thread)
@@ -1241,7 +1247,7 @@ static inline int z_vrfy_k_thread_priority_get(k_tid_t thread)
	return z_impl_k_thread_priority_get(thread);
}
#include <syscalls/k_thread_priority_get_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */

void z_impl_k_thread_priority_set(k_tid_t thread, int prio)
{
@@ -1270,11 +1276,11 @@ static inline void z_vrfy_k_thread_priority_set(k_tid_t thread, int prio)
	K_OOPS(K_SYSCALL_VERIFY_MSG((int8_t)prio >= thread->base.prio,
				    "thread priority may only be downgraded (%d < %d)",
				    prio, thread->base.prio));
-#endif
+#endif /* CONFIG_USERSPACE_THREAD_MAY_RAISE_PRIORITY */
	z_impl_k_thread_priority_set(thread, prio);
}
#include <syscalls/k_thread_priority_set_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_SCHED_DEADLINE
void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline)
@@ -1312,8 +1318,8 @@ static inline void z_vrfy_k_thread_deadline_set(k_tid_t tid, int deadline)
	z_impl_k_thread_deadline_set((k_tid_t)thread, deadline);
}
#include <syscalls/k_thread_deadline_set_mrsh.c>
-#endif
-#endif
+#endif /* CONFIG_USERSPACE */
+#endif /* CONFIG_SCHED_DEADLINE */

bool k_can_yield(void)
{
@@ -1344,7 +1350,7 @@ static inline void z_vrfy_k_yield(void)
	z_impl_k_yield();
}
#include <syscalls/k_yield_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */

static int32_t z_tick_sleep(k_ticks_t ticks)
{
@@ -1360,7 +1366,7 @@ static int32_t z_tick_sleep(k_ticks_t ticks)
		k_yield();
		return 0;
	}
-#endif
+#endif /* CONFIG_MULTITHREADING */
	if (Z_TICK_ABS(ticks) <= 0) {
		expected_wakeup_ticks = ticks + sys_clock_tick_get_32();
@@ -1374,7 +1380,7 @@ static int32_t z_tick_sleep(k_ticks_t ticks)
#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
	pending_current = _current;
-#endif
+#endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
	unready_thread(_current);
	z_add_thread_timeout(_current, timeout);
	z_mark_thread_as_suspended(_current);
@@ -1390,7 +1396,7 @@ static int32_t z_tick_sleep(k_ticks_t ticks)
#else
	/* busy wait to be time coherent since subsystems may depend on it */
	z_impl_k_busy_wait(k_ticks_to_us_ceil32(expected_wakeup_ticks));
-#endif
+#endif /* CONFIG_MULTITHREADING */
	return 0;
}
@@ -1410,7 +1416,7 @@ int32_t z_impl_k_sleep(k_timeout_t timeout)
#else
		/* In Single Thread, just wait for an interrupt saving power */
		k_cpu_idle();
-#endif
+#endif /* CONFIG_MULTITHREADING */
		SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, (int32_t) K_TICKS_FOREVER);
		return (int32_t) K_TICKS_FOREVER;
@@ -1433,7 +1439,7 @@ static inline int32_t z_vrfy_k_sleep(k_timeout_t timeout)
	return z_impl_k_sleep(timeout);
}
#include <syscalls/k_sleep_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */

int32_t z_impl_k_usleep(int us)
{
@@ -1457,7 +1463,7 @@ static inline int32_t z_vrfy_k_usleep(int us)
	return z_impl_k_usleep(us);
}
#include <syscalls/k_usleep_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */

void z_impl_k_wakeup(k_tid_t thread)
{
@@ -1491,7 +1497,7 @@ void z_impl_k_wakeup(k_tid_t thread)
#ifdef CONFIG_TRACE_SCHED_IPI
extern void z_trace_sched_ipi(void);
-#endif
+#endif /* CONFIG_TRACE_SCHED_IPI */

#ifdef CONFIG_SMP
void z_sched_ipi(void)
@@ -1501,15 +1507,15 @@ void z_sched_ipi(void)
	 */
#ifdef CONFIG_TRACE_SCHED_IPI
	z_trace_sched_ipi();
-#endif
+#endif /* CONFIG_TRACE_SCHED_IPI */
#ifdef CONFIG_TIMESLICING
	if (sliceable(_current)) {
		z_time_slice();
	}
-#endif
+#endif /* CONFIG_TIMESLICING */
}
-#endif
+#endif /* CONFIG_SMP */

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_wakeup(k_tid_t thread)
@@ -1518,7 +1524,7 @@ static inline void z_vrfy_k_wakeup(k_tid_t thread)
	z_impl_k_wakeup(thread);
}
#include <syscalls/k_wakeup_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */

k_tid_t z_impl_k_sched_current_thread_query(void)
{
@@ -1528,13 +1534,13 @@ k_tid_t z_impl_k_sched_current_thread_query(void)
	 * local interrupts when reading it.
	 */
	unsigned int k = arch_irq_lock();
-#endif
+#endif /* CONFIG_SMP */
	k_tid_t ret = _current_cpu->current;
#ifdef CONFIG_SMP
	arch_irq_unlock(k);
-#endif
+#endif /* CONFIG_SMP */
	return ret;
}
@@ -1544,7 +1550,7 @@ static inline k_tid_t z_vrfy_k_sched_current_thread_query(void)
	return z_impl_k_sched_current_thread_query();
}
#include <syscalls/k_sched_current_thread_query_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */

int z_impl_k_is_preempt_thread(void)
{
@@ -1557,7 +1563,7 @@ static inline int z_vrfy_k_is_preempt_thread(void)
	return z_impl_k_is_preempt_thread();
}
#include <syscalls/k_is_preempt_thread_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */

static inline void unpend_all(_wait_q_t *wait_q)
{
@@ -1573,7 +1579,7 @@ static inline void unpend_all(_wait_q_t *wait_q)
#ifdef CONFIG_THREAD_ABORT_HOOK
extern void thread_abort_hook(struct k_thread *thread);
-#endif
+#endif /* CONFIG_THREAD_ABORT_HOOK */

/**
 * @brief Dequeues the specified thread
@@ -1604,7 +1610,7 @@ static void halt_thread(struct k_thread *thread, uint8_t new_state)
		}
#ifdef CONFIG_SMP
		unpend_all(&thread->halt_queue);
-#endif
+#endif /* CONFIG_SMP */
		update_cache(1);
		if (new_state == _THREAD_SUSPENDED) {
@@ -1613,28 +1619,28 @@ static void halt_thread(struct k_thread *thread, uint8_t new_state)
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
			arch_float_disable(thread);
-#endif
+#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
			SYS_PORT_TRACING_FUNC(k_thread, sched_abort, thread);
			z_thread_monitor_exit(thread);
#ifdef CONFIG_THREAD_ABORT_HOOK
			thread_abort_hook(thread);
-#endif
+#endif /* CONFIG_THREAD_ABORT_HOOK */
#ifdef CONFIG_OBJ_CORE_THREAD
#ifdef CONFIG_OBJ_CORE_STATS_THREAD
			k_obj_core_stats_deregister(K_OBJ_CORE(thread));
-#endif
+#endif /* CONFIG_OBJ_CORE_STATS_THREAD */
			k_obj_core_unlink(K_OBJ_CORE(thread));
-#endif
+#endif /* CONFIG_OBJ_CORE_THREAD */
#ifdef CONFIG_USERSPACE
			z_mem_domain_exit_thread(thread);
			k_thread_perms_all_clear(thread);
			k_object_uninit(thread->stack_obj);
			k_object_uninit(thread);
-#endif
+#endif /* CONFIG_USERSPACE */
		}
	}
@@ -1666,7 +1672,7 @@ void z_impl_k_thread_abort(struct k_thread *thread)
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, abort, thread);
}
-#endif
+#endif /* !CONFIG_ARCH_HAS_THREAD_ABORT */

int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout)
{
@@ -1723,7 +1729,7 @@ static bool thread_obj_validate(struct k_thread *thread)
	default:
#ifdef CONFIG_LOG
		k_object_dump_error(ret, thread, ko, K_OBJ_THREAD);
-#endif
+#endif /* CONFIG_LOG */
		K_OOPS(K_SYSCALL_VERIFY_MSG(ret, "access denied"));
	}
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */