@@ -34,110 +34,77 @@ void z_thread_monitor_exit(struct k_thread *thread)
 	k_spin_unlock(&z_thread_monitor_lock, key);
 }
 
-void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data)
+/*
+ * Helper function to iterate over threads with optional filtering and locking behavior.
+ */
+static void thread_foreach_helper(k_thread_user_cb_t user_cb, void *user_data,
+				  bool unlocked, bool filter_by_cpu, unsigned int cpu)
 {
 	struct k_thread *thread;
 	k_spinlock_key_t key;
 
 	__ASSERT(user_cb != NULL, "user_cb can not be NULL");
 
 	/*
 	 * Lock is needed to make sure that the _kernel.threads is not being
 	 * modified by the user_cb either directly or indirectly.
 	 * The indirect ways are through calling k_thread_create and
 	 * k_thread_abort from user_cb.
 	 */
-	key = k_spin_lock(&z_thread_monitor_lock);
+	if (filter_by_cpu) {
+		__ASSERT(cpu < CONFIG_MP_MAX_NUM_CPUS, "cpu filter out of bounds");
+	}
 
-	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach);
-
+	key = k_spin_lock(&z_thread_monitor_lock);
+
 	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
-		user_cb(thread, user_data);
-	}
+		/* cpu is only defined when SMP=y */
+#ifdef CONFIG_SMP
+		bool on_cpu = (thread->base.cpu == cpu);
+#else
+		bool on_cpu = false;
+#endif
+		if (filter_by_cpu && !on_cpu) {
+			continue;
+		}
 
-	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach);
+		if (unlocked) {
+			k_spin_unlock(&z_thread_monitor_lock, key);
+			user_cb(thread, user_data);
+			key = k_spin_lock(&z_thread_monitor_lock);
+		} else {
+			user_cb(thread, user_data);
+		}
+	}
 
 	k_spin_unlock(&z_thread_monitor_lock, key);
 }
 
-void k_thread_foreach_unlocked(k_thread_user_cb_t user_cb, void *user_data)
+/*
+ * Public API functions using the helper.
+ */
+void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data)
 {
-	struct k_thread *thread;
-	k_spinlock_key_t key;
-
-	__ASSERT(user_cb != NULL, "user_cb can not be NULL");
-
-	key = k_spin_lock(&z_thread_monitor_lock);
-
+	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach);
+	thread_foreach_helper(user_cb, user_data, false, false, 0);
+	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach);
+}
+
+void k_thread_foreach_unlocked(k_thread_user_cb_t user_cb, void *user_data)
+{
 	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach_unlocked);
-
-	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
-		k_spin_unlock(&z_thread_monitor_lock, key);
-		user_cb(thread, user_data);
-		key = k_spin_lock(&z_thread_monitor_lock);
-	}
-
+	thread_foreach_helper(user_cb, user_data, true, false, 0);
 	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach_unlocked);
-
-	k_spin_unlock(&z_thread_monitor_lock, key);
 }
 
 #ifdef CONFIG_SMP
 void k_thread_foreach_filter_by_cpu(unsigned int cpu, k_thread_user_cb_t user_cb,
 				    void *user_data)
 {
-	struct k_thread *thread;
-	k_spinlock_key_t key;
-
-	__ASSERT(user_cb != NULL, "user_cb can not be NULL");
-	__ASSERT(cpu < CONFIG_MP_MAX_NUM_CPUS, "cpu filter out of bounds");
-
-	/*
-	 * Lock is needed to make sure that the _kernel.threads is not being
-	 * modified by the user_cb either directly or indirectly.
-	 * The indirect ways are through calling k_thread_create and
-	 * k_thread_abort from user_cb.
-	 */
-	key = k_spin_lock(&z_thread_monitor_lock);
-
 	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach);
-
-	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
-		if (thread->base.cpu == cpu) {
-			user_cb(thread, user_data);
-		}
-	}
-
+	thread_foreach_helper(user_cb, user_data, false, true, cpu);
 	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach);
-
-	k_spin_unlock(&z_thread_monitor_lock, key);
 }
 
 void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu, k_thread_user_cb_t user_cb,
 					     void *user_data)
 {
-	struct k_thread *thread;
-	k_spinlock_key_t key;
-
-	__ASSERT(user_cb != NULL, "user_cb can not be NULL");
-	__ASSERT(cpu < CONFIG_MP_MAX_NUM_CPUS, "cpu filter out of bounds");
-
-	key = k_spin_lock(&z_thread_monitor_lock);
-
 	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach_unlocked);
-
-	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
-		if (thread->base.cpu == cpu) {
-			k_spin_unlock(&z_thread_monitor_lock, key);
-			user_cb(thread, user_data);
-			key = k_spin_lock(&z_thread_monitor_lock);
-		}
-	}
-
+	thread_foreach_helper(user_cb, user_data, true, true, cpu);
 	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach_unlocked);
-
-	k_spin_unlock(&z_thread_monitor_lock, key);
 }
 #endif /* CONFIG_SMP */
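
For review context, a minimal usage sketch (not part of the patch) of how callers drive the consolidated iterators. The callback and function names count_thread/count_all_threads are hypothetical; the k_thread_foreach()/k_thread_foreach_filter_by_cpu() signatures are the existing Zephyr ones, and both APIs assume CONFIG_THREAD_MONITOR=y:

#include <zephyr/kernel.h>

/* Hypothetical callback: counts threads. It runs under
 * z_thread_monitor_lock, so it must stay short and must not call
 * k_thread_create() or k_thread_abort().
 */
static void count_thread(const struct k_thread *thread, void *user_data)
{
	unsigned int *count = user_data;

	ARG_UNUSED(thread);
	(*count)++;
}

void count_all_threads(void)
{
	unsigned int total = 0;

	/* Locked traversal of every thread in _kernel.threads. */
	k_thread_foreach(count_thread, &total);

#ifdef CONFIG_SMP
	unsigned int on_cpu0 = 0;

	/* Same traversal, restricted to threads whose recorded CPU is 0. */
	k_thread_foreach_filter_by_cpu(0, count_thread, &on_cpu0);
#endif
}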