Browse Source

kernel: Fix sloppy wait queue API

There were multiple spots where code was using the _wait_q_t
abstraction as a synonym for a dlist and doing direct list management
on them with the dlist APIs.  Refactor _wait_q_t into a proper opaque
struct (not a typedef for sys_dlist_t) and write a simple wrapper API
for the existing usages.  Now replacement of wait_q with a different
data structure is much cleaner.

Note that there were some SYS_DLIST_FOR_EACH_SAFE loops in mailbox.c
that got replaced by the normal/non-safe macro.  While these loops do
mutate the list in the code body, they always do an early return in
those circumstances instead of returning into the macro'd for() loop,
so the _SAFE usage was needless.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
pull/7639/merge
Andy Ross 7 years ago committed by Anas Nashif
parent
commit
ccf3bf7ed3
  1. 28
      include/kernel.h
  2. 9
      include/posix/pthread.h
  3. 2
      kernel/include/ksched.h
  4. 13
      kernel/include/wait_q.h
  5. 15
      kernel/mailbox.c
  6. 2
      kernel/mem_slab.c
  7. 4
      kernel/mempool.c
  8. 4
      kernel/msg_q.c
  9. 5
      kernel/mutex.c
  10. 13
      kernel/pipes.c
  11. 2
      kernel/queue.c
  12. 11
      kernel/sched.c
  13. 2
      kernel/sem.c
  14. 4
      kernel/stack.c
  15. 4
      kernel/timer.c
  16. 2
      lib/posix/pthread_barrier.c
  17. 2
      lib/posix/pthread_cond.c
  18. 5
      subsys/logging/event_logger.c

28
include/kernel.h

@@ -91,7 +91,11 @@ extern "C" {
#define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO)
#define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)
typedef sys_dlist_t _wait_q_t;
typedef struct {
sys_dlist_t waitq;
} _wait_q_t;
#define _WAIT_Q_INIT(wait_q) { SYS_DLIST_STATIC_INIT(&(wait_q)->waitq) }
#ifdef CONFIG_OBJECT_TRACING
#define _OBJECT_TRACING_NEXT_PTR(type) struct type *__next
@@ -1306,7 +1310,7 @@ struct k_timer {
.timeout.wait_q = NULL, \
.timeout.thread = NULL, \
.timeout.func = _timer_expiration_handler, \
.wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \
.wait_q = _WAIT_Q_INIT(&obj.wait_q), \
.expiry_fn = expiry, \
.stop_fn = stop, \
.status = 0, \
@@ -1648,7 +1652,7 @@ struct k_queue {
#define _K_QUEUE_INITIALIZER(obj) \
{ \
.data_q = SYS_SLIST_STATIC_INIT(&obj.data_q), \
.wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \
.wait_q = _WAIT_Q_INIT(&obj.wait_q), \
_POLL_EVENT_OBJ_INIT(obj) \
_OBJECT_TRACING_INIT \
}
@@ -2239,7 +2243,7 @@ struct k_stack {
#define _K_STACK_INITIALIZER(obj, stack_buffer, stack_num_entries) \
{ \
.wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \
.wait_q = _WAIT_Q_INIT(&obj.wait_q), \
.base = stack_buffer, \
.next = stack_buffer, \
.top = stack_buffer + stack_num_entries, \
@@ -2677,7 +2681,7 @@ struct k_mutex {
#define _K_MUTEX_INITIALIZER(obj) \
{ \
.wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \
.wait_q = _WAIT_Q_INIT(&obj.wait_q), \
.owner = NULL, \
.lock_count = 0, \
.owner_orig_prio = K_LOWEST_THREAD_PRIO, \
@@ -2778,7 +2782,7 @@ struct k_sem {
#define _K_SEM_INITIALIZER(obj, initial_count, count_limit) \
{ \
.wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \
.wait_q = _WAIT_Q_INIT(&obj.wait_q), \
.count = initial_count, \
.limit = count_limit, \
_POLL_EVENT_OBJ_INIT(obj) \
@@ -3063,7 +3067,7 @@ struct k_msgq {
#define _K_MSGQ_INITIALIZER(obj, q_buffer, q_msg_size, q_max_msgs) \
{ \
.wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \
.wait_q = _WAIT_Q_INIT(&obj.wait_q), \
.max_msgs = q_max_msgs, \
.msg_size = q_msg_size, \
.buffer_start = q_buffer, \
@@ -3333,8 +3337,8 @@ struct k_mbox {
#define _K_MBOX_INITIALIZER(obj) \
{ \
.tx_msg_queue = SYS_DLIST_STATIC_INIT(&obj.tx_msg_queue), \
.rx_msg_queue = SYS_DLIST_STATIC_INIT(&obj.rx_msg_queue), \
.tx_msg_queue = _WAIT_Q_INIT(&obj.tx_msg_queue), \
.rx_msg_queue = _WAIT_Q_INIT(&obj.rx_msg_queue), \
_OBJECT_TRACING_INIT \
}
@@ -3512,8 +3516,8 @@ struct k_pipe {
.bytes_used = 0, \
.read_index = 0, \
.write_index = 0, \
.wait_q.writers = SYS_DLIST_STATIC_INIT(&obj.wait_q.writers), \
.wait_q.readers = SYS_DLIST_STATIC_INIT(&obj.wait_q.readers), \
.wait_q.writers = _WAIT_Q_INIT(&obj.wait_q.writers), \
.wait_q.readers = _WAIT_Q_INIT(&obj.wait_q.readers), \
_OBJECT_TRACING_INIT \
}
@@ -3674,7 +3678,7 @@ struct k_mem_slab {
#define _K_MEM_SLAB_INITIALIZER(obj, slab_buffer, slab_block_size, \
slab_num_blocks) \
{ \
.wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \
.wait_q = _WAIT_Q_INIT(&obj.wait_q), \
.num_blocks = slab_num_blocks, \
.block_size = slab_block_size, \
.buffer = slab_buffer, \

9
include/posix/pthread.h

@@ -8,6 +8,7 @@
#define __PTHREAD_H__
#include <kernel.h>
#include <wait_q.h>
#include <posix/time.h>
#include <posix/unistd.h>
#include "sys/types.h"
@@ -63,7 +64,7 @@ struct posix_thread {
*/
#define PTHREAD_COND_DEFINE(name) \
struct pthread_cond name = { \
.wait_q = SYS_DLIST_STATIC_INIT(&name.wait_q), \
.wait_q = _WAIT_Q_INIT(&name.wait_q), \
}
/**
@@ -75,7 +76,7 @@ static inline int pthread_cond_init(pthread_cond_t *cv,
const pthread_condattr_t *att)
{
ARG_UNUSED(att);
sys_dlist_init(&cv->wait_q);
_waitq_init(&cv->wait_q);
return 0;
}
@@ -284,7 +285,7 @@ static inline int pthread_mutexattr_destroy(pthread_mutexattr_t *m)
*/
#define PTHREAD_BARRIER_DEFINE(name, count) \
struct pthread_barrier name = { \
.wait_q = SYS_DLIST_STATIC_INIT(&name.wait_q), \
.wait_q = _WAIT_Q_INIT(&name.wait_q), \
.max = count, \
}
@@ -308,7 +309,7 @@ static inline int pthread_barrier_init(pthread_barrier_t *b,
b->max = count;
b->count = 0;
sys_dlist_init(&b->wait_q);
_waitq_init(&b->wait_q);
return 0;
}

2
kernel/include/ksched.h

@@ -43,7 +43,7 @@ void _pend_thread(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout);
int _reschedule(int key);
struct k_thread *_unpend_first_thread(_wait_q_t *wait_q);
void _unpend_thread(struct k_thread *thread);
void _unpend_all(_wait_q_t *wait_q);
int _unpend_all(_wait_q_t *wait_q);
void _thread_priority_set(struct k_thread *thread, int prio);
void *_get_next_switch_handle(void *interrupted);
struct k_thread *_find_first_thread_to_unpend(_wait_q_t *wait_q,

13
kernel/include/wait_q.h

@@ -42,7 +42,18 @@ static ALWAYS_INLINE int _abort_thread_timeout(struct k_thread *thread)
#define _get_next_timeout_expiry() (K_FOREVER)
#endif
#define _WAIT_Q_INIT(wait_q) SYS_DLIST_STATIC_INIT(wait_q)
#define _WAIT_Q_FOR_EACH(wq, thread_ptr) \
SYS_DLIST_FOR_EACH_CONTAINER(&((wq)->waitq), thread_ptr, base.k_q_node)
static inline void _waitq_init(_wait_q_t *w)
{
sys_dlist_init(&w->waitq);
}
static inline struct k_thread *_waitq_head(_wait_q_t *w)
{
return (void *)sys_dlist_peek_head(&w->waitq);
}
#ifdef __cplusplus
}

15
kernel/mailbox.c

@@ -103,8 +103,8 @@ SYS_INIT(init_mbox_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
void k_mbox_init(struct k_mbox *mbox_ptr)
{
sys_dlist_init(&mbox_ptr->tx_msg_queue);
sys_dlist_init(&mbox_ptr->rx_msg_queue);
_waitq_init(&mbox_ptr->tx_msg_queue);
_waitq_init(&mbox_ptr->rx_msg_queue);
SYS_TRACING_OBJ_INIT(k_mbox, mbox_ptr);
}
@@ -239,7 +239,7 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
s32_t timeout)
{
struct k_thread *sending_thread;
struct k_thread *receiving_thread, *next;
struct k_thread *receiving_thread;
struct k_mbox_msg *rx_msg;
unsigned int key;
@@ -253,8 +253,7 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
/* search mailbox's rx queue for a compatible receiver */
key = irq_lock();
SYS_DLIST_FOR_EACH_CONTAINER_SAFE(&mbox->rx_msg_queue, receiving_thread,
next, base.k_q_node) {
_WAIT_Q_FOR_EACH(&mbox->rx_msg_queue, receiving_thread) {
rx_msg = (struct k_mbox_msg *)receiving_thread->base.swap_data;
if (mbox_message_match(tx_msg, rx_msg) == 0) {
@@ -421,7 +420,7 @@ static int mbox_message_data_check(struct k_mbox_msg *rx_msg, void *buffer)
int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
s32_t timeout)
{
struct k_thread *sending_thread, *next;
struct k_thread *sending_thread;
struct k_mbox_msg *tx_msg;
unsigned int key;
int result;
@@ -432,9 +431,7 @@ int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
/* search mailbox's tx queue for a compatible sender */
key = irq_lock();
SYS_DLIST_FOR_EACH_CONTAINER_SAFE(&mbox->tx_msg_queue, sending_thread,
next, base.k_q_node) {
_WAIT_Q_FOR_EACH(&mbox->tx_msg_queue, sending_thread) {
tx_msg = (struct k_mbox_msg *)sending_thread->base.swap_data;
if (mbox_message_match(tx_msg, rx_msg) == 0) {

2
kernel/mem_slab.c

@@ -77,7 +77,7 @@ void k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
slab->buffer = buffer;
slab->num_used = 0;
create_free_list(slab);
sys_dlist_init(&slab->wait_q);
_waitq_init(&slab->wait_q);
SYS_TRACING_OBJ_INIT(k_mem_slab, slab);
_k_object_init(slab);

4
kernel/mempool.c

@@ -29,7 +29,7 @@ static int pool_id(struct k_mem_pool *pool)
static void k_mem_pool_init(struct k_mem_pool *p)
{
sys_dlist_init(&p->wait_q);
_waitq_init(&p->wait_q);
_sys_mem_pool_base_init(&p->base);
}
@@ -99,7 +99,7 @@ void k_mem_pool_free_id(struct k_mem_block_id *id)
*/
key = irq_lock();
_unpend_all(&p->wait_q);
need_sched = _unpend_all(&p->wait_q);
if (need_sched && !_is_in_isr()) {
_reschedule(key);

4
kernel/msg_q.c

@@ -58,7 +58,7 @@ void k_msgq_init(struct k_msgq *q, char *buffer, size_t msg_size,
q->write_ptr = buffer;
q->used_msgs = 0;
q->flags = 0;
sys_dlist_init(&q->wait_q);
_waitq_init(&q->wait_q);
SYS_TRACING_OBJ_INIT(k_msgq, q);
_k_object_init(q);
@@ -99,7 +99,7 @@ Z_SYSCALL_HANDLER(k_msgq_alloc_init, q, msg_size, max_msgs)
void k_msgq_cleanup(struct k_msgq *q)
{
__ASSERT_NO_MSG(sys_dlist_is_empty(&q->wait_q));
__ASSERT_NO_MSG(sys_dlist_is_empty(&q->wait_q.waitq));
if (q->flags & K_MSGQ_FLAG_ALLOC) {
k_free(q->buffer_start);

5
kernel/mutex.c

@@ -75,7 +75,7 @@ void _impl_k_mutex_init(struct k_mutex *mutex)
/* initialized upon first use */
/* mutex->owner_orig_prio = 0; */
sys_dlist_init(&mutex->wait_q);
_waitq_init(&mutex->wait_q);
SYS_TRACING_OBJ_INIT(k_mutex, mutex);
_k_object_init(mutex);
@@ -173,8 +173,7 @@ int _impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout)
K_DEBUG("%p timeout on mutex %p\n", _current, mutex);
struct k_thread *waiter =
(struct k_thread *)sys_dlist_peek_head(&mutex->wait_q);
struct k_thread *waiter = _waitq_head(&mutex->wait_q);
new_prio = mutex->owner_orig_prio;
new_prio = waiter ? new_prio_for_inheritance(waiter->base.prio,

13
kernel/pipes.c

@@ -136,8 +136,8 @@ void k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size)
pipe->read_index = 0;
pipe->write_index = 0;
pipe->flags = 0;
sys_dlist_init(&pipe->wait_q.writers);
sys_dlist_init(&pipe->wait_q.readers);
_waitq_init(&pipe->wait_q.writers);
_waitq_init(&pipe->wait_q.readers);
SYS_TRACING_OBJ_INIT(k_pipe, pipe);
_k_object_init(pipe);
}
@@ -175,8 +175,8 @@ Z_SYSCALL_HANDLER(k_pipe_alloc_init, pipe, size)
void k_pipe_cleanup(struct k_pipe *pipe)
{
__ASSERT_NO_MSG(sys_dlist_is_empty(&pipe->wait_q.readers));
__ASSERT_NO_MSG(sys_dlist_is_empty(&pipe->wait_q.writers));
__ASSERT_NO_MSG(sys_dlist_is_empty(&pipe->wait_q.readers.waitq));
__ASSERT_NO_MSG(sys_dlist_is_empty(&pipe->wait_q.writers.waitq));
if (pipe->flags & K_PIPE_FLAG_ALLOC) {
k_free(pipe->buffer);
@@ -322,8 +322,7 @@ static bool pipe_xfer_prepare(sys_dlist_t *xfer_list,
size_t num_bytes = 0;
if (timeout == K_NO_WAIT) {
for (node = sys_dlist_peek_head(wait_q); node != NULL;
node = sys_dlist_peek_next(wait_q, node)) {
SYS_DLIST_FOR_EACH_NODE(&wait_q->waitq, node) {
thread = (struct k_thread *)node;
desc = (struct k_pipe_desc *)thread->base.swap_data;
@@ -347,7 +346,7 @@ static bool pipe_xfer_prepare(sys_dlist_t *xfer_list,
sys_dlist_init(xfer_list);
num_bytes = 0;
while ((thread = (struct k_thread *) sys_dlist_peek_head(wait_q))) {
while ((thread = _waitq_head(wait_q))) {
desc = (struct k_pipe_desc *)thread->base.swap_data;
num_bytes += desc->bytes_to_xfer;

2
kernel/queue.c

@@ -84,7 +84,7 @@ SYS_INIT(init_queue_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
void _impl_k_queue_init(struct k_queue *queue)
{
sys_sflist_init(&queue->data_q);
sys_dlist_init(&queue->wait_q);
_waitq_init(&queue->wait_q);
#if defined(CONFIG_POLL)
sys_dlist_init(&queue->poll_events);
#endif

11
kernel/sched.c

@@ -337,15 +337,18 @@ struct k_thread *_unpend_first_thread(_wait_q_t *wait_q)
return t;
}
void _unpend_all(_wait_q_t *waitq)
int _unpend_all(_wait_q_t *waitq)
{
while (!sys_dlist_is_empty(waitq)) {
struct k_thread *th = (void *)sys_dlist_peek_head(waitq);
int need_sched = 0;
struct k_thread *th;
while ((th = _waitq_head(waitq))) {
_unpend_thread(th);
_ready_thread(th);
need_sched = 1;
}
return need_sched;
}
@@ -717,6 +720,6 @@ struct k_thread *_find_first_thread_to_unpend(_wait_q_t *wait_q,
ARG_UNUSED(from);
#endif
return (struct k_thread *)sys_dlist_peek_head(wait_q);
return _waitq_head(wait_q);
}

2
kernel/sem.c

@@ -62,7 +62,7 @@ void _impl_k_sem_init(struct k_sem *sem, unsigned int initial_count,
sem->count = initial_count;
sem->limit = limit;
sys_dlist_init(&sem->wait_q);
_waitq_init(&sem->wait_q);
#if defined(CONFIG_POLL)
sys_dlist_init(&sem->poll_events);
#endif

4
kernel/stack.c

@@ -48,7 +48,7 @@ SYS_INIT(init_stack_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
void k_stack_init(struct k_stack *stack, u32_t *buffer,
unsigned int num_entries)
{
sys_dlist_init(&stack->wait_q);
_waitq_init(&stack->wait_q);
stack->next = stack->base = buffer;
stack->top = stack->base + num_entries;
@@ -86,7 +86,7 @@ Z_SYSCALL_HANDLER(k_stack_alloc_init, stack, num_entries)
void k_stack_cleanup(struct k_stack *stack)
{
__ASSERT_NO_MSG(sys_dlist_is_empty(&stack->wait_q));
__ASSERT_NO_MSG(sys_dlist_is_empty(&stack->wait_q.waitq));
if (stack->flags & K_STACK_FLAG_ALLOC) {
k_free(stack->base);

4
kernel/timer.c

@@ -68,7 +68,7 @@ void _timer_expiration_handler(struct _timeout *t)
timer->expiry_fn(timer);
}
thread = (struct k_thread *)sys_dlist_peek_head(&timer->wait_q);
thread = _waitq_head(&timer->wait_q);
if (!thread) {
return;
@@ -100,7 +100,7 @@ void k_timer_init(struct k_timer *timer,
timer->stop_fn = stop_fn;
timer->status = 0;
sys_dlist_init(&timer->wait_q);
_waitq_init(&timer->wait_q);
_init_timeout(&timer->timeout, _timer_expiration_handler);
SYS_TRACING_OBJ_INIT(k_timer, timer);

2
lib/posix/pthread_barrier.c

@@ -18,7 +18,7 @@ int pthread_barrier_wait(pthread_barrier_t *b)
if (b->count >= b->max) {
b->count = 0;
while (!sys_dlist_is_empty(&b->wait_q)) {
while (_waitq_head(&b->wait_q)) {
_ready_one_thread(&b->wait_q);
}
return _reschedule(key);

2
lib/posix/pthread_cond.c

@@ -57,7 +57,7 @@ int pthread_cond_broadcast(pthread_cond_t *cv)
{
int key = irq_lock();
while (!sys_dlist_is_empty(&cv->wait_q)) {
while (_waitq_head(&cv->wait_q)) {
_ready_one_thread(&cv->wait_q);
}

5
subsys/logging/event_logger.c

@@ -12,6 +12,7 @@
#include <logging/event_logger.h>
#include <ring_buffer.h>
#include <kernel_structs.h>
#include <wait_q.h>
void sys_event_logger_init(struct event_logger *logger,
u32_t *logger_buffer, u32_t buffer_size)
@@ -52,7 +53,7 @@ void sys_event_logger_put(struct event_logger *logger, u16_t event_id,
*/
struct k_thread *event_logger_thread =
(struct k_thread *)sys_dlist_peek_head(&(logger->sync_sema.wait_q));
_waitq_head(&(logger->sync_sema.wait_q));
if (_current != event_logger_thread) {
event_logger_put(logger, event_id, event_data,
data_size, k_sem_give);
@@ -90,7 +91,7 @@ void _sys_event_logger_put_non_preemptible(struct event_logger *logger,
*/
struct k_thread *event_logger_thread =
(struct k_thread *)sys_dlist_peek_head(&(logger->sync_sema.wait_q));
_waitq_head(&(logger->sync_sema.wait_q));
if (_current != event_logger_thread) {
event_logger_put(logger, event_id, event_data, data_size,

Loading…
Cancel
Save