kernel: Propagate z_handle_obj_poll_events() return

Propagates the return value from z_handle_obj_poll_events()
within the message queue, pipe, queue and semaphore objects.
This allows the kernel object code to determine whether it
needs to perform a full reschedule or whether it can take a
more optimized exit path.

Signed-off-by: Peter Mitsis <peter.mitsis@intel.com>

commit 6e3f57118f (branch pull/85097/head)
Author:    Peter Mitsis <peter.mitsis@intel.com>
Committer: Benjamin Cabé
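
For context: each file below wraps z_handle_obj_poll_events() in a small
handle_poll_events() helper, and it is that helper's return value which now
gets propagated. The contract the change relies on, inferred from the hunks
(the exact prototype here is an assumption, not quoted from the tree), is
roughly:

    /* Assumed contract of the kernel/poll.c helper: signal each poll
     * event registered on the object with `state` (for example
     * K_POLL_STATE_MSGQ_DATA_AVAILABLE) and return true only if doing
     * so made at least one polling thread ready to run, i.e. only if a
     * subsequent z_reschedule() would have real work to do.
     */
    bool z_handle_obj_poll_events(sys_dlist_t *events, uint32_t state);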

 kernel/msg_q.c | 16
 kernel/pipes.c |  7
 kernel/queue.c | 12
 kernel/sem.c   |  5
 4 files changed

--- a/kernel/msg_q.c
+++ b/kernel/msg_q.c
@@ -29,12 +29,16 @@
 static struct k_obj_type obj_type_msgq;
 #endif /* CONFIG_OBJ_CORE_MSGQ */
 
-#ifdef CONFIG_POLL
-static inline void handle_poll_events(struct k_msgq *msgq, uint32_t state)
+static inline bool handle_poll_events(struct k_msgq *msgq)
 {
-	z_handle_obj_poll_events(&msgq->poll_events, state);
-}
+#ifdef CONFIG_POLL
+	return z_handle_obj_poll_events(&msgq->poll_events,
+					K_POLL_STATE_MSGQ_DATA_AVAILABLE);
+#else
+	ARG_UNUSED(msgq);
+	return false;
 #endif /* CONFIG_POLL */
+}
 
 void k_msgq_init(struct k_msgq *msgq, char *buffer, size_t msg_size,
 		 uint32_t max_msgs)
@@ -157,9 +161,7 @@ int z_impl_k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout
 				msgq->write_ptr = msgq->buffer_start;
 			}
 			msgq->used_msgs++;
-#ifdef CONFIG_POLL
-			handle_poll_events(msgq, K_POLL_STATE_MSGQ_DATA_AVAILABLE);
-#endif /* CONFIG_POLL */
+			(void)handle_poll_events(msgq);
 		}
 		result = 0;
 	} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
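
A side effect of the msg_q.c hunks: the #ifdef moves from the call site into
the helper, so callers compile the same line whether or not polling is
enabled. With CONFIG_POLL=n the helper reduces to the stub below, and the
(void) call folds away entirely, leaving the put path unchanged on kernels
built without polling:

    static inline bool handle_poll_events(struct k_msgq *msgq)
    {
            ARG_UNUSED(msgq);
            return false;   /* never asks the caller to reschedule */
    }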

--- a/kernel/pipes.c
+++ b/kernel/pipes.c
@@ -105,12 +105,13 @@ static inline int z_vrfy_k_pipe_alloc_init(struct k_pipe *pipe, size_t size)
 #include <zephyr/syscalls/k_pipe_alloc_init_mrsh.c>
 #endif /* CONFIG_USERSPACE */
 
-static inline void handle_poll_events(struct k_pipe *pipe)
+static inline bool handle_poll_events(struct k_pipe *pipe)
 {
 #ifdef CONFIG_POLL
-	z_handle_obj_poll_events(&pipe->poll_events, K_POLL_STATE_PIPE_DATA_AVAILABLE);
+	return z_handle_obj_poll_events(&pipe->poll_events, K_POLL_STATE_PIPE_DATA_AVAILABLE);
 #else
 	ARG_UNUSED(pipe);
+	return false;
 #endif /* CONFIG_POLL */
 }
@@ -468,7 +469,7 @@ int z_impl_k_pipe_put(struct k_pipe *pipe, const void *data,
 	 */
 	if ((pipe->bytes_used != 0U) && (*bytes_written != 0U)) {
-		handle_poll_events(pipe);
+		reschedule_needed = handle_poll_events(pipe) || reschedule_needed;
 	}
 
 	/*
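
One detail in the pipes hunk above: the call is placed on the left-hand side
of the ||, so C's short-circuit evaluation can never skip it, even when a
reschedule is already pending:

    reschedule_needed = handle_poll_events(pipe) || reschedule_needed;

    /* Reversed, the poll events would silently go unhandled whenever
     * reschedule_needed was already true:
     *
     *	reschedule_needed = reschedule_needed || handle_poll_events(pipe);
     */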

--- a/kernel/queue.c
+++ b/kernel/queue.c
@@ -84,13 +84,15 @@ static void prepare_thread_to_run(struct k_thread *thread, void *data)
 	z_ready_thread(thread);
 }
 
-static inline void handle_poll_events(struct k_queue *queue, uint32_t state)
+static inline bool handle_poll_events(struct k_queue *queue, uint32_t state)
 {
 #ifdef CONFIG_POLL
-	z_handle_obj_poll_events(&queue->poll_events, state);
+	return z_handle_obj_poll_events(&queue->poll_events, state);
 #else
 	ARG_UNUSED(queue);
 	ARG_UNUSED(state);
+
+	return false;
 #endif /* CONFIG_POLL */
 }
@@ -107,7 +109,7 @@ void z_impl_k_queue_cancel_wait(struct k_queue *queue)
 		prepare_thread_to_run(first_pending_thread, NULL);
 	}
 
-	handle_poll_events(queue, K_POLL_STATE_CANCELLED);
+	(void)handle_poll_events(queue, K_POLL_STATE_CANCELLED);
 	z_reschedule(&queue->lock, key);
 }
@@ -167,7 +169,7 @@ static int32_t queue_insert(struct k_queue *queue, void *prev, void *data,
 		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_queue, queue_insert, queue, alloc, K_FOREVER);
 
 	sys_sflist_insert(&queue->data_q, prev, data);
-	handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
+	(void)handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
 	z_reschedule(&queue->lock, key);
 
 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, queue_insert, queue, alloc, 0);
@@ -274,7 +276,7 @@ int k_queue_append_list(struct k_queue *queue, void *head, void *tail)
 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, append_list, queue, 0);
 
-	handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
+	(void)handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
 	z_reschedule(&queue->lock, key);
 
 	return 0;
 }

--- a/kernel/sem.c
+++ b/kernel/sem.c
@@ -85,8 +85,7 @@ int z_vrfy_k_sem_init(struct k_sem *sem, unsigned int initial_count,
 static inline bool handle_poll_events(struct k_sem *sem)
 {
 #ifdef CONFIG_POLL
-	z_handle_obj_poll_events(&sem->poll_events, K_POLL_STATE_SEM_AVAILABLE);
-	return true;
+	return z_handle_obj_poll_events(&sem->poll_events, K_POLL_STATE_SEM_AVAILABLE);
 #else
 	ARG_UNUSED(sem);
 	return false;
@@ -180,7 +179,7 @@ void z_impl_k_sem_reset(struct k_sem *sem)
 	SYS_PORT_TRACING_OBJ_FUNC(k_sem, reset, sem);
 
-	handle_poll_events(sem);
+	(void)handle_poll_events(sem);
 	z_reschedule(&lock, key);
 }
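
Apart from that pipes hunk, every converted call site still discards the
result with a (void) cast and reschedules unconditionally, so those paths
behave exactly as before. The payoff comes when a caller consumes the value;
a hypothetical follow-up in a give/reset path (a sketch, not code from this
commit) could look like:

    /* Sketch only: `lock` and `key` follow the spinlock pattern visible
     * in the hunks above. */
    if (handle_poll_events(sem)) {
            z_reschedule(&lock, key);     /* a poller became runnable */
    } else {
            k_spin_unlock(&lock, key);    /* cheap exit, no scheduler pass */
    }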
