Browse Source

kernel: rename thread return value functions

z_set_thread_return_value is part of the core kernel -> arch
interface and has been renamed to z_arch_thread_return_value_set.

z_set_thread_return_value_with_data renamed to
z_thread_return_value_set_with_data for consistency.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
pull/19425/head
Andrew Boie 6 years ago committed by Anas Nashif
parent
commit
4ad9f687df
  1. 2
      arch/arm/core/swap.c
  2. 2
      arch/arm/include/kernel_arch_func.h
  3. 2
      arch/nios2/core/swap.S
  4. 2
      arch/nios2/include/kernel_arch_func.h
  5. 4
      arch/posix/core/swap.c
  6. 2
      arch/posix/include/kernel_arch_func.h
  7. 2
      arch/riscv/core/swap.S
  8. 2
      arch/riscv/include/kernel_arch_func.h
  9. 8
      arch/x86/core/ia32/swap.S
  10. 2
      arch/x86/include/ia32/kernel_arch_func.h
  11. 2
      kernel/futex.c
  12. 6
      kernel/include/kernel_structs.h
  13. 4
      kernel/mailbox.c
  14. 2
      kernel/mem_slab.c
  15. 6
      kernel/msg_q.c
  16. 2
      kernel/mutex.c
  17. 2
      kernel/poll.c
  18. 2
      kernel/queue.c
  19. 2
      kernel/sem.c
  20. 2
      kernel/stack.c
  21. 2
      kernel/timer.c
  22. 2
      lib/posix/pthread_mutex.c

2
arch/arm/core/swap.c

@@ -42,7 +42,7 @@ extern const int _k_neg_eagain;
* as BASEPRI is not available. * as BASEPRI is not available.
* *
* @return -EAGAIN, or a return value set by a call to * @return -EAGAIN, or a return value set by a call to
* z_set_thread_return_value() * z_arch_thread_return_value_set()
* *
*/ */
int __swap(int key) int __swap(int key)

2
arch/arm/include/kernel_arch_func.h

@@ -136,7 +136,7 @@ z_arch_switch_to_main_thread(struct k_thread *main_thread,
} }
static ALWAYS_INLINE void static ALWAYS_INLINE void
z_set_thread_return_value(struct k_thread *thread, unsigned int value) z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
{ {
thread->arch.swap_return_value = value; thread->arch.swap_return_value = value;
} }

2
arch/nios2/core/swap.S

@@ -120,7 +120,7 @@ SECTION_FUNC(exception.other, __swap)
/* /*
* Load return value into r2 (return value register). -EAGAIN unless * Load return value into r2 (return value register). -EAGAIN unless
* someone previously called z_set_thread_return_value(). Do this before * someone previously called z_arch_thread_return_value_set(). Do this before
* we potentially unlock interrupts. * we potentially unlock interrupts.
*/ */
ldw r2, _thread_offset_to_retval(r2) ldw r2, _thread_offset_to_retval(r2)

2
arch/nios2/include/kernel_arch_func.h

@@ -33,7 +33,7 @@ static ALWAYS_INLINE void kernel_arch_init(void)
} }
static ALWAYS_INLINE void static ALWAYS_INLINE void
z_set_thread_return_value(struct k_thread *thread, unsigned int value) z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
{ {
thread->callee_saved.retval = value; thread->callee_saved.retval = value;
} }

4
arch/posix/core/swap.c

@@ -30,7 +30,7 @@
* *
* *
* @return -EAGAIN, or a return value set by a call to * @return -EAGAIN, or a return value set by a call to
* z_set_thread_return_value() * z_arch_thread_return_value_set()
* *
*/ */
@@ -48,7 +48,7 @@ int __swap(unsigned int key)
*/ */
_kernel.current->callee_saved.key = key; _kernel.current->callee_saved.key = key;
_kernel.current->callee_saved.retval = -EAGAIN; _kernel.current->callee_saved.retval = -EAGAIN;
/* retval may be modified with a call to z_set_thread_return_value() */ /* retval may be modified with a call to z_arch_thread_return_value_set() */
posix_thread_status_t *ready_thread_ptr = posix_thread_status_t *ready_thread_ptr =
(posix_thread_status_t *) (posix_thread_status_t *)

2
arch/posix/include/kernel_arch_func.h

@@ -44,7 +44,7 @@ static inline void kernel_arch_init(void)
static ALWAYS_INLINE void static ALWAYS_INLINE void
z_set_thread_return_value(struct k_thread *thread, unsigned int value) z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
{ {
thread->callee_saved.retval = value; thread->callee_saved.retval = value;
} }

2
arch/riscv/core/swap.S

@@ -78,7 +78,7 @@ SECTION_FUNC(exception.other, __swap)
* Prior to unlocking irq, load return value of * Prior to unlocking irq, load return value of
* __swap to temp register t2 (from * __swap to temp register t2 (from
* _thread_offset_to_swap_return_value). Normally, it should be -EAGAIN, * _thread_offset_to_swap_return_value). Normally, it should be -EAGAIN,
* unless someone has previously called z_set_thread_return_value(..). * unless someone has previously called z_arch_thread_return_value_set(..).
*/ */
la t0, _kernel la t0, _kernel

2
arch/riscv/include/kernel_arch_func.h

@@ -29,7 +29,7 @@ static ALWAYS_INLINE void kernel_arch_init(void)
} }
static ALWAYS_INLINE void static ALWAYS_INLINE void
z_set_thread_return_value(struct k_thread *thread, unsigned int value) z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
{ {
thread->arch.swap_return_value = value; thread->arch.swap_return_value = value;
} }

8
arch/x86/core/ia32/swap.S

@@ -68,7 +68,7 @@
* potential security leaks. * potential security leaks.
* *
* @return -EAGAIN, or a return value set by a call to * @return -EAGAIN, or a return value set by a call to
* z_set_thread_return_value() * z_arch_thread_return_value_set()
* *
* C function prototype: * C function prototype:
* *
@@ -117,7 +117,7 @@ SECTION_FUNC(TEXT, __swap)
* Carve space for the return value. Setting it to a default of * Carve space for the return value. Setting it to a default of
* -EAGAIN eliminates the need for the timeout code to set it. * -EAGAIN eliminates the need for the timeout code to set it.
* If another value is ever needed, it can be modified with * If another value is ever needed, it can be modified with
* z_set_thread_return_value(). * z_arch_thread_return_value_set().
*/ */
pushl _k_neg_eagain pushl _k_neg_eagain
@@ -342,7 +342,7 @@ CROHandlingDone:
movl _thread_offset_to_esp(%eax), %esp movl _thread_offset_to_esp(%eax), %esp
/* load return value from a possible z_set_thread_return_value() */ /* load return value from a possible z_arch_thread_return_value_set() */
popl %eax popl %eax
@@ -357,7 +357,7 @@ CROHandlingDone:
* %eax may contain one of these values: * %eax may contain one of these values:
* *
* - the return value for __swap() that was set up by a call to * - the return value for __swap() that was set up by a call to
* z_set_thread_return_value() * z_arch_thread_return_value_set()
* - -EINVAL * - -EINVAL
*/ */

2
arch/x86/include/ia32/kernel_arch_func.h

@@ -60,7 +60,7 @@ static inline void kernel_arch_init(void)
* @return N/A * @return N/A
*/ */
static ALWAYS_INLINE void static ALWAYS_INLINE void
z_set_thread_return_value(struct k_thread *thread, unsigned int value) z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
{ {
/* write into 'eax' slot created in z_swap() entry */ /* write into 'eax' slot created in z_swap() entry */

2
kernel/futex.c

@@ -42,7 +42,7 @@ int z_impl_k_futex_wake(struct k_futex *futex, bool wake_all)
thread = z_unpend_first_thread(&futex_data->wait_q); thread = z_unpend_first_thread(&futex_data->wait_q);
if (thread) { if (thread) {
z_ready_thread(thread); z_ready_thread(thread);
z_set_thread_return_value(thread, 0); z_arch_thread_return_value_set(thread, 0);
woken++; woken++;
} }
} while (thread && wake_all); } while (thread && wake_all);

6
kernel/include/kernel_structs.h

@@ -195,18 +195,18 @@ extern struct z_kernel _kernel;
* z_swap() is in use it's a simple inline provided by the kernel. * z_swap() is in use it's a simple inline provided by the kernel.
*/ */
static ALWAYS_INLINE void static ALWAYS_INLINE void
z_set_thread_return_value(struct k_thread *thread, unsigned int value) z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
{ {
thread->swap_retval = value; thread->swap_retval = value;
} }
#endif #endif
static ALWAYS_INLINE void static ALWAYS_INLINE void
z_set_thread_return_value_with_data(struct k_thread *thread, z_thread_return_value_set_with_data(struct k_thread *thread,
unsigned int value, unsigned int value,
void *data) void *data)
{ {
z_set_thread_return_value(thread, value); z_arch_thread_return_value_set(thread, value);
thread->base.swap_data = data; thread->base.swap_data = data;
} }

4
kernel/mailbox.c

@@ -211,7 +211,7 @@ static void mbox_message_dispose(struct k_mbox_msg *rx_msg)
#endif #endif
/* synchronous send: wake up sending thread */ /* synchronous send: wake up sending thread */
z_set_thread_return_value(sending_thread, 0); z_arch_thread_return_value_set(sending_thread, 0);
z_mark_thread_as_not_pending(sending_thread); z_mark_thread_as_not_pending(sending_thread);
z_ready_thread(sending_thread); z_ready_thread(sending_thread);
z_reschedule_unlocked(); z_reschedule_unlocked();
@@ -257,7 +257,7 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
z_unpend_thread(receiving_thread); z_unpend_thread(receiving_thread);
/* ready receiver for execution */ /* ready receiver for execution */
z_set_thread_return_value(receiving_thread, 0); z_arch_thread_return_value_set(receiving_thread, 0);
z_ready_thread(receiving_thread); z_ready_thread(receiving_thread);
#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0) #if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)

2
kernel/mem_slab.c

@@ -119,7 +119,7 @@ void k_mem_slab_free(struct k_mem_slab *slab, void **mem)
struct k_thread *pending_thread = z_unpend_first_thread(&slab->wait_q); struct k_thread *pending_thread = z_unpend_first_thread(&slab->wait_q);
if (pending_thread != NULL) { if (pending_thread != NULL) {
z_set_thread_return_value_with_data(pending_thread, 0, *mem); z_thread_return_value_set_with_data(pending_thread, 0, *mem);
z_ready_thread(pending_thread); z_ready_thread(pending_thread);
z_reschedule(&lock, key); z_reschedule(&lock, key);
} else { } else {

6
kernel/msg_q.c

@@ -126,7 +126,7 @@ int z_impl_k_msgq_put(struct k_msgq *msgq, void *data, s32_t timeout)
(void)memcpy(pending_thread->base.swap_data, data, (void)memcpy(pending_thread->base.swap_data, data,
msgq->msg_size); msgq->msg_size);
/* wake up waiting thread */ /* wake up waiting thread */
z_set_thread_return_value(pending_thread, 0); z_arch_thread_return_value_set(pending_thread, 0);
z_ready_thread(pending_thread); z_ready_thread(pending_thread);
z_reschedule(&msgq->lock, key); z_reschedule(&msgq->lock, key);
return 0; return 0;
@@ -215,7 +215,7 @@ int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, s32_t timeout)
msgq->used_msgs++; msgq->used_msgs++;
/* wake up waiting thread */ /* wake up waiting thread */
z_set_thread_return_value(pending_thread, 0); z_arch_thread_return_value_set(pending_thread, 0);
z_ready_thread(pending_thread); z_ready_thread(pending_thread);
z_reschedule(&msgq->lock, key); z_reschedule(&msgq->lock, key);
return 0; return 0;
@@ -287,7 +287,7 @@ void z_impl_k_msgq_purge(struct k_msgq *msgq)
/* wake up any threads that are waiting to write */ /* wake up any threads that are waiting to write */
while ((pending_thread = z_unpend_first_thread(&msgq->wait_q)) != NULL) { while ((pending_thread = z_unpend_first_thread(&msgq->wait_q)) != NULL) {
z_set_thread_return_value(pending_thread, -ENOMSG); z_arch_thread_return_value_set(pending_thread, -ENOMSG);
z_ready_thread(pending_thread); z_ready_thread(pending_thread);
} }

2
kernel/mutex.c

@@ -236,7 +236,7 @@ void z_impl_k_mutex_unlock(struct k_mutex *mutex)
k_spin_unlock(&lock, key); k_spin_unlock(&lock, key);
z_set_thread_return_value(new_owner, 0); z_arch_thread_return_value_set(new_owner, 0);
/* /*
* new owner is already of higher or equal prio than first * new owner is already of higher or equal prio than first

2
kernel/poll.c

@@ -358,7 +358,7 @@ static int signal_poll_event(struct k_poll_event *event, u32_t state)
} }
z_unpend_thread(thread); z_unpend_thread(thread);
z_set_thread_return_value(thread, z_arch_thread_return_value_set(thread,
state == K_POLL_STATE_CANCELLED ? -EINTR : 0); state == K_POLL_STATE_CANCELLED ? -EINTR : 0);
if (!z_is_thread_ready(thread)) { if (!z_is_thread_ready(thread)) {

2
kernel/queue.c

@@ -103,7 +103,7 @@ static inline void z_vrfy_k_queue_init(struct k_queue *queue)
static void prepare_thread_to_run(struct k_thread *thread, void *data) static void prepare_thread_to_run(struct k_thread *thread, void *data)
{ {
z_ready_thread(thread); z_ready_thread(thread);
z_set_thread_return_value_with_data(thread, 0, data); z_thread_return_value_set_with_data(thread, 0, data);
} }
#endif /* CONFIG_POLL */ #endif /* CONFIG_POLL */

2
kernel/sem.c

@@ -110,7 +110,7 @@ static void do_sem_give(struct k_sem *sem)
if (thread != NULL) { if (thread != NULL) {
z_ready_thread(thread); z_ready_thread(thread);
z_set_thread_return_value(thread, 0); z_arch_thread_return_value_set(thread, 0);
} else { } else {
increment_count_up_to_limit(sem); increment_count_up_to_limit(sem);
handle_poll_events(sem); handle_poll_events(sem);

2
kernel/stack.c

@@ -106,7 +106,7 @@ void z_impl_k_stack_push(struct k_stack *stack, stack_data_t data)
if (first_pending_thread != NULL) { if (first_pending_thread != NULL) {
z_ready_thread(first_pending_thread); z_ready_thread(first_pending_thread);
z_set_thread_return_value_with_data(first_pending_thread, z_thread_return_value_set_with_data(first_pending_thread,
0, (void *)data); 0, (void *)data);
z_reschedule(&stack->lock, key); z_reschedule(&stack->lock, key);
return; return;

2
kernel/timer.c

@@ -82,7 +82,7 @@ void z_timer_expiration_handler(struct _timeout *t)
z_ready_thread(thread); z_ready_thread(thread);
z_set_thread_return_value(thread, 0); z_arch_thread_return_value_set(thread, 0);
} }

2
lib/posix/pthread_mutex.c

@@ -143,7 +143,7 @@ int pthread_mutex_unlock(pthread_mutex_t *m)
m->owner = (pthread_t)thread; m->owner = (pthread_t)thread;
m->lock_count++; m->lock_count++;
z_ready_thread(thread); z_ready_thread(thread);
z_set_thread_return_value(thread, 0); z_arch_thread_return_value_set(thread, 0);
z_reschedule_irqlock(key); z_reschedule_irqlock(key);
return 0; return 0;
} }

Loading…
Cancel
Save