
kernel/timeout: Make timeout arguments an opaque type

Add a k_timeout_t type, and use it everywhere that kernel API
functions were accepting a millisecond timeout argument.  Instead of
forcing milliseconds everywhere (which are often not integrally
representable as system ticks), do the conversion to ticks at the
point where the timeout is created.  This avoids an extra unit
conversion in some application code, and allows us to express the
timeout in units other than milliseconds to achieve greater precision.

The existing K_MSEC() et al. macros now return initializers for a
k_timeout_t.
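
For example (a sketch; my_sem is a hypothetical semaphore):

    /* The unit conversion happens once, when the timeout is created. */
    k_timeout_t t = K_MSEC(100);        /* struct-typed, no longer a bare s32_t */

    k_sem_take(&my_sem, t);
    k_sem_take(&my_sem, K_MSEC(100));   /* or inline at the call site */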

The K_NO_WAIT and K_FOREVER constants have now become k_timeout_t
values, which means they cannot be operated on as integers.
Applications which have their own APIs that need to compare these
constants against user-provided timeouts can now use the K_TIMEOUT_EQ()
predicate to test for equality.
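
A minimal sketch of such a check, using a hypothetical wrapper my_api_wait():

    int my_api_wait(struct k_sem *sem, k_timeout_t timeout)
    {
            /* k_timeout_t is opaque: compare with the predicate, not
             * with == against integer constants.
             */
            if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
                    return k_sem_take(sem, K_NO_WAIT);
            }
            if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
                    /* unbounded wait: e.g. skip arming a guard timer */
            }
            return k_sem_take(sem, timeout);
    }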

Timer drivers, which receive an integer tick count in their
z_clock_set_timeout() functions, now use the integer-valued
K_TICKS_FOREVER constant instead of K_FOREVER.
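
The resulting driver pattern looks like this (a sketch; MAX_TICKS is each
driver's maximum programmable delay, cf. the timer driver hunks below):

    void z_clock_set_timeout(s32_t ticks, bool idle)
    {
            /* ticks is still a plain integer here, so the integer-valued
             * K_TICKS_FOREVER constant is used, not K_FOREVER.
             */
            ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks;
            ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0);
            /* ... program the hardware comparator ... */
    }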

For the initial release, to preserve source compatibility, a
CONFIG_LEGACY_TIMEOUT_API Kconfig option is provided.  When enabled,
k_timeout_t remains a compatible 32-bit value that will work with any
legacy Zephyr application.
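
An application would enable it with a single line of configuration, e.g. in
its prj.conf:

    CONFIG_LEGACY_TIMEOUT_API=y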

Some subsystems present timeout (or timeout-like) values to their own
users as APIs that would re-use the kernel's own constants and
conventions.  These will require some minor design work to adapt to
the new scheme (in most cases just using k_timeout_t directly in their
own API), and they have not been changed in this patch, instead
selecting CONFIG_LEGACY_TIMEOUT_API via kconfig.  These subsystems
include: CAN Bus, the Microbit display driver, I2S, LoRa modem
drivers, the UART Async API, Video hardware drivers, the console
subsystem, and the network buffer abstraction.
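
In-tree, each affected subsystem selects the option from its Kconfig, as in
the drivers/can/Kconfig hunk below:

    menuconfig CAN
            bool "CAN Drivers"
            select LEGACY_TIMEOUT_API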

k_sleep() now takes a k_timeout_t argument, with a k_msleep() variant
provided that works identically to the original API.
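
For example:

    k_sleep(K_MSEC(500));   /* new API: takes a k_timeout_t */
    k_msleep(500);          /* equivalent to the old k_sleep(500) */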

Most of the changes here are just type/configuration management and
documentation, but there are logic changes in mempool, where a loop
that used a timeout numerically has been reworked using a new
z_timeout_end_calc() helper that computes a timeout's absolute
expiration time in ticks.  Also in queue.c (when POLL was enabled), a
similar loop was needlessly used to retry the k_poll() call after a
spurious failure.  But k_poll() does not fail spuriously, so the loop
was removed.
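
Condensed from the kernel/mempool.c hunk below, the reworked loop follows
this pattern:

    u64_t end = z_timeout_end_calc(timeout);

    while (true) {
            /* ... try the allocation, else pend on the wait queue ... */

            if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
                    s64_t remaining = end - z_tick_get();

                    if (remaining <= 0) {
                            break;          /* deadline passed */
                    }
                    timeout = Z_TIMEOUT_TICKS(remaining);
            }
    }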

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
Branch: pull/23560/head
Commit: 7832738ae9 (Andy Ross, committed by Anas Nashif)
66 changed files (lines changed):

    2   boards/arm/qemu_cortex_m0/nrf_timer_timer.c
    1   drivers/can/Kconfig
    1   drivers/display/Kconfig.microbit
    1   drivers/i2s/Kconfig
    1   drivers/lora/Kconfig
    1   drivers/serial/Kconfig
    2   drivers/timer/apic_timer.c
    5   drivers/timer/arcv2_timer0.c
    2   drivers/timer/arm_arch_timer.c
    2   drivers/timer/cavs_timer.c
    2   drivers/timer/cc13x2_cc26x2_rtc_timer.c
    5   drivers/timer/cortex_m_systick.c
    4   drivers/timer/hpet.c
    2   drivers/timer/legacy_api.h
    4   drivers/timer/loapic_timer.c
    4   drivers/timer/mchp_xec_rtos_timer.c
    2   drivers/timer/native_posix_timer.c
    2   drivers/timer/nrf_rtc_timer.c
    2   drivers/timer/riscv_machine_timer.c
    2   drivers/timer/xlnx_psttc_timer.c
    2   drivers/timer/xtensa_sys_timer.c
    1   drivers/video/Kconfig
    2   include/drivers/timer/system_timer.h
  159   include/kernel.h
   10   include/sys/mutex.h
    4   include/sys/sem.h
   57   include/sys_clock.h
   14   include/timeout_q.h
    9   kernel/Kconfig
    5   kernel/futex.c
    9   kernel/include/ksched.h
   13   kernel/mailbox.c
    4   kernel/mem_slab.c
   20   kernel/mempool.c
   18   kernel/msg_q.c
    7   kernel/mutex.c
   20   kernel/pipes.c
   21   kernel/poll.c
   40   kernel/queue.c
   70   kernel/sched.c
    9   kernel/sem.c
    7   kernel/stack.c
   22   kernel/thread.c
   36   kernel/timeout.c
   43   kernel/timer.c
   11   kernel/work_q.c
    1   lib/cmsis_rtos_v1/Kconfig
    1   lib/cmsis_rtos_v2/Kconfig
    4   lib/os/mutex.c
    4   lib/os/sem.c
    1   lib/posix/Kconfig
    2   lib/posix/pthread_common.c
    8   samples/cpp_synchronization/src/main.cpp
    4   samples/scheduler/metairq_dispatch/src/msgdev.c
    1   soc/arm/ti_simplelink/Kconfig
    1   subsys/console/Kconfig
    1   subsys/net/Kconfig
    4   subsys/power/policy/policy_residency.c
    2   tests/kernel/lifo/lifo_usage/src/main.c
    5   tests/kernel/mbox/mbox_usage/src/main.c
    1   tests/kernel/mem_protect/futex/prj.conf
    3   tests/kernel/mem_protect/futex/src/main.c
    4   tests/kernel/pending/src/main.c
    6   tests/kernel/pipe/pipe_api/src/test_pipe_contexts.c
    2   tests/kernel/sleep/src/main.c
    2   tests/kernel/workq/work_queue/src/main.c

boards/arm/qemu_cortex_m0/nrf_timer_timer.c

@@ -108,7 +108,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle)
ARG_UNUSED(idle);
#ifdef CONFIG_TICKLESS_KERNEL
-ticks = (ticks == K_FOREVER) ? MAX_TICKS : ticks;
+ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks;
ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0);
k_spinlock_key_t key = k_spin_lock(&lock);

drivers/can/Kconfig

@@ -8,6 +8,7 @@
#
menuconfig CAN
bool "CAN Drivers"
+select LEGACY_TIMEOUT_API
help
Enable CAN Driver Configuration

drivers/display/Kconfig.microbit

@@ -8,6 +8,7 @@ config MICROBIT_DISPLAY
depends on BOARD_BBC_MICROBIT
depends on PRINTK
depends on GPIO
+select LEGACY_TIMEOUT_API
help
Enable this to be able to display images and text on the 5x5
LED matrix display on the BBC micro:bit.

drivers/i2s/Kconfig

@@ -8,6 +8,7 @@
#
menuconfig I2S
bool "I2S bus drivers"
+select LEGACY_TIMEOUT_API
help
Enable support for the I2S (Inter-IC Sound) hardware bus.

drivers/lora/Kconfig

@@ -9,6 +9,7 @@
menuconfig LORA
bool "LoRa support"
depends on NEWLIB_LIBC
+select LEGACY_TIMEOUT_API
help
Include LoRa drivers in the system configuration.

drivers/serial/Kconfig

@@ -34,6 +34,7 @@ config SERIAL_SUPPORT_INTERRUPT
config UART_ASYNC_API
bool "Enable new asynchronous UART API [EXPERIMENTAL]"
depends on SERIAL_SUPPORT_ASYNC
+select LEGACY_TIMEOUT_API
help
This option enables new asynchronous UART API.

drivers/timer/apic_timer.c

@@ -89,7 +89,7 @@ void z_clock_set_timeout(s32_t n, bool idle)
if (n < 1) {
full_ticks = 0;
-} else if ((n == K_FOREVER) || (n > MAX_TICKS)) {
+} else if ((n == K_TICKS_FOREVER) || (n > MAX_TICKS)) {
full_ticks = MAX_TICKS - 1;
} else {
full_ticks = n - 1;

drivers/timer/arcv2_timer0.c

@@ -242,7 +242,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle)
* However for single core using 32-bits arc timer, idle cannot
* be ignored, as 32-bits timer will overflow in a not-long time.
*/
-if (IS_ENABLED(CONFIG_TICKLESS_IDLE) && ticks == K_FOREVER) {
+if (IS_ENABLED(CONFIG_TICKLESS_IDLE) && ticks == K_TICKS_FOREVER) {
timer0_control_register_set(0);
timer0_count_register_set(0);
timer0_limit_register_set(0);
@@ -268,7 +268,8 @@ void z_clock_set_timeout(s32_t ticks, bool idle)
arch_irq_unlock(key);
#endif
#else
-if (IS_ENABLED(CONFIG_TICKLESS_IDLE) && idle && ticks == K_FOREVER) {
+if (IS_ENABLED(CONFIG_TICKLESS_IDLE) && idle
+&& ticks == K_TICKS_FOREVER) {
timer0_control_register_set(0);
timer0_count_register_set(0);
timer0_limit_register_set(0);

drivers/timer/arm_arch_timer.c

@@ -68,7 +68,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle)
return;
}
-ticks = (ticks == K_FOREVER) ? MAX_TICKS : ticks;
+ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks;
ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0);
k_spinlock_key_t key = k_spin_lock(&lock);

drivers/timer/cavs_timer.c

@@ -120,7 +120,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle)
ARG_UNUSED(idle);
#ifdef CONFIG_TICKLESS_KERNEL
-ticks = ticks == K_FOREVER ? MAX_TICKS : ticks;
+ticks = ticks == K_TICKS_FOREVER ? MAX_TICKS : ticks;
ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0);
k_spinlock_key_t key = k_spin_lock(&lock);

drivers/timer/cc13x2_cc26x2_rtc_timer.c

@@ -207,7 +207,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle)
#ifdef CONFIG_TICKLESS_KERNEL
-ticks = (ticks == K_FOREVER) ? MAX_TICKS : ticks;
+ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks;
ticks = MAX(MIN(ticks - 1, (s32_t) MAX_TICKS), 0);
k_spinlock_key_t key = k_spin_lock(&lock);

drivers/timer/cortex_m_systick.c

@@ -172,7 +172,8 @@ void z_clock_set_timeout(s32_t ticks, bool idle)
* the counter. (Note: we can assume if idle==true that
* interrupts are already disabled)
*/
-if (IS_ENABLED(CONFIG_TICKLESS_IDLE) && idle && ticks == K_FOREVER) {
+if (IS_ENABLED(CONFIG_TICKLESS_IDLE) && idle
+&& ticks == K_TICKS_FOREVER) {
SysTick->CTRL &= ~SysTick_CTRL_ENABLE_Msk;
last_load = TIMER_STOPPED;
return;
@@ -181,7 +182,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle)
#if defined(CONFIG_TICKLESS_KERNEL)
u32_t delay;
-ticks = (ticks == K_FOREVER) ? MAX_TICKS : ticks;
+ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks;
ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0);
k_spinlock_key_t key = k_spin_lock(&lock);

drivers/timer/hpet.c

@@ -129,12 +129,12 @@ void z_clock_set_timeout(s32_t ticks, bool idle)
ARG_UNUSED(idle);
#if defined(CONFIG_TICKLESS_KERNEL) && !defined(CONFIG_QEMU_TICKLESS_WORKAROUND)
-if (ticks == K_FOREVER && idle) {
+if (ticks == K_TICKS_FOREVER && idle) {
GENERAL_CONF_REG &= ~GCONF_ENABLE;
return;
}
-ticks = ticks == K_FOREVER ? max_ticks : ticks;
+ticks = ticks == K_TICKS_FOREVER ? max_ticks : ticks;
ticks = MAX(MIN(ticks - 1, (s32_t)max_ticks), 0);
k_spinlock_key_t key = k_spin_lock(&lock);

drivers/timer/legacy_api.h

@@ -34,7 +34,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle)
if (idle) {
z_timer_idle_enter(ticks);
} else {
-z_set_time(ticks == K_FOREVER ? 0 : ticks);
+z_set_time(ticks == K_TICKS_FOREVER ? 0 : ticks);
}
#endif
}

drivers/timer/loapic_timer.c

@@ -390,7 +390,7 @@ void z_timer_idle_enter(s32_t ticks /* system ticks */
)
{
#ifdef CONFIG_TICKLESS_KERNEL
-if (ticks != K_FOREVER) {
+if (ticks != K_TICKS_FOREVER) {
/* Need to reprogram only if current program is smaller */
if (ticks > programmed_full_ticks) {
z_set_time(ticks);
@@ -417,7 +417,7 @@ void z_timer_idle_enter(s32_t ticks /* system ticks */
cycles = current_count_register_get();
-if ((ticks == K_FOREVER) || (ticks > max_system_ticks)) {
+if ((ticks == K_TICKS_FOREVER) || (ticks > max_system_ticks)) {
/*
* The number of cycles until the timer must fire next might not fit
* in the 32-bit counter register. To work around this, program

drivers/timer/mchp_xec_rtos_timer.c

@@ -135,7 +135,7 @@ void z_clock_set_timeout(s32_t n, bool idle)
u32_t full_cycles; /* full_ticks represented as cycles */
u32_t partial_cycles; /* number of cycles to first tick boundary */
-if (idle && (n == K_FOREVER)) {
+if (idle && (n == K_TICKS_FOREVER)) {
/*
* We are not in a locked section. Are writes to two
* global objects safe from pre-emption?
@@ -147,7 +147,7 @@ void z_clock_set_timeout(s32_t n, bool idle)
if (n < 1) {
full_ticks = 0;
-} else if ((n == K_FOREVER) || (n > MAX_TICKS)) {
+} else if ((n == K_TICKS_FOREVER) || (n > MAX_TICKS)) {
full_ticks = MAX_TICKS - 1;
} else {
full_ticks = n - 1;

drivers/timer/native_posix_timer.c

@@ -90,7 +90,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle)
/* Note that we treat INT_MAX literally as anyhow the maximum amount of
* ticks we can report with z_clock_announce() is INT_MAX
*/
-if (ticks == K_FOREVER) {
+if (ticks == K_TICKS_FOREVER) {
silent_ticks = INT64_MAX;
} else if (ticks > 0) {
silent_ticks = ticks - 1;

drivers/timer/nrf_rtc_timer.c

@@ -117,7 +117,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle)
ARG_UNUSED(idle);
#ifdef CONFIG_TICKLESS_KERNEL
-ticks = (ticks == K_FOREVER) ? MAX_TICKS : ticks;
+ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks;
ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0);
k_spinlock_key_t key = k_spin_lock(&lock);

drivers/timer/riscv_machine_timer.c

@@ -104,7 +104,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle)
return;
}
-ticks = ticks == K_FOREVER ? MAX_TICKS : ticks;
+ticks = ticks == K_TICKS_FOREVER ? MAX_TICKS : ticks;
ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0);
k_spinlock_key_t key = k_spin_lock(&lock);

drivers/timer/xlnx_psttc_timer.c

@@ -161,7 +161,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle)
cycles = read_count();
/* Calculate timeout counter value */
-if (ticks == K_FOREVER) {
+if (ticks == K_TICKS_FOREVER) {
next_cycles = cycles + CYCLES_NEXT_MAX;
} else {
next_cycles = cycles + ((u32_t)ticks * CYCLES_PER_TICK);

drivers/timer/xtensa_sys_timer.c

@@ -71,7 +71,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle)
ARG_UNUSED(idle);
#if defined(CONFIG_TICKLESS_KERNEL) && !defined(CONFIG_QEMU_TICKLESS_WORKAROUND)
-ticks = ticks == K_FOREVER ? MAX_TICKS : ticks;
+ticks = ticks == K_TICKS_FOREVER ? MAX_TICKS : ticks;
ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0);
k_spinlock_key_t key = k_spin_lock(&lock);

drivers/video/Kconfig

@@ -8,6 +8,7 @@
#
menuconfig VIDEO
bool "VIDEO hardware support"
+select LEGACY_TIMEOUT_API
help
Enable support for the VIDEO.

include/drivers/timer/system_timer.h

@@ -59,7 +59,7 @@ extern int z_clock_device_ctrl(struct device *device, u32_t ctrl_command,
* treated identically: it simply indicates the kernel would like the
* next tick announcement as soon as possible.
*
-* Note that ticks can also be passed the special value K_FOREVER,
+* Note that ticks can also be passed the special value K_TICKS_FOREVER,
* indicating that no future timer interrupts are expected or required
* and that the system is permitted to enter an indefinite sleep even
* if this could cause rollover of the internal counter (i.e. the

include/kernel.h

@@ -810,7 +810,7 @@ extern void k_thread_foreach_unlocked(
* @param p3 3rd entry point parameter.
* @param prio Thread priority.
* @param options Thread options.
-* @param delay Scheduling delay (in milliseconds), or K_NO_WAIT (for no delay).
+* @param delay Scheduling delay, or K_NO_WAIT (for no delay).
*
* @return ID of new thread.
*
@@ -821,7 +821,7 @@ __syscall k_tid_t k_thread_create(struct k_thread *new_thread,
size_t stack_size,
k_thread_entry_t entry,
void *p1, void *p2, void *p3,
-int prio, u32_t options, s32_t delay);
+int prio, u32_t options, k_timeout_t delay);
/**
* @brief Drop a thread's privileges permanently to user mode
@@ -926,15 +926,27 @@ void k_thread_system_pool_assign(struct k_thread *thread);
* This API may only be called from ISRs with a K_NO_WAIT timeout.
*
* @param thread Thread to wait to exit
-* @param timeout non-negative upper bound time in ms to wait for the thread
-* to exit.
+* @param timeout upper bound time to wait for the thread to exit.
* @retval 0 success, target thread has exited or wasn't running
* @retval -EBUSY returned without waiting
* @retval -EAGAIN waiting period timed out
* @retval -EDEADLK target thread is joining on the caller, or target thread
* is the caller
*/
-__syscall int k_thread_join(struct k_thread *thread, s32_t timeout);
+__syscall int k_thread_join(struct k_thread *thread, k_timeout_t timeout);
+/**
+* @brief Put the current thread to sleep.
+*
+* This routine puts the current thread to sleep for @a duration,
+* specified as a k_timeout_t object.
+*
+* @param timeout Desired duration of sleep.
+*
+* @return Zero if the requested time has elapsed or the number of milliseconds
+* left to sleep, if thread was woken up by \ref k_wakeup call.
+*/
+__syscall s32_t k_sleep(k_timeout_t timeout);
/**
* @brief Put the current thread to sleep.
@@ -946,7 +958,10 @@ __syscall int k_thread_join(struct k_thread *thread, s32_t timeout);
* @return Zero if the requested time has elapsed or the number of milliseconds
* left to sleep, if thread was woken up by \ref k_wakeup call.
*/
-__syscall s32_t k_sleep(s32_t ms);
+static inline s32_t k_msleep(s32_t ms)
+{
+return k_sleep(Z_TIMEOUT_MS(ms));
+}
/**
* @brief Put the current thread to sleep with microsecond resolution.
@@ -1531,7 +1546,7 @@ const char *k_thread_state_str(k_tid_t thread_id);
*
* @return Timeout delay value.
*/
-#define K_NO_WAIT 0
+#define K_NO_WAIT Z_TIMEOUT_NO_WAIT
/**
* @brief Generate timeout delay from milliseconds.
@@ -1543,7 +1558,7 @@ const char *k_thread_state_str(k_tid_t thread_id);
*
* @return Timeout delay value.
*/
-#define K_MSEC(ms) (ms)
+#define K_MSEC(ms) Z_TIMEOUT_MS(ms)
/**
* @brief Generate timeout delay from seconds.
@@ -1589,7 +1604,7 @@ const char *k_thread_state_str(k_tid_t thread_id);
*
* @return Timeout delay value.
*/
-#define K_FOREVER (-1)
+#define K_FOREVER Z_FOREVER
/**
* @}
@@ -1617,7 +1632,7 @@ struct k_timer {
void (*stop_fn)(struct k_timer *timer);
/* timer period */
-s32_t period;
+k_timeout_t period;
/* timer status */
u32_t status;
@@ -1639,7 +1654,6 @@
.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
.expiry_fn = expiry, \
.stop_fn = stop, \
-.period = 0, \
.status = 0, \
.user_data = 0, \
_OBJECT_TRACING_INIT \
@@ -1727,13 +1741,13 @@ extern void k_timer_init(struct k_timer *timer,
* using the new duration and period values.
*
* @param timer Address of timer.
-* @param duration Initial timer duration (in milliseconds).
-* @param period Timer period (in milliseconds).
+* @param duration Initial timer duration.
+* @param period Timer period.
*
* @return N/A
*/
__syscall void k_timer_start(struct k_timer *timer,
-s32_t duration, s32_t period);
+k_timeout_t duration, k_timeout_t period);
/**
* @brief Stop a timer.
@@ -2189,14 +2203,14 @@ extern int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list);
* @note Can be called by ISRs, but @a timeout must be set to K_NO_WAIT.
*
* @param queue Address of the queue.
-* @param timeout Non-negative waiting period to obtain a data item (in
-* milliseconds), or one of the special values K_NO_WAIT and
+* @param timeout Non-negative waiting period to obtain a data item
+* or one of the special values K_NO_WAIT and
* K_FOREVER.
*
* @return Address of the data item if successful; NULL if returned
* without waiting, or waiting period timed out.
*/
-__syscall void *k_queue_get(struct k_queue *queue, s32_t timeout);
+__syscall void *k_queue_get(struct k_queue *queue, k_timeout_t timeout);
/**
* @brief Remove an element from a queue.
@@ -2358,7 +2372,7 @@ struct z_futex_data {
* @param futex Address of the futex.
* @param expected Expected value of the futex, if it is different the caller
* will not wait on it.
-* @param timeout Non-negative waiting period on the futex, in milliseconds, or
+* @param timeout Non-negative waiting period on the futex, or
* one of the special values K_NO_WAIT or K_FOREVER.
* @retval -EACCES Caller does not have read access to futex address.
* @retval -EAGAIN If the futex value did not match the expected parameter.
@@ -2368,7 +2382,8 @@
* should check the futex's value on wakeup to determine if it needs
* to block again.
*/
-__syscall int k_futex_wait(struct k_futex *futex, int expected, s32_t timeout);
+__syscall int k_futex_wait(struct k_futex *futex, int expected,
+k_timeout_t timeout);
/**
* @brief Wake one/all threads pending on a futex
@@ -2529,7 +2544,7 @@ struct k_fifo {
* @note Can be called by ISRs, but @a timeout must be set to K_NO_WAIT.
*
* @param fifo Address of the FIFO queue.
-* @param timeout Waiting period to obtain a data item (in milliseconds),
+* @param timeout Waiting period to obtain a data item,
* or one of the special values K_NO_WAIT and K_FOREVER.
*
* @return Address of the data item if successful; NULL if returned
@@ -2689,7 +2704,7 @@ struct k_lifo {
* @note Can be called by ISRs, but @a timeout must be set to K_NO_WAIT.
*
* @param lifo Address of the LIFO queue.
-* @param timeout Waiting period to obtain a data item (in milliseconds),
+* @param timeout Waiting period to obtain a data item,
* or one of the special values K_NO_WAIT and K_FOREVER.
*
* @return Address of the data item if successful; NULL if returned
@@ -2827,8 +2842,8 @@ __syscall int k_stack_push(struct k_stack *stack, stack_data_t data);
*
* @param stack Address of the stack.
* @param data Address of area to hold the value popped from the stack.
-* @param timeout Non-negative waiting period to obtain a value (in
-* milliseconds), or one of the special values K_NO_WAIT and
+* @param timeout Waiting period to obtain a value,
+* or one of the special values K_NO_WAIT and
* K_FOREVER.
*
* @retval 0 Element popped from stack.
@@ -2837,7 +2852,7 @@ __syscall int k_stack_push(struct k_stack *stack, stack_data_t data);
* @req K-STACK-001
*/
__syscall int k_stack_pop(struct k_stack *stack, stack_data_t *data,
-s32_t timeout);
+k_timeout_t timeout);
/**
* @brief Statically define and initialize a stack
@@ -3142,8 +3157,7 @@ extern void k_delayed_work_init(struct k_delayed_work *work,
*
* @param work_q Address of workqueue.
* @param work Address of delayed work item.
-* @param delay Non-negative delay before submitting the work item (in
-* milliseconds).
+* @param delay Delay before submitting the work item
*
* @retval 0 Work item countdown started.
* @retval -EINVAL Work item is being processed or has completed its work.
@@ -3152,7 +3166,7 @@ extern void k_delayed_work_init(struct k_delayed_work *work,
*/
extern int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
struct k_delayed_work *work,
-s32_t delay);
+k_timeout_t delay);
/**
* @brief Cancel a delayed work item.
@@ -3228,8 +3242,7 @@ static inline void k_work_submit(struct k_work *work)
* @note Can be called by ISRs.
*
* @param work Address of delayed work item.
-* @param delay Non-negative delay before submitting the work item (in
-* milliseconds).
+* @param delay Delay before submitting the work item
*
* @retval 0 Work item countdown started.
* @retval -EINVAL Work item is being processed or has completed its work.
@@ -3237,7 +3250,7 @@ static inline void k_work_submit(struct k_work *work)
* @req K-DWORK-001
*/
static inline int k_delayed_work_submit(struct k_delayed_work *work,
-s32_t delay)
+k_timeout_t delay)
{
return k_delayed_work_submit_to_queue(&k_sys_work_q, work, delay);
}
@@ -3299,7 +3312,7 @@ extern void k_work_poll_init(struct k_work_poll *work,
* @param work Address of delayed work item.
* @param events An array of pointers to events which trigger the work.
* @param num_events The number of events in the array.
-* @param timeout Non-negative timeout after which the work will be scheduled
+* @param timeout Timeout after which the work will be scheduled
* for execution even if not triggered.
*
*
@@ -3311,7 +3324,7 @@ extern int k_work_poll_submit_to_queue(struct k_work_q *work_q,
struct k_work_poll *work,
struct k_poll_event *events,
int num_events,
-s32_t timeout);
+k_timeout_t timeout);
/**
* @brief Submit a triggered work item to the system workqueue.
@@ -3337,7 +3350,7 @@ extern int k_work_poll_submit_to_queue(struct k_work_q *work_q,
* @param work Address of delayed work item.
* @param events An array of pointers to events which trigger the work.
* @param num_events The number of events in the array.
-* @param timeout Non-negative timeout after which the work will be scheduled
+* @param timeout Timeout after which the work will be scheduled
* for execution even if not triggered.
*
* @retval 0 Work item started watching for events.
@@ -3347,7 +3360,7 @@ extern int k_work_poll_submit_to_queue(struct k_work_q *work_q,
static inline int k_work_poll_submit(struct k_work_poll *work,
struct k_poll_event *events,
int num_events,
-s32_t timeout)
+k_timeout_t timeout)
{
return k_work_poll_submit_to_queue(&k_sys_work_q, work,
events, num_events, timeout);
@@ -3455,8 +3468,8 @@ __syscall int k_mutex_init(struct k_mutex *mutex);
* completes immediately and the lock count is increased by 1.
*
* @param mutex Address of the mutex.
-* @param timeout Non-negative waiting period to lock the mutex (in
-* milliseconds), or one of the special values K_NO_WAIT and
+* @param timeout Waiting period to lock the mutex,
+* or one of the special values K_NO_WAIT and
* K_FOREVER.
*
* @retval 0 Mutex locked.
@@ -3464,7 +3477,7 @@ __syscall int k_mutex_init(struct k_mutex *mutex);
* @retval -EAGAIN Waiting period timed out.
* @req K-MUTEX-002
*/
-__syscall int k_mutex_lock(struct k_mutex *mutex, s32_t timeout);
+__syscall int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout);
/**
* @brief Unlock a mutex.
@@ -3550,16 +3563,15 @@ __syscall int k_sem_init(struct k_sem *sem, unsigned int initial_count,
* @note Can be called by ISRs, but @a timeout must be set to K_NO_WAIT.
*
* @param sem Address of the semaphore.
-* @param timeout Non-negative waiting period to take the semaphore (in
-* milliseconds), or one of the special values K_NO_WAIT and
-* K_FOREVER.
+* @param timeout Waiting period to take the semaphore,
+* or one of the special values K_NO_WAIT and K_FOREVER.
*
* @retval 0 Semaphore taken.
* @retval -EBUSY Returned without waiting.
* @retval -EAGAIN Waiting period timed out.
* @req K-SEM-001
*/
-__syscall int k_sem_take(struct k_sem *sem, s32_t timeout);
+__syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout);
/**
* @brief Give a semaphore.
@@ -3803,8 +3815,8 @@ int k_msgq_cleanup(struct k_msgq *msgq);
*
* @param msgq Address of the message queue.
* @param data Pointer to the message.
-* @param timeout Non-negative waiting period to add the message (in
-* milliseconds), or one of the special values K_NO_WAIT and
+* @param timeout Non-negative waiting period to add the message,
+* or one of the special values K_NO_WAIT and
* K_FOREVER.
*
* @retval 0 Message sent.
@@ -3812,7 +3824,7 @@ int k_msgq_cleanup(struct k_msgq *msgq);
* @retval -EAGAIN Waiting period timed out.
* @req K-MSGQ-002
*/
-__syscall int k_msgq_put(struct k_msgq *msgq, void *data, s32_t timeout);
+__syscall int k_msgq_put(struct k_msgq *msgq, void *data, k_timeout_t timeout);
/**
* @brief Receive a message from a message queue.
@@ -3824,8 +3836,8 @@ __syscall int k_msgq_put(struct k_msgq *msgq, void *data, s32_t timeout);
*
* @param msgq Address of the message queue.
* @param data Address of area to hold the received message.
-* @param timeout Non-negative waiting period to receive the message (in
-* milliseconds), or one of the special values K_NO_WAIT and
+* @param timeout Waiting period to receive the message,
+* or one of the special values K_NO_WAIT and
* K_FOREVER.
*
* @retval 0 Message received.
@@ -3833,7 +3845,7 @@ __syscall int k_msgq_put(struct k_msgq *msgq, void *data, s32_t timeout);
* @retval -EAGAIN Waiting period timed out.
* @req K-MSGQ-002
*/
-__syscall int k_msgq_get(struct k_msgq *msgq, void *data, s32_t timeout);
+__syscall int k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout);
/**
* @brief Peek/read a message from a message queue.
@@ -4042,8 +4054,8 @@ extern void k_mbox_init(struct k_mbox *mbox);
*
* @param mbox Address of the mailbox.
* @param tx_msg Address of the transmit message descriptor.
-* @param timeout Non-negative waiting period for the message to be received (in
-* milliseconds), or one of the special values K_NO_WAIT
+* @param timeout Waiting period for the message to be received,
+* or one of the special values K_NO_WAIT
* and K_FOREVER. Once the message has been received,
* this routine waits as long as necessary for the message
* to be completely processed.
@@ -4054,7 +4066,7 @@ extern void k_mbox_init(struct k_mbox *mbox);
* @req K-MBOX-002
*/
extern int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
-s32_t timeout);
+k_timeout_t timeout);
/**
* @brief Send a mailbox message in an asynchronous manner.
@@ -4085,9 +4097,8 @@ extern void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
* @param rx_msg Address of the receive message descriptor.
* @param buffer Address of the buffer to receive data, or NULL to defer data
* retrieval and message disposal until later.
-* @param timeout Non-negative waiting period for a message to be received (in
-* milliseconds), or one of the special values K_NO_WAIT
-* and K_FOREVER.
+* @param timeout Waiting period for a message to be received,
+* or one of the special values K_NO_WAIT and K_FOREVER.
*
* @retval 0 Message received.
* @retval -ENOMSG Returned without waiting.
@@ -4095,7 +4106,7 @@ extern void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
* @req K-MBOX-002
*/
extern int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg,
-void *buffer, s32_t timeout);
+void *buffer, k_timeout_t timeout);
/**
* @brief Retrieve mailbox message data into a buffer.
@@ -4137,8 +4148,8 @@ extern void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer);
* @param rx_msg Address of a receive message descriptor.
* @param pool Address of memory pool, or NULL to discard data.
* @param block Address of the area to hold memory pool block info.
-* @param timeout Non-negative waiting period to wait for a memory pool block
-* (in milliseconds), or one of the special values K_NO_WAIT
+* @param timeout Time to wait for a memory pool block,
+* or one of the special values K_NO_WAIT
* and K_FOREVER.
*
* @retval 0 Data retrieved.
@@ -4148,7 +4159,8 @@ extern void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer);
*/
extern int k_mbox_data_block_get(struct k_mbox_msg *rx_msg,
struct k_mem_pool *pool,
-struct k_mem_block *block, s32_t timeout);
+struct k_mem_block *block,
+k_timeout_t timeout);
/** @} */
@@ -4282,9 +4294,8 @@ __syscall int k_pipe_alloc_init(struct k_pipe *pipe, size_t size);
* @param bytes_to_write Size of data (in bytes).
* @param bytes_written Address of area to hold the number of bytes written.
* @param min_xfer Minimum number of bytes to write.
-* @param timeout Non-negative waiting period to wait for the data to be written
-* (in milliseconds), or one of the special values K_NO_WAIT
-* and K_FOREVER.
+* @param timeout Waiting period to wait for the data to be written,
+* or one of the special values K_NO_WAIT and K_FOREVER.
*
* @retval 0 At least @a min_xfer bytes of data were written.
* @retval -EIO Returned without waiting; zero data bytes were written.
@@ -4294,7 +4305,7 @@ __syscall int k_pipe_alloc_init(struct k_pipe *pipe, size_t size);
*/
__syscall int k_pipe_put(struct k_pipe *pipe, void *data,
size_t bytes_to_write, size_t *bytes_written,
-size_t min_xfer, s32_t timeout);
+size_t min_xfer, k_timeout_t timeout);
/**
* @brief Read data from a pipe.
@@ -4306,9 +4317,8 @@ __syscall int k_pipe_put(struct k_pipe *pipe, void *data,
* @param bytes_to_read Maximum number of data bytes to read.
* @param bytes_read Address of area to hold the number of bytes read.
* @param min_xfer Minimum number of data bytes to read.
-* @param timeout Non-negative waiting period to wait for the data to be read
-* (in milliseconds), or one of the special values K_NO_WAIT
-* and K_FOREVER.
+* @param timeout Waiting period to wait for the data to be read,
+* or one of the special values K_NO_WAIT and K_FOREVER.
*
* @retval 0 At least @a min_xfer bytes of data were read.
* @retval -EINVAL invalid parameters supplied
@@ -4319,7 +4329,7 @@ __syscall int k_pipe_put(struct k_pipe *pipe, void *data,
*/
__syscall int k_pipe_get(struct k_pipe *pipe, void *data,
size_t bytes_to_read, size_t *bytes_read,
-size_t min_xfer, s32_t timeout);
+size_t min_xfer, k_timeout_t timeout);
/**
* @brief Write memory block to a pipe.
@@ -4441,8 +4451,8 @@ extern int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
*
* @param slab Address of the memory slab.
* @param mem Pointer to block address area.
-* @param timeout Non-negative waiting period to wait for operation to complete
-* (in milliseconds). Use K_NO_WAIT to return without waiting,
+* @param timeout Non-negative waiting period to wait for operation to complete.
+* Use K_NO_WAIT to return without waiting,
* or K_FOREVER to wait as long as necessary.
*
* @retval 0 Memory allocated. The block address area pointed at by @a mem
@@ -4453,7 +4463,7 @@ extern int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
* @req K-MSLAB-002
*/
extern int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem,
-s32_t timeout);
+k_timeout_t timeout);
/**
* @brief Free memory allocated from a memory slab.
@@ -4565,8 +4575,8 @@ struct k_mem_pool {
* @param pool Address of the memory pool.
* @param block Pointer to block descriptor for the allocated memory.
* @param size Amount of memory to allocate (in bytes).
-* @param timeout Non-negative waiting period to wait for operation to complete
-* (in milliseconds). Use K_NO_WAIT to return without waiting,
+* @param timeout Waiting period to wait for operation to complete.
+* Use K_NO_WAIT to return without waiting,
* or K_FOREVER to wait as long as necessary.
*
* @retval 0 Memory allocated. The @a data field of the block descriptor
@@ -4576,7 +4586,7 @@ struct k_mem_pool {
* @req K-MPOOL-002
*/
extern int k_mem_pool_alloc(struct k_mem_pool *pool, struct k_mem_block *block,
-size_t size, s32_t timeout);
+size_t size, k_timeout_t timeout);
/**
* @brief Allocate memory from a memory pool with malloc() semantics
@@ -4890,9 +4900,8 @@ extern void k_poll_event_init(struct k_poll_event *event, u32_t type,
*
* @param events An array of pointers to events to be polled for.
* @param num_events The number of events in the array.
-* @param timeout Non-negative waiting period for an event to be ready (in
-* milliseconds), or one of the special values K_NO_WAIT and
-* K_FOREVER.
+* @param timeout Waiting period for an event to be ready,
+* or one of the special values K_NO_WAIT and K_FOREVER.
*
* @retval 0 One or more events are ready.
* @retval -EAGAIN Waiting period timed out.
@@ -4907,7 +4916,7 @@ extern void k_poll_event_init(struct k_poll_event *event, u32_t type,
*/
__syscall int k_poll(struct k_poll_event *events, int num_events,
-s32_t timeout);
+k_timeout_t timeout);
/**
* @brief Initialize a poll signal object.

include/sys/mutex.h

@@ -19,6 +19,7 @@
#ifdef CONFIG_USERSPACE
#include <sys/atomic.h>
#include <zephyr/types.h>
+#include <sys_clock.h>
struct sys_mutex {
/* Currently unused, but will be used to store state for fast mutexes
@@ -54,7 +55,8 @@ static inline void sys_mutex_init(struct sys_mutex *mutex)
*/
}
-__syscall int z_sys_mutex_kernel_lock(struct sys_mutex *mutex, s32_t timeout);
+__syscall int z_sys_mutex_kernel_lock(struct sys_mutex *mutex,
+k_timeout_t timeout);
__syscall int z_sys_mutex_kernel_unlock(struct sys_mutex *mutex);
@@ -69,7 +71,7 @@ __syscall int z_sys_mutex_kernel_unlock(struct sys_mutex *mutex);
* completes immediately and the lock count is increased by 1.
*
* @param mutex Address of the mutex, which may reside in user memory
-* @param timeout Waiting period to lock the mutex (in milliseconds),
+* @param timeout Waiting period to lock the mutex,
* or one of the special values K_NO_WAIT and K_FOREVER.
*
* @retval 0 Mutex locked.
@@ -78,7 +80,7 @@ __syscall int z_sys_mutex_kernel_unlock(struct sys_mutex *mutex);
* @retval -EACCESS Caller has no access to provided mutex address
* @retval -EINVAL Provided mutex not recognized by the kernel
*/
-static inline int sys_mutex_lock(struct sys_mutex *mutex, s32_t timeout)
+static inline int sys_mutex_lock(struct sys_mutex *mutex, k_timeout_t timeout)
{
/* For now, make the syscall unconditionally */
return z_sys_mutex_kernel_lock(mutex, timeout);
@@ -126,7 +128,7 @@ static inline void sys_mutex_init(struct sys_mutex *mutex)
k_mutex_init(&mutex->kernel_mutex);
}
-static inline int sys_mutex_lock(struct sys_mutex *mutex, s32_t timeout)
+static inline int sys_mutex_lock(struct sys_mutex *mutex, k_timeout_t timeout)
{
return k_mutex_lock(&mutex->kernel_mutex, timeout);
}

include/sys/sem.h

@@ -110,7 +110,7 @@ int sys_sem_give(struct sys_sem *sem);
* This routine takes @a sem.
*
* @param sem Address of the sys_sem.
-* @param timeout Waiting period to take the sys_sem (in milliseconds),
+* @param timeout Waiting period to take the sys_sem,
* or one of the special values K_NO_WAIT and K_FOREVER.
*
* @retval 0 sys_sem taken.
@@ -118,7 +118,7 @@ int sys_sem_give(struct sys_sem *sem);
* @retval -ETIMEDOUT Waiting period timed out.
* @retval -EACCES Caller does not have enough access.
*/
-int sys_sem_take(struct sys_sem *sem, s32_t timeout);
+int sys_sem_take(struct sys_sem *sem, k_timeout_t timeout);
/**
* @brief Get sys_sem's value

include/sys_clock.h

@@ -28,6 +28,59 @@
extern "C" {
#endif
+/**
+* @addtogroup clock_apis
+* @{
+*/
+typedef u32_t k_ticks_t;
+#define K_TICKS_FOREVER ((k_ticks_t) -1)
+#ifndef CONFIG_LEGACY_TIMEOUT_API
+typedef struct {
+k_ticks_t ticks;
+} k_timeout_t;
+/**
+* @brief Compare timeouts for equality
+*
+* The k_timeout_t object is an opaque struct that should not be
+* inspected by application code. This macro exists so that users can
+* test timeout objects for equality with known constants
+* (e.g. K_NO_WAIT and K_FOREVER) when implementing their own APIs in
+* terms of Zephyr timeout constants.
+*
+* @return True if the timeout objects are identical
+*/
+#define K_TIMEOUT_EQ(a, b) ((a).ticks == (b).ticks)
+#define Z_TIMEOUT_NO_WAIT ((k_timeout_t) {})
+#define Z_TIMEOUT_TICKS(t) ((k_timeout_t) { .ticks = (t) })
+#define Z_FOREVER Z_TIMEOUT_TICKS(K_TICKS_FOREVER)
+#define Z_TIMEOUT_MS(t) Z_TIMEOUT_TICKS(k_ms_to_ticks_ceil32(MAX(t, 0)))
+#define Z_TIMEOUT_US(t) Z_TIMEOUT_TICKS(k_us_to_ticks_ceil32(MAX(t, 0)))
+#define Z_TIMEOUT_NS(t) Z_TIMEOUT_TICKS(k_ns_to_ticks_ceil32(MAX(t, 0)))
+#define Z_TIMEOUT_CYC(t) Z_TIMEOUT_TICKS(k_cyc_to_ticks_ceil32(MAX(t, 0)))
+#else
+/* Legacy timeout API */
+typedef s32_t k_timeout_t;
+#define K_TIMEOUT_EQ(a, b) ((a) == (b))
+#define Z_TIMEOUT_NO_WAIT 0
+#define Z_TIMEOUT_TICKS(t) k_ticks_to_ms_ceil32(t)
+#define Z_FOREVER K_TICKS_FOREVER
+#define Z_TIMEOUT_MS(t) (t)
+#define Z_TIMEOUT_US(t) ((t) * 1000)
+#define Z_TIMEOUT_NS(t) ((t) * 1000000)
+#define Z_TIMEOUT_CYC(t) k_cyc_to_ms_ceil32(MAX((t), 0))
+#endif
+/** @} */
#ifdef CONFIG_TICKLESS_KERNEL
extern int _sys_clock_always_on;
extern void z_enable_sys_clock(void);
@@ -53,8 +106,6 @@ extern void z_enable_sys_clock(void);
/* number of nanoseconds per second */
#define NSEC_PER_SEC ((NSEC_PER_USEC) * (USEC_PER_MSEC) * (MSEC_PER_SEC))
-#define k_msleep(ms) k_sleep(ms)
-#define K_TIMEOUT_EQ(a, b) ((a) == (b))
/* kernel clocks */
@@ -132,6 +183,8 @@ s64_t z_tick_get(void);
#define z_tick_get_32() (0)
#endif
+u64_t z_timeout_end_calc(k_timeout_t timeout);
/* timeouts */
struct _timeout;

include/timeout_q.h

@@ -27,7 +27,8 @@ static inline void z_init_timeout(struct _timeout *t)
sys_dnode_init(&t->node);
}
-void z_add_timeout(struct _timeout *to, _timeout_func_t fn, s32_t ticks);
+void z_add_timeout(struct _timeout *to, _timeout_func_t fn,
+k_timeout_t timeout);
int z_abort_timeout(struct _timeout *to);
@@ -43,7 +44,7 @@ static inline void z_init_thread_timeout(struct _thread_base *thread_base)
extern void z_thread_timeout(struct _timeout *to);
-static inline void z_add_thread_timeout(struct k_thread *th, s32_t ticks)
+static inline void z_add_thread_timeout(struct k_thread *th, k_timeout_t ticks)
{
z_add_timeout(&th->base.timeout, z_thread_timeout, ticks);
}
@@ -63,12 +64,17 @@ s32_t z_timeout_remaining(struct _timeout *timeout);
/* Stubs when !CONFIG_SYS_CLOCK_EXISTS */
#define z_init_thread_timeout(t) do {} while (false)
-#define z_add_thread_timeout(th, to) do {} while (false && to && (void *)th)
#define z_abort_thread_timeout(t) (0)
#define z_is_inactive_timeout(t) 0
-#define z_get_next_timeout_expiry() (K_FOREVER)
+#define z_get_next_timeout_expiry() (K_TICKS_FOREVER)
#define z_set_timeout_expiry(t, i) do {} while (false)
+static inline void z_add_thread_timeout(struct k_thread *th, k_timeout_t ticks)
+{
+ARG_UNUSED(th);
+ARG_UNUSED(ticks);
+}
#endif
#ifdef __cplusplus

kernel/Kconfig

@@ -570,6 +570,15 @@ config SYS_CLOCK_EXISTS
this is disabled. Obviously timeout-related APIs will not
work.
+config LEGACY_TIMEOUT_API
+bool "Support legacy k_timeout_t API"
+help
+The k_timeout_t API has changed to become an opaque type
+that must be initialized with macros. Older applications
+can choose this to continue using the old style of timeouts
+(which were s32_t counts of milliseconds), at the cost of
+not being able to use new features.
config XIP
bool "Execute in place"
help

kernel/futex.c

@@ -62,7 +62,8 @@ static inline int z_vrfy_k_futex_wake(struct k_futex *futex, bool wake_all)
}
#include <syscalls/k_futex_wake_mrsh.c>
-int z_impl_k_futex_wait(struct k_futex *futex, int expected, s32_t timeout)
+int z_impl_k_futex_wait(struct k_futex *futex, int expected,
+k_timeout_t timeout)
{
int ret;
k_spinlock_key_t key;
@@ -90,7 +91,7 @@ int z_impl_k_futex_wait(struct k_futex *futex, int expected, s32_t timeout)
}
static inline int z_vrfy_k_futex_wait(struct k_futex *futex, int expected,
-s32_t timeout)
+k_timeout_t timeout)
{
if (Z_SYSCALL_MEMORY_WRITE(futex, sizeof(struct k_futex)) != 0) {
return -EACCES;

kernel/include/ksched.h

@@ -42,9 +42,10 @@ void z_remove_thread_from_ready_q(struct k_thread *thread);
int z_is_thread_time_slicing(struct k_thread *thread);
void z_unpend_thread_no_timeout(struct k_thread *thread);
int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
-_wait_q_t *wait_q, s32_t timeout);
-int z_pend_curr_irqlock(u32_t key, _wait_q_t *wait_q, s32_t timeout);
-void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout);
+_wait_q_t *wait_q, k_timeout_t timeout);
+int z_pend_curr_irqlock(u32_t key, _wait_q_t *wait_q, k_timeout_t timeout);
+void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
+k_timeout_t timeout);
void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key);
void z_reschedule_irqlock(u32_t key);
struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q);
@@ -63,7 +64,7 @@ void z_sched_ipi(void);
void z_sched_start(struct k_thread *thread);
void z_ready_thread(struct k_thread *thread);
-static inline void z_pend_curr_unlocked(_wait_q_t *wait_q, s32_t timeout)
+static inline void z_pend_curr_unlocked(_wait_q_t *wait_q, k_timeout_t timeout)
{
(void) z_pend_curr_irqlock(arch_irq_lock(), wait_q, timeout);
}

kernel/mailbox.c

@@ -233,7 +233,7 @@ static void mbox_message_dispose(struct k_mbox_msg *rx_msg)
* @return 0 if successful, -ENOMSG if failed immediately, -EAGAIN if timed out
*/
static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
-s32_t timeout)
+k_timeout_t timeout)
{
struct k_thread *sending_thread;
struct k_thread *receiving_thread;
@@ -286,7 +286,7 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
}
/* didn't find a matching receiver: don't wait for one */
-if (timeout == K_NO_WAIT) {
+if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
k_spin_unlock(&mbox->lock, key);
return -ENOMSG;
}
@@ -304,7 +304,8 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
return z_pend_curr(&mbox->lock, key, &mbox->tx_msg_queue, timeout);
}
-int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, s32_t timeout)
+int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
+k_timeout_t timeout)
{
/* configure things for a synchronous send, then send the message */
tx_msg->_syncing_thread = _current;
@@ -351,7 +352,7 @@ void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer)
}
int k_mbox_data_block_get(struct k_mbox_msg *rx_msg, struct k_mem_pool *pool,
-struct k_mem_block *block, s32_t timeout)
+struct k_mem_block *block, k_timeout_t timeout)
{
int result;
@@ -416,7 +417,7 @@ static int mbox_message_data_check(struct k_mbox_msg *rx_msg, void *buffer)
}
int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
-s32_t timeout)
+k_timeout_t timeout)
{
struct k_thread *sending_thread;
struct k_mbox_msg *tx_msg;
@@ -445,7 +446,7 @@ int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
/* didn't find a matching sender */
-if (timeout == K_NO_WAIT) {
+if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
/* don't wait for a matching sender to appear */
k_spin_unlock(&mbox->lock, key);
return -ENOMSG;

kernel/mem_slab.c

@@ -101,7 +101,7 @@ out:
return rc;
}
-int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, s32_t timeout)
+int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
{
k_spinlock_key_t key = k_spin_lock(&lock);
int result;
@@ -112,7 +112,7 @@ int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, s32_t timeout)
slab->free_list = *(char **)(slab->free_list);
slab->num_used++;
result = 0;
-} else if (timeout == K_NO_WAIT) {
+} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
/* don't wait for a free block to become available */
*mem = NULL;
result = -ENOMEM;

kernel/mempool.c

@@ -47,16 +47,14 @@ int init_static_pools(struct device *unused)
SYS_INIT(init_static_pools, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
-size_t size, s32_t timeout)
+size_t size, k_timeout_t timeout)
{
int ret;
-s64_t end = 0;
+u64_t end = 0;
__ASSERT(!(arch_is_in_isr() && timeout != K_NO_WAIT), "");
__ASSERT(!(arch_is_in_isr() && !K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");
-if (timeout > 0) {
-end = k_uptime_get() + timeout;
-}
+end = z_timeout_end_calc(timeout);
while (true) {
u32_t level_num, block_num;
@@ -68,18 +66,20 @@ int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
block->id.level = level_num;
block->id.block = block_num;
-if (ret == 0 || timeout == K_NO_WAIT ||
+if (ret == 0 || K_TIMEOUT_EQ(timeout, K_NO_WAIT) ||
ret != -ENOMEM) {
return ret;
}
z_pend_curr_unlocked(&p->wait_q, timeout);
-if (timeout != K_FOREVER) {
-timeout = end - k_uptime_get();
-if (timeout <= 0) {
+if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
+s64_t remaining = end - z_tick_get();
+if (remaining <= 0) {
break;
}
+timeout = Z_TIMEOUT_TICKS(remaining);
}
}

kernel/msg_q.c

@@ -113,9 +113,9 @@ int k_msgq_cleanup(struct k_msgq *msgq)
}
-int z_impl_k_msgq_put(struct k_msgq *msgq, void *data, s32_t timeout)
+int z_impl_k_msgq_put(struct k_msgq *msgq, void *data, k_timeout_t timeout)
{
__ASSERT(!arch_is_in_isr() || timeout == K_NO_WAIT, "");
__ASSERT(!arch_is_in_isr() || K_TIMEOUT_EQ(timeout, K_NO_WAIT), "");
struct k_thread *pending_thread;
k_spinlock_key_t key;
@@ -145,7 +145,7 @@ int z_impl_k_msgq_put(struct k_msgq *msgq, void *data, s32_t timeout)
msgq->used_msgs++;
}
result = 0;
-} else if (timeout == K_NO_WAIT) {
+} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
/* don't wait for message space to become available */
result = -ENOMSG;
} else {
@@ -160,7 +160,8 @@ int z_impl_k_msgq_put(struct k_msgq *msgq, void *data, s32_t timeout)
}
#ifdef CONFIG_USERSPACE
-static inline int z_vrfy_k_msgq_put(struct k_msgq *q, void *data, s32_t timeout)
+static inline int z_vrfy_k_msgq_put(struct k_msgq *q, void *data,
+k_timeout_t timeout)
{
Z_OOPS(Z_SYSCALL_OBJ(q, K_OBJ_MSGQ));
Z_OOPS(Z_SYSCALL_MEMORY_READ(data, q->msg_size));
@@ -188,9 +189,9 @@ static inline void z_vrfy_k_msgq_get_attrs(struct k_msgq *q,
#include <syscalls/k_msgq_get_attrs_mrsh.c>
#endif
-int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, s32_t timeout)
+int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout)
{
__ASSERT(!arch_is_in_isr() || timeout == K_NO_WAIT, "");
__ASSERT(!arch_is_in_isr() || K_TIMEOUT_EQ(timeout, K_NO_WAIT), "");
k_spinlock_key_t key;
struct k_thread *pending_thread;
@@ -226,7 +227,7 @@ int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, s32_t timeout)
return 0;
}
result = 0;
-} else if (timeout == K_NO_WAIT) {
+} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
/* don't wait for a message to become available */
result = -ENOMSG;
} else {
@@ -241,7 +242,8 @@ int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, s32_t timeout)
}
#ifdef CONFIG_USERSPACE
-static inline int z_vrfy_k_msgq_get(struct k_msgq *q, void *data, s32_t timeout)
+static inline int z_vrfy_k_msgq_get(struct k_msgq *q, void *data,
+k_timeout_t timeout)
{
Z_OOPS(Z_SYSCALL_OBJ(q, K_OBJ_MSGQ));
Z_OOPS(Z_SYSCALL_MEMORY_WRITE(data, q->msg_size));

kernel/mutex.c

@@ -116,7 +116,7 @@ static bool adjust_owner_prio(struct k_mutex *mutex, s32_t new_prio)
return false;
}
-int z_impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout)
+int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout)
{
int new_prio;
k_spinlock_key_t key;
@@ -144,7 +144,7 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout)
return 0;
}
-if (unlikely(timeout == (s32_t)K_NO_WAIT)) {
+if (unlikely(K_TIMEOUT_EQ(timeout, K_NO_WAIT))) {
k_spin_unlock(&lock, key);
sys_trace_end_call(SYS_TRACE_ID_MUTEX_LOCK);
return -EBUSY;
@@ -198,7 +198,8 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout)
}
#ifdef CONFIG_USERSPACE
-static inline int z_vrfy_k_mutex_lock(struct k_mutex *mutex, s32_t timeout)
+static inline int z_vrfy_k_mutex_lock(struct k_mutex *mutex,
+k_timeout_t timeout)
{
Z_OOPS(Z_SYSCALL_OBJ(mutex, K_OBJ_MUTEX));
return z_impl_k_mutex_lock(mutex, timeout);

kernel/pipes.c

@@ -318,13 +318,13 @@ static bool pipe_xfer_prepare(sys_dlist_t *xfer_list,
size_t pipe_space,
size_t bytes_to_xfer,
size_t min_xfer,
s32_t timeout)
k_timeout_t timeout)
{
struct k_thread *thread;
struct k_pipe_desc *desc;
size_t num_bytes = 0;
if (timeout == K_NO_WAIT) {
if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
_WAIT_Q_FOR_EACH(wait_q, thread) {
desc = (struct k_pipe_desc *)thread->base.swap_data;
@@ -429,7 +429,7 @@ static void pipe_thread_ready(struct k_thread *thread)
int z_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
unsigned char *data, size_t bytes_to_write,
size_t *bytes_written, size_t min_xfer,
s32_t timeout)
k_timeout_t timeout)
{
struct k_thread *reader;
struct k_pipe_desc *desc;
@@ -555,7 +555,7 @@ int z_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
pipe_desc.buffer = data + num_bytes_written;
pipe_desc.bytes_to_xfer = bytes_to_write - num_bytes_written;
if (timeout != K_NO_WAIT) {
if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
_current->base.swap_data = &pipe_desc;
/*
* Lock interrupts and unlock the scheduler before
@@ -576,7 +576,7 @@ int z_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
}
int z_impl_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
size_t *bytes_read, size_t min_xfer, s32_t timeout)
size_t *bytes_read, size_t min_xfer, k_timeout_t timeout)
{
struct k_thread *writer;
struct k_pipe_desc *desc;
@@ -701,7 +701,7 @@ int z_impl_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
pipe_desc.buffer = (u8_t *)data + num_bytes_read;
pipe_desc.bytes_to_xfer = bytes_to_read - num_bytes_read;
if (timeout != K_NO_WAIT) {
if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
_current->base.swap_data = &pipe_desc;
k_spinlock_key_t key = k_spin_lock(&pipe->lock);
@@ -720,7 +720,7 @@ int z_impl_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
#ifdef CONFIG_USERSPACE
int z_vrfy_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
size_t *bytes_read, size_t min_xfer, s32_t timeout)
size_t *bytes_read, size_t min_xfer, k_timeout_t timeout)
{
Z_OOPS(Z_SYSCALL_OBJ(pipe, K_OBJ_PIPE));
Z_OOPS(Z_SYSCALL_MEMORY_WRITE(bytes_read, sizeof(*bytes_read)));
@@ -734,7 +734,8 @@ int z_vrfy_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
#endif
int z_impl_k_pipe_put(struct k_pipe *pipe, void *data, size_t bytes_to_write,
size_t *bytes_written, size_t min_xfer, s32_t timeout)
size_t *bytes_written, size_t min_xfer,
k_timeout_t timeout)
{
return z_pipe_put_internal(pipe, NULL, data,
bytes_to_write, bytes_written,
@@ -743,7 +744,8 @@ int z_impl_k_pipe_put(struct k_pipe *pipe, void *data, size_t bytes_to_write,
#ifdef CONFIG_USERSPACE
int z_vrfy_k_pipe_put(struct k_pipe *pipe, void *data, size_t bytes_to_write,
size_t *bytes_written, size_t min_xfer, s32_t timeout)
size_t *bytes_written, size_t min_xfer,
k_timeout_t timeout)
{
Z_OOPS(Z_SYSCALL_OBJ(pipe, K_OBJ_PIPE));
Z_OOPS(Z_SYSCALL_MEMORY_WRITE(bytes_written, sizeof(*bytes_written)));
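
Illustrative only: a non-blocking pipe read, noting that K_NO_WAIT is now a k_timeout_t value rather than the integer 0. Pipe name and sizing are hypothetical.

    #include <zephyr.h>

    K_PIPE_DEFINE(my_pipe, 64, 4);  /* hypothetical pipe */

    size_t drain_nonblocking(u8_t *buf, size_t len)
    {
        size_t bytes_read = 0;

        (void)k_pipe_get(&my_pipe, buf, len, &bytes_read,
                         0 /* min_xfer */, K_NO_WAIT);
        return bytes_read;
    }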

21
kernel/poll.c

@@ -244,7 +244,8 @@ static int k_poll_poller_cb(struct k_poll_event *event, u32_t state)
return 0;
}
int z_impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
int z_impl_k_poll(struct k_poll_event *events, int num_events,
k_timeout_t timeout)
{
int events_registered;
k_spinlock_key_t key;
@@ -257,7 +258,7 @@ int z_impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
__ASSERT(num_events >= 0, "<0 events\n");
events_registered = register_events(events, num_events, &poller,
(timeout == K_NO_WAIT));
K_TIMEOUT_EQ(timeout, K_NO_WAIT));
key = k_spin_lock(&lock);
@@ -274,7 +275,7 @@ int z_impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
poller.is_polling = false;
if (timeout == K_NO_WAIT) {
if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
k_spin_unlock(&lock, key);
return -EAGAIN;
}
@@ -301,7 +302,7 @@ int z_impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_poll(struct k_poll_event *events,
int num_events, s32_t timeout)
int num_events, k_timeout_t timeout)
{
int ret;
k_spinlock_key_t key;
@@ -582,7 +583,7 @@ int k_work_poll_submit_to_queue(struct k_work_q *work_q,
struct k_work_poll *work,
struct k_poll_event *events,
int num_events,
s32_t timeout)
k_timeout_t timeout)
{
int events_registered;
k_spinlock_key_t key;
@@ -626,7 +627,7 @@ int k_work_poll_submit_to_queue(struct k_work_q *work_q,
&work->poller, false);
key = k_spin_lock(&lock);
if (work->poller.is_polling && timeout != K_NO_WAIT) {
if (work->poller.is_polling && !K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
/*
* Poller is still polling.
* No event is ready and all are watched.
@@ -634,11 +635,15 @@ int k_work_poll_submit_to_queue(struct k_work_q *work_q,
__ASSERT(num_events == events_registered,
"Some events were not registered!\n");
#ifdef CONFIG_LEGACY_TIMEOUT_API
timeout = k_ms_to_ticks_ceil32(timeout);
#endif
/* Setup timeout if such action is requested */
if (timeout != K_FOREVER) {
if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
z_add_timeout(&work->timeout,
triggered_work_expiration_handler,
k_ms_to_ticks_ceil32(timeout));
timeout);
}
/* From now, any event will result in submitted work. */
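
Illustrative only: a bounded k_poll() wait on a semaphore under the new signature. Names are hypothetical.

    #include <zephyr.h>

    K_SEM_DEFINE(my_sem, 0, 1);  /* hypothetical semaphore */

    int wait_for_sem(void)
    {
        struct k_poll_event event;

        k_poll_event_init(&event, K_POLL_TYPE_SEM_AVAILABLE,
                          K_POLL_MODE_NOTIFY_ONLY, &my_sem);

        /* K_SECONDS(1) bounds the wait; K_FOREVER blocks indefinitely. */
        return k_poll(&event, 1, K_SECONDS(1));
    }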

40
kernel/queue.c

@@ -293,45 +293,32 @@ int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list)
}
#if defined(CONFIG_POLL)
static void *k_queue_poll(struct k_queue *queue, s32_t timeout)
static void *k_queue_poll(struct k_queue *queue, k_timeout_t timeout)
{
struct k_poll_event event;
int err, elapsed = 0, done = 0;
int err;
k_spinlock_key_t key;
void *val;
u32_t start;
k_poll_event_init(&event, K_POLL_TYPE_FIFO_DATA_AVAILABLE,
K_POLL_MODE_NOTIFY_ONLY, queue);
if (timeout != K_FOREVER) {
start = k_uptime_get_32();
}
do {
event.state = K_POLL_STATE_NOT_READY;
err = k_poll(&event, 1, timeout - elapsed);
if (err && err != -EAGAIN) {
return NULL;
}
event.state = K_POLL_STATE_NOT_READY;
err = k_poll(&event, 1, timeout);
key = k_spin_lock(&queue->lock);
val = z_queue_node_peek(sys_sflist_get(&queue->data_q), true);
k_spin_unlock(&queue->lock, key);
if (err && err != -EAGAIN) {
return NULL;
}
if ((val == NULL) && (timeout != K_FOREVER)) {
elapsed = k_uptime_get_32() - start;
done = elapsed > timeout;
}
} while (!val && !done);
key = k_spin_lock(&queue->lock);
val = z_queue_node_peek(sys_sflist_get(&queue->data_q), true);
k_spin_unlock(&queue->lock, key);
return val;
}
#endif /* CONFIG_POLL */
void *z_impl_k_queue_get(struct k_queue *queue, s32_t timeout)
void *z_impl_k_queue_get(struct k_queue *queue, k_timeout_t timeout)
{
k_spinlock_key_t key = k_spin_lock(&queue->lock);
void *data;
@@ -345,7 +332,7 @@ void *z_impl_k_queue_get(struct k_queue *queue, s32_t timeout)
return data;
}
if (timeout == K_NO_WAIT) {
if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
k_spin_unlock(&queue->lock, key);
return NULL;
}
@@ -363,7 +350,8 @@ void *z_impl_k_queue_get(struct k_queue *queue, s32_t timeout)
}
#ifdef CONFIG_USERSPACE
static inline void *z_vrfy_k_queue_get(struct k_queue *queue, s32_t timeout)
static inline void *z_vrfy_k_queue_get(struct k_queue *queue,
k_timeout_t timeout)
{
Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
return z_impl_k_queue_get(queue, timeout);
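
Illustrative only: because k_poll() does not fail spuriously, a caller (like the reworked k_queue_poll() above) needs no retry loop or elapsed-time bookkeeping; a single bounded wait suffices. The queue name is hypothetical.

    #include <zephyr.h>

    K_QUEUE_DEFINE(my_queue);  /* hypothetical queue */

    void *next_item(void)
    {
        /* Returns NULL on timeout. */
        return k_queue_get(&my_queue, K_MSEC(10));
    }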

70
kernel/sched.c

@@ -582,31 +582,28 @@ static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q)
}
}
static void add_thread_timeout_ms(struct k_thread *thread, s32_t timeout)
static void add_thread_timeout(struct k_thread *thread, k_timeout_t timeout)
{
if (timeout != K_FOREVER) {
s32_t ticks;
if (timeout < 0) {
timeout = 0;
}
ticks = _TICK_ALIGN + k_ms_to_ticks_ceil32(timeout);
z_add_thread_timeout(thread, ticks);
if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
#ifdef CONFIG_LEGACY_TIMEOUT_API
timeout = _TICK_ALIGN + k_ms_to_ticks_ceil32(timeout);
#endif
z_add_thread_timeout(thread, timeout);
}
}
static void pend(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout)
static void pend(struct k_thread *thread, _wait_q_t *wait_q,
k_timeout_t timeout)
{
LOCKED(&sched_spinlock) {
add_to_waitq_locked(thread, wait_q);
}
add_thread_timeout_ms(thread, timeout);
add_thread_timeout(thread, timeout);
}
void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout)
void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
k_timeout_t timeout)
{
__ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread));
pend(thread, wait_q, timeout);
@@ -651,7 +648,7 @@ void z_thread_timeout(struct _timeout *timeout)
}
#endif
int z_pend_curr_irqlock(u32_t key, _wait_q_t *wait_q, s32_t timeout)
int z_pend_curr_irqlock(u32_t key, _wait_q_t *wait_q, k_timeout_t timeout)
{
pend(_current, wait_q, timeout);
@@ -671,7 +668,7 @@ int z_pend_curr_irqlock(u32_t key, _wait_q_t *wait_q, s32_t timeout)
}
int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
_wait_q_t *wait_q, s32_t timeout)
_wait_q_t *wait_q, k_timeout_t timeout)
{
#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
pending_current = _current;
@@ -1159,7 +1156,15 @@ static s32_t z_tick_sleep(s32_t ticks)
return 0;
}
k_timeout_t timeout;
#ifndef CONFIG_LEGACY_TIMEOUT_API
timeout = Z_TIMEOUT_TICKS(ticks);
#else
ticks += _TICK_ALIGN;
timeout = (k_ticks_t) ticks;
#endif
expected_wakeup_time = ticks + z_tick_get_32();
/* Spinlock purely for local interrupt locking to prevent us
@@ -1173,7 +1178,7 @@ static s32_t z_tick_sleep(s32_t ticks)
pending_current = _current;
#endif
z_remove_thread_from_ready_q(_current);
z_add_thread_timeout(_current, ticks);
z_add_thread_timeout(_current, timeout);
z_mark_thread_as_suspended(_current);
(void)z_swap(&local_lock, key);
@@ -1189,26 +1194,31 @@ static s32_t z_tick_sleep(s32_t ticks)
return 0;
}
s32_t z_impl_k_sleep(int ms)
s32_t z_impl_k_sleep(k_timeout_t timeout)
{
s32_t ticks;
k_ticks_t ticks;
__ASSERT(!arch_is_in_isr(), "");
if (ms == K_FOREVER) {
if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
k_thread_suspend(_current);
return K_FOREVER;
return K_TICKS_FOREVER;
}
ticks = k_ms_to_ticks_ceil32(ms);
#ifdef CONFIG_LEGACY_TIMEOUT_API
ticks = k_ms_to_ticks_ceil32(timeout);
#else
ticks = timeout.ticks;
#endif
ticks = z_tick_sleep(ticks);
return k_ticks_to_ms_floor64(ticks);
}
#ifdef CONFIG_USERSPACE
static inline s32_t z_vrfy_k_sleep(int ms)
static inline s32_t z_vrfy_k_sleep(k_timeout_t timeout)
{
return z_impl_k_sleep(ms);
return z_impl_k_sleep(timeout);
}
#include <syscalls/k_sleep_mrsh.c>
#endif
@@ -1407,12 +1417,13 @@ int k_thread_cpu_mask_disable(k_tid_t thread, int cpu)
#endif /* CONFIG_SCHED_CPU_MASK */
int z_impl_k_thread_join(struct k_thread *thread, s32_t timeout)
int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout)
{
k_spinlock_key_t key;
int ret;
__ASSERT(((arch_is_in_isr() == false) || (timeout == K_NO_WAIT)), "");
__ASSERT(((arch_is_in_isr() == false) ||
K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");
key = k_spin_lock(&sched_spinlock);
@@ -1427,7 +1438,7 @@ int z_impl_k_thread_join(struct k_thread *thread, s32_t timeout)
goto out;
}
if (timeout == K_NO_WAIT) {
if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
ret = -EBUSY;
goto out;
}
@@ -1436,7 +1447,7 @@ int z_impl_k_thread_join(struct k_thread *thread, s32_t timeout)
pending_current = _current;
#endif
add_to_waitq_locked(_current, &thread->base.join_waiters);
add_thread_timeout_ms(_current, timeout);
add_thread_timeout(_current, timeout);
return z_swap(&sched_spinlock, key);
out:
@@ -1472,7 +1483,8 @@ static bool thread_obj_validate(struct k_thread *thread)
CODE_UNREACHABLE;
}
static inline int z_vrfy_k_thread_join(struct k_thread *thread, s32_t timeout)
static inline int z_vrfy_k_thread_join(struct k_thread *thread,
k_timeout_t timeout)
{
if (thread_obj_validate(thread)) {
return 0;
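
Illustrative only: the new k_sleep() forms. Sleeping with K_FOREVER suspends the thread, and the integer K_TICKS_FOREVER (not K_FOREVER, which is no longer an integer) is returned if the thread is later resumed.

    #include <zephyr.h>

    void worker(void)
    {
        k_sleep(K_MSEC(100));  /* timeout built at the call site */
        k_msleep(100);         /* millisecond variant, same behavior */

        if (k_sleep(K_FOREVER) == K_TICKS_FOREVER) {
            /* resumed via k_wakeup() or k_thread_resume() */
        }
    }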

9
kernel/sem.c

@@ -133,11 +133,12 @@ static inline void z_vrfy_k_sem_give(struct k_sem *sem)
#include <syscalls/k_sem_give_mrsh.c>
#endif
int z_impl_k_sem_take(struct k_sem *sem, s32_t timeout)
int z_impl_k_sem_take(struct k_sem *sem, k_timeout_t timeout)
{
int ret = 0;
__ASSERT(((arch_is_in_isr() == false) || (timeout == K_NO_WAIT)), "");
__ASSERT(((arch_is_in_isr() == false) ||
K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");
sys_trace_void(SYS_TRACE_ID_SEMA_TAKE);
k_spinlock_key_t key = k_spin_lock(&lock);
@@ -149,7 +150,7 @@ int z_impl_k_sem_take(struct k_sem *sem, s32_t timeout)
goto out;
}
if (timeout == K_NO_WAIT) {
if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
k_spin_unlock(&lock, key);
ret = -EBUSY;
goto out;
@@ -163,7 +164,7 @@ out:
}
#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_sem_take(struct k_sem *sem, s32_t timeout)
static inline int z_vrfy_k_sem_take(struct k_sem *sem, k_timeout_t timeout)
{
Z_OOPS(Z_SYSCALL_OBJ(sem, K_OBJ_SEM));
return z_impl_k_sem_take((struct k_sem *)sem, timeout);

7
kernel/stack.c

@@ -133,7 +133,8 @@ static inline int z_vrfy_k_stack_push(struct k_stack *stack, stack_data_t data)
#include <syscalls/k_stack_push_mrsh.c>
#endif
int z_impl_k_stack_pop(struct k_stack *stack, stack_data_t *data, s32_t timeout)
int z_impl_k_stack_pop(struct k_stack *stack, stack_data_t *data,
k_timeout_t timeout)
{
k_spinlock_key_t key;
int result;
@@ -147,7 +148,7 @@ int z_impl_k_stack_pop(struct k_stack *stack, stack_data_t *data, s32_t timeout)
return 0;
}
if (timeout == K_NO_WAIT) {
if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
k_spin_unlock(&stack->lock, key);
return -EBUSY;
}
@@ -163,7 +164,7 @@ int z_impl_k_stack_pop(struct k_stack *stack, stack_data_t *data, s32_t timeout)
#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_stack_pop(struct k_stack *stack,
stack_data_t *data, s32_t timeout)
stack_data_t *data, k_timeout_t timeout)
{
Z_OOPS(Z_SYSCALL_OBJ(stack, K_OBJ_STACK));
Z_OOPS(Z_SYSCALL_MEMORY_WRITE(data, sizeof(stack_data_t)));

22
kernel/thread.c

@@ -404,15 +404,17 @@ static inline void z_vrfy_k_thread_start(struct k_thread *thread)
#endif
#ifdef CONFIG_MULTITHREADING
static void schedule_new_thread(struct k_thread *thread, s32_t delay)
static void schedule_new_thread(struct k_thread *thread, k_timeout_t delay)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
if (delay == 0) {
if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
k_thread_start(thread);
} else {
s32_t ticks = _TICK_ALIGN + k_ms_to_ticks_ceil32(delay);
#ifdef CONFIG_LEGACY_TIMEOUT_API
delay = _TICK_ALIGN + k_ms_to_ticks_ceil32(delay);
#endif
z_add_thread_timeout(thread, ticks);
z_add_thread_timeout(thread, delay);
}
#else
ARG_UNUSED(delay);
@@ -612,7 +614,7 @@ k_tid_t z_impl_k_thread_create(struct k_thread *new_thread,
k_thread_stack_t *stack,
size_t stack_size, k_thread_entry_t entry,
void *p1, void *p2, void *p3,
int prio, u32_t options, s32_t delay)
int prio, u32_t options, k_timeout_t delay)
{
__ASSERT(!arch_is_in_isr(), "Threads may not be created in ISRs");
@@ -626,7 +628,7 @@ k_tid_t z_impl_k_thread_create(struct k_thread *new_thread,
z_setup_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
prio, options, NULL);
if (delay != K_FOREVER) {
if (!K_TIMEOUT_EQ(delay, K_FOREVER)) {
schedule_new_thread(new_thread, delay);
}
@@ -639,7 +641,7 @@ k_tid_t z_vrfy_k_thread_create(struct k_thread *new_thread,
k_thread_stack_t *stack,
size_t stack_size, k_thread_entry_t entry,
void *p1, void *p2, void *p3,
int prio, u32_t options, s32_t delay)
int prio, u32_t options, k_timeout_t delay)
{
size_t total_size, stack_obj_size;
struct z_object *stack_object;
@@ -689,7 +691,7 @@ k_tid_t z_vrfy_k_thread_create(struct k_thread *new_thread,
z_setup_new_thread(new_thread, stack, stack_size,
entry, p1, p2, p3, prio, options, NULL);
if (delay != K_FOREVER) {
if (!K_TIMEOUT_EQ(delay, K_FOREVER)) {
schedule_new_thread(new_thread, delay);
}
@@ -747,9 +749,9 @@ void z_init_static_threads(void)
*/
k_sched_lock();
_FOREACH_STATIC_THREAD(thread_data) {
if (thread_data->init_delay != K_FOREVER) {
if (thread_data->init_delay != K_TICKS_FOREVER) {
schedule_new_thread(thread_data->init_thread,
thread_data->init_delay);
K_MSEC(thread_data->init_delay));
}
}
k_sched_unlock();
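
Illustrative only: thread creation with the new k_timeout_t delay argument; an immediate start is spelled K_NO_WAIT rather than 0 (see the sample updates below). Stack size, priority, and names are hypothetical.

    #include <zephyr.h>

    #define MY_STACK_SIZE 1024
    K_THREAD_STACK_DEFINE(my_stack, MY_STACK_SIZE);
    static struct k_thread my_thread;

    static void my_entry(void *p1, void *p2, void *p3) { /* ... */ }

    void start_thread(void)
    {
        k_thread_create(&my_thread, my_stack, MY_STACK_SIZE, my_entry,
                        NULL, NULL, NULL, K_PRIO_PREEMPT(7), 0,
                        K_NO_WAIT /* or K_MSEC(n) to defer the start */);
    }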

36
kernel/timeout.c

@@ -24,7 +24,7 @@ static sys_dlist_t timeout_list = SYS_DLIST_STATIC_INIT(&timeout_list);
static struct k_spinlock timeout_lock;
#define MAX_WAIT (IS_ENABLED(CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE) \
? K_FOREVER : INT_MAX)
? K_TICKS_FOREVER : INT_MAX)
/* Cycles left to process in the currently-executing z_clock_announce() */
static int announce_remaining;
@@ -83,8 +83,15 @@ static s32_t next_timeout(void)
return ret;
}
void z_add_timeout(struct _timeout *to, _timeout_func_t fn, s32_t ticks)
void z_add_timeout(struct _timeout *to, _timeout_func_t fn,
k_timeout_t timeout)
{
#ifdef CONFIG_LEGACY_TIMEOUT_API
k_ticks_t ticks = timeout;
#else
k_ticks_t ticks = timeout.ticks + 1;
#endif
__ASSERT(!sys_dnode_is_linked(&to->node), "");
to->fn = fn;
ticks = MAX(1, ticks);
@@ -150,7 +157,7 @@ s32_t z_timeout_remaining(struct _timeout *timeout)
s32_t z_get_next_timeout_expiry(void)
{
s32_t ret = K_FOREVER;
s32_t ret = K_TICKS_FOREVER;
LOCKED(&timeout_lock) {
ret = next_timeout();
@@ -162,7 +169,7 @@ void z_set_timeout_expiry(s32_t ticks, bool idle)
{
LOCKED(&timeout_lock) {
int next = next_timeout();
bool sooner = (next == K_FOREVER) || (ticks < next);
bool sooner = (next == K_TICKS_FOREVER) || (ticks < next);
bool imminent = next <= 1;
/* Only set new timeouts when they are sooner than
@@ -248,3 +255,24 @@ static inline s64_t z_vrfy_k_uptime_get(void)
}
#include <syscalls/k_uptime_get_mrsh.c>
#endif
/* Returns the uptime expiration (relative to an unlocked "now"!) of a
* timeout object.
*/
u64_t z_timeout_end_calc(k_timeout_t timeout)
{
k_ticks_t dt;
if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
return UINT64_MAX;
} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
return z_tick_get();
}
#ifdef CONFIG_LEGACY_TIMEOUT_API
dt = k_ms_to_ticks_ceil32(timeout);
#else
dt = timeout.ticks;
#endif
return z_tick_get() + MAX(1, dt);
}
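
Illustrative only: the pattern z_timeout_end_calc() enables (as in the reworked mempool loop mentioned in the commit message) is to compute an absolute tick deadline once and derive the remaining wait from it, instead of doing millisecond arithmetic on the timeout value. This sketch uses internal z_ APIs and a hypothetical caller.

    #include <zephyr.h>
    #include <sys/printk.h>

    void report_deadline(k_timeout_t timeout)
    {
        u64_t end = z_timeout_end_calc(timeout); /* UINT64_MAX for K_FOREVER */
        u64_t now = (u64_t)z_tick_get();
        u64_t remaining = (end > now) ? (end - now) : 0;

        printk("expires in %llu ticks\n", (unsigned long long)remaining);
    }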

43
kernel/timer.c

@@ -52,7 +52,8 @@ void z_timer_expiration_handler(struct _timeout *t)
* if the timer is periodic, start it again; don't add _TICK_ALIGN
* since we're already aligned to a tick boundary
*/
if (timer->period > 0) {
if (!K_TIMEOUT_EQ(timer->period, K_NO_WAIT) &&
!K_TIMEOUT_EQ(timer->period, K_FOREVER)) {
z_add_timeout(&timer->timeout, z_timer_expiration_handler,
timer->period);
}
@@ -105,29 +106,43 @@ void k_timer_init(struct k_timer *timer,
}
void z_impl_k_timer_start(struct k_timer *timer, s32_t duration, s32_t period)
void z_impl_k_timer_start(struct k_timer *timer, k_timeout_t duration,
k_timeout_t period)
{
__ASSERT(duration >= 0 && period >= 0 &&
(duration != 0 || period != 0), "invalid parameters\n");
volatile s32_t period_in_ticks, duration_in_ticks;
period_in_ticks = k_ms_to_ticks_ceil32(period);
duration_in_ticks = k_ms_to_ticks_ceil32(duration);
#ifdef CONFIG_LEGACY_TIMEOUT_API
duration = k_ms_to_ticks_ceil32(duration);
period = k_ms_to_ticks_ceil32(period);
#else
/* z_add_timeout() always adds one to the incoming tick count
* to round up to the next tick (by convention it waits for
* "at least as long as the specified timeout"), but the
* period interval is always guaranteed to be reset from
* within the timer ISR, so no round up is desired. Subtract
* one.
*
* Note that the duration (!) value gets the same treatment
* for backwards compatibility. This is unfortunate
* (i.e. k_timer_start() doesn't treat its initial sleep
* argument the same way k_sleep() does), but historical. The
* timer_api test relies on this behavior.
*/
period.ticks = MAX(period.ticks - 1, 0);
duration.ticks = MAX(duration.ticks - 1, 0);
#endif
(void)z_abort_timeout(&timer->timeout);
timer->period = period_in_ticks;
timer->period = period;
timer->status = 0U;
z_add_timeout(&timer->timeout, z_timer_expiration_handler,
duration_in_ticks);
duration);
}
#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_timer_start(struct k_timer *timer,
s32_t duration, s32_t period)
k_timeout_t duration,
k_timeout_t period)
{
Z_OOPS(Z_SYSCALL_VERIFY(duration >= 0 && period >= 0 &&
(duration != 0 || period != 0)));
Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
z_impl_k_timer_start(timer, duration, period);
}
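
Illustrative only: k_timer_start() with two k_timeout_t arguments; a one-shot timer passes K_NO_WAIT (formerly 0) as the period, as the sample updates below show. Timer and handler names are hypothetical.

    #include <zephyr.h>

    static void my_expiry(struct k_timer *t) { /* ... */ }

    K_TIMER_DEFINE(my_timer, my_expiry, NULL);

    void start_timers(void)
    {
        /* One-shot after ~500 ms. */
        k_timer_start(&my_timer, K_MSEC(500), K_NO_WAIT);

        /* Periodic: first expiry after 1 s, then every 100 ms. */
        k_timer_start(&my_timer, K_SECONDS(1), K_MSEC(100));
    }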

11
kernel/work_q.c

@@ -82,7 +82,7 @@ static int work_cancel(struct k_delayed_work *work)
int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
struct k_delayed_work *work,
s32_t delay)
k_timeout_t delay)
{
k_spinlock_key_t key = k_spin_lock(&lock);
int err = 0;
@@ -112,15 +112,18 @@ int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
/* Submit work directly if no delay. Note that this is a
* blocking operation, so release the lock first.
*/
if (delay == 0) {
if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
k_spin_unlock(&lock, key);
k_work_submit_to_queue(work_q, &work->work);
return 0;
}
#ifdef CONFIG_LEGACY_TIMEOUT_API
delay = _TICK_ALIGN + k_ms_to_ticks_ceil32(delay);
#endif
/* Add timeout */
z_add_timeout(&work->timeout, work_timeout,
_TICK_ALIGN + k_ms_to_ticks_ceil32(delay));
z_add_timeout(&work->timeout, work_timeout, delay);
done:
k_spin_unlock(&lock, key);
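
Illustrative only: delayed work under the new API; K_NO_WAIT submits immediately, K_MSEC(n) defers. Names are hypothetical.

    #include <zephyr.h>

    static void my_handler(struct k_work *work) { /* ... */ }
    static struct k_delayed_work my_work;

    void schedule_work(void)
    {
        k_delayed_work_init(&my_work, my_handler);
        k_delayed_work_submit(&my_work, K_MSEC(250));
    }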

1
lib/cmsis_rtos_v1/Kconfig

@@ -5,6 +5,7 @@ config CMSIS_RTOS_V1
bool "CMSIS RTOS v1 API"
depends on THREAD_CUSTOM_DATA
depends on POLL
select LEGACY_TIMEOUT_API
help
This enables CMSIS RTOS v1 API support. This is an OS-integration
layer which allows applications using CMSIS RTOS APIs to build on

1
lib/cmsis_rtos_v2/Kconfig

@@ -9,6 +9,7 @@ config CMSIS_RTOS_V2
depends on THREAD_MONITOR
depends on INIT_STACKS
depends on NUM_PREEMPT_PRIORITIES >= 56
select LEGACY_TIMEOUT_API
help
This enables CMSIS RTOS v2 API support. This is an OS-integration
layer which allows applications using CMSIS RTOS V2 APIs to build

4
lib/os/mutex.c

@@ -30,7 +30,7 @@ static bool check_sys_mutex_addr(struct sys_mutex *addr)
return Z_SYSCALL_MEMORY_WRITE(addr, sizeof(struct sys_mutex));
}
int z_impl_z_sys_mutex_kernel_lock(struct sys_mutex *mutex, s32_t timeout)
int z_impl_z_sys_mutex_kernel_lock(struct sys_mutex *mutex, k_timeout_t timeout)
{
struct k_mutex *kernel_mutex = get_k_mutex(mutex);
@@ -42,7 +42,7 @@ int z_impl_z_sys_mutex_kernel_lock(struct sys_mutex *mutex, s32_t timeout)
}
static inline int z_vrfy_z_sys_mutex_kernel_lock(struct sys_mutex *mutex,
s32_t timeout)
k_timeout_t timeout)
{
if (check_sys_mutex_addr(mutex)) {
return -EACCES;

4
lib/os/sem.c

@@ -79,7 +79,7 @@ int sys_sem_give(struct sys_sem *sem)
return ret;
}
int sys_sem_take(struct sys_sem *sem, s32_t timeout)
int sys_sem_take(struct sys_sem *sem, k_timeout_t timeout)
{
int ret = 0;
atomic_t old_value;
@@ -120,7 +120,7 @@ int sys_sem_give(struct sys_sem *sem)
return 0;
}
int sys_sem_take(struct sys_sem *sem, s32_t timeout)
int sys_sem_take(struct sys_sem *sem, k_timeout_t timeout)
{
int ret_value = 0;

1
lib/posix/Kconfig

@@ -12,6 +12,7 @@ config POSIX_MAX_FDS
config POSIX_API
depends on !ARCH_POSIX
bool "POSIX APIs"
select LEGACY_TIMEOUT_API
help
Enable mostly-standards-compliant implementations of
various POSIX (IEEE 1003.1) APIs.

2
lib/posix/pthread_common.c

@@ -24,7 +24,7 @@ s64_t timespec_to_timeoutms(const struct timespec *abstime)
nsecs = abstime->tv_nsec - curtime.tv_nsec;
if (secs < 0 || (secs == 0 && nsecs < NSEC_PER_MSEC)) {
milli_secs = K_NO_WAIT;
milli_secs = 0;
} else {
milli_secs = secs * MSEC_PER_SEC + nsecs / NSEC_PER_MSEC;
}

8
samples/cpp_synchronization/src/main.cpp

@@ -94,7 +94,7 @@ int cpp_semaphore::wait(void)
*/
int cpp_semaphore::wait(int timeout)
{
return k_sem_take(&_sema_internal, timeout);
return k_sem_take(&_sema_internal, K_MSEC(timeout));
}
/**
@@ -127,7 +127,7 @@ void coop_thread_entry(void)
printk("%s: Hello World!\n", __FUNCTION__);
/* wait a while, then let main thread have a turn */
k_timer_start(&timer, SLEEPTIME, 0);
k_timer_start(&timer, K_MSEC(SLEEPTIME), K_NO_WAIT);
k_timer_status_sync(&timer);
sem_main.give();
}
@@ -139,7 +139,7 @@ void main(void)
k_thread_create(&coop_thread, coop_stack, STACKSIZE,
(k_thread_entry_t) coop_thread_entry,
NULL, NULL, NULL, K_PRIO_COOP(7), 0, 0);
NULL, NULL, NULL, K_PRIO_COOP(7), 0, K_NO_WAIT);
k_timer_init(&timer, NULL, NULL);
while (1) {
@@ -147,7 +147,7 @@ void main(void)
printk("%s: Hello World!\n", __FUNCTION__);
/* wait a while, then let coop thread have a turn */
k_timer_start(&timer, SLEEPTIME, 0);
k_timer_start(&timer, K_MSEC(SLEEPTIME), K_NO_WAIT);
k_timer_status_sync(&timer);
sem_coop.give();

4
samples/scheduler/metairq_dispatch/src/msgdev.c

@@ -77,7 +77,11 @@ static void timeout_reset(void)
{
u32_t ticks = rand32() % MAX_EVENT_DELAY_TICKS;
#ifdef CONFIG_LEGACY_TIMEOUT_API
z_add_timeout(&timeout, dev_timer_expired, ticks);
#else
z_add_timeout(&timeout, dev_timer_expired, Z_TIMEOUT_TICKS(ticks));
#endif
}
void message_dev_init(void)

1
soc/arm/ti_simplelink/Kconfig

@@ -4,6 +4,7 @@
config SOC_FAMILY_TISIMPLELINK
bool
select LEGACY_TIMEOUT_API
if SOC_FAMILY_TISIMPLELINK

1
subsys/console/Kconfig

@@ -3,6 +3,7 @@
menuconfig CONSOLE_SUBSYS
bool "Console subsystem/support routines [EXPERIMENTAL]"
select LEGACY_TIMEOUT_API
help
Console subsystem and helper functions

1
subsys/net/Kconfig

@@ -7,6 +7,7 @@ menu "Networking"
config NET_BUF
bool "Network buffer support"
select LEGACY_TIMEOUT_API
help
This option enables support for generic network protocol
buffers.

4
subsys/power/policy/policy_residency.c

@@ -49,7 +49,7 @@ enum power_states sys_pm_policy_next_state(s32_t ticks)
{
int i;
if ((ticks != K_FOREVER) && (ticks < pm_min_residency[0])) {
if ((ticks != K_TICKS_FOREVER) && (ticks < pm_min_residency[0])) {
LOG_DBG("Not enough time for PM operations: %d", ticks);
return SYS_POWER_STATE_ACTIVE;
}
@@ -60,7 +60,7 @@ enum power_states sys_pm_policy_next_state(s32_t ticks)
continue;
}
#endif
if ((ticks == K_FOREVER) ||
if ((ticks == K_TICKS_FOREVER) ||
(ticks >= pm_min_residency[i])) {
LOG_DBG("Selected power state %d "
"(ticks: %d, min_residency: %u)",

2
tests/kernel/lifo/lifo_usage/src/main.c

@@ -35,7 +35,7 @@ struct reply_packet {
struct timeout_order_data {
void *link_in_lifo;
struct k_lifo *klifo;
s32_t timeout;
k_ticks_t timeout;
s32_t timeout_order;
s32_t q_order;
};

5
tests/kernel/mbox/mbox_usage/src/main.c

@@ -28,7 +28,7 @@ static enum mmsg_type {
TARGET_SOURCE
} info_type;
static void msg_sender(struct k_mbox *pmbox, s32_t timeout)
static void msg_sender(struct k_mbox *pmbox, k_timeout_t timeout)
{
struct k_mbox_msg mmsg;
@@ -53,7 +53,8 @@ static void msg_sender(struct k_mbox *pmbox, s32_t timeout)
}
}
static void msg_receiver(struct k_mbox *pmbox, k_tid_t thd_id, s32_t timeout)
static void msg_receiver(struct k_mbox *pmbox, k_tid_t thd_id,
k_timeout_t timeout)
{
struct k_mbox_msg mmsg;
char rxdata[MAIL_LEN];

1
tests/kernel/mem_protect/futex/prj.conf

@@ -2,3 +2,4 @@ CONFIG_ZTEST=y
CONFIG_IRQ_OFFLOAD=y
CONFIG_TEST_USERSPACE=y
CONFIG_MP_NUM_CPUS=1
CONFIG_LEGACY_TIMEOUT_API=y

3
tests/kernel/mem_protect/futex/src/main.c

@@ -64,7 +64,8 @@ void futex_wait_task(void *p1, void *p2, void *p3)
s32_t ret_value;
int time_val = *(int *)p1;
zassert_true(time_val >= (int)K_FOREVER, "invalid timeout parameter");
zassert_true(time_val >= (int)K_TICKS_FOREVER,
"invalid timeout parameter");
ret_value = k_futex_wait(&simple_futex,
atomic_get(&simple_futex.val), time_val);

4
tests/kernel/pending/src/main.c

@@ -116,7 +116,7 @@ static void sync_threads(struct k_work *work)
static void fifo_tests(s32_t timeout, volatile int *state,
void *(*get)(struct k_fifo *, s32_t),
int (*sem_take)(struct k_sem *, s32_t))
int (*sem_take)(struct k_sem *, k_timeout_t))
{
struct fifo_data *data;
@@ -154,7 +154,7 @@ static void fifo_tests(s32_t timeout, volatile int *state,
static void lifo_tests(s32_t timeout, volatile int *state,
void *(*get)(struct k_lifo *, s32_t),
int (*sem_take)(struct k_sem *, s32_t))
int (*sem_take)(struct k_sem *, k_timeout_t))
{
struct lifo_data *data;

6
tests/kernel/pipe/pipe_api/src/test_pipe_contexts.c

@@ -42,7 +42,7 @@ K_SEM_DEFINE(end_sema, 0, 1);
#endif
K_MEM_POOL_DEFINE(test_pool, SZ, SZ, 4, 4);
static void tpipe_put(struct k_pipe *ppipe, s32_t timeout)
static void tpipe_put(struct k_pipe *ppipe, k_timeout_t timeout)
{
size_t to_wt, wt_byte = 0;
@@ -57,7 +57,7 @@ static void tpipe_put(struct k_pipe *ppipe, s32_t timeout)
}
static void tpipe_block_put(struct k_pipe *ppipe, struct k_sem *sema,
s32_t timeout)
k_timeout_t timeout)
{
struct k_mem_block block;
@@ -73,7 +73,7 @@ static void tpipe_block_put(struct k_pipe *ppipe, struct k_sem *sema,
}
}
static void tpipe_get(struct k_pipe *ppipe, s32_t timeout)
static void tpipe_get(struct k_pipe *ppipe, k_timeout_t timeout)
{
unsigned char rx_data[PIPE_LEN];
size_t to_rd, rd_byte = 0;

2
tests/kernel/sleep/src/main.c

@@ -251,7 +251,7 @@ static void forever_thread_entry(void *p1, void *p2, void *p3)
s32_t ret;
ret = k_sleep(K_FOREVER);
zassert_equal(ret, K_FOREVER, "unexpected return value");
zassert_equal(ret, K_TICKS_FOREVER, "unexpected return value");
k_sem_give(&test_thread_sem);
}

2
tests/kernel/workq/work_queue/src/main.c

@@ -480,7 +480,7 @@ static void test_triggered_init(void)
*
* @see k_work_poll_init(), k_work_poll_submit()
*/
static void test_triggered_submit(s32_t timeout)
static void test_triggered_submit(k_timeout_t timeout)
{
int i;
