
clock: remove z_ from semi-public APIs

The clock/timer APIs are not application-facing APIs. However, similar
to arch_ and a few other API families, they are used to implement
drivers and add support for new hardware, and they are documented and
available for use outside of the clock/kernel subsystems.

Remove the leading z_ and provide them as clock_* APIs for someone
writing a new timer driver to use.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
Branch: pull/33517/head
Author: Anas Nashif (4 years ago)
Commit: 9c1efe6b4b
Changed files (number of changed lines in parentheses):

  1. boards/arm/qemu_cortex_m0/nrf_timer_timer.c (8)
  2. doc/reference/kernel/timing/clocks.rst (20)
  3. drivers/timer/Kconfig (4)
  4. drivers/timer/altera_avalon_timer_hal.c (4)
  5. drivers/timer/apic_timer.c (16)
  6. drivers/timer/arcv2_timer0.c (16)
  7. drivers/timer/arm_arch_timer.c (8)
  8. drivers/timer/cavs_timer.c (8)
  9. drivers/timer/cc13x2_cc26x2_rtc_timer.c (10)
  10. drivers/timer/cortex_m_systick.c (20)
  11. drivers/timer/hpet.c (10)
  12. drivers/timer/ite_it8xxx2_timer.c (8)
  13. drivers/timer/legacy_api.h (18)
  14. drivers/timer/leon_gptimer.c (6)
  15. drivers/timer/litex_timer.c (6)
  16. drivers/timer/mchp_xec_rtos_timer.c (18)
  17. drivers/timer/native_posix_timer.c (16)
  18. drivers/timer/npcx_itim_timer.c (14)
  19. drivers/timer/nrf_rtc_timer.c (8)
  20. drivers/timer/riscv_machine_timer.c (8)
  21. drivers/timer/rv32m1_lptmr_timer.c (6)
  22. drivers/timer/sam0_rtc_timer.c (14)
  23. drivers/timer/stm32_lptim_timer.c (10)
  24. drivers/timer/sys_clock_init.c (8)
  25. drivers/timer/xlnx_psttc_timer.c (8)
  26. drivers/timer/xtensa_sys_timer.c (8)
  27. include/drivers/timer/system_timer.h (33)
  28. kernel/idle.c (2)
  29. kernel/sched.c (2)
  30. kernel/timeout.c (16)
  31. subsys/shell/modules/kernel_service.c (2)
  32. tests/kernel/timer/starve/README.txt (2)
  33. tests/kernel/timer/timer_error_case/src/main.c (2)

8
boards/arm/qemu_cortex_m0/nrf_timer_timer.c

@@ -157,10 +157,10 @@ void timer0_nrf_isr(void *arg)
set_absolute_ticks(last_count + CYC_PER_TICK);
}
z_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL) ? dticks : (dticks > 0));
sys_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL) ? dticks : (dticks > 0));
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);
@@ -185,7 +185,7 @@ int z_clock_driver_init(const struct device *device)
return 0;
}
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
ARG_UNUSED(idle);
uint32_t cyc;
@@ -234,7 +234,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
NVIC_ClearPendingIRQ(TIMER0_IRQn);
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
return 0;

20
doc/reference/kernel/timing/clocks.rst

@@ -155,7 +155,7 @@ Kernel timing at the tick level is driven by a timer driver with a
comparatively simple API.
* The driver is expected to be able to "announce" new ticks to the
kernel via the ``z_clock_announce()`` call, which passes an integer
kernel via the ``sys_clock_announce()`` call, which passes an integer
number of ticks that have elapsed since the last announce call (or
system boot). These calls can occur at any time, but the driver is
expected to attempt to ensure (to the extent practical given
@@ -164,7 +164,7 @@ comparatively simple API.
be correct over time and subject to minimal skew vs. other counters
and real world time.
* The driver is expected to provide a ``z_clock_set_timeout()`` call
* The driver is expected to provide a ``sys_clock_set_timeout()`` call
to the kernel which indicates how many ticks may elapse before the
kernel must receive an announce call to trigger registered timeouts.
It is legal to announce new ticks before that moment (though they
@@ -175,10 +175,10 @@ comparatively simple API.
implementations of this function are subject to bugs where the
fractional tick gets "reset" incorrectly and causes clock skew.
* The driver is expected to provide a ``z_clock_elapsed()`` call which
* The driver is expected to provide a ``sys_clock_elapsed()`` call which
provides a current indication of how many ticks have elapsed (as
compared to a real world clock) since the last call to
``z_clock_announce()``, which the kernel needs to test newly
``sys_clock_announce()``, which the kernel needs to test newly
arriving timeouts for expiration.
Note that a natural implementation of this API results in a "tickless"
@@ -191,10 +191,10 @@ counter driver can be trivially implemented also:
the OS tick rate, calling z_clock_anounce() with an argument of one
each time.
* The driver can ignore calls to ``z_clock_set_timeout()``, as every
* The driver can ignore calls to ``sys_clock_set_timeout()``, as every
tick will be announced regardless of timeout status.
* The driver can return zero for every call to ``z_clock_elapsed()``
* The driver can return zero for every call to ``sys_clock_elapsed()``
as no more than one tick can be detected as having elapsed (because
otherwise an interrupt would have been received).
@@ -211,7 +211,7 @@ and minimal. But some notes are important to detail:
have every timer interrupt handled on a single processor. Existing
SMP architectures implement symmetric timer drivers.
* The ``z_clock_announce()`` call is expected to be globally
* The ``sys_clock_announce()`` call is expected to be globally
synchronized at the driver level. The kernel does not do any
per-CPU tracking, and expects that if two timer interrupts fire near
simultaneously, that only one will provide the current tick count to
@@ -225,10 +225,10 @@ and minimal. But some notes are important to detail:
driver, not the kernel.
* The next timeout value passed back to the driver via
:c:func:`z_clock_set_timeout` is done identically for every CPU.
:c:func:`sys_clock_set_timeout` is done identically for every CPU.
So by default, every CPU will see simultaneous timer interrupts for
every event, even though by definition only one of them should see a
non-zero ticks argument to ``z_clock_announce()``. This is probably
non-zero ticks argument to ``sys_clock_announce()``. This is probably
a correct default for timing sensitive applications (because it
minimizes the chance that an errant ISR or interrupt lock will delay
a timeout), but may be a performance problem in some cases. The
@@ -246,7 +246,7 @@ tracked independently on each CPU in an SMP context.
Because there may be no other hardware available to drive timeslicing,
Zephyr multiplexes the existing timer driver. This means that the
value passed to :c:func:`z_clock_set_timeout` may be clamped to a
value passed to :c:func:`sys_clock_set_timeout` may be clamped to a
smaller value than the current next timeout when a time sliced thread
is currently scheduled.
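
To illustrate the "simplest possible" driver described in this documentation, a purely tick-driven (non-tickless) implementation of the renamed API could look roughly like the sketch below. This is not code from the tree; the hardware hooks (my_hw_timer_ack_irq(), my_hw_timer_start_periodic(), MY_TIMER_IRQ) are hypothetical placeholders for whatever the SoC actually provides.

/* Hypothetical minimal ticking driver built on the renamed sys_clock_* API.
 * Assumes a hardware timer that interrupts once per kernel tick.
 */
#include <device.h>
#include <irq.h>
#include <drivers/timer/system_timer.h>

static void my_tick_isr(const void *arg)
{
	ARG_UNUSED(arg);
	my_hw_timer_ack_irq();   /* placeholder: clear the pending interrupt */
	sys_clock_announce(1);   /* exactly one tick per interrupt */
}

void sys_clock_set_timeout(int32_t ticks, bool idle)
{
	/* Every tick is announced anyway, so the timeout hint can be ignored. */
	ARG_UNUSED(ticks);
	ARG_UNUSED(idle);
}

uint32_t sys_clock_elapsed(void)
{
	/* Non-tickless: at most one tick can be pending, so report zero. */
	return 0;
}

int sys_clock_driver_init(const struct device *dev)
{
	ARG_UNUSED(dev);
	IRQ_CONNECT(MY_TIMER_IRQ, 0, my_tick_isr, NULL, 0);
	irq_enable(MY_TIMER_IRQ);
	my_hw_timer_start_periodic(CONFIG_SYS_CLOCK_TICKS_PER_SEC); /* placeholder */
	return 0;
}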

4
drivers/timer/Kconfig

@@ -346,9 +346,9 @@ config TICKLESS_CAPABLE
help
Timer drivers should select this flag if they are capable of
supporting tickless operation. That is, a call to
z_clock_set_timeout() with a number of ticks greater than
sys_clock_set_timeout() with a number of ticks greater than
one should be expected not to produce a call to
z_clock_announce() (really, not to produce an interrupt at
sys_clock_announce() (really, not to produce an interrupt at
all) until the specified expiration.
endmenu

4
drivers/timer/altera_avalon_timer_hal.c

@@ -28,10 +28,10 @@ static void timer_irq_handler(const void *unused)
/* Clear the interrupt */
alt_handle_irq((void *)TIMER_0_BASE, TIMER_0_IRQ);
z_clock_announce(_sys_idle_elapsed_ticks);
sys_clock_announce(_sys_idle_elapsed_ticks);
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);

16
drivers/timer/apic_timer.c

@@ -76,9 +76,9 @@ static uint32_t cached_icr = CYCLES_PER_TICK;
#ifdef CONFIG_TICKLESS_KERNEL
static uint64_t last_announcement; /* last time we called z_clock_announce() */
static uint64_t last_announcement; /* last time we called sys_clock_announce() */
void z_clock_set_timeout(int32_t n, bool idle)
void sys_clock_set_timeout(int32_t n, bool idle)
{
ARG_UNUSED(idle);
@@ -117,7 +117,7 @@ void z_clock_set_timeout(int32_t n, bool idle)
k_spin_unlock(&lock, key);
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
uint32_t ccr;
uint32_t ticks;
@@ -143,7 +143,7 @@ static void isr(const void *arg)
/*
* If we get here and the CCR isn't zero, then this interrupt is
* stale: it was queued while z_clock_set_timeout() was setting
* stale: it was queued while sys_clock_set_timeout() was setting
* a new counter. Just ignore it. See above for more info.
*/
@@ -161,7 +161,7 @@ static void isr(const void *arg)
ticks = (total_cycles - last_announcement) / CYCLES_PER_TICK;
last_announcement = total_cycles;
k_spin_unlock(&lock, key);
z_clock_announce(ticks);
sys_clock_announce(ticks);
}
#else
@@ -175,10 +175,10 @@ static void isr(const void *arg)
x86_write_loapic(LOAPIC_TIMER_ICR, cached_icr);
k_spin_unlock(&lock, key);
z_clock_announce(1);
sys_clock_announce(1);
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
return 0U;
}
@@ -213,7 +213,7 @@ uint32_t z_timer_cycle_get_32(void)
#endif
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
uint32_t val;

16
drivers/timer/arcv2_timer0.c

@@ -67,7 +67,7 @@ static uint32_t last_load;
/*
* This local variable holds the amount of timer cycles elapsed
* and it is updated in z_timer_int_handler and z_clock_set_timeout().
* and it is updated in z_timer_int_handler and sys_clock_set_timeout().
*
* Note:
* At an arbitrary point in time the "current" value of the
@@ -166,7 +166,7 @@ static ALWAYS_INLINE void timer0_limit_register_set(uint32_t count)
/* This internal function calculates the amount of HW cycles that have
* elapsed since the last time the absolute HW cycles counter has been
* updated. 'cycle_count' may be updated either by the ISR, or
* in z_clock_set_timeout().
* in sys_clock_set_timeout().
*
* Additionally, the function updates the 'overflow_cycles' counter, that
* holds the amount of elapsed HW cycles due to (possibly) multiple
@@ -241,13 +241,13 @@ static void timer_int_handler(const void *unused)
k_spin_unlock(&lock, key);
z_clock_announce(dticks);
sys_clock_announce(dticks);
#else
/* timer_int_handler may be triggered by timer irq or
* software helper irq
*/
/* irq with higher priority may call z_clock_set_timeout
/* irq with higher priority may call sys_clock_set_timeout
* so need a lock here
*/
uint32_t key;
@@ -262,7 +262,7 @@ static void timer_int_handler(const void *unused)
dticks = (cycle_count - announced_cycles) / CYC_PER_TICK;
announced_cycles += dticks * CYC_PER_TICK;
z_clock_announce(TICKLESS ? dticks : 1);
sys_clock_announce(TICKLESS ? dticks : 1);
#endif
}
@@ -277,7 +277,7 @@ static void timer_int_handler(const void *unused)
*
* @return 0
*/
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);
@@ -314,7 +314,7 @@ int z_clock_driver_init(const struct device *device)
return 0;
}
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
/* If the kernel allows us to miss tick announcements in idle,
* then shut off the counter. (Note: we can assume if idle==true
@@ -417,7 +417,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
#endif
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
if (!TICKLESS) {
return 0;

8
drivers/timer/arm_arch_timer.c

@@ -43,10 +43,10 @@ static void arm_arch_timer_compare_isr(const void *arg)
k_spin_unlock(&lock, key);
z_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL) ? delta_ticks : 1);
sys_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL) ? delta_ticks : 1);
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);
@@ -61,7 +61,7 @@ int z_clock_driver_init(const struct device *device)
return 0;
}
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
#if defined(CONFIG_TICKLESS_KERNEL)
@@ -95,7 +95,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
#endif
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
return 0;

8
drivers/timer/cavs_timer.c

@@ -118,10 +118,10 @@ static void compare_isr(const void *arg)
k_spin_unlock(&lock, key);
z_clock_announce(dticks);
sys_clock_announce(dticks);
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
uint64_t curr = count();
@@ -132,7 +132,7 @@ int z_clock_driver_init(const struct device *device)
return 0;
}
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
ARG_UNUSED(idle);
@@ -164,7 +164,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
#endif
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
return 0;

10
drivers/timer/cc13x2_cc26x2_rtc_timer.c

@@ -109,7 +109,7 @@ void rtc_isr(const void *arg)
rtc_last += ticks * RTC_COUNTS_PER_TICK;
k_spin_unlock(&lock, key);
z_clock_announce(ticks);
sys_clock_announce(ticks);
#else /* !CONFIG_TICKLESS_KERNEL */
@@ -123,7 +123,7 @@ void rtc_isr(const void *arg)
rtc_last += RTC_COUNTS_PER_TICK;
z_clock_announce(1);
sys_clock_announce(1);
#endif /* CONFIG_TICKLESS_KERNEL */
}
@@ -183,7 +183,7 @@ static void startDevice(void)
irq_unlock(key);
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);
@@ -201,7 +201,7 @@ int z_clock_driver_init(const struct device *device)
return 0;
}
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
ARG_UNUSED(idle);
@@ -230,7 +230,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
#endif /* CONFIG_TICKLESS_KERNEL */
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
uint32_t ret = (AONRTCCurrent64BitValueGet() - rtc_last) /
RTC_COUNTS_PER_TICK;

20
drivers/timer/cortex_m_systick.c

@@ -35,7 +35,7 @@ static uint32_t last_load;
/*
* This local variable holds the amount of SysTick HW cycles elapsed
* and it is updated in z_clock_isr() and z_clock_set_timeout().
* and it is updated in z_clock_isr() and sys_clock_set_timeout().
*
* Note:
* At an arbitrary point in time the "current" value of the SysTick
@@ -65,7 +65,7 @@ static volatile uint32_t overflow_cyc;
/* This internal function calculates the amount of HW cycles that have
* elapsed since the last time the absolute HW cycles counter has been
* updated. 'cycle_count' may be updated either by the ISR, or when we
* re-program the SysTick.LOAD register, in z_clock_set_timeout().
* re-program the SysTick.LOAD register, in sys_clock_set_timeout().
*
* Additionally, the function updates the 'overflow_cyc' counter, that
* holds the amount of elapsed HW cycles due to (possibly) multiple
@@ -129,11 +129,11 @@ void z_clock_isr(void *arg)
if (TICKLESS) {
/* In TICKLESS mode, the SysTick.LOAD is re-programmed
* in z_clock_set_timeout(), followed by resetting of
* in sys_clock_set_timeout(), followed by resetting of
* the counter (VAL = 0).
*
* If a timer wrap occurs right when we re-program LOAD,
* the ISR is triggered immediately after z_clock_set_timeout()
* the ISR is triggered immediately after sys_clock_set_timeout()
* returns; in that case we shall not increment the cycle_count
* because the value has been updated before LOAD re-program.
*
@@ -142,14 +142,14 @@ void z_clock_isr(void *arg)
dticks = (cycle_count - announced_cycles) / CYC_PER_TICK;
announced_cycles += dticks * CYC_PER_TICK;
z_clock_announce(dticks);
sys_clock_announce(dticks);
} else {
z_clock_announce(1);
sys_clock_announce(1);
}
z_arm_int_exit();
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);
@@ -164,7 +164,7 @@ int z_clock_driver_init(const struct device *device)
return 0;
}
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
/* Fast CPUs and a 24 bit counter mean that even idle systems
* need to wake up multiple times per second. If the kernel
@@ -225,7 +225,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
#endif
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
if (!TICKLESS) {
return 0;
@@ -247,7 +247,7 @@ uint32_t z_timer_cycle_get_32(void)
return ret;
}
void z_clock_idle_exit(void)
void sys_clock_idle_exit(void)
{
if (last_load == TIMER_STOPPED) {
SysTick->CTRL |= SysTick_CTRL_ENABLE_Msk;

10
drivers/timer/hpet.c

@@ -90,7 +90,7 @@ static void hpet_isr(const void *arg)
}
k_spin_unlock(&lock, key);
z_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL) ? dticks : 1);
sys_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL) ? dticks : 1);
}
static void set_timer0_irq(unsigned int irq)
@@ -106,7 +106,7 @@ static void set_timer0_irq(unsigned int irq)
TIMER0_CONF_REG = val;
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
extern int z_clock_hw_cycles_per_sec;
uint32_t hz;
@@ -154,7 +154,7 @@ void smp_timer_init(void)
*/
}
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
ARG_UNUSED(idle);
@@ -191,7 +191,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
#endif
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
return 0;
@@ -209,7 +209,7 @@ uint32_t z_timer_cycle_get_32(void)
return MAIN_COUNTER_REG;
}
void z_clock_idle_exit(void)
void sys_clock_idle_exit(void)
{
GENERAL_CONF_REG |= GCONF_ENABLE;
}

8
drivers/timer/ite_it8xxx2_timer.c

@@ -201,10 +201,10 @@ static void timer_isr(const void *unused)
- accumulated_cycle_count) / CYC_PER_TICK;
accumulated_cycle_count += dticks * CYC_PER_TICK;
k_spin_unlock(&lock, key);
z_clock_announce(dticks);
sys_clock_announce(dticks);
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
timer_init_combine(CTIMER_HW_TIMER_INDEX, TRUE);
timer_init(CTIMER_HW_TIMER_INDEX, ET_PSR_32K, TRUE, FALSE, 0);
@@ -215,7 +215,7 @@ int z_clock_driver_init(const struct device *device)
return 0;
}
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
ARG_UNUSED(idle);
@@ -227,7 +227,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
k_spin_unlock(&lock, key);
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
return 0;

18
drivers/timer/legacy_api.h

@@ -6,7 +6,7 @@
#ifndef ZEPHYR_LEGACY_SET_TIME_H__
#define ZEPHYR_LEGACY_SET_TIME_H__
/* Stub implementation of z_clock_set_timeout() and z_clock_elapsed()
/* Stub implementation of sys_clock_set_timeout() and sys_clock_elapsed()
* in terms of the original APIs. Used by older timer drivers.
* Should be replaced.
*
@@ -16,7 +16,7 @@
#ifdef CONFIG_TICKLESS_IDLE
void z_timer_idle_enter(int32_t ticks);
void z_clock_idle_exit(void);
void clock_idle_exit(void);
#endif
#ifdef CONFIG_TICKLESS_KERNEL
@@ -26,9 +26,9 @@ extern uint32_t z_get_remaining_program_time(void);
extern uint32_t z_get_elapsed_program_time(void);
#endif
extern uint64_t z_clock_uptime(void);
extern uint64_t clock_uptime(void);
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
#if defined(CONFIG_TICKLESS_IDLE) && defined(CONFIG_TICKLESS_KERNEL)
if (idle) {
@@ -46,10 +46,10 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
*/
static uint32_t driver_uptime;
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
return (uint32_t)(z_clock_uptime() - driver_uptime);
return (uint32_t)(clock_uptime() - driver_uptime);
#else
return 0;
#endif
@@ -58,16 +58,16 @@ uint32_t z_clock_elapsed(void)
static void wrapped_announce(int32_t ticks)
{
driver_uptime += ticks;
z_clock_announce(ticks);
sys_clock_announce(ticks);
}
#define z_clock_announce(t) wrapped_announce(t)
#define sys_clock_announce(t) wrapped_announce(t)
#define _sys_clock_always_on (0)
static inline void z_tick_set(int64_t val)
{
/* noop with current kernel code, use z_clock_announce() */
/* noop with current kernel code, use sys_clock_announce() */
ARG_UNUSED(val);
}

6
drivers/timer/leon_gptimer.c

@@ -78,10 +78,10 @@ static void timer_isr(const void *unused)
tmr->ctrl = GPTIMER_CTRL_IE | GPTIMER_CTRL_RS |
GPTIMER_CTRL_EN | gptimer_ctrl_clear_ip;
z_clock_announce(1);
sys_clock_announce(1);
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
return 0;
}
@@ -101,7 +101,7 @@ static void init_downcounter(volatile struct gptimer_timer_regs *tmr)
tmr->ctrl = GPTIMER_CTRL_LD | GPTIMER_CTRL_RS | GPTIMER_CTRL_EN;
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
const int timer_interrupt = get_timer_irq();
volatile struct gptimer_regs *regs = get_regs();

6
drivers/timer/litex_timer.c

@@ -34,7 +34,7 @@ static void litex_timer_irq_handler(const void *device)
int key = irq_lock();
sys_write8(TIMER_EV, TIMER_EV_PENDING_ADDR);
z_clock_announce(1);
sys_clock_announce(1);
irq_unlock(key);
}
@@ -54,12 +54,12 @@ uint32_t z_timer_cycle_get_32(void)
}
/* tickless kernel is not supported */
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
return 0;
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);
IRQ_CONNECT(TIMER_IRQ, DT_INST_IRQ(0, priority),

18
drivers/timer/mchp_xec_rtos_timer.c

@@ -112,7 +112,7 @@ static inline uint32_t timer_count(void)
#ifdef CONFIG_TICKLESS_KERNEL
static uint32_t last_announcement; /* last time we called z_clock_announce() */
static uint32_t last_announcement; /* last time we called sys_clock_announce() */
/*
* Request a timeout n Zephyr ticks in the future from now.
@@ -126,7 +126,7 @@ static uint32_t last_announcement; /* last time we called z_clock_announce() */
* Writing a new value to preload only takes effect once the count
* register reaches 0.
*/
void z_clock_set_timeout(int32_t n, bool idle)
void sys_clock_set_timeout(int32_t n, bool idle)
{
ARG_UNUSED(idle);
@@ -185,10 +185,10 @@ void z_clock_set_timeout(int32_t n, bool idle)
/*
* Return the number of Zephyr ticks elapsed from last call to
* z_clock_announce in the ISR. The caller casts uint32_t to int32_t.
* sys_clock_announce in the ISR. The caller casts uint32_t to int32_t.
* We must make sure bit[31] is 0 in the return value.
*/
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
uint32_t ccr;
uint32_t ticks;
@@ -242,7 +242,7 @@ static void xec_rtos_timer_isr(const void *arg)
last_announcement = total_cycles;
k_spin_unlock(&lock, key);
z_clock_announce(ticks);
sys_clock_announce(ticks);
}
#else
@@ -266,10 +266,10 @@ static void xec_rtos_timer_isr(const void *arg)
total_cycles = temp & TIMER_COUNT_MASK;
k_spin_unlock(&lock, key);
z_clock_announce(1);
sys_clock_announce(1);
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
return 0U;
}
@@ -301,7 +301,7 @@ uint32_t z_timer_cycle_get_32(void)
return ret;
}
void z_clock_idle_exit(void)
void sys_clock_idle_exit(void)
{
if (cached_icr == TIMER_STOPPED) {
cached_icr = CYCLES_PER_TICK;
@@ -314,7 +314,7 @@ void sys_clock_disable(void)
TIMER_REGS->CTRL = 0U;
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);

16
drivers/timer/native_posix_timer.c

@@ -44,7 +44,7 @@ static void np_timer_isr(const void *arg)
int32_t elapsed_ticks = (now - last_tick_time)/tick_period;
last_tick_time += elapsed_ticks*tick_period;
z_clock_announce(elapsed_ticks);
sys_clock_announce(elapsed_ticks);
}
/**
@@ -60,7 +60,7 @@ void np_timer_isr_test_hook(const void *arg)
*
* Enable the hw timer, setting its tick period, and setup its interrupt
*/
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);
@@ -79,7 +79,7 @@ int z_clock_driver_init(const struct device *device)
* @brief Set system clock timeout
*
* Informs the system clock driver that the next needed call to
* z_clock_announce() will not be until the specified number of ticks
* sys_clock_announce() will not be until the specified number of ticks
* from the the current time have elapsed.
*
* See system_timer.h for more information
@@ -88,7 +88,7 @@ int z_clock_driver_init(const struct device *device)
* @param idle Hint to the driver that the system is about to enter
* the idle state immediately after setting the timeout
*/
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
ARG_UNUSED(idle);
@@ -96,7 +96,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
uint64_t silent_ticks;
/* Note that we treat INT_MAX literally as anyhow the maximum amount of
* ticks we can report with z_clock_announce() is INT_MAX
* ticks we can report with sys_clock_announce() is INT_MAX
*/
if (ticks == K_TICKS_FOREVER) {
silent_ticks = INT64_MAX;
@@ -110,14 +110,14 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
}
/**
* @brief Ticks elapsed since last z_clock_announce() call
* @brief Ticks elapsed since last sys_clock_announce() call
*
* Queries the clock driver for the current time elapsed since the
* last call to z_clock_announce() was made. The kernel will call
* last call to sys_clock_announce() was made. The kernel will call
* this with appropriate locking, the driver needs only provide an
* instantaneous answer.
*/
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
return (hwm_get_time() - last_tick_time)/tick_period;
}

14
drivers/timer/npcx_itim_timer.c

@@ -65,7 +65,7 @@ static struct itim32_reg *const evt_tmr = (struct itim32_reg *)
static const struct npcx_clk_cfg itim_clk_cfg[] = NPCX_DT_CLK_CFG_ITEMS_LIST(0);
static struct k_spinlock lock;
/* Announced cycles in system timer before executing z_clock_announce() */
/* Announced cycles in system timer before executing sys_clock_announce() */
static uint64_t cyc_sys_announced;
/* Current target cycles of time-out signal in event timer */
static uint32_t cyc_evt_timeout;
@@ -178,13 +178,13 @@ static void npcx_itim_evt_isr(const struct device *dev)
k_spin_unlock(&lock, key);
/* Informs kernel that specified number of ticks have elapsed */
z_clock_announce(delta_ticks);
sys_clock_announce(delta_ticks);
} else {
/* Enable event timer for ticking and wait to it take effect */
npcx_itim_evt_enable();
/* Informs kernel that one tick has elapsed */
z_clock_announce(1);
sys_clock_announce(1);
}
}
@@ -224,7 +224,7 @@ static uint32_t npcx_itim_evt_elapsed_cyc32(void)
#endif /* CONFIG_PM */
/* System timer api functions */
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
ARG_UNUSED(idle);
@@ -238,7 +238,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
npcx_itim_start_evt_tmr_by_tick(ticks);
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
/* Always return 0 for tickful kernel system */
@@ -250,7 +250,7 @@ uint32_t z_clock_elapsed(void)
k_spin_unlock(&lock, key);
/* Return how many ticks elapsed since last z_clock_announce() call */
/* Return how many ticks elapsed since last sys_clock_announce() call */
return (uint32_t)((current - cyc_sys_announced) / SYS_CYCLES_PER_TICK);
}
@@ -265,7 +265,7 @@ uint32_t z_timer_cycle_get_32(void)
return (uint32_t)(current);
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);
int ret;

8
drivers/timer/nrf_rtc_timer.c

@@ -242,7 +242,7 @@ static void sys_clock_timeout_handler(uint32_t chan,
sys_clock_timeout_handler, NULL);
}
z_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL) ?
sys_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL) ?
dticks : (dticks > 0));
}
@@ -299,7 +299,7 @@ void z_nrf_rtc_timer_chan_free(uint32_t chan)
atomic_or(&alloc_mask, BIT(chan));
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);
static const enum nrf_lfclk_start_mode mode =
@@ -339,7 +339,7 @@ int z_clock_driver_init(const struct device *device)
return 0;
}
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
ARG_UNUSED(idle);
uint32_t cyc;
@@ -380,7 +380,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
compare_set(0, cyc, sys_clock_timeout_handler, NULL);
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
return 0;

8
drivers/timer/riscv_machine_timer.c

@@ -76,10 +76,10 @@ static void timer_isr(const void *arg)
}
k_spin_unlock(&lock, key);
z_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL) ? dticks : 1);
sys_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL) ? dticks : 1);
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);
@@ -90,7 +90,7 @@ int z_clock_driver_init(const struct device *device)
return 0;
}
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
ARG_UNUSED(idle);
@@ -130,7 +130,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
#endif
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
return 0;

6
drivers/timer/rv32m1_lptmr_timer.c

@@ -50,10 +50,10 @@ static void lptmr_irq_handler(const struct device *unused)
SYSTEM_TIMER_INSTANCE->CSR |= LPTMR_CSR_TCF(1); /* Rearm timer. */
cycle_count += CYCLES_PER_TICK; /* Track cycles. */
z_clock_announce(1); /* Poke the scheduler. */
sys_clock_announce(1); /* Poke the scheduler. */
}
int z_clock_driver_init(const struct device *unused)
int sys_clock_driver_init(const struct device *unused)
{
uint32_t csr, psr, sircdiv; /* LPTMR registers */
@@ -139,7 +139,7 @@ uint32_t z_timer_cycle_get_32(void)
/*
* Since we're not tickless, this is identically zero.
*/
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
return 0;
}

14
drivers/timer/sam0_rtc_timer.c

@@ -155,7 +155,7 @@ static void rtc_isr(const void *arg)
if (count != rtc_last) {
uint32_t ticks = (count - rtc_last) / CYCLES_PER_TICK;
z_clock_announce(ticks);
sys_clock_announce(ticks);
rtc_last += ticks * CYCLES_PER_TICK;
}
@@ -164,18 +164,18 @@ static void rtc_isr(const void *arg)
if (status) {
/* RTC just ticked one more tick... */
if (++rtc_counter == rtc_timeout) {
z_clock_announce(rtc_counter - rtc_last);
sys_clock_announce(rtc_counter - rtc_last);
rtc_last = rtc_counter;
}
} else {
/* ISR was invoked directly from z_clock_set_timeout. */
z_clock_announce(0);
/* ISR was invoked directly from sys_clock_set_timeout. */
sys_clock_announce(0);
}
#endif /* CONFIG_TICKLESS_KERNEL */
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);
@@ -252,7 +252,7 @@ int z_clock_driver_init(const struct device *device)
return 0;
}
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
ARG_UNUSED(idle);
@@ -301,7 +301,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
#endif /* CONFIG_TICKLESS_KERNEL */
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
return (rtc_count() - rtc_last) / CYCLES_PER_TICK;

10
drivers/timer/stm32_lptim_timer.c

@@ -55,7 +55,7 @@ static void lptim_irq_handler(const struct device *unused)
k_spinlock_key_t key = k_spin_lock(&lock);
/* do not change ARR yet, z_clock_announce will do */
/* do not change ARR yet, sys_clock_announce will do */
LL_LPTIM_ClearFLAG_ARRM(LPTIM1);
/* increase the total nb of autoreload count
@@ -73,12 +73,12 @@ static void lptim_irq_handler(const struct device *unused)
* CONFIG_SYS_CLOCK_TICKS_PER_SEC)
/ LPTIM_CLOCK;
z_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL)
sys_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL)
? dticks : (dticks > 0));
}
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);
@@ -188,7 +188,7 @@ static inline uint32_t z_clock_lptim_getcounter(void)
return lp_time;
}
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
/* new LPTIM1 AutoReload value to set (aligned on Kernel ticks) */
uint32_t next_arr = 0;
@@ -268,7 +268,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
k_spin_unlock(&lock, key);
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
return 0;

8
drivers/timer/sys_clock_init.c

@@ -23,7 +23,7 @@ void __weak z_clock_isr(void *arg)
__ASSERT_NO_MSG(false);
}
int __weak z_clock_driver_init(const struct device *device)
int __weak sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);
@@ -37,11 +37,11 @@ int __weak z_clock_device_ctrl(const struct device *device,
return -ENOTSUP;
}
void __weak z_clock_set_timeout(int32_t ticks, bool idle)
void __weak sys_clock_set_timeout(int32_t ticks, bool idle)
{
}
void __weak z_clock_idle_exit(void)
void __weak sys_clock_idle_exit(void)
{
}
@@ -49,5 +49,5 @@ void __weak sys_clock_disable(void)
{
}
SYS_DEVICE_DEFINE("sys_clock", z_clock_driver_init, z_clock_device_ctrl,
SYS_DEVICE_DEFINE("sys_clock", sys_clock_driver_init, z_clock_device_ctrl,
PRE_KERNEL_2, CONFIG_SYSTEM_CLOCK_INIT_PRIORITY);

8
drivers/timer/xlnx_psttc_timer.c

@@ -93,10 +93,10 @@ static void ttc_isr(const void *arg)
#endif
/* Announce to the kernel*/
z_clock_announce(ticks);
sys_clock_announce(ticks);
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
uint32_t reg_val;
@@ -152,7 +152,7 @@ int z_clock_driver_init(const struct device *device)
return 0;
}
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
#ifdef CONFIG_TICKLESS_KERNEL
uint32_t cycles;
@@ -173,7 +173,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
#endif
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
uint32_t cycles;

8
drivers/timer/xtensa_sys_timer.c

@@ -53,10 +53,10 @@ static void ccompare_isr(const void *arg)
}
k_spin_unlock(&lock, key);
z_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL) ? dticks : 1);
sys_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL) ? dticks : 1);
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);
@@ -66,7 +66,7 @@ int z_clock_driver_init(const struct device *device)
return 0;
}
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
ARG_UNUSED(idle);
@@ -97,7 +97,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
#endif
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
return 0;

33
include/drivers/timer/system_timer.h

@@ -31,7 +31,7 @@ extern "C" {
* initialization callback. It is a weak symbol that will be
* implemented as a noop if undefined in the clock driver.
*/
extern int z_clock_driver_init(const struct device *device);
extern int sys_clock_driver_init(const struct device *device);
/**
* @brief Initialize system clock driver
@@ -41,7 +41,7 @@ extern int z_clock_driver_init(const struct device *device);
* management. It is a weak symbol that will be implemented as a noop
* if undefined in the clock driver.
*/
extern int z_clock_device_ctrl(const struct device *device,
extern int clock_device_ctrl(const struct device *device,
uint32_t ctrl_command,
void *context, device_pm_cb cb, void *arg);
@@ -49,9 +49,9 @@ extern int z_clock_device_ctrl(const struct device *device,
* @brief Set system clock timeout
*
* Informs the system clock driver that the next needed call to
* z_clock_announce() will not be until the specified number of ticks
* sys_clock_announce() will not be until the specified number of ticks
* from the the current time have elapsed. Note that spurious calls
* to z_clock_announce() are allowed (i.e. it's legal to announce
* to sys_clock_announce() are allowed (i.e. it's legal to announce
* every tick and implement this function as a noop), the requirement
* is that one tick announcement should occur within one tick BEFORE
* the specified expiration (that is, passing ticks==1 means "announce
@@ -71,13 +71,13 @@ extern int z_clock_device_ctrl(const struct device *device,
* have a specific event to await. The intent here is that the driver
* will schedule any needed timeout as far into the future as
* possible. For the specific case of INT_MAX, the next call to
* z_clock_announce() may occur at any point in the future, not just
* sys_clock_announce() may occur at any point in the future, not just
* at INT_MAX ticks. But the correspondence between the announced
* ticks and real-world time must be correct.
*
* A final note about SMP: note that the call to z_clock_set_timeout()
* A final note about SMP: note that the call to sys_clock_set_timeout()
* is made on any CPU, and reflects the next timeout desired globally.
* The resulting calls(s) to z_clock_announce() must be properly
* The resulting calls(s) to sys_clock_announce() must be properly
* serialized by the driver such that a given tick is announced
* exactly once across the system. The kernel does not (cannot,
* really) attempt to serialize things by "assigning" timeouts to
@@ -87,7 +87,7 @@ extern int z_clock_device_ctrl(const struct device *device,
* @param idle Hint to the driver that the system is about to enter
* the idle state immediately after setting the timeout
*/
extern void z_clock_set_timeout(int32_t ticks, bool idle);
extern void sys_clock_set_timeout(int32_t ticks, bool idle);
/**
* @brief Timer idle exit notification
@@ -97,34 +97,37 @@ extern void z_clock_set_timeout(int32_t ticks, bool idle);
* operation and compute elapsed ticks.
*
* @note Legacy timer drivers also use this opportunity to call back
* into z_clock_announce() to notify the kernel of expired ticks.
* into sys_clock_announce() to notify the kernel of expired ticks.
* This is allowed for compatibility, but not recommended. The kernel
* will figure that out on its own.
*/
extern void z_clock_idle_exit(void);
extern void sys_clock_idle_exit(void);
/**
* @brief Announce time progress to the kernel
*
* Informs the kernel that the specified number of ticks have elapsed
* since the last call to z_clock_announce() (or system startup for
* since the last call to sys_clock_announce() (or system startup for
* the first call). The timer driver is expected to delivery these
* announcements as close as practical (subject to hardware and
* latency limitations) to tick boundaries.
*
* @param ticks Elapsed time, in ticks
*/
extern void z_clock_announce(int32_t ticks);
extern void sys_clock_announce(int32_t ticks);
/**
* @brief Ticks elapsed since last z_clock_announce() call
* @brief Ticks elapsed since last sys_clock_announce() call
*
* Queries the clock driver for the current time elapsed since the
* last call to z_clock_announce() was made. The kernel will call
* last call to sys_clock_announce() was made. The kernel will call
* this with appropriate locking, the driver needs only provide an
* instantaneous answer.
*/
extern uint32_t z_clock_elapsed(void);
extern uint32_t sys_clock_elapsed(void);
/**
* @}
*/
#ifdef __cplusplus
}
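
As a rough sketch of the contract documented in this header (not code from this change), a tickless driver built on a free-running up-counter with a compare interrupt would typically implement the renamed hooks along the following lines. counter_read(), compare_set(), CYC_PER_TICK and MAX_CYCLES are hypothetical stand-ins for the hardware-specific parts.

/* Hypothetical tickless skeleton for the renamed sys_clock_* hooks. */
#include <kernel.h>
#include <drivers/timer/system_timer.h>

static struct k_spinlock lock;
static uint32_t announced_cycles; /* counter value at the last sys_clock_announce() */

void sys_clock_set_timeout(int32_t ticks, bool idle)
{
	ARG_UNUSED(idle);
	k_spinlock_key_t key = k_spin_lock(&lock);
	/* Interrupt no later than 'ticks' ticks from now; K_TICKS_FOREVER
	 * means "as far in the future as the hardware allows". Real drivers
	 * also clamp against counter wrap, which is omitted here.
	 */
	uint32_t cyc = (ticks == K_TICKS_FOREVER) ? MAX_CYCLES
						  : MAX(ticks, 1) * CYC_PER_TICK;
	compare_set(counter_read() + cyc);
	k_spin_unlock(&lock, key);
}

uint32_t sys_clock_elapsed(void)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t dticks = (counter_read() - announced_cycles) / CYC_PER_TICK;
	k_spin_unlock(&lock, key);
	return dticks;
}

static void compare_isr(const void *arg)
{
	ARG_UNUSED(arg);
	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t dticks = (counter_read() - announced_cycles) / CYC_PER_TICK;
	announced_cycles += dticks * CYC_PER_TICK;
	k_spin_unlock(&lock, key);
	/* Announce every tick elapsed since the previous announcement,
	 * exactly once across the system.
	 */
	sys_clock_announce(dticks);
}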

2
kernel/idle.c

@@ -59,7 +59,7 @@ void z_pm_save_idle_exit(int32_t ticks)
*/
pm_system_resume();
#endif /* CONFIG_PM */
z_clock_idle_exit();
sys_clock_idle_exit();
}
void idle(void *unused1, void *unused2, void *unused3)

2
kernel/sched.c

@@ -351,7 +351,7 @@ void z_reset_time_slice(void)
* FUTURE z_time_slice() call.
*/
if (slice_time != 0) {
_current_cpu->slice_ticks = slice_time + z_clock_elapsed();
_current_cpu->slice_ticks = slice_time + sys_clock_elapsed();
z_set_timeout_expiry(slice_time, false);
}
}

16
kernel/timeout.c

@@ -21,7 +21,7 @@ static struct k_spinlock timeout_lock;
#define MAX_WAIT (IS_ENABLED(CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE) \
? K_TICKS_FOREVER : INT_MAX)
/* Cycles left to process in the currently-executing z_clock_announce() */
/* Cycles left to process in the currently-executing sys_clock_announce() */
static int announce_remaining;
#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
@@ -61,7 +61,7 @@ static void remove_timeout(struct _timeout *t)
static int32_t elapsed(void)
{
return announce_remaining == 0 ? z_clock_elapsed() : 0U;
return announce_remaining == 0 ? sys_clock_elapsed() : 0U;
}
static int32_t next_timeout(void)
@@ -131,10 +131,10 @@ void z_add_timeout(struct _timeout *to, _timeout_func_t fn,
if (next_time == 0 ||
_current_cpu->slice_ticks != next_time) {
z_clock_set_timeout(next_time, false);
sys_clock_set_timeout(next_time, false);
}
#else
z_clock_set_timeout(next_timeout(), false);
sys_clock_set_timeout(next_timeout(), false);
#endif /* CONFIG_TIMESLICING */
}
}
@@ -224,12 +224,12 @@ void z_set_timeout_expiry(int32_t ticks, bool is_idle)
* in.
*/
if (!imminent && (sooner || IS_ENABLED(CONFIG_SMP))) {
z_clock_set_timeout(MIN(ticks, next_to), is_idle);
sys_clock_set_timeout(MIN(ticks, next_to), is_idle);
}
}
}
void z_clock_announce(int32_t ticks)
void sys_clock_announce(int32_t ticks)
{
#ifdef CONFIG_TIMESLICING
z_time_slice(ticks);
@@ -260,7 +260,7 @@ void z_clock_announce(int32_t ticks)
curr_tick += announce_remaining;
announce_remaining = 0;
z_clock_set_timeout(next_timeout(), false);
sys_clock_set_timeout(next_timeout(), false);
k_spin_unlock(&timeout_lock, key);
}
@@ -270,7 +270,7 @@ int64_t z_tick_get(void)
uint64_t t = 0U;
LOCKED(&timeout_lock) {
t = curr_tick + z_clock_elapsed();
t = curr_tick + sys_clock_elapsed();
}
return t;
}

2
subsys/shell/modules/kernel_service.c

@@ -139,7 +139,7 @@ static int cmd_kernel_threads(const struct shell *shell,
ARG_UNUSED(argc);
ARG_UNUSED(argv);
shell_print(shell, "Scheduler: %u since last call", z_clock_elapsed());
shell_print(shell, "Scheduler: %u since last call", sys_clock_elapsed());
shell_print(shell, "Threads:");
k_thread_foreach(shell_tdata_dump, (void *)shell);
return 0;

2
tests/kernel/timer/starve/README.txt

@@ -6,7 +6,7 @@ timeout is repeatedly rescheduled before it has a chance to fire. In
some implementations this may prevent the timer interrupt handler from
ever being invoked, which in turn prevents an announcement of ticks.
Lack of tick announcement propagates into a monotonic increase in the
value returned by z_clock_elapsed().
value returned by sys_clock_elapsed().
This test is not run in automatic test suites because it generally takes
minutes, hours, or days to fail, depending on the hardware clock rate

2
tests/kernel/timer/timer_error_case/src/main.c

@@ -336,7 +336,7 @@ void test_timer_add_timeout(void)
}
extern uint64_t z_timeout_end_calc(k_timeout_t timeout);
extern void z_clock_announce(int32_t ticks);
extern void sys_clock_announce(int32_t ticks);
void test_timeout_end_calc(void)
{
int ret;
