
debug: Added CPU usage callback

- Added a CPU load callback with threshold
- Changed cpu_load to use k_timer instead of k_work

Signed-off-by: Kristoffer Rist Skøien <kristoffer.skoien@nordicsemi.no>
Kristoffer Rist Skøien authored 3 months ago · committed by Dan Kalowsky · commit ca2e98c4c9 · pull/92038/merge
Changed files:
  include/zephyr/debug/cpu_load.h (20 changes)
  subsys/debug/cpu_load.c (44 changes)
  tests/subsys/debug/cpu_load/src/main.c (42 changes)
  tests/subsys/debug/cpu_load/testcase.yaml (4 changes)

include/zephyr/debug/cpu_load.h (20 changes)

@@ -8,6 +8,7 @@
 #define ZEPHYR_INCLUDE_DEBUG_CPU_LOAD_H_
 
 #include <stdbool.h>
+#include <stdint.h>
 
 #ifdef __cplusplus
 extern "C" {
@@ -48,6 +49,25 @@ int cpu_load_get(bool reset);
  */
 void cpu_load_log_control(bool enable);
 
+/** @brief Optional callback type for cpu_load_cb_reg.
+ *
+ * This will be called from the k_timer expiry_fn used for periodic logging.
+ * CONFIG_CPU_LOAD_LOG_PERIODICALLY must be configured to a positive value.
+ * Time spent in this callback must be kept to a minimum.
+ */
+typedef void (*cpu_load_cb_t)(uint8_t percent);
+
+/** @brief Optionally register a callback invoked when the load is greater than or equal to the threshold.
+ *
+ * @param cb Pointer to the callback function. NULL will cancel the callback.
+ * @param threshold_percent Threshold [0...100]. A CPU load equal to or greater than this
+ *                          value will trigger the callback.
+ *
+ * @retval 0 Callback registered/cancelled.
+ * @retval -EINVAL if the threshold is invalid.
+ */
+int cpu_load_cb_reg(cpu_load_cb_t cb, uint8_t threshold_percent);
+
 /**
  * @}
  */
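For orientation, a minimal application-side sketch of how the new registration call could be used (not part of this commit; the handler name and the 90 % threshold are made up, and it assumes CONFIG_CPU_LOAD_LOG_PERIODICALLY is set to a positive interval so the periodic timer actually runs):

#include <zephyr/debug/cpu_load.h>
#include <zephyr/kernel.h>
#include <zephyr/logging/log.h>

LOG_MODULE_REGISTER(app);

/* Hypothetical handler: runs from the cpu_load timer expiry, so keep it short. */
static void on_high_load(uint8_t percent)
{
        LOG_WRN("CPU load reached %u%%", percent);
}

int main(void)
{
        /* Invoke the handler whenever a periodic measurement is at or above 90 %.
         * -EINVAL is returned if the threshold is above 100.
         */
        int err = cpu_load_cb_reg(on_high_load, 90);

        if (err != 0) {
                return err;
        }

        k_sleep(K_SECONDS(10));

        /* Passing NULL cancels the registration again. */
        return cpu_load_cb_reg(NULL, 0);
}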

subsys/debug/cpu_load.c (44 changes)

@@ -22,33 +22,49 @@ static uint32_t enter_ts;
 static uint32_t cyc_start;
 static uint32_t ticks_idle;
 
-static struct k_work_delayable cpu_load_log;
+static cpu_load_cb_t load_cb;
+static uint8_t cpu_load_threshold_percent;
+
+static void cpu_load_log_fn(struct k_timer *dummy)
+{
+        int load = cpu_load_get(true);
+        uint32_t percent = load / 10;
+        uint32_t fraction = load % 10;
+
+        LOG_INF("Load:%d.%03d%%", percent, fraction);
+
+        if (load_cb != NULL && percent >= cpu_load_threshold_percent) {
+                load_cb(percent);
+        }
+}
+
+K_TIMER_DEFINE(cpu_load_timer, cpu_load_log_fn, NULL);
 
 void cpu_load_log_control(bool enable)
 {
         if (CONFIG_CPU_LOAD_LOG_PERIODICALLY == 0) {
                 return;
         }
 
         if (enable) {
                 (void)cpu_load_get(true);
-                k_work_schedule(&cpu_load_log, K_MSEC(CONFIG_CPU_LOAD_LOG_PERIODICALLY));
+                k_timer_start(&cpu_load_timer, K_MSEC(CONFIG_CPU_LOAD_LOG_PERIODICALLY),
+                              K_MSEC(CONFIG_CPU_LOAD_LOG_PERIODICALLY));
         } else {
-                k_work_cancel_delayable(&cpu_load_log);
+                k_timer_stop(&cpu_load_timer);
         }
 }
 
-#if CONFIG_CPU_LOAD_USE_COUNTER || CONFIG_CPU_LOAD_LOG_PERIODICALLY
-
-static void cpu_load_log_fn(struct k_work *work)
+int cpu_load_cb_reg(cpu_load_cb_t cb, uint8_t threshold_percent)
 {
-        int load = cpu_load_get(true);
-        uint32_t percent = load / 10;
-        uint32_t fraction = load % 10;
+        if (threshold_percent > 100) {
+                return -EINVAL;
+        }
 
-        LOG_INF("Load:%d.%03d%%", percent, fraction);
-        cpu_load_log_control(true);
+        cpu_load_threshold_percent = threshold_percent;
+        load_cb = cb;
+
+        return 0;
 }
 
+#if CONFIG_CPU_LOAD_USE_COUNTER || CONFIG_CPU_LOAD_LOG_PERIODICALLY
+
 static int cpu_load_init(void)
 {
         if (IS_ENABLED(CONFIG_CPU_LOAD_USE_COUNTER)) {
@@ -59,8 +75,8 @@ static int cpu_load_init(void)
         }
 
         if (CONFIG_CPU_LOAD_LOG_PERIODICALLY > 0) {
-                k_work_init_delayable(&cpu_load_log, cpu_load_log_fn);
-                return k_work_schedule(&cpu_load_log, K_MSEC(CONFIG_CPU_LOAD_LOG_PERIODICALLY));
+                k_timer_start(&cpu_load_timer, K_MSEC(CONFIG_CPU_LOAD_LOG_PERIODICALLY),
+                              K_MSEC(CONFIG_CPU_LOAD_LOG_PERIODICALLY));
         }
 
         return 0;
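
Because periodic logging now runs from a k_timer expiry function rather than the system work queue, a registered callback executes in interrupt context, and, as the header comment says, time spent in it must be kept to a minimum. One possible application-side pattern (illustrative only; the names below are hypothetical and not part of this commit) is to record the value in the callback and defer any heavier reaction to the system work queue:

#include <zephyr/debug/cpu_load.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

static uint8_t latest_percent; /* written by the callback, read by the work item */

static void overload_work_fn(struct k_work *work)
{
        /* Thread context: a safe place for logging, telemetry, shell output, ... */
        printk("Sustained CPU load: %u%%\n", latest_percent);
}

K_WORK_DEFINE(overload_work, overload_work_fn);

static void load_cb(uint8_t percent)
{
        /* Timer expiry (interrupt) context: just record the value and hand off. */
        latest_percent = percent;
        k_work_submit(&overload_work);
}

int app_load_monitor_start(void)
{
        /* Notify at 95 % or more; assumes CONFIG_CPU_LOAD_LOG_PERIODICALLY > 0. */
        return cpu_load_cb_reg(load_cb, 95);
}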

tests/subsys/debug/cpu_load/src/main.c (42 changes)

@@ -94,6 +94,48 @@ ZTEST(cpu_load, test_periodic_report)
         cpu_load_log_control(false);
         log_backend_disable(&dummy);
 }
 
+void low_load_cb(uint8_t percent)
+{
+        /* Should never be called. */
+        zassert_true(false, NULL);
+}
+
+static uint32_t num_load_callbacks;
+static uint8_t last_cpu_load_percent;
+
+void high_load_cb(uint8_t percent)
+{
+        last_cpu_load_percent = percent;
+        num_load_callbacks++;
+}
+
+ZTEST(cpu_load, test_callback_load_low)
+{
+        int ret = cpu_load_cb_reg(low_load_cb, 99);
+
+        zassert_equal(ret, 0);
+        k_msleep(CONFIG_CPU_LOAD_LOG_PERIODICALLY * 4);
+        zassert_equal(num_load_callbacks, 0);
+}
+
+ZTEST(cpu_load, test_callback_load_high)
+{
+        int ret = cpu_load_cb_reg(high_load_cb, 99);
+
+        zassert_equal(ret, 0);
+        k_busy_wait(CONFIG_CPU_LOAD_LOG_PERIODICALLY * 4 * 1000);
+        zassert_between_inclusive(last_cpu_load_percent, 99, 100);
+        zassert_between_inclusive(num_load_callbacks, 2, 7);
+
+        /* Reset the callback. */
+        ret = cpu_load_cb_reg(NULL, 99);
+        num_load_callbacks = 0;
+        zassert_equal(ret, 0);
+        k_busy_wait(CONFIG_CPU_LOAD_LOG_PERIODICALLY * 4 * 1000);
+        zassert_equal(num_load_callbacks, 0);
+}
+
 #endif /* CONFIG_CPU_LOAD_LOG_PERIODICALLY > 0 */
 
 ZTEST_SUITE(cpu_load, NULL, NULL, NULL, NULL, NULL);

tests/subsys/debug/cpu_load/testcase.yaml (4 changes)

@@ -8,17 +8,21 @@ tests:
   debug.cpu_load:
     integration_platforms:
       - mps2/an385
+      - qemu_cortex_m3
   debug.cpu_load.counter:
     platform_allow:
       - nrf52840dk/nrf52840
       - nrf54h20dk/nrf54h20/cpuapp
+      - qemu_cortex_m3
     integration_platforms:
       - nrf52840dk/nrf52840
+      - qemu_cortex_m3
     extra_configs:
       - CONFIG_CPU_LOAD_USE_COUNTER=y
   debug.cpu_load.periodic_report:
     integration_platforms:
       - mps2/an385
+      - qemu_cortex_m3
     extra_configs:
       - CONFIG_CPU_LOAD_LOG_PERIODICALLY=50
       - CONFIG_LOG=y
