Browse Source

tracing: trace mutex/semaphore using dedicated calls

Instead of using generic trace calls, use dedicated functions for
tracing semaphores and mutexes.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
pull/27741/head
Anas Nashif 5 years ago committed by Carles Cufí
parent
commit
390537bf68
  1. 38
      include/tracing/tracing.h
  2. 6
      kernel/mutex.c
  3. 11
      kernel/sem.c
  4. 30
      samples/subsys/tracing/src/tracing_string_format_test.c
  5. 42
      subsys/tracing/ctf/ctf_top.c
  6. 56
      subsys/tracing/ctf/ctf_top.h
  7. 6
      subsys/tracing/ctf/tracing_ctf.h
  8. 48
      subsys/tracing/ctf/tsdl/metadata
  9. 6
      subsys/tracing/include/tracing_cpu_stats.h
  10. 7
      subsys/tracing/include/tracing_test.h
  11. 38
      subsys/tracing/sysview/sysview.c
  12. 6
      subsys/tracing/sysview/tracing_sysview.h

38
include/tracing/tracing.h

@ -50,16 +50,19 @@ @@ -50,16 +50,19 @@
/**
* @brief Called when setting priority of a thread
* @param thread Thread structure
*/
#define sys_trace_thread_priority_set(thread)
/**
* @brief Called when a thread is being created
* @param thread Thread structure
*/
#define sys_trace_thread_create(thread)
/**
* @brief Called when a thread is being aborted
* @param thread Thread structure
*/
#define sys_trace_thread_abort(thread)
@ -132,6 +135,41 @@ @@ -132,6 +135,41 @@
*/
#define sys_trace_idle()
/**
* @brief Trace initialisation of a Semaphore (no-op default definition)
* @param sem Semaphore object
*/
#define sys_trace_semaphore_init(sem)
/**
* @brief Trace taking a Semaphore (no-op default definition)
* @param sem Semaphore object
*/
#define sys_trace_semaphore_take(sem)
/**
* @brief Trace giving a Semaphore (no-op default definition)
* @param sem Semaphore object
*/
#define sys_trace_semaphore_give(sem)
/**
* @brief Trace initialisation of a Mutex (no-op default definition)
* @param mutex Mutex object
*/
#define sys_trace_mutex_init(mutex)
/**
* @brief Trace locking a Mutex (no-op default definition)
* @param mutex Mutex object
*/
#define sys_trace_mutex_lock(mutex)
/**
* @brief Trace unlocking a Mutex (no-op default definition)
* @param mutex Mutex object
*/
#define sys_trace_mutex_unlock(mutex)
/**
* @}
*/

6
kernel/mutex.c

@ -73,7 +73,7 @@ int z_impl_k_mutex_init(struct k_mutex *mutex) @@ -73,7 +73,7 @@ int z_impl_k_mutex_init(struct k_mutex *mutex)
mutex->owner = NULL;
mutex->lock_count = 0U;
sys_trace_void(SYS_TRACE_ID_MUTEX_INIT);
sys_trace_mutex_init(mutex);
z_waitq_init(&mutex->wait_q);
@ -124,7 +124,7 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout) @@ -124,7 +124,7 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout)
__ASSERT(!arch_is_in_isr(), "mutexes cannot be used inside ISRs");
sys_trace_void(SYS_TRACE_ID_MUTEX_LOCK);
sys_trace_mutex_lock(mutex);
key = k_spin_lock(&lock);
if (likely((mutex->lock_count == 0U) || (mutex->owner == _current))) {
@ -233,7 +233,7 @@ int z_impl_k_mutex_unlock(struct k_mutex *mutex) @@ -233,7 +233,7 @@ int z_impl_k_mutex_unlock(struct k_mutex *mutex)
*/
__ASSERT_NO_MSG(mutex->lock_count > 0U);
sys_trace_void(SYS_TRACE_ID_MUTEX_UNLOCK);
sys_trace_mutex_unlock(mutex);
z_sched_lock();
LOG_DBG("mutex %p lock_count: %d", mutex, mutex->lock_count);

11
kernel/sem.c

@ -69,9 +69,9 @@ int z_impl_k_sem_init(struct k_sem *sem, unsigned int initial_count, @@ -69,9 +69,9 @@ int z_impl_k_sem_init(struct k_sem *sem, unsigned int initial_count,
return -EINVAL;
}
sys_trace_void(SYS_TRACE_ID_SEMA_INIT);
sem->count = initial_count;
sem->limit = limit;
sys_trace_semaphore_init(sem);
z_waitq_init(&sem->wait_q);
#if defined(CONFIG_POLL)
sys_dlist_init(&sem->poll_events);
@ -107,9 +107,10 @@ static inline void handle_poll_events(struct k_sem *sem) @@ -107,9 +107,10 @@ static inline void handle_poll_events(struct k_sem *sem)
void z_impl_k_sem_give(struct k_sem *sem)
{
k_spinlock_key_t key = k_spin_lock(&lock);
struct k_thread *thread = z_unpend_first_thread(&sem->wait_q);
struct k_thread *thread;
sys_trace_void(SYS_TRACE_ID_SEMA_GIVE);
sys_trace_semaphore_give(sem);
thread = z_unpend_first_thread(&sem->wait_q);
if (thread != NULL) {
arch_thread_return_value_set(thread, 0);
@ -119,8 +120,8 @@ void z_impl_k_sem_give(struct k_sem *sem) @@ -119,8 +120,8 @@ void z_impl_k_sem_give(struct k_sem *sem)
handle_poll_events(sem);
}
sys_trace_end_call(SYS_TRACE_ID_SEMA_GIVE);
z_reschedule(&lock, key);
sys_trace_end_call(SYS_TRACE_ID_SEMA_GIVE);
}
#ifdef CONFIG_USERSPACE
@ -139,8 +140,8 @@ int z_impl_k_sem_take(struct k_sem *sem, k_timeout_t timeout) @@ -139,8 +140,8 @@ int z_impl_k_sem_take(struct k_sem *sem, k_timeout_t timeout)
__ASSERT(((arch_is_in_isr() == false) ||
K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");
sys_trace_void(SYS_TRACE_ID_SEMA_TAKE);
k_spinlock_key_t key = k_spin_lock(&lock);
sys_trace_semaphore_take(sem);
if (likely(sem->count > 0U)) {
sem->count--;

30
samples/subsys/tracing/src/tracing_string_format_test.c

@ -92,3 +92,33 @@ void sys_trace_end_call(unsigned int id) @@ -92,3 +92,33 @@ void sys_trace_end_call(unsigned int id)
{
TRACING_STRING("%s %d\n", __func__, __LINE__);
}
/* Sample stub: prints the hook name and source line; sem itself is unused. */
void sys_trace_semaphore_init(struct k_sem *sem)
{
	TRACING_STRING("%s %d\n", __func__, __LINE__);
}
/* Sample stub: prints the hook name and source line; sem itself is unused. */
void sys_trace_semaphore_take(struct k_sem *sem)
{
	TRACING_STRING("%s %d\n", __func__, __LINE__);
}
/* Sample stub: prints the hook name and source line; sem itself is unused. */
void sys_trace_semaphore_give(struct k_sem *sem)
{
	TRACING_STRING("%s %d\n", __func__, __LINE__);
}
/* Sample stub: prints the hook name and source line; mutex itself is unused. */
void sys_trace_mutex_init(struct k_mutex *mutex)
{
	TRACING_STRING("%s %d\n", __func__, __LINE__);
}
/* Sample stub: prints the hook name and source line; mutex itself is unused. */
void sys_trace_mutex_lock(struct k_mutex *mutex)
{
	TRACING_STRING("%s %d\n", __func__, __LINE__);
}
/* Sample stub: prints the hook name and source line; mutex itself is unused. */
void sys_trace_mutex_unlock(struct k_mutex *mutex)
{
	TRACING_STRING("%s %d\n", __func__, __LINE__);
}

42
subsys/tracing/ctf/ctf_top.c

@ -168,6 +168,48 @@ void sys_trace_void(unsigned int id) @@ -168,6 +168,48 @@ void sys_trace_void(unsigned int id)
ctf_top_void(id);
}
/* Emit a CTF semaphore_init event; the object address, truncated to
 * 32 bits, serves as the trace identifier.
 */
void sys_trace_semaphore_init(struct k_sem *sem)
{
	uint32_t obj_id = (uint32_t)(uintptr_t)sem;

	ctf_top_semaphore_init(obj_id);
}
/* Emit a CTF semaphore_take event keyed by the semaphore's address. */
void sys_trace_semaphore_take(struct k_sem *sem)
{
	uint32_t obj_id = (uint32_t)(uintptr_t)sem;

	ctf_top_semaphore_take(obj_id);
}
/* Emit a CTF semaphore_give event keyed by the semaphore's address. */
void sys_trace_semaphore_give(struct k_sem *sem)
{
	uint32_t obj_id = (uint32_t)(uintptr_t)sem;

	ctf_top_semaphore_give(obj_id);
}
/* Emit a CTF mutex_init event keyed by the mutex's address. */
void sys_trace_mutex_init(struct k_mutex *mutex)
{
	uint32_t obj_id = (uint32_t)(uintptr_t)mutex;

	ctf_top_mutex_init(obj_id);
}
/* Emit a CTF mutex_lock event keyed by the mutex's address. */
void sys_trace_mutex_lock(struct k_mutex *mutex)
{
	uint32_t obj_id = (uint32_t)(uintptr_t)mutex;

	ctf_top_mutex_lock(obj_id);
}
/* Emit a CTF mutex_unlock event keyed by the mutex's address. */
void sys_trace_mutex_unlock(struct k_mutex *mutex)
{
	uint32_t obj_id = (uint32_t)(uintptr_t)mutex;

	ctf_top_mutex_unlock(obj_id);
}
void sys_trace_end_call(unsigned int id)
{
ctf_top_end_call(id);

56
subsys/tracing/ctf/ctf_top.h

@ -84,7 +84,13 @@ typedef enum { @@ -84,7 +84,13 @@ typedef enum {
CTF_EVENT_ISR_EXIT_TO_SCHEDULER = 0x22,
CTF_EVENT_IDLE = 0x30,
CTF_EVENT_ID_START_CALL = 0x41,
CTF_EVENT_ID_END_CALL = 0x42
CTF_EVENT_ID_END_CALL = 0x42,
CTF_EVENT_SEMAPHORE_INIT = 0x43,
CTF_EVENT_SEMAPHORE_GIVE = 0x44,
CTF_EVENT_SEMAPHORE_TAKE = 0x45,
CTF_EVENT_MUTEX_INIT = 0x46,
CTF_EVENT_MUTEX_LOCK = 0x47,
CTF_EVENT_MUTEX_UNLOCK = 0x48,
} ctf_event_t;
@ -268,4 +274,52 @@ static inline void ctf_top_end_call(uint32_t id) @@ -268,4 +274,52 @@ static inline void ctf_top_end_call(uint32_t id)
);
}
/* Record a semaphore_init event (CTF_EVENT_SEMAPHORE_INIT) with the
 * 32-bit semaphore identifier as payload.
 */
static inline void ctf_top_semaphore_init(uint32_t sem_id)
{
	CTF_EVENT(CTF_LITERAL(uint8_t, CTF_EVENT_SEMAPHORE_INIT), sem_id);
}
/* Record a semaphore_take event with the semaphore identifier. */
static inline void ctf_top_semaphore_take(uint32_t sem_id)
{
	CTF_EVENT(CTF_LITERAL(uint8_t, CTF_EVENT_SEMAPHORE_TAKE), sem_id);
}
/* Record a semaphore_give event with the semaphore identifier. */
static inline void ctf_top_semaphore_give(uint32_t sem_id)
{
	CTF_EVENT(CTF_LITERAL(uint8_t, CTF_EVENT_SEMAPHORE_GIVE), sem_id);
}
/* Record a mutex_init event with the mutex identifier. */
static inline void ctf_top_mutex_init(uint32_t mutex_id)
{
	CTF_EVENT(CTF_LITERAL(uint8_t, CTF_EVENT_MUTEX_INIT), mutex_id);
}
/* Record a mutex_lock event with the mutex identifier. */
static inline void ctf_top_mutex_lock(uint32_t mutex_id)
{
	CTF_EVENT(CTF_LITERAL(uint8_t, CTF_EVENT_MUTEX_LOCK), mutex_id);
}
/* Record a mutex_unlock event with the mutex identifier. */
static inline void ctf_top_mutex_unlock(uint32_t mutex_id)
{
	CTF_EVENT(CTF_LITERAL(uint8_t, CTF_EVENT_MUTEX_UNLOCK), mutex_id);
}
#endif /* SUBSYS_DEBUG_TRACING_CTF_TOP_H */

6
subsys/tracing/ctf/tracing_ctf.h

@ -32,6 +32,12 @@ void sys_trace_isr_exit_to_scheduler(void); @@ -32,6 +32,12 @@ void sys_trace_isr_exit_to_scheduler(void);
void sys_trace_idle(void);
void sys_trace_void(unsigned int id);
void sys_trace_end_call(unsigned int id);
/* Dedicated semaphore/mutex trace hooks; each takes the kernel object
 * being traced (CTF backend implementations live in ctf_top.c).
 */
void sys_trace_semaphore_init(struct k_sem *sem);
void sys_trace_semaphore_take(struct k_sem *sem);
void sys_trace_semaphore_give(struct k_sem *sem);
void sys_trace_mutex_init(struct k_mutex *mutex);
void sys_trace_mutex_lock(struct k_mutex *mutex);
void sys_trace_mutex_unlock(struct k_mutex *mutex);
#ifdef __cplusplus
}

48
subsys/tracing/ctf/tsdl/metadata

@ -167,3 +167,51 @@ event { @@ -167,3 +167,51 @@ event {
call_id id;
};
};
/* Semaphore and mutex trace events. The numeric ids mirror ctf_event_t
 * in ctf_top.h (0x43-0x48); each event carries the 32-bit object id.
 */
event {
	name = semaphore_init;
	id = 0x43;
	fields := struct {
		uint32_t id;
	};
};
event {
	name = semaphore_take;
	id = 0x45;
	fields := struct {
		uint32_t id;
	};
};
event {
	name = semaphore_give;
	id = 0x44;
	fields := struct {
		uint32_t id;
	};
};
event {
	name = mutex_init;
	id = 0x46;
	fields := struct {
		uint32_t id;
	};
};
event {
	name = mutex_lock;
	id = 0x47;
	fields := struct {
		uint32_t id;
	};
};
event {
	name = mutex_unlock;
	id = 0x48;
	fields := struct {
		uint32_t id;
	};
};

6
subsys/tracing/include/tracing_cpu_stats.h

@ -44,6 +44,12 @@ void cpu_stats_reset_counters(void); @@ -44,6 +44,12 @@ void cpu_stats_reset_counters(void);
#define sys_trace_void(id)
#define sys_trace_end_call(id)
/* No-op stubs: the cpu_stats backend does not record semaphore or
 * mutex events, so these hooks expand to nothing.
 */
#define sys_trace_semaphore_init(sem)
#define sys_trace_semaphore_take(sem)
#define sys_trace_semaphore_give(sem)
#define sys_trace_mutex_init(mutex)
#define sys_trace_mutex_lock(mutex)
#define sys_trace_mutex_unlock(mutex)
#ifdef __cplusplus
}

7
subsys/tracing/include/tracing_test.h

@ -31,7 +31,12 @@ void sys_trace_isr_exit_to_scheduler(void); @@ -31,7 +31,12 @@ void sys_trace_isr_exit_to_scheduler(void);
void sys_trace_idle(void);
void sys_trace_void(unsigned int id);
void sys_trace_end_call(unsigned int id);
/* Dedicated semaphore/mutex trace hooks; each takes the kernel object
 * being traced (implemented by the test/sample tracing backend).
 */
void sys_trace_semaphore_init(struct k_sem *sem);
void sys_trace_semaphore_take(struct k_sem *sem);
void sys_trace_semaphore_give(struct k_sem *sem);
void sys_trace_mutex_init(struct k_mutex *mutex);
void sys_trace_mutex_lock(struct k_mutex *mutex);
void sys_trace_mutex_unlock(struct k_mutex *mutex);
#ifdef __cplusplus
}
#endif

38
subsys/tracing/sysview/sysview.c

@ -62,6 +62,44 @@ void sys_trace_idle(void) @@ -62,6 +62,44 @@ void sys_trace_idle(void)
SEGGER_SYSVIEW_OnIdle();
}
/* Forward the semaphore_init event to SystemView; the object address,
 * truncated to 32 bits, is recorded as the event argument.
 */
void sys_trace_semaphore_init(struct k_sem *sem)
{
	uint32_t arg = (uint32_t)(uintptr_t)sem;

	SEGGER_SYSVIEW_RecordU32(SYS_TRACE_ID_SEMA_INIT, arg);
}
/* Forward the semaphore_take event keyed by the semaphore's address. */
void sys_trace_semaphore_take(struct k_sem *sem)
{
	uint32_t arg = (uint32_t)(uintptr_t)sem;

	SEGGER_SYSVIEW_RecordU32(SYS_TRACE_ID_SEMA_TAKE, arg);
}
/* Forward the semaphore_give event keyed by the semaphore's address. */
void sys_trace_semaphore_give(struct k_sem *sem)
{
	uint32_t arg = (uint32_t)(uintptr_t)sem;

	SEGGER_SYSVIEW_RecordU32(SYS_TRACE_ID_SEMA_GIVE, arg);
}
/* Forward the mutex_init event keyed by the mutex's address. */
void sys_trace_mutex_init(struct k_mutex *mutex)
{
	uint32_t arg = (uint32_t)(uintptr_t)mutex;

	SEGGER_SYSVIEW_RecordU32(SYS_TRACE_ID_MUTEX_INIT, arg);
}
/* Forward the mutex_lock event keyed by the mutex's address. */
void sys_trace_mutex_lock(struct k_mutex *mutex)
{
	uint32_t arg = (uint32_t)(uintptr_t)mutex;

	SEGGER_SYSVIEW_RecordU32(SYS_TRACE_ID_MUTEX_LOCK, arg);
}
/* Forward the mutex_unlock event keyed by the mutex's address. */
void sys_trace_mutex_unlock(struct k_mutex *mutex)
{
	uint32_t arg = (uint32_t)(uintptr_t)mutex;

	SEGGER_SYSVIEW_RecordU32(SYS_TRACE_ID_MUTEX_UNLOCK, arg);
}
static void send_task_list_cb(void)
{
struct k_thread *thread;

6
subsys/tracing/sysview/tracing_sysview.h

@ -18,6 +18,12 @@ void sys_trace_isr_enter(void); @@ -18,6 +18,12 @@ void sys_trace_isr_enter(void);
void sys_trace_isr_exit(void);
void sys_trace_isr_exit_to_scheduler(void);
void sys_trace_idle(void);
/* Dedicated semaphore/mutex trace hooks; each takes the kernel object
 * being traced (SystemView backend implementations live in sysview.c).
 */
void sys_trace_semaphore_init(struct k_sem *sem);
void sys_trace_semaphore_take(struct k_sem *sem);
void sys_trace_semaphore_give(struct k_sem *sem);
void sys_trace_mutex_init(struct k_mutex *mutex);
void sys_trace_mutex_lock(struct k_mutex *mutex);
void sys_trace_mutex_unlock(struct k_mutex *mutex);
#define sys_trace_thread_priority_set(thread)

Loading…
Cancel
Save