
kernel/spinlock: Move validation out of header inlines

The validation checking recently added to spinlocks is useful, but it
requires kernel internals like _current and _current_cpu in a header
context that tends to be needed before those are declared (or where we
don't want them declared), and that is causing big header dependency
headaches.

Move it to C code; it's just a validation tool, not a performance
feature.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
Branch: pull/13188/head
Andy Ross authored 7 years ago; committed by Anas Nashif
Commit: 5aa7460e5c
include/spinlock.h | 18
kernel/thread.c    | 26
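The change below is an instance of a standard dependency-breaking pattern: the header forward-declares the lock type and the validation predicates, and only the implementation file includes the heavyweight kernel headers that define _current and _current_cpu. A minimal self-contained sketch of the pattern (the names here are hypothetical, not from the Zephyr tree):

/* widget.h (hypothetical): an incomplete type is enough for a
 * prototype that takes a pointer, so no internal headers are needed.
 */
struct widget;
int widget_valid(struct widget *w);

/* widget.c (hypothetical): only the implementation file defines the
 * struct and may dereference it.
 */
struct widget {
	int owner;  /* stand-in for the private state */
};

int widget_valid(struct widget *w)
{
	return (w != NULL) && (w->owner == 0);
}

The cost is one out-of-line call per lock operation, which is acceptable here precisely because, as the commit message says, this is a validation tool rather than a fast path.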

include/spinlock.h

@@ -9,7 +9,10 @@
 #include <atomic.h>

 #if defined(CONFIG_ASSERT) && (CONFIG_MP_NUM_CPUS < 4)
-#include <kernel_structs.h>
 #include <misc/__assert.h>
+struct k_spinlock;
+int z_spin_lock_valid(struct k_spinlock *l);
+int z_spin_unlock_valid(struct k_spinlock *l);
 #define SPIN_VALIDATE
 #endif
@@ -43,11 +46,7 @@ static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
 	k.key = _arch_irq_lock();

 #ifdef SPIN_VALIDATE
-	if (l->thread_cpu) {
-		__ASSERT((l->thread_cpu & 3) != _current_cpu->id,
-			 "Recursive spinlock");
-	}
-	l->thread_cpu = _current_cpu->id | (u32_t)_current;
+	__ASSERT(z_spin_lock_valid(l), "Recursive spinlock");
 #endif

 #ifdef CONFIG_SMP
@@ -61,13 +60,11 @@ static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
 static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
 					k_spinlock_key_t key)
 {
+	ARG_UNUSED(l);
 #ifdef SPIN_VALIDATE
-	__ASSERT(l->thread_cpu == (_current_cpu->id | (u32_t)_current),
-		 "Not my spinlock!");
-	l->thread_cpu = 0;
+	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock!");
 #endif

 #ifdef CONFIG_SMP
 	/* Strictly we don't need atomic_clear() here (which is an
 	 * exchange operation that returns the old value). We are always
@@ -86,6 +83,7 @@ static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
  */
 static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
 {
+	ARG_UNUSED(l);
 #ifdef SPIN_VALIDATE
 	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock!");
 #endif
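For context, here is a minimal usage sketch (not from the patch) of the failure mode the validator traps: taking the same lock twice on one CPU. In a real SMP build this deadlocks; with SPIN_VALIDATE the "Recursive spinlock" assertion fires instead.

#include <spinlock.h>

static struct k_spinlock lock;

void bad_nested_lock(void)
{
	/* First acquisition records this CPU and thread in lock.thread_cpu */
	k_spinlock_key_t k1 = k_spin_lock(&lock);

	/* Second acquisition on the same CPU: z_spin_lock_valid() returns 0
	 * and __ASSERT reports "Recursive spinlock".
	 */
	k_spinlock_key_t k2 = k_spin_lock(&lock);

	k_spin_unlock(&lock, k2);
	k_spin_unlock(&lock, k1);
}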

kernel/thread.c

@@ -16,6 +16,7 @@
 #include <toolchain.h>
 #include <linker/sections.h>
 #include <spinlock.h>
+#include <kernel_structs.h>
 #include <misc/printk.h>
 #include <sys_clock.h>
@@ -704,3 +705,28 @@ FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
 	_thread_entry(entry, p1, p2, p3);
 #endif
 }
+
+/* These spinlock assertion predicates are defined here because having
+ * them in spinlock.h is a giant header ordering headache.
+ */
+#ifdef SPIN_VALIDATE
+int z_spin_lock_valid(struct k_spinlock *l)
+{
+	if (l->thread_cpu) {
+		if ((l->thread_cpu & 3) == _current_cpu->id) {
+			return 0;
+		}
+	}
+	l->thread_cpu = _current_cpu->id | (u32_t)_current;
+	return 1;
+}
+
+int z_spin_unlock_valid(struct k_spinlock *l)
+{
+	if (l->thread_cpu != (_current_cpu->id | (u32_t)_current)) {
+		return 0;
+	}
+	l->thread_cpu = 0;
+	return 1;
+}
+#endif
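A note on the encoding these predicates share (my gloss, not from the patch): thread_cpu packs the owning CPU's id into the two low bits of the owning thread pointer, bits that are free because struct k_thread pointers are at least 4-byte aligned. That is why the owner CPU is recovered with & 3, and why SPIN_VALIDATE is only enabled when CONFIG_MP_NUM_CPUS < 4. A standalone sketch with illustrative values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative stand-ins for (u32_t)_current and _current_cpu->id */
	uint32_t thread = 0x20001000u;  /* 4-byte aligned: low two bits are zero */
	uint32_t cpu_id = 2u;           /* fits in two bits */

	uint32_t thread_cpu = cpu_id | thread;  /* the packed owner word */

	printf("owning cpu:    %u\n",  (unsigned)(thread_cpu & 3));   /* prints 2 */
	printf("owning thread: %#x\n", (unsigned)(thread_cpu & ~3u)); /* prints 0x20001000 */
	return 0;
}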
