diff --git a/include/spinlock.h b/include/spinlock.h
index c8367fc0e39..6c79d356fc0 100644
--- a/include/spinlock.h
+++ b/include/spinlock.h
@@ -9,7 +9,10 @@
 #include <atomic.h>
 
 #if defined(CONFIG_ASSERT) && (CONFIG_MP_NUM_CPUS < 4)
-#include <kernel_structs.h>
+#include <misc/__assert.h>
+struct k_spinlock;
+int z_spin_lock_valid(struct k_spinlock *l);
+int z_spin_unlock_valid(struct k_spinlock *l);
 #define SPIN_VALIDATE
 #endif
 
@@ -43,11 +46,7 @@ static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
 	k.key = _arch_irq_lock();
 
 #ifdef SPIN_VALIDATE
-	if (l->thread_cpu) {
-		__ASSERT((l->thread_cpu & 3) != _current_cpu->id,
-			 "Recursive spinlock");
-	}
-	l->thread_cpu = _current_cpu->id | (u32_t)_current;
+	__ASSERT(z_spin_lock_valid(l), "Recursive spinlock");
 #endif
 
 #ifdef CONFIG_SMP
@@ -61,13 +60,11 @@ static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
 static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
 					k_spinlock_key_t key)
 {
+	ARG_UNUSED(l);
 #ifdef SPIN_VALIDATE
-	__ASSERT(l->thread_cpu == (_current_cpu->id | (u32_t)_current),
-		 "Not my spinlock!");
-	l->thread_cpu = 0;
+	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock!");
 #endif
-
 #ifdef CONFIG_SMP
 	/* Strictly we don't need atomic_clear() here (which is an
 	 * exchange operation that returns the old value).  We are always
@@ -86,6 +83,7 @@ static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
  */
 static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
 {
+	ARG_UNUSED(l);
 #ifdef SPIN_VALIDATE
 	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock!");
 #endif
diff --git a/kernel/thread.c b/kernel/thread.c
index 50613de798a..80c391508ca 100644
--- a/kernel/thread.c
+++ b/kernel/thread.c
@@ -16,6 +16,7 @@
 #include <kernel_structs.h>
 #include <toolchain.h>
+#include <spinlock.h>
 #include <linker/sections.h>
 #include <string.h>
 #include <stdbool.h>
 
@@ -704,3 +705,28 @@ FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
 	_thread_entry(entry, p1, p2, p3);
 #endif
 }
+
+/* These spinlock assertion predicates are defined here because having
+ * them in spinlock.h is a giant header ordering headache.
+ */
+#ifdef SPIN_VALIDATE
+int z_spin_lock_valid(struct k_spinlock *l)
+{
+	if (l->thread_cpu) {
+		if ((l->thread_cpu & 3) == _current_cpu->id) {
+			return 0;
+		}
+	}
+	l->thread_cpu = _current_cpu->id | (u32_t)_current;
+	return 1;
+}
+
+int z_spin_unlock_valid(struct k_spinlock *l)
+{
+	if (l->thread_cpu != (_current_cpu->id | (u32_t)_current)) {
+		return 0;
+	}
+	l->thread_cpu = 0;
+	return 1;
+}
+#endif