
kernel: fix return type for atomic_cas()

Depending on the configuration, atomic_cas() was declared to return either
a bool or an integer value of 1 or 0. Standardize on bool across all
implementations.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
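
With a single bool return type, callers can test the result of atomic_cas() directly instead of comparing against 1 or 0. A minimal sketch of that calling pattern (the flag and function names below are hypothetical, not part of this change):

#include <sys/atomic.h>

/* Hypothetical one-shot flag, used only to illustrate the bool return. */
static atomic_t init_done = ATOMIC_INIT(0);

void maybe_init(void)
{
	/* atomic_cas() returns true only for the caller that wins the
	 * 0 -> 1 transition, so the body runs at most once.
	 */
	if (atomic_cas(&init_done, 0, 1)) {
		/* ... one-time setup ... */
	}
}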
Commit 60e0019751 (pull/23377/head), authored by Andrew Boie 5 years ago, committed by Anas Nashif.
Files changed (3):
  include/sys/atomic.h              | 4
  kernel/atomic_c.c                 | 18
  tests/kernel/common/src/atomic.c  | 4

include/sys/atomic.h (4 changed lines)

@@ -49,11 +49,11 @@ static inline bool atomic_cas(atomic_t *target, atomic_val_t old_value,
 					  __ATOMIC_SEQ_CST);
 }
 #elif defined(CONFIG_ATOMIC_OPERATIONS_C)
-__syscall int atomic_cas(atomic_t *target, atomic_val_t old_value,
+__syscall bool atomic_cas(atomic_t *target, atomic_val_t old_value,
			  atomic_val_t new_value);
 #else
-extern int atomic_cas(atomic_t *target, atomic_val_t old_value,
+extern bool atomic_cas(atomic_t *target, atomic_val_t old_value,
		       atomic_val_t new_value);
 #endif

kernel/atomic_c.c (18 changed lines)

@@ -63,10 +63,10 @@ static struct k_spinlock lock;
  *
  * This routine provides the compare-and-set operator. If the original value at
  * <target> equals <oldValue>, then <newValue> is stored at <target> and the
- * function returns 1.
+ * function returns true.
  *
  * If the original value at <target> does not equal <oldValue>, then the store
- * is not done and the function returns 0.
+ * is not done and the function returns false.
  *
  * The reading of the original value at <target>, the comparison,
  * and the write of the new value (if it occurs) all happen atomically with
@@ -75,19 +75,19 @@ static struct k_spinlock lock;
  * @param target address to be tested
  * @param old_value value to compare against
  * @param new_value value to compare against
- * @return Returns 1 if <new_value> is written, 0 otherwise.
+ * @return Returns true if <new_value> is written, false otherwise.
  */
-int z_impl_atomic_cas(atomic_t *target, atomic_val_t old_value,
-		      atomic_val_t new_value)
+bool z_impl_atomic_cas(atomic_t *target, atomic_val_t old_value,
+		       atomic_val_t new_value)
 {
 	k_spinlock_key_t key;
-	int ret = 0;
+	int ret = false;
 	key = k_spin_lock(&lock);
 	if (*target == old_value) {
 		*target = new_value;
-		ret = 1;
+		ret = true;
 	}
 	k_spin_unlock(&lock, key);
@@ -96,8 +96,8 @@ int z_impl_atomic_cas(atomic_t *target, atomic_val_t old_value,
 }
 #ifdef CONFIG_USERSPACE
-int z_vrfy_atomic_cas(atomic_t *target, atomic_val_t old_value,
-		      atomic_val_t new_value)
+bool z_vrfy_atomic_cas(atomic_t *target, atomic_val_t old_value,
+		       atomic_val_t new_value)
 {
 	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t)));
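
The compare-and-set semantics documented above are also why a retry loop is the usual way to build other atomic operations on top of atomic_cas(). A rough sketch using only the public <sys/atomic.h> API (the helper name is made up; Zephyr already provides atomic_add() for this):

#include <sys/atomic.h>

/* Illustrative CAS-based add: retry until no other thread has modified
 * *target between the read and the compare-and-set.
 */
static atomic_val_t cas_add(atomic_t *target, atomic_val_t delta)
{
	atomic_val_t old;

	do {
		old = atomic_get(target);
	} while (!atomic_cas(target, old, old + delta));

	return old + delta;
}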

tests/kernel/common/src/atomic.c (4 changed lines)

@@ -34,9 +34,9 @@ void test_atomic(void)
 	oldvalue = 6;
 	/* atomic_cas() */
-	zassert_true((atomic_cas(&target, oldvalue, value) == 0), "atomic_cas");
+	zassert_false(atomic_cas(&target, oldvalue, value), "atomic_cas");
 	target = 6;
-	zassert_true((atomic_cas(&target, oldvalue, value) == 1), "atomic_cas");
+	zassert_true(atomic_cas(&target, oldvalue, value), "atomic_cas");
 	zassert_true((target == value), "atomic_cas");
 	/* atomic_add() */
