
kernel: Make statements evaluate boolean expressions

MISRA-C requires that the controlling expression of an if statement or iteration statement has essentially Boolean type.

MISRA-C rule 14.4

Signed-off-by: Flavio Ceolin <flavio.ceolin@intel.com>
Branch: pull/6826/head
Author: Flavio Ceolin, committed by Anas Nashif
Commit: 76b3518ce6
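To illustrate the rule, here is a minimal sketch of the pattern this commit applies throughout the tree (the identifiers below are hypothetical examples, not code from the Zephyr sources):

#include <stdbool.h>
#include <stddef.h>

void example(int err, unsigned int flags, const char *name)
{
	/* Non-compliant: controlling expressions are int or pointer,
	 * not essentially Boolean.
	 */
	if (err) { /* ... */ }
	if (flags & 0x1) { /* ... */ }
	if (!name) { /* ... */ }

	/* Compliant: each condition is an explicit comparison yielding
	 * an essentially Boolean result.
	 */
	if (err != 0) { /* ... */ }
	if ((flags & 0x1) != 0) { /* ... */ }
	if (name == NULL) { /* ... */ }

	/* Variables that only ever hold a truth value become bool, which
	 * is why kernel/timer.c gains an include of <stdbool.h> below.
	 */
	bool done = (err == 0);
	if (done) { /* ... */ }
}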
include/kernel.h                 |  4
kernel/include/syscall_handler.h |  2
kernel/int_latency_bench.c       |  2
kernel/mailbox.c                 |  7
kernel/mem_domain.c              |  2
kernel/msg_q.c                   |  2
kernel/pipes.c                   |  8
kernel/poll.c                    | 10
kernel/queue.c                   |  2
kernel/thread.c                  |  4
kernel/timer.c                   |  7
kernel/userspace.c               | 12
kernel/userspace_handler.c       |  2
lib/rbtree/rb.c                  | 16

include/kernel.h (4 changes)

@@ -2589,7 +2589,7 @@ static inline int k_work_submit_to_user_queue(struct k_work_q *work_q,
/* Couldn't insert into the queue. Clear the pending bit
* so the work item can be submitted again
*/
-if (ret) {
+if (ret != 0) {
atomic_clear_bit(work->flags, K_WORK_STATE_PENDING);
}
}
@@ -4211,7 +4211,7 @@ extern void *k_calloc(size_t nmemb, size_t size);
/* private - implementation data created as needed, per-type */
struct _poller {
struct k_thread *thread;
-volatile int is_polling;
+volatile bool is_polling;
};
/* private - types bit positions */

kernel/include/syscall_handler.h (2 changes)

@@ -399,7 +399,7 @@ static inline int _obj_validation_check(struct _k_object *ko,
ret = _k_object_validate(ko, otype, init);
#ifdef CONFIG_PRINTK
-if (ret) {
+if (ret != 0) {
_dump_object_error(ret, obj, ko, otype);
}
#else

kernel/int_latency_bench.c (2 changes)

@@ -176,7 +176,7 @@ void int_latency_show(void)
{
u32_t intHandlerLatency = 0U;
-if (!int_latency_bench_ready) {
+if (int_latency_bench_ready == 0) {
printk("error: int_latency_init() has not been invoked\n");
return;
}

kernel/mailbox.c (7 changes)

@@ -204,7 +204,7 @@ static void mbox_message_dispose(struct k_mbox_msg *rx_msg)
* asynchronous send: free asynchronous message descriptor +
* dummy thread pair, then give semaphore (if needed)
*/
-if (sending_thread->base.thread_state & _THREAD_DUMMY) {
+if ((sending_thread->base.thread_state & _THREAD_DUMMY) != 0) {
struct k_sem *async_sem = tx_msg->_async_sem;
mbox_async_free((struct k_mbox_async *)sending_thread);
@@ -274,7 +274,8 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
* note: dummy sending thread sits (unqueued)
* until the receiver consumes the message
*/
-if (sending_thread->base.thread_state & _THREAD_DUMMY) {
+if ((sending_thread->base.thread_state & _THREAD_DUMMY)
+    != 0) {
_reschedule(key);
return 0;
}
@@ -297,7 +298,7 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
/* asynchronous send: dummy thread waits on tx queue for receiver */
-if (sending_thread->base.thread_state & _THREAD_DUMMY) {
+if ((sending_thread->base.thread_state & _THREAD_DUMMY) != 0) {
_pend_thread(sending_thread, &mbox->tx_msg_queue, K_FOREVER);
irq_unlock(key);
return 0;

kernel/mem_domain.c (2 changes)

@@ -84,7 +84,7 @@ void k_mem_domain_init(struct k_mem_domain *domain, u8_t num_parts,
domain->num_partitions = 0;
(void)memset(domain->partitions, 0, sizeof(domain->partitions));
-if (num_parts) {
+if (num_parts != 0) {
u32_t i;
for (i = 0U; i < num_parts; i++) {

kernel/msg_q.c (2 changes)

@@ -101,7 +101,7 @@ void k_msgq_cleanup(struct k_msgq *q)
{
__ASSERT_NO_MSG(!_waitq_head(&q->wait_q));
-if (q->flags & K_MSGQ_FLAG_ALLOC) {
+if ((q->flags & K_MSGQ_FLAG_ALLOC) != 0) {
k_free(q->buffer_start);
q->flags &= ~K_MSGQ_FLAG_ALLOC;
}

kernel/pipes.c (8 changes)

@@ -150,9 +150,9 @@ int _impl_k_pipe_alloc_init(struct k_pipe *pipe, size_t size)
void *buffer;
int ret;
-if (size) {
+if (size != 0) {
buffer = z_thread_malloc(size);
-if (buffer) {
+if (buffer != NULL) {
k_pipe_init(pipe, buffer, size);
pipe->flags = K_PIPE_FLAG_ALLOC;
ret = 0;
@@ -181,7 +181,7 @@ void k_pipe_cleanup(struct k_pipe *pipe)
__ASSERT_NO_MSG(!_waitq_head(&pipe->wait_q.readers));
__ASSERT_NO_MSG(!_waitq_head(&pipe->wait_q.writers));
-if (pipe->flags & K_PIPE_FLAG_ALLOC) {
+if ((pipe->flags & K_PIPE_FLAG_ALLOC) != 0) {
k_free(pipe->buffer);
pipe->buffer = NULL;
pipe->flags &= ~K_PIPE_FLAG_ALLOC;
@@ -415,7 +415,7 @@ static void pipe_thread_ready(struct k_thread *thread)
unsigned int key;
#if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0)
-if (thread->base.thread_state & _THREAD_DUMMY) {
+if ((thread->base.thread_state & _THREAD_DUMMY) != 0) {
pipe_async_finish((struct k_pipe_async *)thread);
return;
}

kernel/poll.c (10 changes)

@@ -59,7 +59,7 @@ static inline int is_condition_met(struct k_poll_event *event, u32_t *state)
}
break;
case K_POLL_TYPE_SIGNAL:
-if (event->signal->signaled) {
+if (event->signal->signaled != 0) {
*state = K_POLL_STATE_SIGNALED;
return 1;
}
@@ -183,7 +183,7 @@ int _impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
int last_registered = -1, rc;
unsigned int key;
-struct _poller poller = { .thread = _current, .is_polling = 1, };
+struct _poller poller = { .thread = _current, .is_polling = true, };
/* find events whose condition is already fulfilled */
for (int ii = 0; ii < num_events; ii++) {
@@ -192,7 +192,7 @@ int _impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
key = irq_lock();
if (is_condition_met(&events[ii], &state)) {
set_event_ready(&events[ii], state);
-poller.is_polling = 0;
+poller.is_polling = false;
} else if (timeout != K_NO_WAIT && poller.is_polling) {
rc = register_event(&events[ii], &poller);
if (rc == 0) {
@@ -217,7 +217,7 @@ int _impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
return 0;
}
-poller.is_polling = 0;
+poller.is_polling = false;
if (timeout == K_NO_WAIT) {
irq_unlock(key);
@@ -331,7 +331,7 @@ static int signal_poll_event(struct k_poll_event *event, u32_t state)
__ASSERT(event->poller->thread != NULL,
"poller should have a thread\n");
-event->poller->is_polling = 0;
+event->poller->is_polling = false;
if (!_is_thread_pending(thread)) {
goto ready_event;

kernel/queue.c (2 changes)

@@ -237,7 +237,7 @@ void k_queue_append_list(struct k_queue *queue, void *head, void *tail)
#if !defined(CONFIG_POLL)
struct k_thread *thread = NULL;
-if (head) {
+if (head != NULL) {
thread = _unpend_first_thread(&queue->wait_q);
}

kernel/thread.c (4 changes)

@@ -244,7 +244,7 @@ void _check_stack_sentinel(void)
{
u32_t *stack;
-if (_current->base.thread_state & _THREAD_DUMMY) {
+if ((_current->base.thread_state & _THREAD_DUMMY) != 0) {
return;
}
@@ -403,7 +403,7 @@ void _setup_new_thread(struct k_thread *new_thread,
new_thread);
}
-if (options & K_INHERIT_PERMS) {
+if ((options & K_INHERIT_PERMS) != 0) {
_thread_perms_inherit(_current, new_thread);
}
#endif

kernel/timer.c (7 changes)

@@ -9,6 +9,7 @@
#include <init.h>
#include <wait_q.h>
#include <syscall_handler.h>
+#include <stdbool.h>
extern struct k_timer _k_timer_list_start[];
extern struct k_timer _k_timer_list_end[];
@@ -64,7 +65,7 @@ void _timer_expiration_handler(struct _timeout *t)
timer->status += 1;
/* invoke timer expiry function */
-if (timer->expiry_fn) {
+if (timer->expiry_fn != NULL) {
timer->expiry_fn(timer);
}
@@ -149,7 +150,7 @@ Z_SYSCALL_HANDLER(k_timer_start, timer, duration_p, period_p)
void _impl_k_timer_stop(struct k_timer *timer)
{
unsigned int key = irq_lock();
-int inactive = (_abort_timeout(&timer->timeout) == _INACTIVE);
+bool inactive = (_abort_timeout(&timer->timeout) == _INACTIVE);
irq_unlock(key);
@@ -157,7 +158,7 @@ void _impl_k_timer_stop(struct k_timer *timer)
return;
}
-if (timer->stop_fn) {
+if (timer->stop_fn != NULL) {
timer->stop_fn(timer);
}

kernel/userspace.c (12 changes)

@@ -174,7 +174,7 @@ static bool _thread_idx_alloc(u32_t *tidx)
for (i = 0; i < CONFIG_MAX_THREAD_BYTES; i++) {
idx = find_lsb_set(_thread_idx_map[i]);
-if (idx) {
+if (idx != 0) {
*tidx = base + (idx - 1);
sys_bitfield_clear_bit((mem_addr_t)_thread_idx_map,
@@ -334,7 +334,7 @@ static int thread_index_get(struct k_thread *t)
static void unref_check(struct _k_object *ko)
{
for (int i = 0; i < CONFIG_MAX_THREAD_BYTES; i++) {
-if (ko->perms[i]) {
+if (ko->perms[i] != 0) {
return;
}
}
@@ -360,7 +360,7 @@ static void unref_check(struct _k_object *ko)
}
#ifdef CONFIG_DYNAMIC_OBJECTS
-if (ko->flags & K_OBJ_FLAG_ALLOC) {
+if ((ko->flags & K_OBJ_FLAG_ALLOC) != 0) {
struct dyn_obj *dyn_obj =
CONTAINER_OF(ko, struct dyn_obj, kobj);
rb_remove(&obj_rb_tree, &dyn_obj->node);
@@ -438,7 +438,7 @@ static int thread_perms_test(struct _k_object *ko)
{
int index;
-if (ko->flags & K_OBJ_FLAG_PUBLIC) {
+if ((ko->flags & K_OBJ_FLAG_PUBLIC) != 0) {
return 1;
}
@@ -663,7 +663,7 @@ char *z_user_string_alloc_copy(char *src, size_t maxlen)
key = irq_lock();
actual_len = z_user_string_nlen(src, maxlen, &err);
-if (err) {
+if (err != 0) {
goto out;
}
if (actual_len == maxlen) {
@@ -690,7 +690,7 @@ int z_user_string_copy(char *dst, char *src, size_t maxlen)
key = irq_lock();
actual_len = z_user_string_nlen(src, maxlen, &err);
-if (err) {
+if (err != 0) {
ret = EFAULT;
goto out;
}

kernel/userspace_handler.c (2 changes)

@@ -19,7 +19,7 @@ static struct _k_object *validate_any_object(void *obj)
* initialized
*/
ret = _k_object_validate(ko, K_OBJ_ANY, _OBJ_INIT_ANY);
-if (ret) {
+if (ret != 0) {
#ifdef CONFIG_PRINTK
_dump_object_error(ret, obj, ko, K_OBJ_ANY);
#endif

lib/rbtree/rb.c (16 changes)

@@ -21,7 +21,7 @@ enum rb_color { RED = 0, BLACK = 1 };
static struct rbnode *get_child(struct rbnode *n, int side)
{
CHECK(n);
-if (side) {
+if (side != 0) {
return n->children[1];
}
@@ -34,7 +34,7 @@ static struct rbnode *get_child(struct rbnode *n, int side)
static void set_child(struct rbnode *n, int side, void *val)
{
CHECK(n);
-if (side) {
+if (side != 0) {
n->children[1] = val;
} else {
uintptr_t old = (uintptr_t) n->children[0];
@@ -87,7 +87,7 @@ static int find_and_stack(struct rbtree *tree, struct rbnode *node,
int side = tree->lessthan_fn(node, stack[sz - 1]) ? 0 : 1;
struct rbnode *ch = get_child(stack[sz - 1], side);
-if (ch) {
+if (ch != NULL) {
stack[sz++] = ch;
} else {
break;
@@ -400,7 +400,7 @@ void rb_remove(struct rbtree *tree, struct rbnode *node)
* pointers, so the stack tracking this structure
* needs to be swapped too!
*/
-if (hiparent) {
+if (hiparent != NULL) {
set_child(hiparent, get_side(hiparent, node), node2);
} else {
tree->root = node2;
@@ -440,7 +440,7 @@ void rb_remove(struct rbtree *tree, struct rbnode *node)
/* Removing the root */
if (stacksz < 2) {
tree->root = child;
-if (child) {
+if (child != NULL) {
set_color(child, BLACK);
} else {
tree->max_depth = 0;
@@ -523,7 +523,7 @@ static inline struct rbnode *stack_left_limb(struct rbnode *n,
f->stack[f->top] = n;
f->is_left[f->top] = 0;
-while ((n = get_child(n, 0))) {
+while ((n = get_child(n, 0)) != NULL) {
f->top++;
f->stack[f->top] = n;
f->is_left[f->top] = 1;
@@ -568,7 +568,7 @@ struct rbnode *_rb_foreach_next(struct rbtree *tree, struct _rb_foreach *f)
* above with is_left set to 0, so this condition still works
* even if node has no parent).
*/
-if (f->is_left[f->top]) {
+if (f->is_left[f->top] != 0) {
return f->stack[--f->top];
}
@@ -576,7 +576,7 @@ struct rbnode *_rb_foreach_next(struct rbtree *tree, struct _rb_foreach *f)
* parent was already walked, so walk up the stack looking for
* a left child (whose parent is unwalked, and thus next).
*/
-while (f->top > 0 && !f->is_left[f->top]) {
+while ((f->top > 0) && (f->is_left[f->top] == 0)) {
f->top--;
}
