From 76b3518ce6c0da6c4f3a5f688ea4e5beb827ac2c Mon Sep 17 00:00:00 2001 From: Flavio Ceolin Date: Sun, 16 Dec 2018 12:48:29 -0800 Subject: [PATCH] kernel: Make statements evaluate boolean expressions MISRA-C requires that the if statement has essentially Boolean type. MISRA-C rule 14.4 Signed-off-by: Flavio Ceolin --- include/kernel.h | 4 ++-- kernel/include/syscall_handler.h | 2 +- kernel/int_latency_bench.c | 2 +- kernel/mailbox.c | 7 ++++--- kernel/mem_domain.c | 2 +- kernel/msg_q.c | 2 +- kernel/pipes.c | 8 ++++---- kernel/poll.c | 10 +++++----- kernel/queue.c | 2 +- kernel/thread.c | 4 ++-- kernel/timer.c | 7 ++++--- kernel/userspace.c | 12 ++++++------ kernel/userspace_handler.c | 2 +- lib/rbtree/rb.c | 16 ++++++++-------- 14 files changed, 41 insertions(+), 39 deletions(-) diff --git a/include/kernel.h b/include/kernel.h index 2de99c56e3e..5eda316f7fb 100644 --- a/include/kernel.h +++ b/include/kernel.h @@ -2589,7 +2589,7 @@ static inline int k_work_submit_to_user_queue(struct k_work_q *work_q, /* Couldn't insert into the queue. 
Clear the pending bit * so the work item can be submitted again */ - if (ret) { + if (ret != 0) { atomic_clear_bit(work->flags, K_WORK_STATE_PENDING); } } @@ -4211,7 +4211,7 @@ extern void *k_calloc(size_t nmemb, size_t size); /* private - implementation data created as needed, per-type */ struct _poller { struct k_thread *thread; - volatile int is_polling; + volatile bool is_polling; }; /* private - types bit positions */ diff --git a/kernel/include/syscall_handler.h b/kernel/include/syscall_handler.h index a26e1442745..837eb0a4af2 100644 --- a/kernel/include/syscall_handler.h +++ b/kernel/include/syscall_handler.h @@ -399,7 +399,7 @@ static inline int _obj_validation_check(struct _k_object *ko, ret = _k_object_validate(ko, otype, init); #ifdef CONFIG_PRINTK - if (ret) { + if (ret != 0) { _dump_object_error(ret, obj, ko, otype); } #else diff --git a/kernel/int_latency_bench.c b/kernel/int_latency_bench.c index 052d71ea917..1b737f97acd 100644 --- a/kernel/int_latency_bench.c +++ b/kernel/int_latency_bench.c @@ -176,7 +176,7 @@ void int_latency_show(void) { u32_t intHandlerLatency = 0U; - if (!int_latency_bench_ready) { + if (int_latency_bench_ready == 0) { printk("error: int_latency_init() has not been invoked\n"); return; } diff --git a/kernel/mailbox.c b/kernel/mailbox.c index bb331021897..890558cf1db 100644 --- a/kernel/mailbox.c +++ b/kernel/mailbox.c @@ -204,7 +204,7 @@ static void mbox_message_dispose(struct k_mbox_msg *rx_msg) * asynchronous send: free asynchronous message descriptor + * dummy thread pair, then give semaphore (if needed) */ - if (sending_thread->base.thread_state & _THREAD_DUMMY) { + if ((sending_thread->base.thread_state & _THREAD_DUMMY) != 0) { struct k_sem *async_sem = tx_msg->_async_sem; mbox_async_free((struct k_mbox_async *)sending_thread); @@ -274,7 +274,8 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, * note: dummy sending thread sits (unqueued) * until the receiver consumes the message */ - if 
(sending_thread->base.thread_state & _THREAD_DUMMY) { + if ((sending_thread->base.thread_state & _THREAD_DUMMY) + != 0) { _reschedule(key); return 0; } @@ -297,7 +298,7 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, #if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0) /* asynchronous send: dummy thread waits on tx queue for receiver */ - if (sending_thread->base.thread_state & _THREAD_DUMMY) { + if ((sending_thread->base.thread_state & _THREAD_DUMMY) != 0) { _pend_thread(sending_thread, &mbox->tx_msg_queue, K_FOREVER); irq_unlock(key); return 0; diff --git a/kernel/mem_domain.c b/kernel/mem_domain.c index 0b56ad36f52..86bcf1aa8a7 100644 --- a/kernel/mem_domain.c +++ b/kernel/mem_domain.c @@ -84,7 +84,7 @@ void k_mem_domain_init(struct k_mem_domain *domain, u8_t num_parts, domain->num_partitions = 0; (void)memset(domain->partitions, 0, sizeof(domain->partitions)); - if (num_parts) { + if (num_parts != 0) { u32_t i; for (i = 0U; i < num_parts; i++) { diff --git a/kernel/msg_q.c b/kernel/msg_q.c index b20ab0c3f65..69ad7182516 100644 --- a/kernel/msg_q.c +++ b/kernel/msg_q.c @@ -101,7 +101,7 @@ void k_msgq_cleanup(struct k_msgq *q) { __ASSERT_NO_MSG(!_waitq_head(&q->wait_q)); - if (q->flags & K_MSGQ_FLAG_ALLOC) { + if ((q->flags & K_MSGQ_FLAG_ALLOC) != 0) { k_free(q->buffer_start); q->flags &= ~K_MSGQ_FLAG_ALLOC; } diff --git a/kernel/pipes.c b/kernel/pipes.c index 7f375877e16..919ba1fb654 100644 --- a/kernel/pipes.c +++ b/kernel/pipes.c @@ -150,9 +150,9 @@ int _impl_k_pipe_alloc_init(struct k_pipe *pipe, size_t size) void *buffer; int ret; - if (size) { + if (size != 0) { buffer = z_thread_malloc(size); - if (buffer) { + if (buffer != NULL) { k_pipe_init(pipe, buffer, size); pipe->flags = K_PIPE_FLAG_ALLOC; ret = 0; @@ -181,7 +181,7 @@ void k_pipe_cleanup(struct k_pipe *pipe) __ASSERT_NO_MSG(!_waitq_head(&pipe->wait_q.readers)); __ASSERT_NO_MSG(!_waitq_head(&pipe->wait_q.writers)); - if (pipe->flags & K_PIPE_FLAG_ALLOC) { + if ((pipe->flags & 
K_PIPE_FLAG_ALLOC) != 0) { k_free(pipe->buffer); pipe->buffer = NULL; pipe->flags &= ~K_PIPE_FLAG_ALLOC; @@ -415,7 +415,7 @@ static void pipe_thread_ready(struct k_thread *thread) unsigned int key; #if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0) - if (thread->base.thread_state & _THREAD_DUMMY) { + if ((thread->base.thread_state & _THREAD_DUMMY) != 0) { pipe_async_finish((struct k_pipe_async *)thread); return; } diff --git a/kernel/poll.c b/kernel/poll.c index fbdcdeda08b..1d0debe4cd7 100644 --- a/kernel/poll.c +++ b/kernel/poll.c @@ -59,7 +59,7 @@ static inline int is_condition_met(struct k_poll_event *event, u32_t *state) } break; case K_POLL_TYPE_SIGNAL: - if (event->signal->signaled) { + if (event->signal->signaled != 0) { *state = K_POLL_STATE_SIGNALED; return 1; } @@ -183,7 +183,7 @@ int _impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout) int last_registered = -1, rc; unsigned int key; - struct _poller poller = { .thread = _current, .is_polling = 1, }; + struct _poller poller = { .thread = _current, .is_polling = true, }; /* find events whose condition is already fulfilled */ for (int ii = 0; ii < num_events; ii++) { @@ -192,7 +192,7 @@ int _impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout) key = irq_lock(); if (is_condition_met(&events[ii], &state)) { set_event_ready(&events[ii], state); - poller.is_polling = 0; + poller.is_polling = false; } else if (timeout != K_NO_WAIT && poller.is_polling) { rc = register_event(&events[ii], &poller); if (rc == 0) { @@ -217,7 +217,7 @@ int _impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout) return 0; } - poller.is_polling = 0; + poller.is_polling = false; if (timeout == K_NO_WAIT) { irq_unlock(key); @@ -331,7 +331,7 @@ static int signal_poll_event(struct k_poll_event *event, u32_t state) __ASSERT(event->poller->thread != NULL, "poller should have a thread\n"); - event->poller->is_polling = 0; + event->poller->is_polling = false; if (!_is_thread_pending(thread)) { goto
ready_event; diff --git a/kernel/queue.c b/kernel/queue.c index 5e076392a03..3d6ff1727cb 100644 --- a/kernel/queue.c +++ b/kernel/queue.c @@ -237,7 +237,7 @@ void k_queue_append_list(struct k_queue *queue, void *head, void *tail) #if !defined(CONFIG_POLL) struct k_thread *thread = NULL; - if (head) { + if (head != NULL) { thread = _unpend_first_thread(&queue->wait_q); } diff --git a/kernel/thread.c b/kernel/thread.c index 5354e59f57e..189339209ac 100644 --- a/kernel/thread.c +++ b/kernel/thread.c @@ -244,7 +244,7 @@ void _check_stack_sentinel(void) { u32_t *stack; - if (_current->base.thread_state & _THREAD_DUMMY) { + if ((_current->base.thread_state & _THREAD_DUMMY) != 0) { return; } @@ -403,7 +403,7 @@ void _setup_new_thread(struct k_thread *new_thread, new_thread); } - if (options & K_INHERIT_PERMS) { + if ((options & K_INHERIT_PERMS) != 0) { _thread_perms_inherit(_current, new_thread); } #endif diff --git a/kernel/timer.c b/kernel/timer.c index b3d6b8a5bed..544c5b1731f 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -9,6 +9,7 @@ #include #include #include +#include extern struct k_timer _k_timer_list_start[]; extern struct k_timer _k_timer_list_end[]; @@ -64,7 +65,7 @@ void _timer_expiration_handler(struct _timeout *t) timer->status += 1; /* invoke timer expiry function */ - if (timer->expiry_fn) { + if (timer->expiry_fn != NULL) { timer->expiry_fn(timer); } @@ -149,7 +150,7 @@ Z_SYSCALL_HANDLER(k_timer_start, timer, duration_p, period_p) void _impl_k_timer_stop(struct k_timer *timer) { unsigned int key = irq_lock(); - int inactive = (_abort_timeout(&timer->timeout) == _INACTIVE); + bool inactive = (_abort_timeout(&timer->timeout) == _INACTIVE); irq_unlock(key); @@ -157,7 +158,7 @@ void _impl_k_timer_stop(struct k_timer *timer) return; } - if (timer->stop_fn) { + if (timer->stop_fn != NULL) { timer->stop_fn(timer); } diff --git a/kernel/userspace.c b/kernel/userspace.c index 7554893f093..d83033cadfd 100644 --- a/kernel/userspace.c +++ b/kernel/userspace.c 
@@ -174,7 +174,7 @@ static bool _thread_idx_alloc(u32_t *tidx) for (i = 0; i < CONFIG_MAX_THREAD_BYTES; i++) { idx = find_lsb_set(_thread_idx_map[i]); - if (idx) { + if (idx != 0) { *tidx = base + (idx - 1); sys_bitfield_clear_bit((mem_addr_t)_thread_idx_map, @@ -334,7 +334,7 @@ static int thread_index_get(struct k_thread *t) static void unref_check(struct _k_object *ko) { for (int i = 0; i < CONFIG_MAX_THREAD_BYTES; i++) { - if (ko->perms[i]) { + if (ko->perms[i] != 0) { return; } } @@ -360,7 +360,7 @@ static void unref_check(struct _k_object *ko) } #ifdef CONFIG_DYNAMIC_OBJECTS - if (ko->flags & K_OBJ_FLAG_ALLOC) { + if ((ko->flags & K_OBJ_FLAG_ALLOC) != 0) { struct dyn_obj *dyn_obj = CONTAINER_OF(ko, struct dyn_obj, kobj); rb_remove(&obj_rb_tree, &dyn_obj->node); @@ -438,7 +438,7 @@ static int thread_perms_test(struct _k_object *ko) { int index; - if (ko->flags & K_OBJ_FLAG_PUBLIC) { + if ((ko->flags & K_OBJ_FLAG_PUBLIC) != 0) { return 1; } @@ -663,7 +663,7 @@ char *z_user_string_alloc_copy(char *src, size_t maxlen) key = irq_lock(); actual_len = z_user_string_nlen(src, maxlen, &err); - if (err) { + if (err != 0) { goto out; } if (actual_len == maxlen) { @@ -690,7 +690,7 @@ int z_user_string_copy(char *dst, char *src, size_t maxlen) key = irq_lock(); actual_len = z_user_string_nlen(src, maxlen, &err); - if (err) { + if (err != 0) { ret = EFAULT; goto out; } diff --git a/kernel/userspace_handler.c b/kernel/userspace_handler.c index b0659517078..22d77444ca9 100644 --- a/kernel/userspace_handler.c +++ b/kernel/userspace_handler.c @@ -19,7 +19,7 @@ static struct _k_object *validate_any_object(void *obj) * initialized */ ret = _k_object_validate(ko, K_OBJ_ANY, _OBJ_INIT_ANY); - if (ret) { + if (ret != 0) { #ifdef CONFIG_PRINTK _dump_object_error(ret, obj, ko, K_OBJ_ANY); #endif diff --git a/lib/rbtree/rb.c b/lib/rbtree/rb.c index 48682417569..e809c662024 100644 --- a/lib/rbtree/rb.c +++ b/lib/rbtree/rb.c @@ -21,7 +21,7 @@ enum rb_color { RED = 0, BLACK = 1 }; static 
struct rbnode *get_child(struct rbnode *n, int side) { CHECK(n); - if (side) { + if (side != 0) { return n->children[1]; } @@ -34,7 +34,7 @@ static struct rbnode *get_child(struct rbnode *n, int side) static void set_child(struct rbnode *n, int side, void *val) { CHECK(n); - if (side) { + if (side != 0) { n->children[1] = val; } else { uintptr_t old = (uintptr_t) n->children[0]; @@ -87,7 +87,7 @@ static int find_and_stack(struct rbtree *tree, struct rbnode *node, int side = tree->lessthan_fn(node, stack[sz - 1]) ? 0 : 1; struct rbnode *ch = get_child(stack[sz - 1], side); - if (ch) { + if (ch != NULL) { stack[sz++] = ch; } else { break; @@ -400,7 +400,7 @@ void rb_remove(struct rbtree *tree, struct rbnode *node) * pointers, so the stack tracking this structure * needs to be swapped too! */ - if (hiparent) { + if (hiparent != NULL) { set_child(hiparent, get_side(hiparent, node), node2); } else { tree->root = node2; @@ -440,7 +440,7 @@ void rb_remove(struct rbtree *tree, struct rbnode *node) /* Removing the root */ if (stacksz < 2) { tree->root = child; - if (child) { + if (child != NULL) { set_color(child, BLACK); } else { tree->max_depth = 0; @@ -523,7 +523,7 @@ static inline struct rbnode *stack_left_limb(struct rbnode *n, f->stack[f->top] = n; f->is_left[f->top] = 0; - while ((n = get_child(n, 0))) { + while ((n = get_child(n, 0)) != NULL) { f->top++; f->stack[f->top] = n; f->is_left[f->top] = 1; @@ -568,7 +568,7 @@ struct rbnode *_rb_foreach_next(struct rbtree *tree, struct _rb_foreach *f) * above with is_left set to 0, so this condition still works * even if node has no parent). */ - if (f->is_left[f->top]) { + if (f->is_left[f->top] != 0) { return f->stack[--f->top]; } @@ -576,7 +576,7 @@ struct rbnode *_rb_foreach_next(struct rbtree *tree, struct _rb_foreach *f) * parent was already walked, so walk up the stack looking for * a left child (whose parent is unwalked, and thus next). 
*/ - while (f->top > 0 && !f->is_left[f->top]) { + while ((f->top > 0) && (f->is_left[f->top] == 0)) { f->top--; }