
net: ip: net_mgmt: Prevent loss of event

The current implementation doesn't handle re-entrant calls
correctly, causing events to get lost.

Under specific conditions, an event callback invoked by net_mgmt.c
re-enters 'net_mgmt_event_notify_with_info', corrupting the
administration of the internal circular buffer (a minimal sketch of
this path follows the list below).

These conditions are:

- llmnr_iface_event_handler
- mdns_iface_event_handler
- wifi_mgmt_raise_connect_result_event
- dhcpv4_immediate_timeout uses a workaround
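
For illustration only, a minimal sketch of the re-entrant path (the
handler and event names below are hypothetical, not the callers listed
above): mgmt_thread() dispatches an event to a registered callback, and
the callback raises another event, so mgmt_push_event() runs again on
the same thread while the bookkeeping for the event being delivered is
still in progress. Because k_mutex allows recursive locking by the
owning thread, the old net_mgmt_lock does not stop this inner call.

    #include <zephyr/net/net_if.h>
    #include <zephyr/net/net_event.h>
    #include <zephyr/net/net_mgmt.h>

    static struct net_mgmt_event_callback example_cb;

    /* Invoked from mgmt_thread() while the event queue administration
     * of the event being delivered is still being updated.
     */
    static void example_event_handler(struct net_mgmt_event_callback *cb,
                                      uint32_t mgmt_event, struct net_if *iface)
    {
            if (mgmt_event == NET_EVENT_IF_UP) {
                    /* Re-entrant notify: ends up back in mgmt_push_event() */
                    net_mgmt_event_notify(NET_EVENT_IPV4_ADDR_ADD, iface);
            }
    }

    void example_register(void)
    {
            net_mgmt_init_event_callback(&example_cb, example_event_handler,
                                         NET_EVENT_IF_UP);
            net_mgmt_add_event_callback(&example_cb);
    }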

Validated the fix on a Nucleo_H743ZI, using a sample app.

Signed-off-by: Jeroen van Dooren <jeroen.van.dooren@nobleo.nl>
Branch: pull/55029/head
Commit: a63fcb67b6
Author: Jeroen van Dooren, committed by Fabio Baltieri

 subsys/net/ip/net_mgmt.c | 134 changed lines

@@ -19,112 +19,79 @@ LOG_MODULE_REGISTER(net_mgmt, CONFIG_NET_MGMT_EVENT_LOG_LEVEL);
 #include "net_private.h"
 
 struct mgmt_event_entry {
-        uint32_t event;
-        struct net_if *iface;
 #ifdef CONFIG_NET_MGMT_EVENT_INFO
         uint8_t info[NET_EVENT_INFO_MAX_SIZE];
         size_t info_length;
 #endif /* CONFIG_NET_MGMT_EVENT_INFO */
+        uint32_t event;
+        struct net_if *iface;
 };
 
+BUILD_ASSERT((sizeof(struct mgmt_event_entry) % sizeof(uint32_t)) == 0,
+             "The structure must be a multiple of sizeof(uint32_t)");
+
 struct mgmt_event_wait {
         struct k_sem sync_call;
         struct net_if *iface;
 };
 
-static K_SEM_DEFINE(network_event, 0, K_SEM_MAX_LIMIT);
-static K_MUTEX_DEFINE(net_mgmt_lock);
+static K_MUTEX_DEFINE(net_mgmt_callback_lock);
+static K_MUTEX_DEFINE(net_mgmt_event_lock);
 
 K_KERNEL_STACK_DEFINE(mgmt_stack, CONFIG_NET_MGMT_EVENT_STACK_SIZE);
 static struct k_thread mgmt_thread_data;
-static struct mgmt_event_entry events[CONFIG_NET_MGMT_EVENT_QUEUE_SIZE];
 static uint32_t global_event_mask;
 static sys_slist_t event_callbacks = SYS_SLIST_STATIC_INIT(&event_callbacks);
-static int16_t in_event;
-static int16_t out_event;
+
+/* event structure used to prevent increasing the stack usage on the caller thread */
+static struct mgmt_event_entry new_event;
+
+K_MSGQ_DEFINE(event_msgq, sizeof(struct mgmt_event_entry),
+              CONFIG_NET_MGMT_EVENT_QUEUE_SIZE, sizeof(uint32_t));
 
 static inline void mgmt_push_event(uint32_t mgmt_event, struct net_if *iface,
                                    const void *info, size_t length)
 {
-        int16_t i_idx;
-
 #ifndef CONFIG_NET_MGMT_EVENT_INFO
         ARG_UNUSED(info);
         ARG_UNUSED(length);
 #endif /* CONFIG_NET_MGMT_EVENT_INFO */
 
-        (void)k_mutex_lock(&net_mgmt_lock, K_FOREVER);
-
-        i_idx = in_event + 1;
-        if (i_idx == CONFIG_NET_MGMT_EVENT_QUEUE_SIZE) {
-                i_idx = 0;
-        }
+        (void)k_mutex_lock(&net_mgmt_event_lock, K_FOREVER);
+
+        memset(&new_event, 0, sizeof(struct mgmt_event_entry));
 
 #ifdef CONFIG_NET_MGMT_EVENT_INFO
         if (info && length) {
                 if (length <= NET_EVENT_INFO_MAX_SIZE) {
-                        memcpy(events[i_idx].info, info, length);
-                        events[i_idx].info_length = length;
+                        memcpy(new_event.info, info, length);
+                        new_event.info_length = length;
                 } else {
                         NET_ERR("Event %u info length %zu > max size %zu",
                                 mgmt_event, length, NET_EVENT_INFO_MAX_SIZE);
-                        (void)k_mutex_unlock(&net_mgmt_lock);
+                        (void)k_mutex_unlock(&net_mgmt_event_lock);
                         return;
                 }
-        } else {
-                events[i_idx].info_length = 0;
         }
 #endif /* CONFIG_NET_MGMT_EVENT_INFO */
 
-        events[i_idx].event = mgmt_event;
-        events[i_idx].iface = iface;
-
-        if (i_idx == out_event) {
-                uint16_t o_idx = out_event + 1;
-
-                if (o_idx == CONFIG_NET_MGMT_EVENT_QUEUE_SIZE) {
-                        o_idx = 0U;
-                }
-
-                if (events[o_idx].event) {
-                        out_event = o_idx;
-                }
-        } else if (out_event < 0) {
-                out_event = i_idx;
+        new_event.event = mgmt_event;
+        new_event.iface = iface;
+
+        if (k_msgq_put(&event_msgq, &new_event, K_MSEC(10)) != 0) {
+                NET_WARN("Failure to push event (%u), "
+                         "try increasing the 'CONFIG_NET_MGMT_EVENT_QUEUE_SIZE'",
+                         mgmt_event);
         }
 
-        in_event = i_idx;
-
-        (void)k_mutex_unlock(&net_mgmt_lock);
+        (void)k_mutex_unlock(&net_mgmt_event_lock);
 }
 
-static inline struct mgmt_event_entry *mgmt_pop_event(void)
+static inline void mgmt_pop_event(struct mgmt_event_entry *dst)
 {
-        int16_t o_idx;
-
-        if (out_event < 0 || !events[out_event].event) {
-                return NULL;
-        }
-
-        o_idx = out_event;
-        out_event++;
-
-        if (o_idx == in_event) {
-                in_event = -1;
-                out_event = -1;
-        } else if (out_event == CONFIG_NET_MGMT_EVENT_QUEUE_SIZE) {
-                out_event = 0;
-        }
-
-        return &events[o_idx];
-}
-
-static inline void mgmt_clean_event(struct mgmt_event_entry *mgmt_event)
-{
-        mgmt_event->event = 0U;
-        mgmt_event->iface = NULL;
+        do {
+        } while (k_msgq_get(&event_msgq, dst, K_FOREVER) != 0);
 }
 
 static inline void mgmt_add_event_mask(uint32_t event_mask)
@@ -156,7 +123,7 @@ static inline bool mgmt_is_event_handled(uint32_t mgmt_event)
                                   NET_MGMT_GET_COMMAND(mgmt_event)));
 }
 
-static inline void mgmt_run_callbacks(struct mgmt_event_entry *mgmt_event)
+static inline void mgmt_run_callbacks(const struct mgmt_event_entry * const mgmt_event)
 {
         sys_snode_t *prev = NULL;
         struct net_mgmt_event_callback *cb, *tmp;
@@ -222,35 +189,21 @@ static inline void mgmt_run_callbacks(struct mgmt_event_entry *mgmt_event)
 static void mgmt_thread(void)
 {
-        struct mgmt_event_entry *mgmt_event;
+        struct mgmt_event_entry mgmt_event;
 
         while (1) {
-                k_sem_take(&network_event, K_FOREVER);
-
-                (void)k_mutex_lock(&net_mgmt_lock, K_FOREVER);
+                mgmt_pop_event(&mgmt_event);
 
                 NET_DBG("Handling events, forwarding it relevantly");
 
-                mgmt_event = mgmt_pop_event();
-                if (!mgmt_event) {
-                        /* System is over-loaded?
-                         * At this point we have most probably notified
-                         * more events than we could handle
-                         */
-                        NET_DBG("Some event got probably lost (%u)",
-                                k_sem_count_get(&network_event));
-
-                        k_sem_init(&network_event, 0, K_SEM_MAX_LIMIT);
-                        (void)k_mutex_unlock(&net_mgmt_lock);
-
-                        continue;
-                }
-
-                mgmt_run_callbacks(mgmt_event);
+                /* take the lock to prevent changes to the callback structure during use */
+                (void)k_mutex_lock(&net_mgmt_callback_lock, K_FOREVER);
 
-                mgmt_clean_event(mgmt_event);
+                mgmt_run_callbacks(&mgmt_event);
 
-                (void)k_mutex_unlock(&net_mgmt_lock);
+                (void)k_mutex_unlock(&net_mgmt_callback_lock);
 
                 /* forcefully give up our timeslot, to give time to the callback */
                 k_yield();
         }
 }
@@ -312,26 +265,26 @@ void net_mgmt_add_event_callback(struct net_mgmt_event_callback *cb)
 {
         NET_DBG("Adding event callback %p", cb);
 
-        (void)k_mutex_lock(&net_mgmt_lock, K_FOREVER);
+        (void)k_mutex_lock(&net_mgmt_callback_lock, K_FOREVER);
 
         sys_slist_prepend(&event_callbacks, &cb->node);
 
         mgmt_add_event_mask(cb->event_mask);
 
-        (void)k_mutex_unlock(&net_mgmt_lock);
+        (void)k_mutex_unlock(&net_mgmt_callback_lock);
 }
 
 void net_mgmt_del_event_callback(struct net_mgmt_event_callback *cb)
 {
         NET_DBG("Deleting event callback %p", cb);
 
-        (void)k_mutex_lock(&net_mgmt_lock, K_FOREVER);
+        (void)k_mutex_lock(&net_mgmt_callback_lock, K_FOREVER);
 
         sys_slist_find_and_remove(&event_callbacks, &cb->node);
 
         mgmt_rebuild_global_event_mask();
 
-        (void)k_mutex_unlock(&net_mgmt_lock);
+        (void)k_mutex_unlock(&net_mgmt_callback_lock);
 }
 
 void net_mgmt_event_notify_with_info(uint32_t mgmt_event, struct net_if *iface,
@@ -344,7 +297,6 @@ void net_mgmt_event_notify_with_info(uint32_t mgmt_event, struct net_if *iface,
                         NET_MGMT_GET_COMMAND(mgmt_event));
 
                 mgmt_push_event(mgmt_event, iface, info, length);
-                k_sem_give(&network_event);
         }
 }
@@ -377,12 +329,6 @@ int net_mgmt_event_wait_on_iface(struct net_if *iface,
 void net_mgmt_event_init(void)
 {
-        in_event = -1;
-        out_event = -1;
-        (void)memset(events, 0, CONFIG_NET_MGMT_EVENT_QUEUE_SIZE *
-                        sizeof(struct mgmt_event_entry));
-
 #if defined(CONFIG_NET_TC_THREAD_COOPERATIVE)
         /* Lowest priority cooperative thread */
 #define THREAD_PRIORITY K_PRIO_COOP(CONFIG_NUM_COOP_PRIORITIES - 1)
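
The fix relies on Zephyr's k_msgq keeping its own read/write state, so
a push performed from inside the consumer's own callback path simply
enqueues another message instead of touching the reader's bookkeeping.
A standalone sketch of that pattern, using illustrative names that are
not part of net_mgmt.c:

    #include <zephyr/kernel.h>

    struct demo_event {
            uint32_t id;
    };

    K_MSGQ_DEFINE(demo_msgq, sizeof(struct demo_event), 8, sizeof(uint32_t));

    /* Producer: callable from any thread, including from the consumer's
     * handling path. A full queue is reported and the event dropped,
     * mirroring the bounded-wait k_msgq_put() in the diff above.
     */
    static void demo_push(uint32_t id)
    {
            struct demo_event evt = { .id = id };

            if (k_msgq_put(&demo_msgq, &evt, K_MSEC(10)) != 0) {
                    printk("demo: queue full, event %u dropped\n", id);
            }
    }

    /* Consumer: blocks until an event arrives, then handles it; the
     * handler may push again without corrupting the queue.
     */
    static void demo_consumer(void *p1, void *p2, void *p3)
    {
            struct demo_event evt;

            ARG_UNUSED(p1);
            ARG_UNUSED(p2);
            ARG_UNUSED(p3);

            while (1) {
                    k_msgq_get(&demo_msgq, &evt, K_FOREVER);

                    if (evt.id == 1U) {
                            demo_push(2U); /* re-entrant push is safe */
                    }
            }
    }

    K_THREAD_DEFINE(demo_consumer_tid, 1024, demo_consumer,
                    NULL, NULL, NULL, 7, 0, 0);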
