@@ -19,112 +19,79 @@ LOG_MODULE_REGISTER(net_mgmt, CONFIG_NET_MGMT_EVENT_LOG_LEVEL);
# include "net_private.h"
# include "net_private.h"
struct mgmt_event_entry {
struct mgmt_event_entry {
uint32_t event ;
struct net_if * iface ;
# ifdef CONFIG_NET_MGMT_EVENT_INFO
# ifdef CONFIG_NET_MGMT_EVENT_INFO
uint8_t info [ NET_EVENT_INFO_MAX_SIZE ] ;
uint8_t info [ NET_EVENT_INFO_MAX_SIZE ] ;
size_t info_length ;
size_t info_length ;
# endif /* CONFIG_NET_MGMT_EVENT_INFO */
# endif /* CONFIG_NET_MGMT_EVENT_INFO */
uint32_t event ;
struct net_if * iface ;
} ;
} ;
BUILD_ASSERT ( ( sizeof ( struct mgmt_event_entry ) % sizeof ( uint32_t ) ) = = 0 ,
" The structure must be a multiple of sizeof(uint32_t) " ) ;
 struct mgmt_event_wait {
	struct k_sem sync_call;
	struct net_if *iface;
 };
 
-static K_SEM_DEFINE(network_event, 0, K_SEM_MAX_LIMIT);
-static K_MUTEX_DEFINE(net_mgmt_lock);
+static K_MUTEX_DEFINE(net_mgmt_callback_lock);
+static K_MUTEX_DEFINE(net_mgmt_event_lock);
 
 K_KERNEL_STACK_DEFINE(mgmt_stack, CONFIG_NET_MGMT_EVENT_STACK_SIZE);
 static struct k_thread mgmt_thread_data;
-static struct mgmt_event_entry events[CONFIG_NET_MGMT_EVENT_QUEUE_SIZE];
 static uint32_t global_event_mask;
 static sys_slist_t event_callbacks = SYS_SLIST_STATIC_INIT(&event_callbacks);
-static int16_t in_event;
-static int16_t out_event;
+
+/* event structure used to prevent increasing the stack usage on the caller thread */
+static struct mgmt_event_entry new_event;
+K_MSGQ_DEFINE(event_msgq, sizeof(struct mgmt_event_entry),
+	      CONFIG_NET_MGMT_EVENT_QUEUE_SIZE, sizeof(uint32_t));
 
 static inline void mgmt_push_event(uint32_t mgmt_event, struct net_if *iface,
				    const void *info, size_t length)
 {
-	int16_t i_idx;
-
 #ifndef CONFIG_NET_MGMT_EVENT_INFO
	ARG_UNUSED(info);
	ARG_UNUSED(length);
 #endif /* CONFIG_NET_MGMT_EVENT_INFO */
 
-	(void)k_mutex_lock(&net_mgmt_lock, K_FOREVER);
+	(void)k_mutex_lock(&net_mgmt_event_lock, K_FOREVER);
 
-	i_idx = in_event + 1;
-	if (i_idx == CONFIG_NET_MGMT_EVENT_QUEUE_SIZE) {
-		i_idx = 0;
-	}
+	memset(&new_event, 0, sizeof(struct mgmt_event_entry));
 
 #ifdef CONFIG_NET_MGMT_EVENT_INFO
	if (info && length) {
		if (length <= NET_EVENT_INFO_MAX_SIZE) {
-			memcpy(events[i_idx].info, info, length);
-			events[i_idx].info_length = length;
+			memcpy(new_event.info, info, length);
+			new_event.info_length = length;
		} else {
			NET_ERR("Event %u info length %zu > max size %zu",
				mgmt_event, length, NET_EVENT_INFO_MAX_SIZE);
-			(void)k_mutex_unlock(&net_mgmt_lock);
+			(void)k_mutex_unlock(&net_mgmt_event_lock);
 
			return;
		}
-	} else {
-		events[i_idx].info_length = 0;
	}
 #endif /* CONFIG_NET_MGMT_EVENT_INFO */
 
-	events[i_idx].event = mgmt_event;
-	events[i_idx].iface = iface;
+	new_event.event = mgmt_event;
+	new_event.iface = iface;
 
-	if (i_idx == out_event) {
-		uint16_t o_idx = out_event + 1;
-
-		if (o_idx == CONFIG_NET_MGMT_EVENT_QUEUE_SIZE) {
-			o_idx = 0U;
-		}
-
-		if (events[o_idx].event) {
-			out_event = o_idx;
-		}
-	} else if (out_event < 0) {
-		out_event = i_idx;
-	}
-
-	in_event = i_idx;
-
-	(void)k_mutex_unlock(&net_mgmt_lock);
-}
-
-static inline struct mgmt_event_entry *mgmt_pop_event(void)
-{
-	int16_t o_idx;
-
-	if (out_event < 0 || !events[out_event].event) {
-		return NULL;
-	}
-
-	o_idx = out_event;
-	out_event++;
-
-	if (o_idx == in_event) {
-		in_event = -1;
-		out_event = -1;
-	} else if (out_event == CONFIG_NET_MGMT_EVENT_QUEUE_SIZE) {
-		out_event = 0;
-	}
-
-	return &events[o_idx];
+	if (k_msgq_put(&event_msgq, &new_event, K_MSEC(10)) != 0) {
+		NET_WARN("Failure to push event (%u), "
+			 "try increasing the 'CONFIG_NET_MGMT_EVENT_QUEUE_SIZE'",
+			 mgmt_event);
+	}
+
+	(void)k_mutex_unlock(&net_mgmt_event_lock);
 }
 
-static inline void mgmt_clean_event(struct mgmt_event_entry *mgmt_event)
+static inline void mgmt_pop_event(struct mgmt_event_entry *dst)
 {
-	mgmt_event->event = 0U;
-	mgmt_event->iface = NULL;
+	do {
+	} while (k_msgq_get(&event_msgq, dst, K_FOREVER) != 0);
 }
 
 static inline void mgmt_add_event_mask(uint32_t event_mask)
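
Note (not part of the patch): the rewritten mgmt_push_event()/mgmt_pop_event() above lean entirely on Zephyr's k_msgq API, which copies each message by value into a statically allocated ring of fixed-size slots, lets the producer time out when the queue is full, and lets the consumer block until data arrives. A minimal sketch of that pattern follows; the names (my_event, my_msgq, producer, consumer) are invented for illustration.

#include <zephyr/kernel.h>

struct my_event {
	uint32_t code;
	void *data;
};

/* Queue of 8 entries; k_msgq copies entries in and out, so no allocation
 * and no pointers into caller stacks are involved.
 */
K_MSGQ_DEFINE(my_msgq, sizeof(struct my_event), 8, sizeof(uint32_t));

void producer(uint32_t code, void *data)
{
	struct my_event evt = { .code = code, .data = data };

	/* Give up after 10 ms if the queue is full, mirroring the
	 * K_MSEC(10) timeout used by mgmt_push_event() above.
	 */
	if (k_msgq_put(&my_msgq, &evt, K_MSEC(10)) != 0) {
		/* queue full: event dropped */
	}
}

void consumer(void)
{
	struct my_event evt;

	/* Block until an event arrives, like mgmt_pop_event() above. */
	(void)k_msgq_get(&my_msgq, &evt, K_FOREVER);
}

Because k_msgq_put() copies the message, the static new_event staging buffer only needs to stay valid until the call returns, which is what makes guarding it with net_mgmt_event_lock sufficient.
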
@@ -156,7 +123,7 @@ static inline bool mgmt_is_event_handled(uint32_t mgmt_event)
			 NET_MGMT_GET_COMMAND(mgmt_event)));
 }
 
-static inline void mgmt_run_callbacks(struct mgmt_event_entry *mgmt_event)
+static inline void mgmt_run_callbacks(const struct mgmt_event_entry * const mgmt_event)
 {
	sys_snode_t *prev = NULL;
	struct net_mgmt_event_callback *cb, *tmp;
@@ -222,35 +189,21 @@ static inline void mgmt_run_callbacks(struct mgmt_event_entry *mgmt_event)
 static void mgmt_thread(void)
 {
-	struct mgmt_event_entry *mgmt_event;
+	struct mgmt_event_entry mgmt_event;
 
	while (1) {
-		k_sem_take(&network_event, K_FOREVER);
-		(void)k_mutex_lock(&net_mgmt_lock, K_FOREVER);
+		mgmt_pop_event(&mgmt_event);
 
		NET_DBG("Handling events, forwarding it relevantly");
 
-		mgmt_event = mgmt_pop_event();
-		if (!mgmt_event) {
-			/* System is over-loaded?
-			 * At this point we have most probably notified
-			 * more events than we could handle
-			 */
-			NET_DBG("Some event got probably lost (%u)",
-				k_sem_count_get(&network_event));
-
-			k_sem_init(&network_event, 0, K_SEM_MAX_LIMIT);
-			(void)k_mutex_unlock(&net_mgmt_lock);
-
-			continue;
-		}
-
-		mgmt_run_callbacks(mgmt_event);
-
-		mgmt_clean_event(mgmt_event);
-
-		(void)k_mutex_unlock(&net_mgmt_lock);
+		/* take the lock to prevent changes to the callback structure during use */
+		(void)k_mutex_lock(&net_mgmt_callback_lock, K_FOREVER);
+
+		mgmt_run_callbacks(&mgmt_event);
+
+		(void)k_mutex_unlock(&net_mgmt_callback_lock);
 
+		/* forcefully give up our timeslot, to give time to the callback */
		k_yield();
	}
 }
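
Aside (illustration, not part of the patch): mgmt_thread() now sleeps inside k_msgq_get() via mgmt_pop_event() and holds the new net_mgmt_callback_lock only while the callback list is walked, so event producers, which only ever take net_mgmt_event_lock, no longer contend with callback registration. The consumer-facing API is unchanged; for reference, a typical listener looks roughly like this (handler body and names are made up; the handler signature shown is the 32-bit-event form from this era of the API):

#include <zephyr/net/net_if.h>
#include <zephyr/net/net_mgmt.h>
#include <zephyr/net/net_event.h>

static struct net_mgmt_event_callback iface_cb;

/* Runs in the mgmt_thread context shown above, with net_mgmt_callback_lock
 * held for the duration of the callback walk, so keep it short.
 */
static void iface_up_handler(struct net_mgmt_event_callback *cb,
			     uint32_t mgmt_event, struct net_if *iface)
{
	if (mgmt_event == NET_EVENT_IF_UP) {
		/* react to the interface coming up */
	}
}

void register_iface_events(void)
{
	net_mgmt_init_event_callback(&iface_cb, iface_up_handler, NET_EVENT_IF_UP);
	net_mgmt_add_event_callback(&iface_cb);
}
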
@@ -312,26 +265,26 @@ void net_mgmt_add_event_callback(struct net_mgmt_event_callback *cb)
 {
	NET_DBG("Adding event callback %p", cb);
 
-	(void)k_mutex_lock(&net_mgmt_lock, K_FOREVER);
+	(void)k_mutex_lock(&net_mgmt_callback_lock, K_FOREVER);
 
	sys_slist_prepend(&event_callbacks, &cb->node);
 
	mgmt_add_event_mask(cb->event_mask);
 
-	(void)k_mutex_unlock(&net_mgmt_lock);
+	(void)k_mutex_unlock(&net_mgmt_callback_lock);
 }
 
 void net_mgmt_del_event_callback(struct net_mgmt_event_callback *cb)
 {
	NET_DBG("Deleting event callback %p", cb);
 
-	(void)k_mutex_lock(&net_mgmt_lock, K_FOREVER);
+	(void)k_mutex_lock(&net_mgmt_callback_lock, K_FOREVER);
 
	sys_slist_find_and_remove(&event_callbacks, &cb->node);
 
	mgmt_rebuild_global_event_mask();
 
-	(void)k_mutex_unlock(&net_mgmt_lock);
+	(void)k_mutex_unlock(&net_mgmt_callback_lock);
 }
 
 void net_mgmt_event_notify_with_info(uint32_t mgmt_event, struct net_if *iface,
@@ -344,7 +297,6 @@ void net_mgmt_event_notify_with_info(uint32_t mgmt_event, struct net_if *iface,
			NET_MGMT_GET_COMMAND(mgmt_event));
 
		mgmt_push_event(mgmt_event, iface, info, length);
-		k_sem_give(&network_event);
	}
 }
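
One more illustrative aside: with the counting semaphore gone, net_mgmt_event_notify_with_info() only has to enqueue; the k_msgq itself wakes the handler thread. From a caller's point of view nothing changes: the event (and any info blob, which must fit NET_EVENT_INFO_MAX_SIZE and requires CONFIG_NET_MGMT_EVENT_INFO) is copied into the queue, so stack-local payloads remain safe to pass. A caller-side sketch follows; treating struct in_addr as the info payload of NET_EVENT_IPV4_ADDR_ADD is my assumption of a plausible example, so check net_event.h for the layout a given event actually carries.

#include <zephyr/net/net_if.h>
#include <zephyr/net/net_mgmt.h>
#include <zephyr/net/net_event.h>

void notify_examples(struct net_if *iface)
{
	struct in_addr addr = { { { 192, 0, 2, 1 } } };

	/* Plain notification: only event + iface are copied into the queue. */
	net_mgmt_event_notify(NET_EVENT_IF_UP, iface);

	/* Notification carrying an info blob: the blob is copied as well,
	 * so a stack variable is fine even though delivery is asynchronous.
	 */
	net_mgmt_event_notify_with_info(NET_EVENT_IPV4_ADDR_ADD, iface,
					&addr, sizeof(addr));
}
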
@@ -377,12 +329,6 @@ int net_mgmt_event_wait_on_iface(struct net_if *iface,
 void net_mgmt_event_init(void)
 {
-	in_event = -1;
-	out_event = -1;
-	(void)memset(events, 0, CONFIG_NET_MGMT_EVENT_QUEUE_SIZE *
-		     sizeof(struct mgmt_event_entry));
-
 #if defined(CONFIG_NET_TC_THREAD_COOPERATIVE)
	/* Lowest priority cooperative thread */
 #define THREAD_PRIORITY K_PRIO_COOP(CONFIG_NUM_COOP_PRIORITIES - 1)