@@ -20,10 +20,12 @@ LOG_MODULE_DECLARE(pm_device, CONFIG_PM_DEVICE_LOG_LEVEL);
#define PM_DOMAIN(_pm) NULL
#endif
#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
#ifdef CONFIG_PM_DEVICE_RUNTIME_USE_DEDICATED_WQ
K_THREAD_STACK_DEFINE(pm_device_runtime_stack, CONFIG_PM_DEVICE_RUNTIME_DEDICATED_WQ_STACK_SIZE);
static struct k_work_q pm_device_runtime_wq;
#endif /* CONFIG_PM_DEVICE_RUNTIME_USE_DEDICATED_WQ */
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */
#define EVENT_STATE_ACTIVE	BIT(PM_DEVICE_STATE_ACTIVE)
#define EVENT_STATE_SUSPENDED	BIT(PM_DEVICE_STATE_SUSPENDED)
@@ -84,12 +86,14 @@ static int runtime_suspend(const struct device *dev, bool async,
	if (async) {
		/* queue suspend */
#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
		pm->base.state = PM_DEVICE_STATE_SUSPENDING;
#ifdef CONFIG_PM_DEVICE_RUNTIME_USE_SYSTEM_WQ
		(void)k_work_schedule(&pm->work, delay);
#else
		(void)k_work_schedule_for_queue(&pm_device_runtime_wq, &pm->work, delay);
#endif /* CONFIG_PM_DEVICE_RUNTIME_USE_SYSTEM_WQ */
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */
	} else {
		/* suspend now */
		ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND);
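Note: for context, a minimal caller-side sketch of the two branches above; the helper name, the I/O placeholder and the 100 ms delay are illustrative assumptions, not part of this patch.

#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/pm/device_runtime.h>

/* Hypothetical helper exercising both suspend paths of runtime_suspend(). */
static int transfer_and_release(const struct device *dev)
{
	int ret = pm_device_runtime_get(dev);	/* resume the device synchronously */

	if (ret < 0) {
		return ret;
	}

	/* ... perform I/O with the device ... */

	/* Synchronous path: action_cb() runs as soon as usage drops to zero. */
	return pm_device_runtime_put(dev);

	/* Asynchronous alternative: defer the suspend to pm->work, which
	 * runtime_suspend() schedules on the system or dedicated workqueue:
	 *
	 *	return pm_device_runtime_put_async(dev, K_MSEC(100));
	 */
}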
@@ -109,6 +113,7 @@ unlock:
	return ret;
}
#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
static void runtime_suspend_work(struct k_work *work)
{
	int ret;
@@ -138,6 +143,7 @@ static void runtime_suspend_work(struct k_work *work)
	__ASSERT(ret == 0, "Could not suspend device (%d)", ret);
}
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */
static int get_sync_locked(const struct device *dev)
{
@@ -235,6 +241,7 @@ int pm_device_runtime_get(const struct device *dev)
	pm->base.usage++;
#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
	/*
	 * Check if the device has a pending suspend operation (not started
	 * yet) and cancel it. This way we avoid unnecessary operations because
@@ -260,6 +267,7 @@ int pm_device_runtime_get(const struct device *dev)
			(void)k_sem_take(&pm->lock, K_FOREVER);
		}
	}
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */
	if (pm->base.usage > 1U) {
		goto unlock;
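Note: this cancellation is what keeps a quick put_async()/get() sequence cheap when async support is enabled; a hedged sketch of that sequence follows (same headers as the earlier sketch, delay value illustrative).

/* Hypothetical sequence: with CONFIG_PM_DEVICE_RUNTIME_ASYNC=y the get() call
 * below cancels the still-pending work item instead of letting the hardware
 * suspend and resume.
 */
static void quick_reuse(const struct device *dev)
{
	(void)pm_device_runtime_put_async(dev, K_MSEC(50));

	/* Reused before the delayed work runs: usage goes back to 1 and
	 * k_work_cancel_delayable() drops the queued suspend.
	 */
	(void)pm_device_runtime_get(dev);
}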
@@ -358,6 +366,7 @@ int pm_device_runtime_put(const struct device *dev)
int pm_device_runtime_put_async(const struct device *dev, k_timeout_t delay)
{
#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
	int ret;
	if (dev->pm_base == NULL) {
@@ -378,6 +387,10 @@ int pm_device_runtime_put_async(const struct device *dev, k_timeout_t delay)
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_put_async, dev, delay, ret);
	return ret;
#else
	LOG_WRN("Function not available");
	return -ENOSYS;
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */
}
__boot_func
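Note: because pm_device_runtime_put_async() now compiles to a stub returning -ENOSYS when CONFIG_PM_DEVICE_RUNTIME_ASYNC is disabled, callers that must build in both configurations can fall back to the synchronous path; a sketch under that assumption (wrapper name hypothetical, headers as in the earlier sketch plus <errno.h>).

/* Hypothetical wrapper: degrade gracefully to a synchronous put when the
 * asynchronous API is compiled out.
 */
static int put_maybe_async(const struct device *dev, k_timeout_t delay)
{
	int ret = pm_device_runtime_put_async(dev, delay);

	if (ret == -ENOSYS) {
		ret = pm_device_runtime_put(dev);
	}

	return ret;
}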
@@ -449,7 +462,9 @@ int pm_device_runtime_enable(const struct device *dev)
	/* lazy init of PM fields */
	if (pm->dev == NULL) {
		pm->dev = dev;
#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
		k_work_init_delayable(&pm->work, runtime_suspend_work);
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */
	}
	if (pm->base.state == PM_DEVICE_STATE_ACTIVE) {
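Note: the lazy init above is reached through pm_device_runtime_enable(); a minimal driver-side sketch, with the init function name being an assumption.

/* Hypothetical driver init: enabling runtime PM triggers the lazy init of
 * pm->dev and, with async support, of the delayable work item.
 */
static int my_drv_init(const struct device *dev)
{
	/* ... configure the hardware ... */

	return pm_device_runtime_enable(dev);
}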
@@ -522,6 +537,7 @@ int pm_device_runtime_disable(const struct device *dev)
		(void)k_sem_take(&pm->lock, K_FOREVER);
	}
#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
	if (!k_is_pre_kernel()) {
		if ((pm->base.state == PM_DEVICE_STATE_SUSPENDING) &&
		    ((k_work_cancel_delayable(&pm->work) & K_WORK_RUNNING) == 0)) {
@@ -539,6 +555,7 @@ int pm_device_runtime_disable(const struct device *dev)
			(void)k_sem_take(&pm->lock, K_FOREVER);
		}
	}
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */
	/* wake up the device if suspended */
	if (pm->base.state == PM_DEVICE_STATE_SUSPENDED) {
@@ -549,8 +566,9 @@ int pm_device_runtime_disable(const struct device *dev)
		pm->base.state = PM_DEVICE_STATE_ACTIVE;
	}
#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
clear_bit:
#endif
	atomic_clear_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED);
unlock:
@@ -580,6 +598,7 @@ int pm_device_runtime_usage(const struct device *dev)
	return dev->pm_base->usage;
}
#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
#ifdef CONFIG_PM_DEVICE_RUNTIME_USE_DEDICATED_WQ
static int pm_device_runtime_wq_init(void)
@@ -599,3 +618,4 @@ SYS_INIT(pm_device_runtime_wq_init, POST_KERNEL,
	 CONFIG_PM_DEVICE_RUNTIME_DEDICATED_WQ_INIT_PRIO);
#endif /* CONFIG_PM_DEVICE_RUNTIME_USE_DEDICATED_WQ */
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */
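Note: the body of pm_device_runtime_wq_init() falls outside this hunk; below is a hedged sketch of what such an init is expected to do with the stack defined at the top of the file. The priority symbol CONFIG_PM_DEVICE_RUNTIME_DEDICATED_WQ_PRIO and the queue name are assumptions, and the function is renamed to make clear it is only an illustration.

#include <zephyr/kernel.h>

/* Sketch only: start the dedicated queue on pm_device_runtime_stack. */
static int pm_device_runtime_wq_init_sketch(void)
{
	const struct k_work_queue_config cfg = {
		.name = "pm_device_runtime_wq",	/* assumed thread name */
	};

	k_work_queue_init(&pm_device_runtime_wq);
	k_work_queue_start(&pm_device_runtime_wq, pm_device_runtime_stack,
			   K_THREAD_STACK_SIZEOF(pm_device_runtime_stack),
			   CONFIG_PM_DEVICE_RUNTIME_DEDICATED_WQ_PRIO,	/* assumed Kconfig symbol */
			   &cfg);

	return 0;
}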