@@ -28,12 +28,21 @@ enum spi_ctx_runtime_op_mode {
struct spi_context {
const struct spi_config * config ;
# ifdef CONFIG_MULTITHREADING
const struct spi_config * owner ;
# endif
const struct gpio_dt_spec * cs_gpios ;
size_t num_cs_gpios ;
# ifdef CONFIG_MULTITHREADING
struct k_sem lock ;
struct k_sem sync ;
# else
/* An atomic flag that signals completed transfer
* when threads are not enabled .
*/
atomic_t ready ;
# endif /* CONFIG_MULTITHREADING */
int sync_status ;
# ifdef CONFIG_SPI_ASYNC
@@ -106,6 +115,7 @@ static inline void spi_context_lock(struct spi_context *ctx,
void * callback_data ,
const struct spi_config * spi_cfg )
{
# ifdef CONFIG_MULTITHREADING
bool already_locked = ( spi_cfg - > operation & SPI_LOCK_ON ) & &
( k_sem_count_get ( & ctx - > lock ) = = 0 ) & &
( ctx - > owner = = spi_cfg ) ;
@@ -114,6 +124,7 @@ static inline void spi_context_lock(struct spi_context *ctx,
k_sem_take ( & ctx - > lock , K_FOREVER ) ;
ctx - > owner = spi_cfg ;
}
# endif /* CONFIG_MULTITHREADING */
# ifdef CONFIG_SPI_ASYNC
ctx - > asynchronous = asynchronous ;
@@ -131,6 +142,7 @@ static inline void spi_context_lock(struct spi_context *ctx,
*/
static inline void spi_context_release ( struct spi_context * ctx , int status )
{
# ifdef CONFIG_MULTITHREADING
# ifdef CONFIG_SPI_SLAVE
if ( status > = 0 & & ( ctx - > config - > operation & SPI_LOCK_ON ) ) {
return ;
@@ -148,6 +160,7 @@ static inline void spi_context_release(struct spi_context *ctx, int status)
k_sem_give ( & ctx - > lock ) ;
}
# endif /* CONFIG_SPI_ASYNC */
# endif /* CONFIG_MULTITHREADING */
}
static inline size_t spi_context_total_tx_len ( struct spi_context * ctx ) ;
@@ -173,6 +186,7 @@ static inline int spi_context_wait_for_completion(struct spi_context *ctx)
if ( wait ) {
k_timeout_t timeout ;
uint32_t timeout_ms ;
/* Do not use any timeout in the slave mode, as in this case
* it is not known when the transfer will actually start and
@@ -180,10 +194,10 @@ static inline int spi_context_wait_for_completion(struct spi_context *ctx)
*/
if ( IS_ENABLED ( CONFIG_SPI_SLAVE ) & & spi_context_is_slave ( ctx ) ) {
timeout = K_FOREVER ;
timeout_ms = UINT32_MAX ;
} else {
uint32_t tx_len = spi_context_total_tx_len ( ctx ) ;
uint32_t rx_len = spi_context_total_rx_len ( ctx ) ;
uint32_t timeout_ms ;
timeout_ms = MAX ( tx_len , rx_len ) * 8 * 1000 /
ctx - > config - > frequency ;
@@ -191,11 +205,38 @@ static inline int spi_context_wait_for_completion(struct spi_context *ctx)
timeout = K_MSEC ( timeout_ms ) ;
}
# ifdef CONFIG_MULTITHREADING
if ( k_sem_take ( & ctx - > sync , timeout ) ) {
LOG_ERR ( " Timeout waiting for transfer complete " ) ;
return - ETIMEDOUT ;
}
# else
if ( timeout_ms = = UINT32_MAX ) {
/* In slave mode, we wait indefinitely, so we can go idle. */
unsigned int key = irq_lock ( ) ;
while ( ! atomic_get ( & ctx - > ready ) ) {
k_cpu_atomic_idle ( key ) ;
key = irq_lock ( ) ;
}
ctx - > ready = 0 ;
irq_unlock ( key ) ;
} else {
const uint32_t tms = k_uptime_get_32 ( ) ;
while ( ! atomic_get ( & ctx - > ready ) & & ( k_uptime_get_32 ( ) - tms < timeout_ms ) ) {
k_busy_wait ( 1 ) ;
}
if ( ! ctx - > ready ) {
LOG_ERR ( " Timeout waiting for transfer complete " ) ;
return - ETIMEDOUT ;
}
ctx - > ready = 0 ;
}
# endif /* CONFIG_MULTITHREADING */
status = ctx - > sync_status ;
}
@@ -239,10 +280,15 @@ static inline void spi_context_complete(struct spi_context *ctx,
ctx - > owner = NULL ;
k_sem_give ( & ctx - > lock ) ;
}
}
# else
ctx - > sync_status = status ;
# ifdef CONFIG_MULTITHREADING
k_sem_give ( & ctx - > sync ) ;
# else
atomic_set ( & ctx - > ready , 1 ) ;
# endif /* CONFIG_MULTITHREADING */
# endif /* CONFIG_SPI_ASYNC */
}
@@ -355,10 +401,12 @@ static inline void spi_context_unlock_unconditionally(struct spi_context *ctx)
/* Forcing CS to go to inactive status */
_spi_context_cs_control ( ctx , false , true ) ;
# ifdef CONFIG_MULTITHREADING
if ( ! k_sem_count_get ( & ctx - > lock ) ) {
ctx - > owner = NULL ;
k_sem_give ( & ctx - > lock ) ;
}
# endif /* CONFIG_MULTITHREADING */
}
/*