Browse Source

drivers: spi: spi_context.h: remove multithreading dependency

Replace semaphores with proper atomic flags when used without
multithreading enabled.

Signed-off-by: Michal Kozikowski <michal.kozikowski@nordicsemi.no>
pull/90901/head
Michal Kozikowski 2 months ago committed by Benjamin Cabé
parent
commit
df65918cfa
  1. 1
      drivers/spi/Kconfig
  2. 1
      drivers/spi/Kconfig.nrfx
  3. 52
      drivers/spi/spi_context.h
  4. 10
      drivers/spi/spi_nrfx_spi.c
  5. 10
      drivers/spi/spi_nrfx_spim.c
  6. 31
      drivers/spi/spi_nrfx_spis.c

1
drivers/spi/Kconfig

@@ -24,6 +24,7 @@ config SPI_SHELL
config SPI_ASYNC
bool "Asynchronous call support"
depends on MULTITHREADING
select POLL
help
This option enables the asynchronous API calls.

1
drivers/spi/Kconfig.nrfx

@@ -5,7 +5,6 @@ menuconfig SPI_NRFX
bool "nRF SPI nrfx drivers"
default y
depends on SOC_FAMILY_NORDIC_NRF
depends on MULTITHREADING
select GPIO
select PINCTRL
help

52
drivers/spi/spi_context.h

@@ -28,12 +28,21 @@ enum spi_ctx_runtime_op_mode {
struct spi_context {
const struct spi_config *config;
#ifdef CONFIG_MULTITHREADING
const struct spi_config *owner;
#endif
const struct gpio_dt_spec *cs_gpios;
size_t num_cs_gpios;
#ifdef CONFIG_MULTITHREADING
struct k_sem lock;
struct k_sem sync;
#else
/* An atomic flag that signals completed transfer
* when threads are not enabled.
*/
atomic_t ready;
#endif /* CONFIG_MULTITHREADING */
int sync_status;
#ifdef CONFIG_SPI_ASYNC
@@ -106,6 +115,7 @@ static inline void spi_context_lock(struct spi_context *ctx,
void *callback_data,
const struct spi_config *spi_cfg)
{
#ifdef CONFIG_MULTITHREADING
bool already_locked = (spi_cfg->operation & SPI_LOCK_ON) &&
(k_sem_count_get(&ctx->lock) == 0) &&
(ctx->owner == spi_cfg);
@@ -114,6 +124,7 @@ static inline void spi_context_lock(struct spi_context *ctx,
k_sem_take(&ctx->lock, K_FOREVER);
ctx->owner = spi_cfg;
}
#endif /* CONFIG_MULTITHREADING */
#ifdef CONFIG_SPI_ASYNC
ctx->asynchronous = asynchronous;
@@ -131,6 +142,7 @@ static inline void spi_context_lock(struct spi_context *ctx,
*/
static inline void spi_context_release(struct spi_context *ctx, int status)
{
#ifdef CONFIG_MULTITHREADING
#ifdef CONFIG_SPI_SLAVE
if (status >= 0 && (ctx->config->operation & SPI_LOCK_ON)) {
return;
@@ -148,6 +160,7 @@ static inline void spi_context_release(struct spi_context *ctx, int status)
k_sem_give(&ctx->lock);
}
#endif /* CONFIG_SPI_ASYNC */
#endif /* CONFIG_MULTITHREADING */
}
static inline size_t spi_context_total_tx_len(struct spi_context *ctx);
@@ -173,6 +186,7 @@ static inline int spi_context_wait_for_completion(struct spi_context *ctx)
if (wait) {
k_timeout_t timeout;
uint32_t timeout_ms;
/* Do not use any timeout in the slave mode, as in this case
* it is not known when the transfer will actually start and
@@ -180,10 +194,10 @@ static inline int spi_context_wait_for_completion(struct spi_context *ctx)
*/
if (IS_ENABLED(CONFIG_SPI_SLAVE) && spi_context_is_slave(ctx)) {
timeout = K_FOREVER;
timeout_ms = UINT32_MAX;
} else {
uint32_t tx_len = spi_context_total_tx_len(ctx);
uint32_t rx_len = spi_context_total_rx_len(ctx);
uint32_t timeout_ms;
timeout_ms = MAX(tx_len, rx_len) * 8 * 1000 /
ctx->config->frequency;
@@ -191,11 +205,38 @@ static inline int spi_context_wait_for_completion(struct spi_context *ctx)
timeout = K_MSEC(timeout_ms);
}
#ifdef CONFIG_MULTITHREADING
if (k_sem_take(&ctx->sync, timeout)) {
LOG_ERR("Timeout waiting for transfer complete");
return -ETIMEDOUT;
}
#else
if (timeout_ms == UINT32_MAX) {
/* In slave mode, we wait indefinitely, so we can go idle. */
unsigned int key = irq_lock();
while (!atomic_get(&ctx->ready)) {
k_cpu_atomic_idle(key);
key = irq_lock();
}
ctx->ready = 0;
irq_unlock(key);
} else {
const uint32_t tms = k_uptime_get_32();
while (!atomic_get(&ctx->ready) && (k_uptime_get_32() - tms < timeout_ms)) {
k_busy_wait(1);
}
if (!ctx->ready) {
LOG_ERR("Timeout waiting for transfer complete");
return -ETIMEDOUT;
}
ctx->ready = 0;
}
#endif /* CONFIG_MULTITHREADING */
status = ctx->sync_status;
}
@@ -239,10 +280,15 @@ static inline void spi_context_complete(struct spi_context *ctx,
ctx->owner = NULL;
k_sem_give(&ctx->lock);
}
}
#else
ctx->sync_status = status;
#ifdef CONFIG_MULTITHREADING
k_sem_give(&ctx->sync);
#else
atomic_set(&ctx->ready, 1);
#endif /* CONFIG_MULTITHREADING */
#endif /* CONFIG_SPI_ASYNC */
}
@@ -355,10 +401,12 @@ static inline void spi_context_unlock_unconditionally(struct spi_context *ctx)
/* Forcing CS to go to inactive status */
_spi_context_cs_control(ctx, false, true);
#ifdef CONFIG_MULTITHREADING
if (!k_sem_count_get(&ctx->lock)) {
ctx->owner = NULL;
k_sem_give(&ctx->lock);
}
#endif /* CONFIG_MULTITHREADING */
}
/*

10
drivers/spi/spi_nrfx_spi.c

@@ -278,7 +278,11 @@ static int transceive(const struct device *dev,
finish_transaction(dev, -ETIMEDOUT);
/* Clean up the driver state. */
#ifdef CONFIG_MULTITHREADING
k_sem_reset(&dev_data->ctx.sync);
#else
dev_data->ctx.ready = 0;
#endif /* CONFIG_MULTITHREADING */
}
spi_context_cs_control(&dev_data->ctx, false);
@@ -432,8 +436,10 @@ static int spi_nrfx_init(const struct device *dev)
nrfx_isr, nrfx_spi_##idx##_irq_handler, 0); \
} \
static struct spi_nrfx_data spi_##idx##_data = { \
SPI_CONTEXT_INIT_LOCK(spi_##idx##_data, ctx), \
SPI_CONTEXT_INIT_SYNC(spi_##idx##_data, ctx), \
IF_ENABLED(CONFIG_MULTITHREADING, \
(SPI_CONTEXT_INIT_LOCK(spi_##idx##_data, ctx),)) \
IF_ENABLED(CONFIG_MULTITHREADING, \
(SPI_CONTEXT_INIT_SYNC(spi_##idx##_data, ctx),)) \
SPI_CONTEXT_CS_GPIOS_INITIALIZE(SPI(idx), ctx) \
.dev = DEVICE_DT_GET(SPI(idx)), \
.busy = false, \

10
drivers/spi/spi_nrfx_spim.c

@@ -598,7 +598,11 @@ static int transceive(const struct device *dev,
finish_transaction(dev, -ETIMEDOUT);
/* Clean up the driver state. */
#ifdef CONFIG_MULTITHREADING
k_sem_reset(&dev_data->ctx.sync);
#else
dev_data->ctx.ready = 0;
#endif /* CONFIG_MULTITHREADING */
#ifdef CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58
anomaly_58_workaround_clear(dev_data);
#endif
@@ -817,8 +821,10 @@ static int spi_nrfx_init(const struct device *dev)
[CONFIG_SPI_NRFX_RAM_BUFFER_SIZE] \
SPIM_MEMORY_SECTION(idx);)) \
static struct spi_nrfx_data spi_##idx##_data = { \
SPI_CONTEXT_INIT_LOCK(spi_##idx##_data, ctx), \
SPI_CONTEXT_INIT_SYNC(spi_##idx##_data, ctx), \
IF_ENABLED(CONFIG_MULTITHREADING, \
(SPI_CONTEXT_INIT_LOCK(spi_##idx##_data, ctx),)) \
IF_ENABLED(CONFIG_MULTITHREADING, \
(SPI_CONTEXT_INIT_SYNC(spi_##idx##_data, ctx),)) \
SPI_CONTEXT_CS_GPIOS_INITIALIZE(SPIM(idx), ctx) \
IF_ENABLED(SPI_BUFFER_IN_RAM, \
(.tx_buffer = spim_##idx##_tx_buffer, \

31
drivers/spi/spi_nrfx_spis.c

@@ -39,7 +39,11 @@ BUILD_ASSERT(!IS_ENABLED(CONFIG_PM_DEVICE_SYSTEM_MANAGED));
struct spi_nrfx_data {
struct spi_context ctx;
const struct device *dev;
#ifdef CONFIG_MULTITHREADING
struct k_sem wake_sem;
#else
atomic_t woken_up;
#endif
struct gpio_callback wake_cb_data;
};
@@ -193,7 +197,11 @@ static void wake_callback(const struct device *dev, struct gpio_callback *cb,
(void)gpio_pin_interrupt_configure_dt(&dev_config->wake_gpio,
GPIO_INT_DISABLE);
#ifdef CONFIG_MULTITHREADING
k_sem_give(&dev_data->wake_sem);
#else
atomic_set(&dev_data->woken_up, 1);
#endif /* CONFIG_MULTITHREADING */
}
static void wait_for_wake(struct spi_nrfx_data *dev_data,
@@ -206,7 +214,19 @@ static void wait_for_wake(struct spi_nrfx_data *dev_data,
dev_config->wake_gpio.pin) == 0) {
(void)gpio_pin_interrupt_configure_dt(&dev_config->wake_gpio,
GPIO_INT_LEVEL_HIGH);
#ifdef CONFIG_MULTITHREADING
(void)k_sem_take(&dev_data->wake_sem, K_FOREVER);
#else
unsigned int key = irq_lock();
while (!atomic_get(&dev_data->woken_up)) {
k_cpu_atomic_idle(key);
key = irq_lock();
}
dev_data->woken_up = 0;
irq_unlock(key);
#endif /* CONFIG_MULTITHREADING */
}
}
@@ -482,11 +502,14 @@ static int spi_nrfx_init(const struct device *dev)
nrfx_isr, nrfx_spis_##idx##_irq_handler, 0); \
} \
static struct spi_nrfx_data spi_##idx##_data = { \
SPI_CONTEXT_INIT_LOCK(spi_##idx##_data, ctx), \
SPI_CONTEXT_INIT_SYNC(spi_##idx##_data, ctx), \
IF_ENABLED(CONFIG_MULTITHREADING, \
(SPI_CONTEXT_INIT_LOCK(spi_##idx##_data, ctx),)) \
IF_ENABLED(CONFIG_MULTITHREADING, \
(SPI_CONTEXT_INIT_SYNC(spi_##idx##_data, ctx),)) \
.dev = DEVICE_DT_GET(SPIS(idx)), \
.wake_sem = Z_SEM_INITIALIZER( \
spi_##idx##_data.wake_sem, 0, 1), \
IF_ENABLED(CONFIG_MULTITHREADING, \
(.wake_sem = Z_SEM_INITIALIZER( \
spi_##idx##_data.wake_sem, 0, 1),)) \
}; \
PINCTRL_DT_DEFINE(SPIS(idx)); \
static const struct spi_nrfx_config spi_##idx##z_config = { \

Loading…
Cancel
Save