
drivers: spi_xlnx_axi_quadspi: Reduce IRQ work

This driver could end up doing a great deal of work inside the ISR when
large SPI transfers were in use, which could cause significant IRQ
latency. For the normal, non-async SPI transfer case, use events to
signal the calling thread to complete the work rather than performing
FIFO transfers inside the ISR.

Signed-off-by: Robert Hancock <robert.hancock@calian.com>
Branch: pull/74334/head
Robert Hancock authored 1 year ago, committed by Anas Nashif
commit 680fa154bf

2 changed files:

  1. drivers/spi/Kconfig.xlnx (1 line changed)
  2. drivers/spi/spi_xlnx_axi_quadspi.c (128 lines changed)
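In outline, the fix moves FIFO servicing out of interrupt context: the ISR now just posts a kernel event, and the thread that initiated the transfer waits on that event and does the FIFO work itself. Below is a minimal sketch of that pattern using the same k_event calls as the diff; every name in it (fifo_empty_isr, start_next_chunk, drain_rx_fifo, XFER_CHUNK_DONE) is illustrative, not the driver's.

#include <zephyr/kernel.h>
#include <errno.h>

#define XFER_CHUNK_DONE BIT(0)		/* illustrative event bit */

static struct k_event xfer_event;	/* k_event_init() once at startup */

static bool start_next_chunk(void);	/* hypothetical: returns true when all done */
static void drain_rx_fifo(void);	/* hypothetical FIFO servicing */

/* ISR side: do the bare minimum and wake the waiting thread. */
static void fifo_empty_isr(const void *arg)
{
	ARG_UNUSED(arg);
	k_event_post(&xfer_event, XFER_CHUNK_DONE);
}

/* Thread side: block between chunks instead of looping inside the ISR. */
static int do_transfer_sync(void)
{
	for (;;) {
		k_event_clear(&xfer_event, XFER_CHUNK_DONE);

		if (start_next_chunk()) {
			return 0;	/* transfer complete */
		}

		if (!k_event_wait(&xfer_event, XFER_CHUNK_DONE, false,
				  K_MSEC(20))) {
			return -ETIMEDOUT;	/* interrupt never arrived */
		}

		drain_rx_fifo();
	}
}

The IRQ handler's worst case becomes a single register acknowledge plus one event post, regardless of transfer size; all per-word work runs at thread priority.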

drivers/spi/Kconfig.xlnx (1 line changed):

@@ -7,5 +7,6 @@ config SPI_XLNX_AXI_QUADSPI
 	bool "Xilinx AXI Quad SPI driver"
 	default y
 	depends on DT_HAS_XLNX_XPS_SPI_2_00_A_ENABLED
+	select EVENTS
 	help
 	  Enable Xilinx AXI Quad SPI v3.2 driver.
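The new select line is needed because the driver now calls the kernel event API unconditionally, and those functions are only built when CONFIG_EVENTS is enabled; selecting it from the driver symbol spares users from enabling it by hand. A sketch (not driver code) of the calls in question:

#include <zephyr/kernel.h>

/* These k_event calls only link when CONFIG_EVENTS=y, which the
 * "select EVENTS" above guarantees whenever this driver is enabled.
 */
static struct k_event ev;

static void events_api_example(void)
{
	k_event_init(&ev);
	k_event_post(&ev, BIT(0));
	(void)k_event_wait(&ev, BIT(0), false, K_NO_WAIT);
	k_event_clear(&ev, BIT(0));
}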

drivers/spi/spi_xlnx_axi_quadspi.c (128 lines changed):

@@ -11,6 +11,7 @@
 #include <zephyr/sys/sys_io.h>
 #include <zephyr/logging/log.h>
 #include <zephyr/irq.h>
+#include <zephyr/kernel.h>

 LOG_MODULE_REGISTER(xlnx_quadspi, CONFIG_SPI_LOG_LEVEL);

 #include "spi_context.h"
@@ -94,6 +95,7 @@ struct xlnx_quadspi_config {
 struct xlnx_quadspi_data {
 	struct spi_context ctx;
+	struct k_event dtr_empty;
 };

 static inline uint32_t xlnx_quadspi_read32(const struct device *dev,
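The event object lives in per-instance driver data rather than in a global, so multiple controller instances cannot wake each other spuriously. It must be initialized once before the first post/wait, which the commit does in xlnx_quadspi_init() (final hunk below); a sketch of that step, assuming the structures from this diff:

/* Sketch only: mirrors the k_event_init() call added in the final hunk. */
static int example_init(const struct device *dev)
{
	struct xlnx_quadspi_data *data = dev->data;

	k_event_init(&data->dtr_empty);	/* required before post/wait */
	return 0;
}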
@@ -227,7 +229,7 @@ static int xlnx_quadspi_configure(const struct device *dev,
 	return 0;
 }

-static void xlnx_quadspi_start_tx(const struct device *dev)
+static bool xlnx_quadspi_start_tx(const struct device *dev)
 {
 	const struct xlnx_quadspi_config *config = dev->config;
 	struct xlnx_quadspi_data *data = dev->data;
@@ -237,6 +239,7 @@ static void xlnx_quadspi_start_tx(const struct device *dev)
 	uint32_t spisr;
 	uint32_t dtr = 0U;
 	uint32_t fifo_avail_words = config->fifo_size ? config->fifo_size : 1;
+	bool complete = false;

 	if (!spi_context_tx_on(ctx) && !spi_context_rx_on(ctx)) {
 		/* All done, de-assert slave select */
@@ -250,7 +253,8 @@ static void xlnx_quadspi_start_tx(const struct device *dev)
 		}

 		spi_context_complete(ctx, dev, 0);
-		return;
+		complete = true;
+		return complete;
 	}

 	if (!IS_ENABLED(CONFIG_SPI_SLAVE) || !spi_context_is_slave(ctx)) {
@@ -318,6 +322,7 @@ static void xlnx_quadspi_start_tx(const struct device *dev)
 			     SPICR_OFFSET);
 		spi_context_complete(ctx, dev, -ENOTSUP);
+		complete = true;
 	}

 	if (!IS_ENABLED(CONFIG_SPI_SLAVE) || !spi_context_is_slave(ctx)) {
@@ -325,6 +330,47 @@ static void xlnx_quadspi_start_tx(const struct device *dev)
 		spicr &= ~(SPICR_MASTER_XFER_INH);
 		xlnx_quadspi_write32(dev, spicr, SPICR_OFFSET);
 	}
+
+	return complete;
 }

+static void xlnx_quadspi_read_fifo(const struct device *dev)
+{
+	const struct xlnx_quadspi_config *config = dev->config;
+	struct xlnx_quadspi_data *data = dev->data;
+	struct spi_context *ctx = &data->ctx;
+	uint32_t spisr = xlnx_quadspi_read32(dev, SPISR_OFFSET);
+	/* RX FIFO occupancy register only exists if FIFO is implemented */
+	uint32_t rx_fifo_words = config->fifo_size ?
+		xlnx_quadspi_read32(dev, SPI_RX_FIFO_OCR_OFFSET) + 1 : 1;
+
+	/* Read RX data */
+	while (!(spisr & SPISR_RX_EMPTY)) {
+		uint32_t drr = xlnx_quadspi_read32(dev, SPI_DRR_OFFSET);
+
+		if (spi_context_rx_buf_on(ctx)) {
+			switch (config->num_xfer_bytes) {
+			case 1:
+				UNALIGNED_PUT(drr, (uint8_t *)ctx->rx_buf);
+				break;
+			case 2:
+				UNALIGNED_PUT(drr, (uint16_t *)ctx->rx_buf);
+				break;
+			case 4:
+				UNALIGNED_PUT(drr, (uint32_t *)ctx->rx_buf);
+				break;
+			default:
+				__ASSERT(0, "unsupported num_xfer_bytes");
+			}
+		}
+
+		spi_context_update_rx(ctx, config->num_xfer_bytes, 1);
+
+		if (--rx_fifo_words == 0) {
+			spisr = xlnx_quadspi_read32(dev, SPISR_OFFSET);
+			rx_fifo_words = config->fifo_size ?
+				xlnx_quadspi_read32(dev, SPI_RX_FIFO_OCR_OFFSET) + 1 : 1;
+		}
+	}
+}

 static int xlnx_quadspi_transceive(const struct device *dev,
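One detail worth calling out in the new xlnx_quadspi_read_fifo() above: on this core the RX FIFO occupancy register reports occupancy minus one (hence the "+ 1"), and the register only exists when the core is configured with FIFOs, hence the fallback to a single word. A hypothetical helper (not in the commit) spelling out that convention:

/* Hypothetical helper: how many words the RX FIFO currently holds.
 * SPI_RX_FIFO_OCR_OFFSET reads as (occupancy - 1) and is only
 * implemented when the core has FIFOs; without FIFOs exactly one
 * word is available per DTR-empty interrupt.
 */
static uint32_t xlnx_quadspi_rx_words(const struct device *dev)
{
	const struct xlnx_quadspi_config *config = dev->config;

	return config->fifo_size ?
	       xlnx_quadspi_read32(dev, SPI_RX_FIFO_OCR_OFFSET) + 1 : 1;
}

Note that the loop still treats SPISR_RX_EMPTY as the authoritative stop condition and only re-reads the occupancy once the cached count runs out, keeping register accesses to a minimum.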
@@ -352,7 +398,27 @@ static int xlnx_quadspi_transceive(const struct device *dev,
 	xlnx_quadspi_cs_control(dev, true);

-	xlnx_quadspi_start_tx(dev);
+	while (true) {
+		k_event_clear(&data->dtr_empty, 1);
+		bool complete = xlnx_quadspi_start_tx(dev);
+
+		if (complete || async) {
+			break;
+		}
+
+		/**
+		 * 20ms should be long enough for 256 byte FIFO at any
+		 * reasonable clock speed.
+		 */
+		if (!k_event_wait(&data->dtr_empty, 1, false,
+				  K_MSEC(20 + CONFIG_SPI_COMPLETION_TIMEOUT_TOLERANCE))) {
+			/* Timeout */
+			LOG_ERR("DTR empty timeout");
+			spi_context_complete(ctx, dev, -ETIMEDOUT);
+			break;
+		}
+		xlnx_quadspi_read_fifo(dev);
+	}

 	ret = spi_context_wait_for_completion(ctx);
 out:
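As a sanity check on the 20 ms figure (my arithmetic, not the commit's): even at a slow 125 kHz SCK, draining a full 256-word FIFO of 8-bit words takes 256 × 8 / 125000 s ≈ 16 ms, so 20 ms plus CONFIG_SPI_COMPLETION_TIMEOUT_TOLERANCE comfortably covers any reasonable clock. The loop also leans on the k_event_wait() contract:

/* k_event_wait() returns the set of matched events, or 0 if the
 * timeout expired first, so the return value doubles as a success flag.
 */
uint32_t matched = k_event_wait(&data->dtr_empty, 1, false,
				K_MSEC(20 + CONFIG_SPI_COMPLETION_TIMEOUT_TOLERANCE));

if (matched == 0) {
	/* No DTR-empty interrupt arrived in time: fail with -ETIMEDOUT. */
}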
@@ -405,9 +471,7 @@ static int xlnx_quadspi_release(const struct device *dev,
 static void xlnx_quadspi_isr(const struct device *dev)
 {
-	const struct xlnx_quadspi_config *config = dev->config;
 	struct xlnx_quadspi_data *data = dev->data;
-	struct spi_context *ctx = &data->ctx;
 	uint32_t ipisr;

 	/* Acknowledge interrupt */
@@ -415,46 +479,20 @@ static void xlnx_quadspi_isr(const struct device *dev)
 	xlnx_quadspi_write32(dev, ipisr, IPISR_OFFSET);

 	if (ipisr & IPIXR_DTR_EMPTY) {
-		uint32_t spisr = xlnx_quadspi_read32(dev, SPISR_OFFSET);
-		/* RX FIFO occupancy register only exists if FIFO is implemented */
-		uint32_t rx_fifo_words = config->fifo_size ?
-			xlnx_quadspi_read32(dev, SPI_RX_FIFO_OCR_OFFSET) + 1 : 1;
-
-		/* Read RX data */
-		while (!(spisr & SPISR_RX_EMPTY)) {
-			uint32_t drr = xlnx_quadspi_read32(dev, SPI_DRR_OFFSET);
-
-			if (spi_context_rx_buf_on(ctx)) {
-				switch (config->num_xfer_bytes) {
-				case 1:
-					UNALIGNED_PUT(drr,
-						      (uint8_t *)ctx->rx_buf);
-					break;
-				case 2:
-					UNALIGNED_PUT(drr,
-						      (uint16_t *)ctx->rx_buf);
-					break;
-				case 4:
-					UNALIGNED_PUT(drr,
-						      (uint32_t *)ctx->rx_buf);
-					break;
-				default:
-					__ASSERT(0,
-						 "unsupported num_xfer_bytes");
-				}
-			}
-
-			spi_context_update_rx(ctx, config->num_xfer_bytes, 1);
-
-			if (--rx_fifo_words == 0) {
-				spisr = xlnx_quadspi_read32(dev, SPISR_OFFSET);
-				rx_fifo_words = config->fifo_size ?
-					xlnx_quadspi_read32(dev, SPI_RX_FIFO_OCR_OFFSET) + 1 : 1;
-			}
-		}
-
-		/* Start next TX */
-		xlnx_quadspi_start_tx(dev);
+		/**
+		 * For async mode, we need to read the RX FIFO and refill the TX FIFO
+		 * if needed here.
+		 * For sync mode, we do this in the caller's context to avoid doing too much
+		 * work in the ISR, so just post the event.
+		 */
+#ifdef CONFIG_SPI_ASYNC
+		struct spi_context *ctx = &data->ctx;
+
+		if (ctx->asynchronous) {
+			xlnx_quadspi_read_fifo(dev);
+			xlnx_quadspi_start_tx(dev);
+			return;
+		}
+#endif
+		k_event_post(&data->dtr_empty, 1);
 	} else {
 		LOG_WRN("unhandled interrupt, ipisr = 0x%08x", ipisr);
 	}
@@ -514,6 +552,8 @@ static int xlnx_quadspi_init(const struct device *dev)
 	const struct xlnx_quadspi_config *config = dev->config;
 	struct xlnx_quadspi_data *data = dev->data;

+	k_event_init(&data->dtr_empty);
+
 	/* Reset controller */
 	xlnx_quadspi_write32(dev, SRR_SOFTRESET_MAGIC, SRR_OFFSET);
