
drivers: dma: dma_xmc4xxx: Add multi-block support

Adds DMA multi-block support for xmc4xxx SoCs. Transfers with more than
one block are implemented with the GPDMA hardware linked list and are
only supported on DMA0 channels 0 and 1.

Signed-off-by: Andriy Gelman <andriy.gelman@gmail.com>
Andriy Gelman authored 5 months ago; committed by Benjamin Cabé
commit 51f3422192

drivers/dma/Kconfig.xmc4xxx (+8 lines)

@@ -9,3 +9,11 @@ config DMA_XMC4XXX
 	depends on DT_HAS_INFINEON_XMC4XXX_DMA_ENABLED
 	help
 	  DMA driver for Infineon xmc4xxx series MCUs.
+
+config DMA_XMC4XXX_NUM_DESCRIPTORS
+	int "Max DMA descriptors in a linked list"
+	default 4
+	depends on DMA_XMC4XXX
+	help
+	  Maximum number of blocks in a DMA block transfer configuration.
+	  Only supported by dma0 channels 0 and 1.
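
An editorial aside, not part of the commit: an application that chains more blocks than the default of 4 would raise this limit in its prj.conf. A minimal sketch (the value 8 is only illustrative):

    CONFIG_DMA_XMC4XXX=y
    CONFIG_DMA_XMC4XXX_NUM_DESCRIPTORS=8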

drivers/dma/dma_xmc4xxx.c (195 lines changed)

@@ -24,6 +24,12 @@ LOG_MODULE_REGISTER(dma_xmc4xxx, CONFIG_DMA_LOG_LEVEL);
 #define DLR_SRSEL_RS_BITSIZE 4
 #define DLR_SRSEL_RS_MSK 0xf
 
+#define MULTI_BLOCK_NUM_CHANNELS 2
+
+#define XMC_DMA_CTLL_MEMORY_TO_MEMORY     0
+#define XMC_DMA_CTLL_MEMORY_TO_PERIPHERAL 1
+#define XMC_DMA_CTLL_PERIPHERAL_TO_MEMORY 2
+
 #define ALL_EVENTS                                                                         \
 	(XMC_DMA_CH_EVENT_TRANSFER_COMPLETE | XMC_DMA_CH_EVENT_BLOCK_TRANSFER_COMPLETE |   \
 	 XMC_DMA_CH_EVENT_SRC_TRANSACTION_COMPLETE | XMC_DMA_CH_EVENT_DST_TRANSACTION_COMPLETE | \
@@ -38,8 +44,27 @@ struct dma_xmc4xxx_channel {
 	uint8_t dlr_line;
 	uint8_t channel_direction;
 	uint8_t dest_addr_adj;
+	bool multi_block;
 };
 
+struct dma_xmc4xxx_descriptor {
+	uint32_t sar;   /* source address */
+	uint32_t dar;   /* destination address */
+	uint32_t llp;   /* linked-list pointer to the next descriptor or null if last descriptor */
+	uint32_t ctll;  /* control register low */
+	uint32_t ctlh;  /* control register high */
+	uint32_t dstat; /* status register fetched from address DSTATAR after block completes */
+} __packed;
+
+struct dma_xmc4xxx_scatter_gather {
+	bool enabled;
+	uint32_t interval;
+	uint16_t count;
+};
+
+static struct dma_xmc4xxx_descriptor descriptor_list[MULTI_BLOCK_NUM_CHANNELS]
+						    [CONFIG_DMA_XMC4XXX_NUM_DESCRIPTORS];
+
 struct dma_xmc4xxx_config {
 	XMC_DMA_t *dma;
 	void (*irq_configure)(void);
@@ -123,6 +148,54 @@ static void dma_xmc4xxx_isr(const struct device *dev)
 	}
 }
 
+static uint32_t dma_xmc4xxx_reg_ctll(struct dma_block_config *block, struct dma_config *config)
+{
+	uint32_t ctll;
+
+	ctll = config->dest_data_size / 2 << GPDMA0_CH_CTLL_DST_TR_WIDTH_Pos |
+	       config->source_data_size / 2 << GPDMA0_CH_CTLL_SRC_TR_WIDTH_Pos |
+	       block->dest_addr_adj << GPDMA0_CH_CTLL_DINC_Pos |
+	       block->source_addr_adj << GPDMA0_CH_CTLL_SINC_Pos |
+	       config->dest_burst_length / 4 << GPDMA0_CH_CTLL_DEST_MSIZE_Pos |
+	       config->source_burst_length / 4 << GPDMA0_CH_CTLL_SRC_MSIZE_Pos |
+	       BIT(GPDMA0_CH_CTLL_INT_EN_Pos);
+
+	/* Only GPDMA flow controller supported */
+	if (config->channel_direction == MEMORY_TO_PERIPHERAL) {
+		ctll |= XMC_DMA_CTLL_MEMORY_TO_PERIPHERAL << GPDMA0_CH_CTLL_TT_FC_Pos;
+	}
+
+	if (config->channel_direction == PERIPHERAL_TO_MEMORY) {
+		ctll |= XMC_DMA_CTLL_PERIPHERAL_TO_MEMORY << GPDMA0_CH_CTLL_TT_FC_Pos;
+	}
+
+	if (block->source_gather_en && block->source_gather_count > 0) {
+		ctll |= BIT(GPDMA0_CH_CTLL_SRC_GATHER_EN_Pos);
+	}
+
+	if (block->dest_scatter_en && block->dest_scatter_count > 0) {
+		ctll |= BIT(GPDMA0_CH_CTLL_DST_SCATTER_EN_Pos);
+	}
+
+	return ctll;
+}
+
+#define SET_CHECK_SCATTER_GATHER(type)                                                     \
+	do {                                                                               \
+		if (block->type##_en && block->type##_count > 0 && !type.enabled) {        \
+			type.enabled = true;                                               \
+			type.interval = block->type##_interval;                            \
+			type.count = block->type##_count;                                  \
+		} else if (block->type##_en && type.enabled) {                             \
+			if (block->type##_interval != type.interval ||                     \
+			    block->type##_count != type.count) {                           \
+				LOG_ERR(STRINGIFY(type) " parameters must be consistent "  \
+					"across enabled blocks");                          \
+				return -EINVAL;                                            \
+			}                                                                  \
+		}                                                                          \
+	} while (0)
+
 static int dma_xmc4xxx_config(const struct device *dev, uint32_t channel, struct dma_config *config)
 {
 	struct dma_xmc4xxx_data *dev_data = dev->data;
@@ -130,6 +203,8 @@ static int dma_xmc4xxx_config(const struct device *dev, uint32_t channel, struct dma_config *config)
 	struct dma_block_config *block = config->head_block;
 	XMC_DMA_t *dma = dev_cfg->dma;
 	uint8_t dlr_line = DLR_LINE_UNSET;
+	struct dma_xmc4xxx_scatter_gather source_gather = { 0 };
+	struct dma_xmc4xxx_scatter_gather dest_scatter = { 0 };
 
 	if (channel >= dev_data->ctx.dma_channels) {
 		LOG_ERR("Invalid channel number");
@@ -153,14 +228,14 @@ static int dma_xmc4xxx_config(const struct device *dev, uint32_t channel, struct dma_config *config)
 		return -EINVAL;
 	}
 
-	if (config->block_count != 1) {
-		LOG_ERR("Invalid block count");
+	if (config->block_count > CONFIG_DMA_XMC4XXX_NUM_DESCRIPTORS) {
+		LOG_ERR("Block count exceeds descriptor array size");
 		return -EINVAL;
 	}
 
-	if (block->source_gather_en || block->dest_scatter_en) {
-		if (dma != XMC_DMA0 || channel >= 2) {
-			LOG_ERR("Gather/scatter only supported on DMA0 on ch0 and ch1");
+	if (block->source_gather_en || block->dest_scatter_en || config->block_count != 1) {
+		if ((uint32_t)dma != (uint32_t)XMC_DMA0 || channel >= 2) {
+			LOG_ERR("Multi-block and gather/scatter only supported on DMA0 on ch0 and ch1");
 			return -EINVAL;
 		}
 	}
@@ -202,12 +277,59 @@ static int dma_xmc4xxx_config(const struct device *dev, uint32_t channel, struct dma_config *config)
 	XMC_DMA_CH_ClearEventStatus(dma, channel, ALL_EVENTS);
 
 	/* check dma slot number */
-	dma->CH[channel].SAR = block->source_address;
-	dma->CH[channel].DAR = block->dest_address;
-	dma->CH[channel].LLP = 0;
+	if (config->block_count == 1) {
+		uint32_t ctll;
+
+		dma->CH[channel].SAR = block->source_address;
+		dma->CH[channel].DAR = block->dest_address;
+		dma->CH[channel].LLP = 0;
+
+		/* set number of transactions */
+		dma->CH[channel].CTLH = block->block_size / config->source_data_size;
+		ctll = dma_xmc4xxx_reg_ctll(block, config);
+
+		SET_CHECK_SCATTER_GATHER(source_gather);
+		SET_CHECK_SCATTER_GATHER(dest_scatter);
+
+		dma->CH[channel].CTLL = ctll;
+	} else {
+		struct dma_xmc4xxx_descriptor *desc;
+
+		dma->CH[channel].LLP = (uint32_t)&descriptor_list[channel][0];
+		dma->CH[channel].CTLL = BIT(GPDMA0_CH_CTLL_LLP_DST_EN_Pos) |
+					BIT(GPDMA0_CH_CTLL_LLP_SRC_EN_Pos);
+
+		for (int i = 0; i < config->block_count; i++) {
+			uint32_t ctll;
+
+			desc = &descriptor_list[channel][i];
+
+			desc->sar = block->source_address;
+			desc->dar = block->dest_address;
+			desc->ctlh = block->block_size / config->source_data_size;
+
+			ctll = dma_xmc4xxx_reg_ctll(block, config);
+
+			if (i < config->block_count - 1) {
+				desc->llp = (uint32_t)&descriptor_list[channel][i + 1];
+				ctll |= BIT(GPDMA0_CH_CTLL_LLP_DST_EN_Pos) |
+					BIT(GPDMA0_CH_CTLL_LLP_SRC_EN_Pos);
+			} else {
+				desc->llp = 0;
+			}
+			desc->ctll = ctll;
+
+			SET_CHECK_SCATTER_GATHER(source_gather);
+			SET_CHECK_SCATTER_GATHER(dest_scatter);
+
+			block = block->next_block;
+		}
+	}
+	block = config->head_block;
 
-	/* set number of transactions */
-	dma->CH[channel].CTLH = block->block_size / config->source_data_size;
 	/* set priority and software handshaking for src/dst. if hardware handshaking is used */
 	/* it will be enabled later in the code */
 	dma->CH[channel].CFGL = (config->channel_priority << GPDMA0_CH_CFGL_CH_PRIOR_Pos) |
@@ -215,28 +337,19 @@ static int dma_xmc4xxx_config(const struct device *dev, uint32_t channel, struct dma_config *config)
 	dma->CH[channel].CFGH = 0;
 
-	dma->CH[channel].CTLL = config->dest_data_size / 2 << GPDMA0_CH_CTLL_DST_TR_WIDTH_Pos |
-				config->source_data_size / 2 << GPDMA0_CH_CTLL_SRC_TR_WIDTH_Pos |
-				block->dest_addr_adj << GPDMA0_CH_CTLL_DINC_Pos |
-				block->source_addr_adj << GPDMA0_CH_CTLL_SINC_Pos |
-				config->dest_burst_length / 4 << GPDMA0_CH_CTLL_DEST_MSIZE_Pos |
-				config->source_burst_length / 4 << GPDMA0_CH_CTLL_SRC_MSIZE_Pos |
-				BIT(GPDMA0_CH_CTLL_INT_EN_Pos);
-
-	dma->CH[channel].CFGH = 0;
-
 	if (config->channel_direction == MEMORY_TO_PERIPHERAL ||
 	    config->channel_direction == PERIPHERAL_TO_MEMORY) {
 		uint8_t request_source = XMC4XXX_DMA_GET_REQUEST_SOURCE(config->dma_slot);
 		uint8_t dlr_line_reg = XMC4XXX_DMA_GET_LINE(config->dma_slot);
 
 		dlr_line = dlr_line_reg;
-		if (dma == XMC_DMA0 && dlr_line > 7) {
+		if ((uint32_t)dma == (uint32_t)XMC_DMA0 && dlr_line > 7) {
 			LOG_ERR("Unsupported request line %d for DMA0."
 				"Should be in range [0,7]", dlr_line);
 			return -EINVAL;
 		}
 
-		if (dma == XMC_DMA1 && (dlr_line < 8 || dlr_line > 11)) {
+		if ((uint32_t)dma == (uint32_t)XMC_DMA1 && (dlr_line < 8 || dlr_line > 11)) {
 			LOG_ERR("Unsupported request line %d for DMA1."
 				"Should be in range [8,11]", dlr_line);
 			return -EINVAL;
@@ -249,12 +362,12 @@ static int dma_xmc4xxx_config(const struct device *dev, uint32_t channel, struct dma_config *config)
 		DLR->LNEN |= BIT(dlr_line);
 
 		/* connect DMA Line to SR */
-		if (dma == XMC_DMA0) {
+		if ((uint32_t)dma == (uint32_t)XMC_DMA0) {
 			DLR->SRSEL0 &= ~(DLR_SRSEL_RS_MSK << (dlr_line_reg * DLR_SRSEL_RS_BITSIZE));
 			DLR->SRSEL0 |= request_source << (dlr_line_reg * DLR_SRSEL_RS_BITSIZE);
 		}
 
-		if (dma == XMC_DMA1) {
+		if ((uint32_t)dma == (uint32_t)XMC_DMA1) {
 			dlr_line_reg -= 8;
 			DLR->SRSEL1 &= ~(DLR_SRSEL_RS_MSK << (dlr_line_reg * DLR_SRSEL_RS_BITSIZE));
 			DLR->SRSEL1 |= request_source << (dlr_line_reg * DLR_SRSEL_RS_BITSIZE);
@@ -264,13 +377,11 @@ static int dma_xmc4xxx_config(const struct device *dev, uint32_t channel, struct dma_config *config)
 		if (config->channel_direction == MEMORY_TO_PERIPHERAL) {
 			dma->CH[channel].CFGH = (dlr_line_reg << GPDMA0_CH_CFGH_DEST_PER_Pos) | 4;
 			dma->CH[channel].CFGL &= ~BIT(GPDMA0_CH_CFGL_HS_SEL_DST_Pos);
-			dma->CH[channel].CTLL |= 1 << GPDMA0_CH_CTLL_TT_FC_Pos;
 		}
 
 		if (config->channel_direction == PERIPHERAL_TO_MEMORY) {
 			dma->CH[channel].CFGH = (dlr_line_reg << GPDMA0_CH_CFGH_SRC_PER_Pos) | 4;
 			dma->CH[channel].CFGL &= ~BIT(GPDMA0_CH_CFGL_HS_SEL_SRC_Pos);
-			dma->CH[channel].CTLL |= 2 << GPDMA0_CH_CTLL_TT_FC_Pos;
 		}
 	}
@@ -278,18 +389,24 @@ static int dma_xmc4xxx_config(const struct device *dev, uint32_t channel, struct dma_config *config)
 		dma->CH[channel].CFGH |= GPDMA0_CH_CFGH_FIFO_MODE_Msk;
 	}
 
-	if (block->source_gather_en) {
-		dma->CH[channel].CTLL |= BIT(GPDMA0_CH_CTLL_SRC_GATHER_EN_Pos);
+	if ((uint32_t)dma == (uint32_t)XMC_DMA0) {
+		if (channel == 0 || channel == 1) {
+			/* reset scatter/gather registers */
+			dma->CH[channel].SGR = 0;
+			dma->CH[channel].DSR = 0;
+		}
+	}
+
+	if (source_gather.enabled) {
 		/* truncate if we are out of range */
-		dma->CH[channel].SGR = (block->source_gather_interval & GPDMA0_CH_SGR_SGI_Msk) |
-				       block->source_gather_count << GPDMA0_CH_SGR_SGC_Pos;
+		dma->CH[channel].SGR = (source_gather.interval & GPDMA0_CH_SGR_SGI_Msk) |
+				       source_gather.count << GPDMA0_CH_SGR_SGC_Pos;
 	}
 
-	if (block->dest_scatter_en) {
-		dma->CH[channel].CTLL |= BIT(GPDMA0_CH_CTLL_DST_SCATTER_EN_Pos);
+	if (dest_scatter.enabled) {
 		/* truncate if we are out of range */
-		dma->CH[channel].DSR = (block->dest_scatter_interval & GPDMA0_CH_DSR_DSI_Msk) |
-				       block->dest_scatter_count << GPDMA0_CH_DSR_DSC_Pos;
+		dma->CH[channel].DSR = (dest_scatter.interval & GPDMA0_CH_DSR_DSI_Msk) |
+				       dest_scatter.count << GPDMA0_CH_DSR_DSC_Pos;
 	}
 
 	dev_data->channels[channel].cb = config->dma_callback;
@@ -301,6 +418,12 @@ static int dma_xmc4xxx_config(const struct device *dev, uint32_t channel, struct dma_config *config)
 	dev_data->channels[channel].dest_addr_adj = block->dest_addr_adj;
 	dev_data->channels[channel].dest_address = block->dest_address;
 
+	if (config->block_count > 1) {
+		dev_data->channels[channel].multi_block = true;
+	} else {
+		dev_data->channels[channel].multi_block = false;
+	}
+
 	XMC_DMA_CH_DisableEvent(dma, channel, ALL_EVENTS);
 	XMC_DMA_CH_EnableEvent(dma, channel, XMC_DMA_CH_EVENT_TRANSFER_COMPLETE);
@@ -412,6 +535,12 @@ static int dma_xmc4xxx_get_status(const struct device *dev, uint32_t channel,
 	stat->busy = XMC_DMA_CH_IsEnabled(dma, channel);
 
+	if (dma_channel->multi_block) {
+		/* not supported for multi-block transfers */
+		stat->pending_length = 0;
+		return 0;
+	}
+
 	/* Use DAR to check for transferred bytes when possible. Value CTL.BLOCK_TS does not */
 	/* appear to guarantee that the last value is fully transferred to dest. */
 	if (dma_channel->dest_addr_adj == DMA_ADDR_ADJ_INCREMENT) {
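
As a usage illustration (not part of the commit): a minimal sketch of a two-block memory-to-memory transfer through the generic Zephyr DMA API, which is the path this patch enables. The buffer names, the dma0 devicetree label, and the fixed channel number 0 are assumptions for the example; per the checks above, multi-block transfers are only accepted on DMA0 channels 0 and 1.

    #include <zephyr/device.h>
    #include <zephyr/drivers/dma.h>

    /* Assumes the devicetree node labeled dma0 is enabled */
    static const struct device *dma_dev = DEVICE_DT_GET(DT_NODELABEL(dma0));

    static uint32_t src0[16], src1[16];
    static uint32_t dst0[16], dst1[16];

    /* Called once when the whole descriptor chain has completed */
    static void xfer_done(const struct device *dev, void *user_data,
                          uint32_t channel, int status)
    {
            ARG_UNUSED(dev);
            ARG_UNUSED(user_data);
            ARG_UNUSED(channel);
            ARG_UNUSED(status);
    }

    int start_two_block_copy(void)
    {
            /* Tail of the chain; the driver writes llp = 0 for the last block */
            struct dma_block_config block1 = {
                    .source_address = (uint32_t)src1,
                    .dest_address = (uint32_t)dst1,
                    .block_size = sizeof(src1),
            };
            /* Head of the chain, linked to block1 via next_block */
            struct dma_block_config block0 = {
                    .source_address = (uint32_t)src0,
                    .dest_address = (uint32_t)dst0,
                    .block_size = sizeof(src0),
                    .next_block = &block1,
            };
            struct dma_config cfg = {
                    .channel_direction = MEMORY_TO_MEMORY,
                    .source_data_size = 4,
                    .dest_data_size = 4,
                    .source_burst_length = 4,
                    .dest_burst_length = 4,
                    /* must not exceed CONFIG_DMA_XMC4XXX_NUM_DESCRIPTORS */
                    .block_count = 2,
                    .head_block = &block0,
                    .dma_callback = xfer_done,
            };
            int ret = dma_config(dma_dev, 0, &cfg);

            if (ret < 0) {
                    return ret;
            }
            return dma_start(dma_dev, 0);
    }

Because dma_config() copies each dma_block_config into the static descriptor_list during the call, the block structures may live on the stack and do not need to outlive dma_config().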
