Browse Source

drivers: uart: add dma support for ambiq uart driver

This commit adds dma support for ambiq uart driver

Signed-off-by: Hao Luo <hluo@ambiq.com>
pull/90076/head
Hao Luo 2 months ago committed by Benjamin Cabé
parent
commit
ba21058cd7
  1. 12
      drivers/serial/Kconfig.ambiq
  2. 693
      drivers/serial/uart_ambiq.c
  3. 2
      west.yml

12
drivers/serial/Kconfig.ambiq

@@ -8,8 +8,20 @@ config UART_AMBIQ
depends on SOC_SERIES_APOLLO5X
select SERIAL_HAS_DRIVER
select SERIAL_SUPPORT_INTERRUPT
select SERIAL_SUPPORT_ASYNC
select PINCTRL
select AMBIQ_HAL
select AMBIQ_HAL_USE_UART
help
Enable the AMBIQ UART driver.
if UART_AMBIQ
config UART_AMBIQ_HANDLE_CACHE
bool "Turn on cache handling in uart driver"
default y
depends on CACHE_MANAGEMENT && DCACHE
help
Disable this if cache has been handled in upper layers.
endif # UART_AMBIQ

693
drivers/serial/uart_ambiq.c

@@ -6,29 +6,63 @@
#define DT_DRV_COMPAT ambiq_uart
#include <errno.h>
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/pm/device.h>
#include <zephyr/pm/policy.h>
#include <zephyr/pm/device_runtime.h>
#include <zephyr/logging/log.h>
#include <zephyr/cache.h>
/* ambiq-sdk includes */
#include <soc.h>
LOG_MODULE_REGISTER(uart_ambiq, CONFIG_UART_LOG_LEVEL);
#define UART_AMBIQ_RSR_ERROR_MASK \
(UART0_RSR_FESTAT_Msk | UART0_RSR_PESTAT_Msk | UART0_RSR_BESTAT_Msk | UART0_RSR_OESTAT_Msk)
#ifdef CONFIG_UART_ASYNC_API
struct uart_ambiq_async_tx {
const uint8_t *buf;
size_t len;
int32_t timeout;
struct k_work_delayable timeout_work;
bool enabled;
};
struct uart_ambiq_async_rx {
uint8_t *buf;
size_t len;
size_t offset;
size_t counter;
uint8_t *next_buf;
size_t next_len;
int32_t timeout;
struct k_work_delayable timeout_work;
bool enabled;
};
struct uart_ambiq_async_data {
const struct device *uart_dev;
struct uart_ambiq_async_tx tx;
struct uart_ambiq_async_rx rx;
uart_callback_t cb;
void *user_data;
volatile bool dma_rdy;
};
#endif
struct uart_ambiq_config {
uint32_t base;
int size;
int inst_idx;
uint32_t clk_src;
const struct pinctrl_dev_config *pincfg;
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
#if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API)
void (*irq_config_func)(const struct device *dev);
#endif
};
@@ -44,8 +78,52 @@ struct uart_ambiq_data {
struct k_spinlock irq_cb_lock;
void *irq_cb_data;
#endif
#ifdef CONFIG_UART_ASYNC_API
struct uart_ambiq_async_data async;
#endif
bool tx_poll_trans_on;
bool tx_int_trans_on;
bool pm_policy_state_on;
};
/* Unconditionally take the PM policy lock that forbids suspend-to-RAM while a
 * UART transfer is in flight. Compiles to a no-op when CONFIG_PM is disabled.
 */
static void uart_ambiq_pm_policy_state_lock_get_unconditional(void)
{
	if (!IS_ENABLED(CONFIG_PM)) {
		return;
	}

	pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_RAM, PM_ALL_SUBSTATES);
}
/* Take the PM policy lock at most once per device: the per-device flag
 * pm_policy_state_on records whether this device already holds it, so
 * repeated calls do not over-acquire the lock.
 */
static void uart_ambiq_pm_policy_state_lock_get(const struct device *dev)
{
	struct uart_ambiq_data *data = dev->data;

	if (!IS_ENABLED(CONFIG_PM) || data->pm_policy_state_on) {
		return;
	}

	data->pm_policy_state_on = true;
	uart_ambiq_pm_policy_state_lock_get_unconditional();
}
/* Unconditionally release the suspend-to-RAM PM policy lock.
 * No-op when CONFIG_PM is disabled.
 */
static void uart_ambiq_pm_policy_state_lock_put_unconditional(void)
{
	if (!IS_ENABLED(CONFIG_PM)) {
		return;
	}

	pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_RAM, PM_ALL_SUBSTATES);
}
/* Release the PM policy lock only if this device currently holds it, as
 * tracked by pm_policy_state_on; counterpart of
 * uart_ambiq_pm_policy_state_lock_get().
 */
static void uart_ambiq_pm_policy_state_lock_put(const struct device *dev)
{
	struct uart_ambiq_data *data = dev->data;

	if (!IS_ENABLED(CONFIG_PM) || !data->pm_policy_state_on) {
		return;
	}

	data->pm_policy_state_on = false;
	uart_ambiq_pm_policy_state_lock_put_unconditional();
}
static int uart_ambiq_configure(const struct device *dev, const struct uart_config *cfg)
{
const struct uart_ambiq_config *config = dev->config;
@@ -179,6 +257,18 @@ static void uart_ambiq_poll_out(const struct device *dev, unsigned char c)
am_hal_uart_flags_get(data->uart_handler, &flag);
} while (flag & UART0_FR_TXFF_Msk);
/* If an interrupt transmission is in progress, the pm constraint is already managed by the
* call of uart_ambiq_irq_tx_[en|dis]able
*/
if (!data->tx_poll_trans_on && !data->tx_int_trans_on) {
data->tx_poll_trans_on = true;
/* Don't allow system to suspend until
* transmission has completed
*/
uart_ambiq_pm_policy_state_lock_get(dev);
}
/* Send a character */
am_hal_uart_fifo_write(data->uart_handler, &c, 1, NULL);
}
@@ -233,7 +323,12 @@ static void uart_ambiq_irq_tx_enable(const struct device *dev)
const struct uart_ambiq_config *cfg = dev->config;
struct uart_ambiq_data *data = dev->data;
am_hal_uart_interrupt_enable(data->uart_handler, AM_HAL_UART_INT_TX);
data->tx_poll_trans_on = false;
data->tx_int_trans_on = true;
uart_ambiq_pm_policy_state_lock_get(dev);
am_hal_uart_interrupt_enable(data->uart_handler,
(AM_HAL_UART_INT_TX | AM_HAL_UART_INT_TXCMP));
if (!data->sw_call_txdrdy) {
return;
@@ -274,7 +369,10 @@ static void uart_ambiq_irq_tx_disable(const struct device *dev)
struct uart_ambiq_data *data = dev->data;
data->sw_call_txdrdy = true;
am_hal_uart_interrupt_disable(data->uart_handler, AM_HAL_UART_INT_TX);
am_hal_uart_interrupt_disable(data->uart_handler,
(AM_HAL_UART_INT_TX | AM_HAL_UART_INT_TXCMP));
data->tx_int_trans_on = false;
uart_ambiq_pm_policy_state_lock_put(dev);
}
static int uart_ambiq_irq_tx_complete(const struct device *dev)
@@ -372,31 +470,11 @@ static void uart_ambiq_irq_callback_set(const struct device *dev, uart_irq_callb
}
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */
static DEVICE_API(uart, uart_ambiq_driver_api) = {
.poll_in = uart_ambiq_poll_in,
.poll_out = uart_ambiq_poll_out,
.err_check = uart_ambiq_err_check,
#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
.configure = uart_ambiq_configure,
.config_get = uart_ambiq_config_get,
#endif
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
.fifo_fill = uart_ambiq_fifo_fill,
.fifo_read = uart_ambiq_fifo_read,
.irq_tx_enable = uart_ambiq_irq_tx_enable,
.irq_tx_disable = uart_ambiq_irq_tx_disable,
.irq_tx_ready = uart_ambiq_irq_tx_ready,
.irq_rx_enable = uart_ambiq_irq_rx_enable,
.irq_rx_disable = uart_ambiq_irq_rx_disable,
.irq_tx_complete = uart_ambiq_irq_tx_complete,
.irq_rx_ready = uart_ambiq_irq_rx_ready,
.irq_err_enable = uart_ambiq_irq_err_enable,
.irq_err_disable = uart_ambiq_irq_err_disable,
.irq_is_pending = uart_ambiq_irq_is_pending,
.irq_update = uart_ambiq_irq_update,
.irq_callback_set = uart_ambiq_irq_callback_set,
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */
};
#ifdef CONFIG_UART_ASYNC_API
static void async_user_callback(const struct device *dev, struct uart_event *evt);
static void uart_ambiq_async_tx_timeout(struct k_work *work);
static void uart_ambiq_async_rx_timeout(struct k_work *work);
#endif /* CONFIG_UART_ASYNC_API */
static int uart_ambiq_init(const struct device *dev)
{
@@ -424,11 +502,20 @@ static int uart_ambiq_init(const struct device *dev)
goto end;
}
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
#if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API)
config->irq_config_func(dev);
data->sw_call_txdrdy = true;
#endif
#ifdef CONFIG_UART_ASYNC_API
data->async.uart_dev = dev;
k_work_init_delayable(&data->async.tx.timeout_work, uart_ambiq_async_tx_timeout);
k_work_init_delayable(&data->async.rx.timeout_work, uart_ambiq_async_rx_timeout);
data->async.rx.len = 0;
data->async.rx.offset = 0;
data->async.dma_rdy = true;
#endif
end:
if (ret < 0) {
am_hal_uart_deinitialize(data->uart_handler);
@@ -464,65 +551,539 @@ static int uart_ambiq_pm_action(const struct device *dev, enum pm_device_action
}
#endif /*CONFIG_PM_DEVICE*/
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
#if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API)
/* Shared UART interrupt handler for interrupt-driven and async (DMA) modes.
 * Reads the raw interrupt status once and dispatches:
 *  - TXCMP after a poll transmission: drop the PM constraint taken by poll_out;
 *  - interrupt-driven mode: invoke the registered user IRQ callback;
 *  - async mode: run the HAL interrupt service, signal UART_TX_DONE on TXCMP,
 *    and re-arm the RX idle-timeout work on RX activity.
 * Finally clears the status bits it consumed.
 */
void uart_ambiq_isr(const struct device *dev)
{
struct uart_ambiq_data *data = dev->data;
uint32_t status = 0;
am_hal_uart_interrupt_status_get(data->uart_handler, &status, false);
if (status & AM_HAL_UART_INT_TXCMP) {
if (data->tx_poll_trans_on) {
/* A poll transmission just completed,
* allow system to suspend
*/
am_hal_uart_interrupt_disable(data->uart_handler, AM_HAL_UART_INT_TXCMP);
data->tx_poll_trans_on = false;
uart_ambiq_pm_policy_state_lock_put(dev);
}
/* Transmission was either async or IRQ based,
* constraint will be released at the same time TXCMP IT
* is disabled
*/
}
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
/* Verify if the callback has been registered */
if (data->irq_cb) {
K_SPINLOCK(&data->irq_cb_lock) {
data->irq_cb(dev, data->irq_cb_data);
}
}
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */
#ifdef CONFIG_UART_ASYNC_API
/* Let the HAL service DMA completion/error conditions first */
am_hal_uart_interrupt_service(data->uart_handler, status);
if (status & AM_HAL_UART_INT_TXCMP) {
/* tx_int_trans_on is set by uart_ambiq_async_tx(); TXCMP here means
 * the DMA transfer fully drained, so report UART_TX_DONE and release
 * the PM constraint taken when the transfer started.
 */
if (data->tx_int_trans_on) {
struct uart_event tx_done = {
.type = UART_TX_DONE,
.data.tx.buf = data->async.tx.buf,
.data.tx.len = data->async.tx.len,
};
async_user_callback(dev, &tx_done);
data->tx_int_trans_on = false;
data->async.dma_rdy = true;
uart_ambiq_pm_policy_state_lock_put_unconditional();
}
}
/* RX activity while a finite timeout is configured: push the idle-timeout
 * deadline out again.
 */
if (data->async.rx.timeout != SYS_FOREVER_US && data->async.rx.timeout != 0 &&
(status & AM_HAL_UART_INT_RX)) {
k_work_reschedule(&data->async.rx.timeout_work, K_USEC(data->async.rx.timeout));
}
#endif /* CONFIG_UART_ASYNC_API */
am_hal_uart_interrupt_clear(data->uart_handler, status);
}
#endif /* CONFIG_UART_INTERRUPT_DRIVEN || CONFIG_UART_ASYNC_API */
/* Per-instance config initializer: base address, MMIO size, instance index
 * derived from the register base, clock source and pinctrl config from
 * devicetree, plus the caller-supplied IRQ-config-function initializer.
 */
#define UART_AMBIQ_DECLARE_CFG(n, IRQ_FUNC_INIT) \
static const struct uart_ambiq_config uart_ambiq_cfg_##n = { \
.base = DT_INST_REG_ADDR(n), \
.size = DT_INST_REG_SIZE(n), \
.inst_idx = (DT_INST_REG_ADDR(n) - UART0_BASE) / (UART1_BASE - UART0_BASE), \
.clk_src = DT_INST_PROP(n, clk_src), \
.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \
IRQ_FUNC_INIT}
#if defined(CONFIG_UART_ASYNC_API)
/* Arm (or re-arm) a delayable work item, unless the timeout is disabled
 * (SYS_FOREVER_US) or zero.
 */
static inline void async_timer_start(struct k_work_delayable *work, int32_t timeout)
{
	bool timer_disabled = (timeout == SYS_FOREVER_US) || (timeout == 0);

	if (!timer_disabled) {
		k_work_reschedule(work, K_USEC(timeout));
	}
}
static void async_user_callback(const struct device *dev, struct uart_event *evt)
{
const struct uart_ambiq_data *data = dev->data;
if (data->async.cb) {
data->async.cb(dev, evt, data->async.user_data);
}
}
/* HAL TX transfer callback, invoked from interrupt context.
 * The HAL may invoke this for interrupt sources other than DMA, so first
 * confirm the UART really latched a DMA-complete or DMA-error condition;
 * only then cancel the TX timeout watchdog and let the HAL finalize the
 * DMA transfer. UART_TX_DONE itself is reported from the ISR on TXCMP.
 */
static void uart_ambiq_async_tx_callback(uint32_t status, void *user_data)
{
const struct device *dev = user_data;
const struct uart_ambiq_config *config = dev->config;
struct uart_ambiq_data *data = dev->data;
struct uart_ambiq_async_tx *tx = &data->async.tx;
unsigned int key = irq_lock();
/* Skip callback if no DMA interrupt */
if ((UARTn(config->inst_idx)->RSR_b.DMACPL == 0) &&
(UARTn(config->inst_idx)->RSR_b.DMAERR == 0)) {
irq_unlock(key);
return;
}
/* DMA done (or errored): stop the timeout watchdog and tear down the
 * HAL-side DMA bookkeeping.
 */
k_work_cancel_delayable(&tx->timeout_work);
am_hal_uart_dma_transfer_complete(data->uart_handler);
irq_unlock(key);
}
/* Register (or clear, when callback is NULL) the application's async event
 * callback and its user data. Always succeeds.
 */
static int uart_ambiq_async_callback_set(const struct device *dev, uart_callback_t callback,
					 void *user_data)
{
	struct uart_ambiq_data *data = dev->data;

	data->async.user_data = user_data;
	data->async.cb = callback;

	return 0;
}
/*
 * Start an asynchronous (DMA-driven) UART transmission.
 *
 * @param dev     UART device instance
 * @param buf     Bytes to transmit; must remain valid until UART_TX_DONE
 *                or UART_TX_ABORTED is delivered
 * @param len     Number of bytes to send
 * @param timeout Microseconds before the transfer is aborted, or
 *                SYS_FOREVER_US to disable the TX watchdog
 *
 * @retval 0       transfer started
 * @retval -EBUSY  a DMA transfer is already active
 * @retval -EINVAL the HAL rejected the transfer
 */
static int uart_ambiq_async_tx(const struct device *dev, const uint8_t *buf, size_t len,
			       int32_t timeout)
{
	struct uart_ambiq_data *data = dev->data;
	am_hal_uart_transfer_t uart_tx = {0};
	int ret = 0;

	if (!data->async.dma_rdy) {
		LOG_WRN("UART DMA busy");
		return -EBUSY;
	}

	data->async.dma_rdy = false;

#ifdef CONFIG_UART_AMBIQ_HANDLE_CACHE
	if (!buf_in_nocache((uintptr_t)buf, len)) {
		/* Clean Dcache before DMA write */
		sys_cache_data_flush_range((void *)buf, len);
	}
#endif /* CONFIG_UART_AMBIQ_HANDLE_CACHE */

	unsigned int key = irq_lock();

	data->async.tx.buf = buf;
	data->async.tx.len = len;
	data->async.tx.timeout = timeout;

	/* Do not allow system to suspend until transmission has completed */
	uart_ambiq_pm_policy_state_lock_get_unconditional();

	/* Enable interrupt so we can signal correct TX done */
	am_hal_uart_interrupt_enable(
		data->uart_handler,
		(AM_HAL_UART_INT_TXCMP | AM_HAL_UART_INT_DMACPRIS | AM_HAL_UART_INT_DMAERIS));

	uart_tx.eDirection = AM_HAL_UART_TX;
	uart_tx.ui32NumBytes = len;
	uart_tx.pui32TxBuffer = (uint32_t *)buf;
	uart_tx.pfnCallback = uart_ambiq_async_tx_callback;
	uart_tx.pvContext = (void *)dev;

	if (am_hal_uart_dma_transfer(data->uart_handler, &uart_tx) != AM_HAL_STATUS_SUCCESS) {
		ret = -EINVAL;
		LOG_ERR("Error starting Tx DMA (%d)", ret);
		/* Roll back on failure (bug fix): the original returned here
		 * with the PM lock still held, dma_rdy false and the DMA
		 * interrupts enabled, which blocked system suspend and
		 * wedged every subsequent async transfer.
		 */
		am_hal_uart_interrupt_disable(
			data->uart_handler,
			(AM_HAL_UART_INT_TXCMP | AM_HAL_UART_INT_DMACPRIS |
			 AM_HAL_UART_INT_DMAERIS));
		uart_ambiq_pm_policy_state_lock_put_unconditional();
		data->async.dma_rdy = true;
		irq_unlock(key);
		return ret;
	}

	data->tx_poll_trans_on = false;
	data->tx_int_trans_on = true;
	async_timer_start(&data->async.tx.timeout_work, timeout);
	irq_unlock(key);

	return ret;
}
/* Abort an in-flight async TX.
 * Cancels the timeout watchdog, stops the HAL transfer and reports
 * UART_TX_ABORTED with the number of bytes the DMA already pushed
 * (requested length minus the DMA's remaining TOTCOUNT). Always returns 0.
 */
static int uart_ambiq_async_tx_abort(const struct device *dev)
{
struct uart_ambiq_data *data = dev->data;
const struct uart_ambiq_config *config = dev->config;
size_t bytes_sent;
unsigned int key = irq_lock();
k_work_cancel_delayable(&data->async.tx.timeout_work);
am_hal_uart_tx_abort(data->uart_handler);
data->async.dma_rdy = true;
/* TOTCOUNT = bytes the DMA still had pending when the abort landed */
bytes_sent = data->async.tx.len - UARTn(config->inst_idx)->COUNT_b.TOTCOUNT;
irq_unlock(key);
struct uart_event tx_aborted = {
.type = UART_TX_ABORTED,
.data.tx.buf = data->async.tx.buf,
.data.tx.len = bytes_sent,
};
async_user_callback(dev, &tx_aborted);
data->tx_int_trans_on = false;
/* NOTE(review): the PM lock taken by uart_ambiq_async_tx() is not
 * released here; presumably TXCMP still fires so the ISR drops it once
 * the FIFO drains — confirm against the HAL's abort semantics.
 */
return 0;
}
static void uart_ambiq_async_tx_timeout(struct k_work *work)
{
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct uart_ambiq_async_tx *tx =
CONTAINER_OF(dwork, struct uart_ambiq_async_tx, timeout_work);
struct uart_ambiq_async_data *async = CONTAINER_OF(tx, struct uart_ambiq_async_data, tx);
struct uart_ambiq_data *data = CONTAINER_OF(async, struct uart_ambiq_data, async);
uart_ambiq_async_tx_abort(data->async.uart_dev);
LOG_DBG("tx: async timeout");
}
/* Disable async reception.
 * Stops the RX timeout work and the HAL transfer, then emits the event
 * sequence the async API requires: UART_RX_BUF_RELEASED for the current
 * buffer, UART_RX_DISABLED, and UART_RX_BUF_RELEASED for the pending
 * "next" buffer if one was queued. Returns -EFAULT when RX was not enabled
 * (after still emitting UART_RX_DISABLED), 0 otherwise.
 */
static int uart_ambiq_async_rx_disable(const struct device *dev)
{
struct uart_ambiq_data *data = dev->data;
struct uart_event disabled_event = {.type = UART_RX_DISABLED};
if (!data->async.rx.enabled) {
async_user_callback(dev, &disabled_event);
return -EFAULT;
}
unsigned int key = irq_lock();
k_work_cancel_delayable(&data->async.rx.timeout_work);
am_hal_uart_rx_abort(data->uart_handler);
data->async.rx.enabled = false;
data->async.dma_rdy = true;
irq_unlock(key);
/* Release current buffer event */
struct uart_event rel_event = {
.type = UART_RX_BUF_RELEASED,
.data.rx_buf.buf = data->async.rx.buf,
};
async_user_callback(dev, &rel_event);
/* Disable RX event */
async_user_callback(dev, &disabled_event);
/* Reset all RX bookkeeping now that the buffers are back with the app */
data->async.rx.buf = NULL;
data->async.rx.len = 0;
data->async.rx.counter = 0;
data->async.rx.offset = 0;
if (data->async.rx.next_buf) {
/* Release next buffer event */
struct uart_event next_rel_event = {
.type = UART_RX_BUF_RELEASED,
.data.rx_buf.buf = data->async.rx.next_buf,
};
async_user_callback(dev, &next_rel_event);
data->async.rx.next_buf = NULL;
data->async.rx.next_len = 0;
}
LOG_DBG("rx: disabled");
return 0;
}
/*
 * HAL RX transfer callback, invoked when the RX DMA completes (buffer full)
 * or errors. Reports any unreported bytes via UART_RX_RDY, then either chains
 * the DMA onto the replacement buffer supplied through rx_buf_rsp(), or shuts
 * reception down when no replacement is available.
 */
static void uart_ambiq_async_rx_callback(uint32_t status, void *user_data)
{
	const struct device *dev = user_data;
	const struct uart_ambiq_config *config = dev->config;
	struct uart_ambiq_data *data = dev->data;
	struct uart_ambiq_async_data *async = &data->async;
	size_t total_rx;

	/* TOTCOUNT = bytes the DMA has left; received = requested - remaining */
	total_rx = async->rx.len - UARTn(config->inst_idx)->COUNT_b.TOTCOUNT;

#if CONFIG_UART_AMBIQ_HANDLE_CACHE
	if (!buf_in_nocache((uintptr_t)async->rx.buf, total_rx)) {
		/* Invalidate Dcache after DMA read */
		sys_cache_data_invd_range((void *)async->rx.buf, total_rx);
	}
#endif /* CONFIG_UART_AMBIQ_HANDLE_CACHE */

	unsigned int key = irq_lock();

	am_hal_uart_interrupt_disable(data->uart_handler,
				      (AM_HAL_UART_INT_DMACPRIS | AM_HAL_UART_INT_DMAERIS));
	irq_unlock(key);

	/* Report bytes not already delivered by the RX timeout handler */
	if (total_rx > async->rx.offset) {
		async->rx.counter = total_rx - async->rx.offset;
		struct uart_event rdy_event = {
			.type = UART_RX_RDY,
			.data.rx.buf = async->rx.buf,
			.data.rx.len = async->rx.counter,
			.data.rx.offset = async->rx.offset,
		};
		async_user_callback(dev, &rdy_event);
	}

	if (async->rx.next_buf) {
		async->rx.offset = 0;
		async->rx.counter = 0;

		/* Hand the exhausted buffer back to the application */
		struct uart_event rel_event = {
			.type = UART_RX_BUF_RELEASED,
			.data.rx_buf.buf = async->rx.buf,
		};
		async_user_callback(dev, &rel_event);

		/* Promote the queued buffer to current */
		async->rx.buf = async->rx.next_buf;
		async->rx.len = async->rx.next_len;
		async->rx.next_buf = NULL;
		async->rx.next_len = 0;

		struct uart_event req_event = {
			.type = UART_RX_BUF_REQUEST,
		};
		async_user_callback(dev, &req_event);

		/* Restart the DMA on the now-current buffer. Bug fix: the
		 * original configured this transfer from rx.next_buf and
		 * rx.next_len, which were cleared just above, so the DMA was
		 * restarted with a NULL buffer of length 0 and buffer
		 * chaining never worked.
		 */
		am_hal_uart_transfer_t uart_rx = {0};

		uart_rx.eDirection = AM_HAL_UART_RX;
		uart_rx.ui32NumBytes = async->rx.len;
		uart_rx.pui32RxBuffer = (uint32_t *)async->rx.buf;
		uart_rx.pfnCallback = uart_ambiq_async_rx_callback;
		uart_rx.pvContext = user_data;

		am_hal_uart_interrupt_enable(
			data->uart_handler,
			(AM_HAL_UART_INT_DMACPRIS | AM_HAL_UART_INT_DMAERIS));
		am_hal_uart_dma_transfer(data->uart_handler, &uart_rx);
		async_timer_start(&async->rx.timeout_work, async->rx.timeout);
	} else {
		uart_ambiq_async_rx_disable(dev);
	}
}
/*
 * Enable asynchronous (DMA-driven) reception into @p buf.
 *
 * @param dev     UART device instance
 * @param buf     Receive buffer; owned by the driver until released
 * @param len     Capacity of @p buf in bytes
 * @param timeout Idle timeout in microseconds used to flush partial data
 *                via UART_RX_RDY, or SYS_FOREVER_US to disable it
 *
 * @retval 0       reception enabled
 * @retval -EBUSY  DMA busy or RX already enabled
 * @retval -EINVAL the HAL rejected the transfer
 */
static int uart_ambiq_async_rx_enable(const struct device *dev, uint8_t *buf, size_t len,
				      int32_t timeout)
{
	struct uart_ambiq_data *data = dev->data;
	am_hal_uart_transfer_t uart_rx = {0};
	int ret = 0;

	if (!data->async.dma_rdy) {
		LOG_WRN("UART DMA busy");
		return -EBUSY;
	}

	if (data->async.rx.enabled) {
		LOG_WRN("RX was already enabled");
		return -EBUSY;
	}

	unsigned int key = irq_lock();

	data->async.dma_rdy = false;
	data->async.rx.enabled = true;
	data->async.rx.buf = buf;
	data->async.rx.len = len;
	data->async.rx.timeout = timeout;

	uart_rx.eDirection = AM_HAL_UART_RX;
	uart_rx.ui32NumBytes = len;
	uart_rx.pui32RxBuffer = (uint32_t *)buf;
	uart_rx.pfnCallback = uart_ambiq_async_rx_callback;
	uart_rx.pvContext = (void *)dev;

	/* Disable RX interrupts to let DMA to handle it */
	uart_ambiq_irq_rx_disable(dev);
	am_hal_uart_interrupt_enable(data->uart_handler,
				     (AM_HAL_UART_INT_DMACPRIS | AM_HAL_UART_INT_DMAERIS));

	if (am_hal_uart_dma_transfer(data->uart_handler, &uart_rx) != AM_HAL_STATUS_SUCCESS) {
		ret = -EINVAL;
		LOG_ERR("Error starting Rx DMA (%d)", ret);
		/* Roll back on failure (bug fix): the original left dma_rdy
		 * false and rx.enabled true here, so every later rx_enable()
		 * returned -EBUSY and async RX was permanently wedged.
		 */
		am_hal_uart_interrupt_disable(
			data->uart_handler,
			(AM_HAL_UART_INT_DMACPRIS | AM_HAL_UART_INT_DMAERIS));
		data->async.rx.enabled = false;
		data->async.dma_rdy = true;
		irq_unlock(key);
		return ret;
	}

	async_timer_start(&data->async.rx.timeout_work, timeout);

	/* Ask the application for a follow-up buffer right away */
	struct uart_event buf_req = {
		.type = UART_RX_BUF_REQUEST,
	};
	async_user_callback(dev, &buf_req);

	irq_unlock(key);
	LOG_DBG("async rx enabled");
	return ret;
}
/* Queue the application's replacement RX buffer in response to
 * UART_RX_BUF_REQUEST.
 *
 * Returns 0 on success, -EBUSY if a next buffer is already queued, or
 * -EACCES if reception is not currently enabled.
 */
static int uart_ambiq_async_rx_buf_rsp(const struct device *dev, uint8_t *buf, size_t len)
{
	struct uart_ambiq_data *data = dev->data;
	int ret = 0;

	LOG_DBG("replace buffer (%d)", len);

	unsigned int key = irq_lock();

	if (data->async.rx.next_buf != NULL) {
		ret = -EBUSY;
	} else if (!data->async.rx.enabled) {
		ret = -EACCES;
	} else {
		data->async.rx.next_len = len;
		data->async.rx.next_buf = buf;
	}

	irq_unlock(key);

	return ret;
}
/* Delayed-work handler for the RX idle timeout.
 * Fires when no RX interrupt re-armed the work within rx.timeout: snapshots
 * how many bytes the DMA has landed so far and delivers the not-yet-reported
 * portion via UART_RX_RDY, then advances rx.offset past the reported bytes.
 * DMA interrupts are masked around the snapshot to keep it consistent.
 */
static void uart_ambiq_async_rx_timeout(struct k_work *work)
{
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct uart_ambiq_async_rx *rx =
CONTAINER_OF(dwork, struct uart_ambiq_async_rx, timeout_work);
struct uart_ambiq_async_data *async = CONTAINER_OF(rx, struct uart_ambiq_async_data, rx);
struct uart_ambiq_data *data = CONTAINER_OF(async, struct uart_ambiq_data, async);
const struct uart_ambiq_config *config = data->async.uart_dev->config;
uint32_t total_rx;
LOG_DBG("rx timeout");
unsigned int key = irq_lock();
am_hal_uart_interrupt_disable(data->uart_handler,
(AM_HAL_UART_INT_DMACPRIS | AM_HAL_UART_INT_DMAERIS));
k_work_cancel_delayable(&data->async.rx.timeout_work);
irq_unlock(key);
/* Bytes received so far = requested length - DMA's remaining TOTCOUNT */
total_rx = async->rx.len - UARTn(config->inst_idx)->COUNT_b.TOTCOUNT;
if (total_rx > async->rx.offset) {
async->rx.counter = total_rx - async->rx.offset;
struct uart_event rdy_event = {
.type = UART_RX_RDY,
.data.rx.buf = async->rx.buf,
.data.rx.len = async->rx.counter,
.data.rx.offset = async->rx.offset,
};
async_user_callback(async->uart_dev, &rdy_event);
/* NOTE(review): dma_rdy is set true here while the DMA is still
 * running into this buffer — looks intended to allow tx/rx calls
 * after an idle flush, but verify it cannot race a new rx_enable().
 */
async->dma_rdy = true;
}
async->rx.offset += async->rx.counter;
async->rx.counter = 0;
am_hal_uart_interrupt_enable(data->uart_handler,
(AM_HAL_UART_INT_DMACPRIS | AM_HAL_UART_INT_DMAERIS));
}
#endif
static DEVICE_API(uart, uart_ambiq_driver_api) = {
.poll_in = uart_ambiq_poll_in,
.poll_out = uart_ambiq_poll_out,
.err_check = uart_ambiq_err_check,
#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
.configure = uart_ambiq_configure,
.config_get = uart_ambiq_config_get,
#endif
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
#define UART_AMBIQ_CONFIG_FUNC(n) \
static void uart_ambiq_irq_config_func_##n(const struct device *dev) \
{ \
IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), uart_ambiq_isr, \
DEVICE_DT_INST_GET(n), 0); \
irq_enable(DT_INST_IRQN(n)); \
.fifo_fill = uart_ambiq_fifo_fill,
.fifo_read = uart_ambiq_fifo_read,
.irq_tx_enable = uart_ambiq_irq_tx_enable,
.irq_tx_disable = uart_ambiq_irq_tx_disable,
.irq_tx_ready = uart_ambiq_irq_tx_ready,
.irq_rx_enable = uart_ambiq_irq_rx_enable,
.irq_rx_disable = uart_ambiq_irq_rx_disable,
.irq_tx_complete = uart_ambiq_irq_tx_complete,
.irq_rx_ready = uart_ambiq_irq_rx_ready,
.irq_err_enable = uart_ambiq_irq_err_enable,
.irq_err_disable = uart_ambiq_irq_err_disable,
.irq_is_pending = uart_ambiq_irq_is_pending,
.irq_update = uart_ambiq_irq_update,
.irq_callback_set = uart_ambiq_irq_callback_set,
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */
#ifdef CONFIG_UART_ASYNC_API
.callback_set = uart_ambiq_async_callback_set,
.tx = uart_ambiq_async_tx,
.tx_abort = uart_ambiq_async_tx_abort,
.rx_enable = uart_ambiq_async_rx_enable,
.rx_buf_rsp = uart_ambiq_async_rx_buf_rsp,
.rx_disable = uart_ambiq_async_rx_disable,
#endif /* CONFIG_UART_ASYNC_API */
};
#define UART_AMBIQ_DECLARE_CFG(n, IRQ_FUNC_INIT) \
static const struct uart_ambiq_config uart_ambiq_cfg_##n = { \
.base = DT_INST_REG_ADDR(n), \
.size = DT_INST_REG_SIZE(n), \
.inst_idx = (DT_INST_REG_ADDR(n) - UART0_BASE) / (UART1_BASE - UART0_BASE), \
.clk_src = DT_INST_PROP(n, clk_src), \
.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \
IRQ_FUNC_INIT}
#if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API)
#define UART_AMBIQ_CONFIG_FUNC(n) \
static void uart_ambiq_irq_config_func_##n(const struct device *dev) \
{ \
IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), uart_ambiq_isr, \
DEVICE_DT_INST_GET(n), 0); \
irq_enable(DT_INST_IRQN(n)); \
}
#define UART_AMBIQ_IRQ_CFG_FUNC_INIT(n) .irq_config_func = uart_ambiq_irq_config_func_##n
#define UART_AMBIQ_INIT_CFG(n) UART_AMBIQ_DECLARE_CFG(n, UART_AMBIQ_IRQ_CFG_FUNC_INIT(n))
#define UART_AMBIQ_INIT_CFG(n) UART_AMBIQ_DECLARE_CFG(n, UART_AMBIQ_IRQ_CFG_FUNC_INIT(n))
#else
#define UART_AMBIQ_CONFIG_FUNC(n)
#define UART_AMBIQ_IRQ_CFG_FUNC_INIT
#define UART_AMBIQ_INIT_CFG(n) UART_AMBIQ_DECLARE_CFG(n, UART_AMBIQ_IRQ_CFG_FUNC_INIT)
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */
#define UART_AMBIQ_INIT(n) \
PINCTRL_DT_INST_DEFINE(n); \
static struct uart_ambiq_data uart_ambiq_data_##n = { \
.uart_cfg = \
{ \
.baudrate = DT_INST_PROP(n, current_speed), \
.parity = UART_CFG_PARITY_NONE, \
.stop_bits = UART_CFG_STOP_BITS_1, \
.data_bits = UART_CFG_DATA_BITS_8, \
.flow_ctrl = DT_INST_PROP(n, hw_flow_control) \
? UART_CFG_FLOW_CTRL_RTS_CTS \
: UART_CFG_FLOW_CTRL_NONE, \
}, \
}; \
static const struct uart_ambiq_config uart_ambiq_cfg_##n; \
PM_DEVICE_DT_INST_DEFINE(n, uart_ambiq_pm_action); \
DEVICE_DT_INST_DEFINE(n, uart_ambiq_init, PM_DEVICE_DT_INST_GET(n), &uart_ambiq_data_##n, \
&uart_ambiq_cfg_##n, PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, \
&uart_ambiq_driver_api); \
UART_AMBIQ_CONFIG_FUNC(n) \
#define UART_AMBIQ_INIT(n) \
PINCTRL_DT_INST_DEFINE(n); \
static struct uart_ambiq_data uart_ambiq_data_##n = { \
.uart_cfg = \
{ \
.baudrate = DT_INST_PROP(n, current_speed), \
.parity = UART_CFG_PARITY_NONE, \
.stop_bits = UART_CFG_STOP_BITS_1, \
.data_bits = UART_CFG_DATA_BITS_8, \
.flow_ctrl = DT_INST_PROP(n, hw_flow_control) \
? UART_CFG_FLOW_CTRL_RTS_CTS \
: UART_CFG_FLOW_CTRL_NONE, \
}, \
}; \
static const struct uart_ambiq_config uart_ambiq_cfg_##n; \
PM_DEVICE_DT_INST_DEFINE(n, uart_ambiq_pm_action); \
DEVICE_DT_INST_DEFINE(n, uart_ambiq_init, PM_DEVICE_DT_INST_GET(n), &uart_ambiq_data_##n, \
&uart_ambiq_cfg_##n, PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, \
&uart_ambiq_driver_api); \
UART_AMBIQ_CONFIG_FUNC(n) \
UART_AMBIQ_INIT_CFG(n);
DT_INST_FOREACH_STATUS_OKAY(UART_AMBIQ_INIT)

2
west.yml

@@ -149,7 +149,7 @@ manifest:
groups:
- hal
- name: hal_ambiq
revision: 080be37fc674bea9b2e407c8959c3c42660aa7e7
revision: f46941f3427bbc05d893a601660e6e3cffe9e29d
path: modules/hal/ambiq
groups:
- hal

Loading…
Cancel
Save