Browse Source

drivers: uhc: rework transfer buffer handling

The current approach is a bit impractical in the upper layer.
This patch removes the two fifos that hold the transfer buffers
and replaces them with a byte array for the setup packet and
a pointer to a data buffer. The data buffer is mandatory for
all types of transfers except control without a data stage.
The waste of eight unused bytes for non-control transfers should
be insignificant, since an additional pointer would be at least
half of it, and then there would be the overhead of handling it.

This patch also cleans up the transfer flags, renames owner to callback
as it reflects the upper layer use case, and adds an additional member
to hold the pointer to the USB device (peripheral on the bus).

Signed-off-by: Johann Fischer <johann.fischer@nordicsemi.no>
pull/63348/head
Johann Fischer 2 years ago committed by Johan Hedberg
parent
commit
9cb777b95e
  1. 118
      drivers/usb/uhc/uhc_common.c
  2. 71
      drivers/usb/uhc/uhc_common.h
  3. 123
      drivers/usb/uhc/uhc_max3421e.c
  4. 139
      drivers/usb/uhc/uhc_virtual.c
  5. 88
      include/zephyr/drivers/usb/uhc.h
  6. 21
      subsys/usb/host/usbh_ch9.c
  7. 11
      subsys/usb/host/usbh_core.c
  8. 41
      subsys/usb/host/usbh_shell.c

118
drivers/usb/uhc/uhc_common.c

@@ -50,7 +50,6 @@ void uhc_xfer_return(const struct device *dev,
sys_dlist_remove(&xfer->node);
xfer->queued = 0;
xfer->claimed = 0;
xfer->err = err;
data->event_cb(dev, &drv_evt);
@@ -81,13 +80,25 @@ int uhc_xfer_append(const struct device *dev,
return 0;
}
struct net_buf *uhc_xfer_buf_alloc(const struct device *dev,
const size_t size)
{
return net_buf_alloc_len(&uhc_ep_pool, size, K_NO_WAIT);
}
void uhc_xfer_buf_free(const struct device *dev, struct net_buf *const buf)
{
net_buf_unref(buf);
}
struct uhc_transfer *uhc_xfer_alloc(const struct device *dev,
const uint8_t addr,
const uint8_t ep,
const uint8_t attrib,
const uint16_t mps,
const uint16_t timeout,
void *const owner)
void *const udev,
void *const cb)
{
const struct uhc_api *api = dev->api;
struct uhc_transfer *xfer = NULL;
@@ -98,8 +109,8 @@ struct uhc_transfer *uhc_xfer_alloc(const struct device *dev,
goto xfer_alloc_error;
}
LOG_DBG("Allocate xfer, ep 0x%02x attrib 0x%02x owner %p",
ep, attrib, owner);
LOG_DBG("Allocate xfer, ep 0x%02x attrib 0x%02x cb %p",
ep, attrib, cb);
if (k_mem_slab_alloc(&uhc_xfer_pool, (void **)&xfer, K_NO_WAIT)) {
LOG_ERR("Failed to allocate transfer");
@@ -107,14 +118,13 @@ struct uhc_transfer *uhc_xfer_alloc(const struct device *dev,
}
memset(xfer, 0, sizeof(struct uhc_transfer));
k_fifo_init(&xfer->queue);
k_fifo_init(&xfer->done);
xfer->addr = addr;
xfer->ep = ep;
xfer->attrib = attrib;
xfer->mps = mps;
xfer->timeout = timeout;
xfer->owner = owner;
xfer->udev = udev;
xfer->cb = cb;
xfer_alloc_error:
api->unlock(dev);
@@ -122,84 +132,70 @@ xfer_alloc_error:
return xfer;
}
int uhc_xfer_free(const struct device *dev, struct uhc_transfer *const xfer)
struct uhc_transfer *uhc_xfer_alloc_with_buf(const struct device *dev,
const uint8_t addr,
const uint8_t ep,
const uint8_t attrib,
const uint16_t mps,
const uint16_t timeout,
void *const udev,
void *const cb,
size_t size)
{
const struct uhc_api *api = dev->api;
struct uhc_transfer *xfer;
struct net_buf *buf;
int ret = 0;
api->lock(dev);
if (xfer->queued || xfer->claimed) {
ret = -EBUSY;
LOG_ERR("Transfer is still claimed");
goto xfer_free_error;
}
while (!k_fifo_is_empty(&xfer->queue)) {
buf = net_buf_get(&xfer->queue, K_NO_WAIT);
uhc_xfer_buf_free(dev, buf);
buf = uhc_xfer_buf_alloc(dev, size);
if (buf == NULL) {
return NULL;
}
while (!k_fifo_is_empty(&xfer->done)) {
buf = net_buf_get(&xfer->done, K_NO_WAIT);
uhc_xfer_buf_free(dev, buf);
xfer = uhc_xfer_alloc(dev, addr, ep, attrib, mps, timeout, udev, cb);
if (xfer == NULL) {
net_buf_unref(buf);
return NULL;
}
k_mem_slab_free(&uhc_xfer_pool, (void *)xfer);
xfer_free_error:
api->unlock(dev);
xfer->buf = buf;
return ret;
return xfer;
}
struct net_buf *uhc_xfer_buf_alloc(const struct device *dev,
struct uhc_transfer *const xfer,
const size_t size)
int uhc_xfer_free(const struct device *dev, struct uhc_transfer *const xfer)
{
const struct uhc_api *api = dev->api;
struct net_buf *buf = NULL;
int ret = 0;
api->lock(dev);
if (!uhc_is_initialized(dev)) {
goto buf_alloc_error;
}
if (xfer->queued || xfer->claimed) {
goto buf_alloc_error;
}
LOG_DBG("Allocate net_buf, ep 0x%02x, size %zd", xfer->ep, size);
buf = net_buf_alloc_len(&uhc_ep_pool, size, K_NO_WAIT);
if (!buf) {
LOG_ERR("Failed to allocate net_buf");
goto buf_alloc_error;
}
if (buf->size < size) {
LOG_ERR("Buffer is smaller than requested");
net_buf_unref(buf);
buf = NULL;
goto buf_alloc_error;
if (xfer->queued) {
ret = -EBUSY;
LOG_ERR("Transfer is still queued");
goto xfer_free_error;
}
k_fifo_put(&xfer->queue, &buf->node);
k_mem_slab_free(&uhc_xfer_pool, (void *)xfer);
buf_alloc_error:
xfer_free_error:
api->unlock(dev);
return buf;
return ret;
}
int uhc_xfer_buf_free(const struct device *dev, struct net_buf *const buf)
int uhc_xfer_buf_add(const struct device *dev,
struct uhc_transfer *const xfer,
struct net_buf *buf)
{
const struct uhc_api *api = dev->api;
int ret = 0;
api->lock(dev);
net_buf_unref(buf);
if (xfer->queued) {
ret = -EBUSY;
} else {
xfer->buf = buf;
}
api->unlock(dev);
return ret;
@@ -217,12 +213,13 @@ int uhc_ep_enqueue(const struct device *dev, struct uhc_transfer *const xfer)
goto ep_enqueue_error;
}
xfer->claimed = 1;
xfer->queued = 1;
ret = api->ep_enqueue(dev, xfer);
if (ret) {
xfer->claimed = 0;
xfer->queued = 0;
}
ep_enqueue_error:
api->unlock(dev);
@@ -242,6 +239,7 @@ int uhc_ep_dequeue(const struct device *dev, struct uhc_transfer *const xfer)
}
ret = api->ep_dequeue(dev, xfer);
xfer->queued = 0;
ep_dequeue_error:
api->unlock(dev);

71
drivers/usb/uhc/uhc_common.h

@@ -58,54 +58,6 @@ static inline int uhc_unlock_internal(const struct device *dev)
return k_mutex_unlock(&data->mutex);
}
/**
* @brief Checks if the transfer is queued.
*
* @param[in] xfer Pointer to UHC transfer
*
* @return true if transfer is queued, false otherwise
*/
static inline bool uhc_xfer_is_queued(struct uhc_transfer *xfer)
{
return xfer->queued;
}
/**
* @brief Helper function to set queued flag
*
* This function can be used by the driver to set queued flag
*
* @param[in] xfer Pointer to UHC transfer
*/
static inline void uhc_xfer_queued(struct uhc_transfer *xfer)
{
xfer->queued = true;
}
/**
* @brief Checks if the setup flag is set.
*
* @param[in] xfer Pointer to UHC transfer
*
* @return true if setup flag is set, false otherwise
*/
static inline bool uhc_xfer_is_setup(struct uhc_transfer *xfer)
{
return xfer->setup;
}
/**
* @brief Helper function to set setup flag
*
* This function can be used by the driver to set setup flag
*
* @param[in] xfer Pointer to UHC transfer
*/
static inline void uhc_xfer_setup(struct uhc_transfer *xfer)
{
xfer->setup = true;
}
/**
* @brief Helper function to return UHC transfer to a higher level.
*
@@ -119,29 +71,6 @@ void uhc_xfer_return(const struct device *dev,
struct uhc_transfer *const xfer,
const int err);
/**
* @brief Helper to move current buffer in the done-FIFO.
*
* Helper to move current buffer (probably completed) in the
* designated done-FIFO.
*
* @param[in] xfer Pointer to UHC transfer
*
* @return 0 on success, all other values should be treated as error.
* @retval -ENOMEM if there is no buffer in the queue
*/
static inline int uhc_xfer_done(struct uhc_transfer *xfer)
{
struct net_buf *buf;
buf = k_fifo_get(&xfer->queue, K_NO_WAIT);
if (buf) {
k_fifo_put(&xfer->done, &buf->node);
}
return buf == NULL ? -ENOMEM : 0;
}
/**
* @brief Helper to get next transfer to process.
*

123
drivers/usb/uhc/uhc_max3421e.c

@@ -310,28 +310,19 @@ static int max3421e_xfer_control(const struct device *dev,
const uint8_t hrsl)
{
struct max3421e_data *priv = uhc_get_private(dev);
struct net_buf *buf;
struct net_buf *buf = xfer->buf;
int ret;
/* Just restart if device NAKed packet */
if (uhc_xfer_is_queued(xfer) && HRSLT_IS_NAK(hrsl)) {
if (HRSLT_IS_NAK(hrsl)) {
return max3421e_hxfr_start(dev, priv->hxfr);
}
buf = k_fifo_peek_head(&xfer->queue);
if (buf == NULL) {
LOG_ERR("No buffers to handle");
return -ENODATA;
}
if (!uhc_xfer_is_queued(xfer) && xfer->setup) {
return -EINVAL;
}
if (!xfer->setup) {
/* Handle SETUP stage */
if (xfer->stage == UHC_CONTROL_STAGE_SETUP) {
LOG_DBG("Handle SETUP stage");
ret = max3421e_write(dev, MAX3421E_REG_SUDFIFO,
buf->data, MIN(buf->len, 8));
xfer->setup_pkt, sizeof(xfer->setup_pkt));
if (ret) {
return ret;
}
@@ -341,25 +332,26 @@ static int max3421e_xfer_control(const struct device *dev,
return ret;
}
uhc_xfer_setup(xfer);
uhc_xfer_queued(xfer);
return 0;
}
if (buf->size != 0) {
/* handle DATA stage */
ret = max3421e_xfer_data(dev, buf, xfer->ep);
} else {
/* handle ACK stage */
if (buf != NULL && xfer->stage == UHC_CONTROL_STAGE_DATA) {
LOG_DBG("Handle DATA stage");
return max3421e_xfer_data(dev, buf, xfer->ep);
}
if (xfer->stage == UHC_CONTROL_STAGE_STATUS) {
LOG_DBG("Handle STATUS stage");
if (USB_EP_DIR_IS_IN(xfer->ep)) {
ret = max3421e_hxfr_start(dev, MAX3421E_HXFR_HSOUT(0));
} else {
ret = max3421e_hxfr_start(dev, MAX3421E_HXFR_HSIN(0));
}
return ret;
}
return ret;
return -EINVAL;
}
static int max3421e_xfer_bulk(const struct device *dev,
@@ -367,26 +359,19 @@ static int max3421e_xfer_bulk(const struct device *dev,
const uint8_t hrsl)
{
struct max3421e_data *priv = uhc_get_private(dev);
struct net_buf *buf;
int ret;
struct net_buf *buf = xfer->buf;
/* Just restart if device NAKed packet */
if (uhc_xfer_is_queued(xfer) && HRSLT_IS_NAK(hrsl)) {
if (HRSLT_IS_NAK(hrsl)) {
return max3421e_hxfr_start(dev, priv->hxfr);
}
buf = k_fifo_peek_head(&xfer->queue);
if (buf == NULL) {
LOG_ERR("No buffers to handle");
LOG_ERR("No buffer to handle");
return -ENODATA;
}
ret = max3421e_xfer_data(dev, buf, xfer->ep);
if (!ret) {
uhc_xfer_queued(xfer);
}
return ret;
return max3421e_xfer_data(dev, buf, xfer->ep);
}
static int max3421e_schedule_xfer(const struct device *dev)
@@ -444,27 +429,27 @@ static int max3421e_hrslt_success(const struct device *dev)
{
struct max3421e_data *priv = uhc_get_private(dev);
struct uhc_transfer *const xfer = priv->last_xfer;
struct net_buf *buf;
struct net_buf *buf = xfer->buf;
bool finished = false;
int err = 0;
size_t len;
uint8_t bc;
buf = k_fifo_peek_head(&xfer->queue);
if (buf == NULL) {
return -ENODATA;
}
switch (MAX3421E_HXFR_TYPE(priv->hxfr)) {
case MAX3421E_HXFR_TYPE_SETUP:
err = uhc_xfer_done(xfer);
if (xfer->buf != NULL) {
xfer->stage = UHC_CONTROL_STAGE_DATA;
} else {
xfer->stage = UHC_CONTROL_STAGE_STATUS;
}
break;
case MAX3421E_HXFR_TYPE_HSOUT:
LOG_DBG("HSOUT");
err = uhc_xfer_done(xfer);
finished = true;
break;
case MAX3421E_HXFR_TYPE_HSIN:
LOG_DBG("HSIN");
err = uhc_xfer_done(xfer);
finished = true;
break;
case MAX3421E_HXFR_TYPE_ISOOUT:
LOG_ERR("ISO OUT is not implemented");
@@ -477,7 +462,11 @@ static int max3421e_hrslt_success(const struct device *dev)
case MAX3421E_HXFR_TYPE_BULKOUT:
if (buf->len == 0) {
LOG_INF("hrslt bulk out %u", buf->len);
err = uhc_xfer_done(xfer);
if (xfer->ep == USB_CONTROL_EP_OUT) {
xfer->stage = UHC_CONTROL_STAGE_STATUS;
} else {
finished = true;
}
}
break;
case MAX3421E_HXFR_TYPE_BULKIN:
@@ -502,11 +491,25 @@ static int max3421e_hrslt_success(const struct device *dev)
if (bc < MAX3421E_MAX_EP_SIZE || !net_buf_tailroom(buf)) {
LOG_INF("hrslt bulk in %u, %u", bc, len);
err = uhc_xfer_done(xfer);
if (xfer->ep == USB_CONTROL_EP_IN) {
xfer->stage = UHC_CONTROL_STAGE_STATUS;
} else {
finished = true;
}
}
break;
}
if (finished) {
LOG_DBG("Transfer finished");
uhc_xfer_return(dev, xfer, 0);
priv->last_xfer = NULL;
}
if (err) {
max3421e_xfer_drop_active(dev, err);
}
return err;
}
@@ -515,27 +518,13 @@ static int max3421e_handle_hxfrdn(const struct device *dev)
struct max3421e_data *priv = uhc_get_private(dev);
struct uhc_transfer *const xfer = priv->last_xfer;
const uint8_t hrsl = priv->hrsl;
int ret;
int ret = 0;
if (xfer == NULL) {
LOG_ERR("No transfers to handle");
return -ENODATA;
}
/* If an active xfer is not marked then something has gone wrong */
if (!uhc_xfer_is_queued(xfer)) {
LOG_ERR("Active transfer not queued");
max3421e_xfer_drop_active(dev, -EINVAL);
return -EINVAL;
}
/* There should always be a buffer in the fifo when a xfer is active */
if (k_fifo_is_empty(&xfer->queue)) {
LOG_ERR("No buffers to handle");
max3421e_xfer_drop_active(dev, -ENODATA);
return -ENODATA;
}
switch (MAX3421E_HRSLT(hrsl)) {
case MAX3421E_HR_NAK:
/*
@@ -549,27 +538,15 @@ static int max3421e_handle_hxfrdn(const struct device *dev)
max3421e_xfer_drop_active(dev, -ETIMEDOUT);
}
ret = 0;
break;
case MAX3421E_HR_STALL:
max3421e_xfer_drop_active(dev, -EPIPE);
ret = 0;
break;
case MAX3421E_HR_TOGERR:
LOG_WRN("Toggle error");
ret = 0;
break;
case MAX3421E_HR_SUCCESS:
ret = max3421e_hrslt_success(dev);
if (ret) {
max3421e_xfer_drop_active(dev, ret);
} else {
if (k_fifo_is_empty(&xfer->queue)) {
uhc_xfer_return(dev, xfer, 0);
priv->last_xfer = NULL;
}
}
break;
default:
/* TODO: Handle all reasonable result codes */
@@ -578,7 +555,7 @@ static int max3421e_handle_hxfrdn(const struct device *dev)
break;
}
return 0;
return ret;
}
static void max3421e_handle_condet(const struct device *dev)

139
drivers/usb/uhc/uhc_virtual.c

@@ -78,24 +78,16 @@ static int vrt_xfer_control(const struct device *dev,
struct uhc_transfer *const xfer)
{
struct uhc_vrt_data *priv = uhc_get_private(dev);
struct net_buf *buf = xfer->buf;
struct uvb_packet *uvb_pkt;
struct net_buf *buf;
uint8_t *data = NULL;
size_t length = 0;
buf = k_fifo_peek_head(&xfer->queue);
if (buf == NULL) {
LOG_ERR("No buffers to handle");
return -ENODATA;
}
if (!uhc_xfer_is_queued(xfer) && xfer->setup) {
return -EINVAL;
}
if (!xfer->setup) {
if (xfer->stage == UHC_CONTROL_STAGE_SETUP) {
LOG_DBG("Handle SETUP stage");
uvb_pkt = uvb_alloc_pkt(UVB_REQUEST_SETUP,
xfer->addr, USB_CONTROL_EP_OUT,
buf->data, buf->len);
xfer->setup_pkt, sizeof(xfer->setup_pkt));
if (uvb_pkt == NULL) {
LOG_ERR("Failed to allocate UVB packet");
return -ENOMEM;
@@ -103,17 +95,11 @@ static int vrt_xfer_control(const struct device *dev,
priv->req = UVB_REQUEST_SETUP;
priv->busy = true;
uhc_xfer_setup(xfer);
uhc_xfer_queued(xfer);
return uvb_advert_pkt(priv->host_node, uvb_pkt);
}
if (buf->size != 0) {
uint8_t *data;
size_t length;
LOG_DBG("Handle DATA stage");
if (buf != NULL && xfer->stage == UHC_CONTROL_STAGE_DATA) {
if (USB_EP_DIR_IS_IN(xfer->ep)) {
length = MIN(net_buf_tailroom(buf), xfer->mps);
data = net_buf_tail(buf);
@@ -122,6 +108,7 @@ static int vrt_xfer_control(const struct device *dev,
data = buf->data;
}
LOG_DBG("Handle DATA stage");
uvb_pkt = uvb_alloc_pkt(UVB_REQUEST_DATA,
xfer->addr, xfer->ep,
data, length);
@@ -132,7 +119,11 @@ static int vrt_xfer_control(const struct device *dev,
priv->req = UVB_REQUEST_DATA;
priv->busy = true;
} else {
return uvb_advert_pkt(priv->host_node, uvb_pkt);
}
if (xfer->stage == UHC_CONTROL_STAGE_STATUS) {
uint8_t ep;
LOG_DBG("Handle STATUS stage");
@@ -144,7 +135,7 @@ static int vrt_xfer_control(const struct device *dev,
uvb_pkt = uvb_alloc_pkt(UVB_REQUEST_DATA,
xfer->addr, ep,
buf->data, 0);
NULL, 0);
if (uvb_pkt == NULL) {
LOG_ERR("Failed to allocate UVB packet");
return -ENOMEM;
@@ -152,26 +143,21 @@ static int vrt_xfer_control(const struct device *dev,
priv->req = UVB_REQUEST_DATA;
priv->busy = true;
return uvb_advert_pkt(priv->host_node, uvb_pkt);
}
return uvb_advert_pkt(priv->host_node, uvb_pkt);
return -EINVAL;
}
static int vrt_xfer_bulk(const struct device *dev,
struct uhc_transfer *const xfer)
{
struct uhc_vrt_data *priv = uhc_get_private(dev);
struct net_buf *buf = xfer->buf;
struct uvb_packet *uvb_pkt;
struct net_buf *buf;
uint8_t *data;
size_t length;
int ret;
buf = k_fifo_peek_head(&xfer->queue);
if (buf == NULL) {
LOG_ERR("No buffers to handle");
return -ENODATA;
}
if (USB_EP_DIR_IS_IN(xfer->ep)) {
length = MIN(net_buf_tailroom(buf), xfer->mps);
@@ -188,12 +174,7 @@ static int vrt_xfer_bulk(const struct device *dev,
return -ENOMEM;
}
ret = uvb_advert_pkt(priv->host_node, uvb_pkt);
if (!ret) {
uhc_xfer_queued(xfer);
}
return ret;
return uvb_advert_pkt(priv->host_node, uvb_pkt);
}
static int vrt_schedule_xfer(const struct device *dev)
@@ -218,31 +199,41 @@ static int vrt_schedule_xfer(const struct device *dev)
return vrt_xfer_bulk(dev, priv->last_xfer);
}
static int vrt_hrslt_success(const struct device *dev,
struct uvb_packet *const pkt)
static void vrt_hrslt_success(const struct device *dev,
struct uvb_packet *const pkt)
{
struct uhc_vrt_data *priv = uhc_get_private(dev);
struct uhc_transfer *const xfer = priv->last_xfer;
struct net_buf *buf;
struct net_buf *buf = xfer->buf;
bool finished = false;
size_t length;
int err = 0;
buf = k_fifo_peek_head(&xfer->queue);
if (buf == NULL) {
return -ENODATA;
}
switch (pkt->request) {
case UVB_REQUEST_SETUP:
err = uhc_xfer_done(xfer);
if (xfer->buf != NULL) {
xfer->stage = UHC_CONTROL_STAGE_DATA;
} else {
xfer->stage = UHC_CONTROL_STAGE_STATUS;
}
break;
case UVB_REQUEST_DATA:
if (xfer->stage == UHC_CONTROL_STAGE_STATUS) {
LOG_DBG("Status stage finished");
finished = true;
break;
}
if (USB_EP_DIR_IS_OUT(pkt->ep)) {
length = MIN(buf->len, xfer->mps);
net_buf_pull(buf, length);
LOG_DBG("OUT chunk %zu out of %u", length, buf->len);
if (buf->len == 0) {
err = uhc_xfer_done(xfer);
if (pkt->ep == USB_CONTROL_EP_OUT) {
xfer->stage = UHC_CONTROL_STAGE_STATUS;
} else {
finished = true;
}
}
} else {
length = MIN(net_buf_tailroom(buf), pkt->length);
@@ -254,13 +245,21 @@ static int vrt_hrslt_success(const struct device *dev,
LOG_DBG("IN chunk %zu out of %zu", length, net_buf_tailroom(buf));
if (pkt->length < xfer->mps || !net_buf_tailroom(buf)) {
err = uhc_xfer_done(xfer);
if (pkt->ep == USB_CONTROL_EP_IN) {
xfer->stage = UHC_CONTROL_STAGE_STATUS;
} else {
finished = true;
}
}
}
break;
}
return err;
if (finished) {
LOG_DBG("Transfer finished");
uhc_xfer_return(dev, xfer, 0);
priv->last_xfer = NULL;
}
}
static void vrt_xfer_drop_active(const struct device *dev, int err)
@@ -278,7 +277,7 @@ static int vrt_handle_reply(const struct device *dev,
{
struct uhc_vrt_data *priv = uhc_get_private(dev);
struct uhc_transfer *const xfer = priv->last_xfer;
int ret;
int ret = 0;
if (xfer == NULL) {
LOG_ERR("No transfers to handle");
@@ -286,47 +285,19 @@ static int vrt_handle_reply(const struct device *dev,
goto handle_reply_err;
}
/* If an active xfer is not marked then something has gone wrong */
if (!uhc_xfer_is_queued(xfer)) {
LOG_ERR("Active transfer not queued");
vrt_xfer_drop_active(dev, -EINVAL);
ret = -EINVAL;
goto handle_reply_err;
}
/* There should always be a buffer in the fifo when a xfer is active */
if (k_fifo_is_empty(&xfer->queue)) {
LOG_ERR("No buffers to handle");
vrt_xfer_drop_active(dev, -ENODATA);
ret = -ENODATA;
goto handle_reply_err;
}
priv->busy = false;
switch (pkt->reply) {
case UVB_REPLY_NACK:
/* Restart last transaction */
priv->busy = false;
break;
case UVB_REPLY_STALL:
vrt_xfer_drop_active(dev, -EPIPE);
priv->busy = false;
break;
case UVB_REPLY_ACK:
ret = vrt_hrslt_success(dev, pkt);
priv->busy = false;
if (ret) {
vrt_xfer_drop_active(dev, ret);
} else {
if (k_fifo_is_empty(&xfer->queue)) {
LOG_DBG("Transfer done");
uhc_xfer_return(dev, xfer, 0);
priv->last_xfer = NULL;
}
}
vrt_hrslt_success(dev, pkt);
break;
default:
priv->busy = false;
vrt_xfer_drop_active(dev, -EINVAL);
ret = -EINVAL;
break;
@@ -349,7 +320,11 @@ static void xfer_work_handler(struct k_work *work)
switch (ev->type) {
case UHC_VRT_EVT_REPLY:
vrt_handle_reply(dev, ev->pkt);
err = vrt_handle_reply(dev, ev->pkt);
if (unlikely(err)) {
uhc_submit_event(dev, UHC_EVT_ERROR, err);
}
schedule = true;
break;
case UHC_VRT_EVT_XFER:

88
include/zephyr/drivers/usb/uhc.h

@@ -25,6 +25,15 @@
* @{
*/
/**
* @brief USB control transfer stage
*/
enum uhc_control_stage {
UHC_CONTROL_STAGE_SETUP = 0,
UHC_CONTROL_STAGE_DATA,
UHC_CONTROL_STAGE_STATUS,
};
/**
* UHC endpoint buffer info
*
@@ -38,10 +47,10 @@
struct uhc_transfer {
/** dlist node */
sys_dnode_t node;
/** FIFO requests to process */
struct k_fifo queue;
/** FIFO to keep completed requests */
struct k_fifo done;
/** Control transfer setup packet */
uint8_t setup_pkt[8];
/** Transfer data buffer */
struct net_buf *buf;
/** Device (peripheral) address */
uint8_t addr;
/** Endpoint to which request is associated */
@@ -52,14 +61,14 @@ struct uhc_transfer {
uint16_t mps;
/** Timeout in number of frames */
uint16_t timeout;
/** Flag marks request buffer claimed by the controller */
unsigned int claimed : 1;
/** Flag marks request buffer is queued */
unsigned int queued : 1;
/** Flag marks setup stage of transfer */
unsigned int setup : 1;
/** Transfer owner */
void *owner;
/** Control stage status, up to the driver to use it or not */
unsigned int stage : 2;
/** Pointer to USB device (opaque for the UHC) */
void *udev;
/** Pointer to transfer completion callback (opaque for the UHC) */
void *cb;
/** Transfer result, 0 on success, other values on error */
int err;
};
@@ -321,8 +330,8 @@ static inline int uhc_bus_resume(const struct device *dev)
* @brief Allocate UHC transfer
*
* Allocate a new transfer from common transfer pool.
* Transfer has no buffers after allocation, these can be
* requested and assigned separately.
* Transfer has no buffer after allocation, but can be allocated
* and added from different pools.
*
* @param[in] dev Pointer to device struct of the driver instance
* @param[in] addr Device (peripheral) address
@@ -330,7 +339,8 @@ static inline int uhc_bus_resume(const struct device *dev)
* @param[in] attrib Endpoint attributes
* @param[in] mps Maximum packet size of the endpoint
* @param[in] timeout Timeout in number of frames
* @param[in] owner Transfer owner
* @param[in] udev Opaque pointer to USB device
* @param[in] cb Transfer completion callback
*
* @return pointer to allocated transfer or NULL on error.
*/
@@ -340,7 +350,35 @@ struct uhc_transfer *uhc_xfer_alloc(const struct device *dev,
const uint8_t attrib,
const uint16_t mps,
const uint16_t timeout,
void *const owner);
void *const udev,
void *const cb);
/**
* @brief Allocate UHC transfer with buffer
*
* Allocate a new transfer from common transfer pool with buffer.
*
* @param[in] dev Pointer to device struct of the driver instance
* @param[in] addr Device (peripheral) address
* @param[in] ep Endpoint address
* @param[in] attrib Endpoint attributes
* @param[in] mps Maximum packet size of the endpoint
* @param[in] timeout Timeout in number of frames
* @param[in] udev Opaque pointer to USB device
* @param[in] cb Transfer completion callback
* @param[in] size Size of the buffer
*
* @return pointer to allocated transfer or NULL on error.
*/
struct uhc_transfer *uhc_xfer_alloc_with_buf(const struct device *dev,
const uint8_t addr,
const uint8_t ep,
const uint8_t attrib,
const uint16_t mps,
const uint16_t timeout,
void *const udev,
void *const cb,
size_t size);
/**
* @brief Free UHC transfer and any buffers
@@ -355,20 +393,32 @@ struct uhc_transfer *uhc_xfer_alloc(const struct device *dev,
int uhc_xfer_free(const struct device *dev,
struct uhc_transfer *const xfer);
/**
* @brief Add UHC transfer buffer
*
* Add a previously allocated buffer to the transfer.
*
* @param[in] dev Pointer to device struct of the driver instance
* @param[in] xfer Pointer to UHC transfer
* @param[in] buf Pointer to UHC request buffer
*
* @return pointer to allocated request or NULL on error.
*/
int uhc_xfer_buf_add(const struct device *dev,
struct uhc_transfer *const xfer,
struct net_buf *buf);
/**
* @brief Allocate UHC transfer buffer
*
* Allocate a new buffer from common request buffer pool and
* assign it to the transfer.
* assign it to the transfer if the xfer parameter is not NULL.
*
* @param[in] dev Pointer to device struct of the driver instance
* @param[in] xfer Pointer to UHC transfer
* @param[in] size Size of the request buffer
*
* @return pointer to allocated request or NULL on error.
*/
struct net_buf *uhc_xfer_buf_alloc(const struct device *dev,
struct uhc_transfer *const xfer,
const size_t size);
/**
@@ -378,10 +428,8 @@ struct net_buf *uhc_xfer_buf_alloc(const struct device *dev,
*
* @param[in] dev Pointer to device struct of the driver instance
* @param[in] buf Pointer to UHC request buffer
*
* @return 0 on success, all other values should be treated as error.
*/
int uhc_xfer_buf_free(const struct device *dev, struct net_buf *const buf);
void uhc_xfer_buf_free(const struct device *dev, struct net_buf *const buf);
/**
* @brief Queue USB host controller transfer

21
subsys/usb/host/usbh_ch9.c

@@ -37,21 +37,15 @@ int usbh_req_setup(const struct device *dev,
uint8_t ep = usb_reqtype_is_to_device(&req) ? 0x00 : 0x80;
int ret;
xfer = uhc_xfer_alloc(dev, addr, ep, 0, 64, SETUP_REQ_TIMEOUT, NULL);
xfer = uhc_xfer_alloc(dev, addr, ep, 0, 64, SETUP_REQ_TIMEOUT, NULL, NULL);
if (!xfer) {
return -ENOMEM;
}
buf = uhc_xfer_buf_alloc(dev, xfer, sizeof(req));
if (!buf) {
ret = -ENOMEM;
goto buf_alloc_err;
}
net_buf_add_mem(buf, &req, sizeof(req));
memcpy(xfer->setup_pkt, &req, sizeof(req));
if (wLength) {
buf = uhc_xfer_buf_alloc(dev, xfer, wLength);
buf = uhc_xfer_buf_alloc(dev, wLength);
if (!buf) {
ret = -ENOMEM;
goto buf_alloc_err;
@@ -60,12 +54,11 @@ int usbh_req_setup(const struct device *dev,
if (usb_reqtype_is_to_device(&req) && data != NULL) {
net_buf_add_mem(buf, data, wLength);
}
}
buf = uhc_xfer_buf_alloc(dev, xfer, 0);
if (!buf) {
ret = -ENOMEM;
goto buf_alloc_err;
ret = uhc_xfer_buf_add(dev, xfer, buf);
if (ret) {
goto buf_alloc_err;
}
}
return uhc_ep_enqueue(dev, xfer);

11
subsys/usb/host/usbh_core.c

@@ -39,14 +39,9 @@ static int event_ep_request(struct usbh_contex *const ctx,
return class_data->request(ctx, event->xfer, event->status);
}
while (!k_fifo_is_empty(&xfer->done)) {
struct net_buf *buf;
buf = net_buf_get(&xfer->done, K_NO_WAIT);
if (buf) {
LOG_HEXDUMP_INF(buf->data, buf->len, "buf");
uhc_xfer_buf_free(dev, buf);
}
if (xfer->buf) {
LOG_HEXDUMP_INF(xfer->buf->data, xfer->buf->len, "buf");
uhc_xfer_buf_free(dev, xfer->buf);
}
return uhc_xfer_free(dev, xfer);

41
subsys/usb/host/usbh_shell.c

@@ -107,27 +107,22 @@ static int bazfoo_request(struct usbh_contex *const ctx,
shell_info(ctx_shell, "host: transfer finished %p, err %d", xfer, err);
while (!k_fifo_is_empty(&xfer->done)) {
struct net_buf *buf;
buf = net_buf_get(&xfer->done, K_NO_WAIT);
if (buf) {
/*
* FIXME: We do not distinguish the context
* of the request and always try to print it
* as descriptor first. If it is not a known descriptor,
* we show a hexdump in any case.
* This is just simple enough for first steps and will
* be revised with coming peripheral device management.
*/
if (xfer->ep == USB_CONTROL_EP_IN) {
print_desc(ctx_shell, buf);
} else {
shell_hexdump(ctx_shell, buf->data, buf->len);
}
uhc_xfer_buf_free(dev, buf);
if (xfer->buf) {
/*
* FIXME: We do not distinguish the context
* of the request and always try to print it
* as descriptor first. If it is not a known descriptor,
* we show a hexdump in any case.
* This is just simple enough for first steps and will
* be revised with coming peripheral device management.
*/
if (xfer->ep == USB_CONTROL_EP_IN) {
print_desc(ctx_shell, xfer->buf);
} else {
shell_hexdump(ctx_shell, xfer->buf->data, xfer->buf->len);
}
uhc_xfer_buf_free(dev, xfer->buf);
}
return uhc_xfer_free(dev, xfer);
@@ -191,12 +186,12 @@ static int cmd_bulk(const struct shell *sh, size_t argc, char **argv)
ep = strtol(argv[2], NULL, 16);
len = MIN(sizeof(vreq_test_buf), strtol(argv[3], NULL, 10));
xfer = uhc_xfer_alloc(uhs_ctx.dev, addr, ep, 0, 512, 10, NULL);
xfer = uhc_xfer_alloc(uhs_ctx.dev, addr, ep, 0, 512, 10, NULL, NULL);
if (!xfer) {
return -ENOMEM;
}
buf = uhc_xfer_buf_alloc(uhs_ctx.dev, xfer, len);
buf = uhc_xfer_buf_alloc(uhs_ctx.dev, len);
if (!buf) {
return -ENOMEM;
}
@@ -205,6 +200,8 @@ static int cmd_bulk(const struct shell *sh, size_t argc, char **argv)
net_buf_add_mem(buf, vreq_test_buf, len);
}
uhc_xfer_buf_add(uhs_ctx.dev, xfer, buf);
return uhc_ep_enqueue(uhs_ctx.dev, xfer);
}

Loading…
Cancel
Save