Commit 83472457 authored by Alexandre Bounine's avatar Alexandre Bounine Committed by Linus Torvalds

rapidio/tsi721_dma: update error reporting from prep_sg callback

Switch to returning error-valued pointer instead of simple NULL pointer.
This allows the caller to properly identify the situation when the request
queue is full, and therefore gives the upper layer an option to retry the operation later.
Signed-off-by: Alexandre Bounine <alexandre.bounine@idt.com>
Cc: Matt Porter <mporter@kernel.crashing.org>
Cc: Aurelien Jacquiot <a-jacquiot@ti.com>
Cc: Andre van Herk <andre.van.herk@prodrive-technologies.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 72d8a0d2
...@@ -767,7 +767,7 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan, ...@@ -767,7 +767,7 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
void *tinfo) void *tinfo)
{ {
struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
struct tsi721_tx_desc *desc, *_d; struct tsi721_tx_desc *desc;
struct rio_dma_ext *rext = tinfo; struct rio_dma_ext *rext = tinfo;
enum dma_rtype rtype; enum dma_rtype rtype;
struct dma_async_tx_descriptor *txd = NULL; struct dma_async_tx_descriptor *txd = NULL;
...@@ -775,7 +775,7 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan, ...@@ -775,7 +775,7 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
if (!sgl || !sg_len) { if (!sgl || !sg_len) {
tsi_err(&dchan->dev->device, "DMAC%d No SG list", tsi_err(&dchan->dev->device, "DMAC%d No SG list",
bdma_chan->id); bdma_chan->id);
return NULL; return ERR_PTR(-EINVAL);
} }
tsi_debug(DMA, &dchan->dev->device, "DMAC%d %s", bdma_chan->id, tsi_debug(DMA, &dchan->dev->device, "DMAC%d %s", bdma_chan->id,
...@@ -800,13 +800,14 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan, ...@@ -800,13 +800,14 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
tsi_err(&dchan->dev->device, tsi_err(&dchan->dev->device,
"DMAC%d Unsupported DMA direction option", "DMAC%d Unsupported DMA direction option",
bdma_chan->id); bdma_chan->id);
return NULL; return ERR_PTR(-EINVAL);
} }
spin_lock_bh(&bdma_chan->lock); spin_lock_bh(&bdma_chan->lock);
list_for_each_entry_safe(desc, _d, &bdma_chan->free_list, desc_node) { if (!list_empty(&bdma_chan->free_list)) {
if (async_tx_test_ack(&desc->txd)) { desc = list_first_entry(&bdma_chan->free_list,
struct tsi721_tx_desc, desc_node);
list_del_init(&desc->desc_node); list_del_init(&desc->desc_node);
desc->destid = rext->destid; desc->destid = rext->destid;
desc->rio_addr = rext->rio_addr; desc->rio_addr = rext->rio_addr;
...@@ -816,12 +817,16 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan, ...@@ -816,12 +817,16 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
desc->sg = sgl; desc->sg = sgl;
txd = &desc->txd; txd = &desc->txd;
txd->flags = flags; txd->flags = flags;
break;
}
} }
spin_unlock_bh(&bdma_chan->lock); spin_unlock_bh(&bdma_chan->lock);
if (!txd) {
tsi_debug(DMA, &dchan->dev->device,
"DMAC%d free TXD is not available", bdma_chan->id);
return ERR_PTR(-EBUSY);
}
return txd; return txd;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment