Commit 83dc311c authored by Linus Torvalds

Merge tag 'dmaengine-fix-4.3-rc4' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine fixes from Vinod Koul:
 "This contains fixes spread throughout the drivers, and also fixes one
  more instance of privatecnt in dmaengine.

  Driver fixes summary:
   - a bunch of pxa_dma fixes: descriptor reuse, residue reporting and
     the no-requestor case
   - odd fixes in xgene, idma64, sun4i and zxdma
   - at_xdmac fixes for cleaning the used descriptor and for the block
     increment addressing mode"

* tag 'dmaengine-fix-4.3-rc4' of git://git.infradead.org/users/vkoul/slave-dma:
  dmaengine: pxa_dma: fix residue corner case
  dmaengine: pxa_dma: fix the no-requestor case
  dmaengine: zxdma: Fix off-by-one for testing valid pchan request
  dmaengine: at_xdmac: clean used descriptor
  dmaengine: at_xdmac: change block increment addressing mode
  dmaengine: dw: properly read DWC_PARAMS register
  dmaengine: xgene-dma: Fix overwritting DMA tx ring
  dmaengine: fix balance of privatecnt
  dmaengine: sun4i: fix unsafe list iteration
  dmaengine: idma64: improve residue estimation
  dmaengine: xgene-dma: fix handling xgene_dma_get_ring_size result
  dmaengine: pxa_dma: fix initial list move
parents 27728bf0 7b09a1bb
@@ -455,6 +455,15 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
 	return desc;
 }
 
+void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
+{
+	memset(&desc->lld, 0, sizeof(desc->lld));
+	INIT_LIST_HEAD(&desc->descs_list);
+	desc->direction = DMA_TRANS_NONE;
+	desc->xfer_size = 0;
+	desc->active_xfer = false;
+}
+
 /* Call must be protected by lock. */
 static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
 {
@@ -466,7 +475,7 @@ static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
 		desc = list_first_entry(&atchan->free_descs_list,
 					struct at_xdmac_desc, desc_node);
 		list_del(&desc->desc_node);
-		desc->active_xfer = false;
+		at_xdmac_init_used_desc(desc);
 	}
 
 	return desc;
@@ -875,14 +884,14 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
 
 	if (xt->src_inc) {
 		if (xt->src_sgl)
-			chan_cc |= AT_XDMAC_CC_SAM_UBS_DS_AM;
+			chan_cc |= AT_XDMAC_CC_SAM_UBS_AM;
 		else
 			chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM;
 	}
 
 	if (xt->dst_inc) {
 		if (xt->dst_sgl)
-			chan_cc |= AT_XDMAC_CC_DAM_UBS_DS_AM;
+			chan_cc |= AT_XDMAC_CC_DAM_UBS_AM;
 		else
 			chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM;
 	}
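
Note: the point of the new at_xdmac_init_used_desc() is that a recycled descriptor must be reset wholesale; clearing only active_xfer left stale linked-list contents behind for the next transfer. A minimal standalone C sketch of the pattern, with illustrative types that are not the driver's:

```c
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Hypothetical stand-ins for the driver's descriptor types. */
struct lld {
	unsigned int mbr_ubc, mbr_sa, mbr_da, mbr_nda;
};

struct desc {
	struct lld lld;     /* what the controller re-reads on reuse */
	size_t xfer_size;
	bool active_xfer;
};

/*
 * Reset everything the previous transfer may have written. Clearing
 * only active_xfer (the old behaviour) leaves stale lld contents that
 * the controller would chain into the next transfer.
 */
static void init_used_desc(struct desc *d)
{
	memset(&d->lld, 0, sizeof(d->lld));
	d->xfer_size = 0;
	d->active_xfer = false;
}
```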
...
@@ -554,10 +554,18 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
 	mutex_lock(&dma_list_mutex);
 
 	if (chan->client_count == 0) {
+		struct dma_device *device = chan->device;
+
+		dma_cap_set(DMA_PRIVATE, device->cap_mask);
+		device->privatecnt++;
 		err = dma_chan_get(chan);
-		if (err)
+		if (err) {
 			pr_debug("%s: failed to get %s: (%d)\n",
 				__func__, dma_chan_name(chan), err);
+			chan = NULL;
+			if (--device->privatecnt == 0)
+				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
+		}
 	} else
 		chan = NULL;
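
Note: the invariant here is that every privatecnt increment needs a matching decrement on the failure path, with DMA_PRIVATE cleared when the last private reference goes away. A simplified userspace model of that bookkeeping (names are mine, not the dmaengine API):

```c
#include <stdbool.h>

/* Hypothetical model of the DMA_PRIVATE refcounting. */
struct dev_model {
	int privatecnt;    /* private references held on the device */
	bool private_cap;  /* models DMA_PRIVATE in cap_mask */
};

static int acquire_channel(struct dev_model *dev, int (*chan_get)(void))
{
	/* Take the private reference before the call that can fail... */
	dev->private_cap = true;
	dev->privatecnt++;

	int err = chan_get();
	if (err) {
		/* ...and release it on failure, dropping the capability
		 * bit together with the last private reference. */
		if (--dev->privatecnt == 0)
			dev->private_cap = false;
	}
	return err;
}
```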
...
@@ -1591,7 +1591,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 	INIT_LIST_HEAD(&dw->dma.channels);
 	for (i = 0; i < nr_channels; i++) {
 		struct dw_dma_chan *dwc = &dw->chan[i];
-		int r = nr_channels - i - 1;
 
 		dwc->chan.device = &dw->dma;
 		dma_cookie_init(&dwc->chan);
@@ -1603,7 +1602,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 
 		/* 7 is highest priority & 0 is lowest. */
 		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
-			dwc->priority = r;
+			dwc->priority = nr_channels - i - 1;
 		else
 			dwc->priority = i;
 
@@ -1622,6 +1621,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 		/* Hardware configuration */
 		if (autocfg) {
 			unsigned int dwc_params;
+			unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
 			void __iomem *addr = chip->regs + r * sizeof(u32);
 
 			dwc_params = dma_read_byaddr(addr, DWC_PARAMS);
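
Note: the DWC_PARAMS words sit in fixed slots counted down from the top of a block sized for the hardware maximum, so the slot index must come from DW_DMA_MAX_NR_CHANNELS, not from however many channels this instance probes. A small sketch of the addressing, with an assumed maximum of 8:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_NR_CHANNELS 8	/* models DW_DMA_MAX_NR_CHANNELS */

/* Slot index counted down from the top of the fixed-size block. */
static size_t dwc_params_offset(unsigned int i)
{
	unsigned int r = MAX_NR_CHANNELS - i - 1;
	return r * sizeof(uint32_t);
}

int main(void)
{
	/* Even with only 2 channels in use, channel 0 reads slot 7,
	 * not slot 1 as the old nr_channels-based index implied. */
	printf("chan 0 -> offset %zu\n", dwc_params_offset(0));
	printf("chan 1 -> offset %zu\n", dwc_params_offset(1));
	return 0;
}
```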
...
@@ -355,23 +355,23 @@ static size_t idma64_active_desc_size(struct idma64_chan *idma64c)
 	struct idma64_desc *desc = idma64c->desc;
 	struct idma64_hw_desc *hw;
 	size_t bytes = desc->length;
-	u64 llp;
-	u32 ctlhi;
+	u64 llp = channel_readq(idma64c, LLP);
+	u32 ctlhi = channel_readl(idma64c, CTL_HI);
 	unsigned int i = 0;
 
-	llp = channel_readq(idma64c, LLP);
 	do {
 		hw = &desc->hw[i];
-	} while ((hw->llp != llp) && (++i < desc->ndesc));
+		if (hw->llp == llp)
+			break;
+		bytes -= hw->len;
+	} while (++i < desc->ndesc);
 
 	if (!i)
 		return bytes;
 
-	do {
-		bytes -= desc->hw[--i].len;
-	} while (i);
-
-	ctlhi = channel_readl(idma64c, CTL_HI);
+	/* The current chunk is not fully transfered yet */
+	bytes += desc->hw[--i].len;
 
 	return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi);
 }
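
Note: my reading of this hunk is that each hw descriptor records the bus address of its own descriptor in llp, and the LLP register names the next descriptor the controller will fetch, so a match at index i means chunk i - 1 is still in flight. A sketch of the resulting residue walk under that assumption (types and names are illustrative):

```c
#include <stddef.h>
#include <stdint.h>

/* Hypothetical chunk: llp is the bus address of this chunk's own
 * descriptor, len its payload size. */
struct chunk {
	uint64_t llp;
	size_t len;
};

static size_t active_size(const struct chunk *hw, unsigned int ndesc,
			  size_t total, uint64_t llp_reg, size_t in_chunk_done)
{
	size_t bytes = total;
	unsigned int i = 0;

	do {
		if (hw[i].llp == llp_reg)
			break;
		bytes -= hw[i].len;	/* chunk i is fully transferred */
	} while (++i < ndesc);

	if (!i)
		return bytes;		/* nothing consumed yet */

	/* The chunk before the match is not fully done: count it back
	 * in, then remove the in-chunk progress the hardware reports
	 * (BLOCK_TS in the driver). */
	bytes += hw[--i].len;
	return bytes - in_chunk_done;
}
```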
...
@@ -473,8 +473,10 @@ static void pxad_free_phy(struct pxad_chan *chan)
 		return;
 
 	/* clear the channel mapping in DRCMR */
-	reg = pxad_drcmr(chan->drcmr);
-	writel_relaxed(0, chan->phy->base + reg);
+	if (chan->drcmr <= DRCMR_CHLNUM) {
+		reg = pxad_drcmr(chan->drcmr);
+		writel_relaxed(0, chan->phy->base + reg);
+	}
 
 	spin_lock_irqsave(&pdev->phy_lock, flags);
 	for (i = 0; i < 32; i++)
@@ -516,8 +518,10 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned)
 		"%s(); phy=%p(%d) misaligned=%d\n", __func__,
 		phy, phy->idx, misaligned);
 
-	reg = pxad_drcmr(phy->vchan->drcmr);
-	writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+	if (phy->vchan->drcmr <= DRCMR_CHLNUM) {
+		reg = pxad_drcmr(phy->vchan->drcmr);
+		writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+	}
 
 	dalgn = phy_readl_relaxed(phy, DALGN);
 	if (misaligned)
@@ -887,6 +891,7 @@ pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd,
 	struct dma_async_tx_descriptor *tx;
 	struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc);
 
+	INIT_LIST_HEAD(&vd->node);
 	tx = vchan_tx_prep(vc, vd, tx_flags);
 	tx->tx_submit = pxad_tx_submit;
 	dev_dbg(&chan->vc.chan.dev->device,
@@ -910,14 +915,18 @@ static void pxad_get_config(struct pxad_chan *chan,
 		width = chan->cfg.src_addr_width;
 		dev_addr = chan->cfg.src_addr;
 		*dev_src = dev_addr;
-		*dcmd |= PXA_DCMD_INCTRGADDR | PXA_DCMD_FLOWSRC;
+		*dcmd |= PXA_DCMD_INCTRGADDR;
+		if (chan->drcmr <= DRCMR_CHLNUM)
+			*dcmd |= PXA_DCMD_FLOWSRC;
 	}
 	if (dir == DMA_MEM_TO_DEV) {
 		maxburst = chan->cfg.dst_maxburst;
 		width = chan->cfg.dst_addr_width;
 		dev_addr = chan->cfg.dst_addr;
 		*dev_dst = dev_addr;
-		*dcmd |= PXA_DCMD_INCSRCADDR | PXA_DCMD_FLOWTRG;
+		*dcmd |= PXA_DCMD_INCSRCADDR;
+		if (chan->drcmr <= DRCMR_CHLNUM)
+			*dcmd |= PXA_DCMD_FLOWTRG;
 	}
 	if (dir == DMA_MEM_TO_MEM)
 		*dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
@@ -1177,6 +1186,16 @@ static unsigned int pxad_residue(struct pxad_chan *chan,
 	else
 		curr = phy_readl_relaxed(chan->phy, DTADR);
 
+	/*
+	 * curr has to be actually read before checking descriptor
+	 * completion, so that a curr inside a status updater
+	 * descriptor implies the following test returns true, and
+	 * preventing reordering of curr load and the test.
+	 */
+	rmb();
+	if (is_desc_completed(vd))
+		goto out;
+
 	for (i = 0; i < sw_desc->nb_desc - 1; i++) {
 		hw_desc = sw_desc->hw_desc[i];
 		if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
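
Note: besides the no-requestor gating (only touch DRCMR and the flow-control bits when drcmr is a valid requestor line), the interesting part of this file is the residue barrier: the progress register must be read before the completion flag is tested, or a reordered load could pair a stale progress value with a "completed" status. A userspace analogue of that ordering, using a C11 acquire fence in place of the kernel's rmb():

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical model: curr is the hardware progress counter,
 * completed the descriptor's completion flag. */
static uint32_t read_residue_inputs(const _Atomic uint32_t *curr,
				    const _Atomic bool *completed,
				    bool *done)
{
	uint32_t c = atomic_load_explicit(curr, memory_order_relaxed);

	/* Models rmb(): the load above may not be reordered past the
	 * load below. */
	atomic_thread_fence(memory_order_acquire);

	*done = atomic_load_explicit(completed, memory_order_relaxed);
	return c;
}
```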
...
@@ -599,13 +599,13 @@ get_next_cyclic_promise(struct sun4i_dma_contract *contract)
 static void sun4i_dma_free_contract(struct virt_dma_desc *vd)
 {
 	struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd);
-	struct sun4i_dma_promise *promise;
+	struct sun4i_dma_promise *promise, *tmp;
 
 	/* Free all the demands and completed demands */
-	list_for_each_entry(promise, &contract->demands, list)
+	list_for_each_entry_safe(promise, tmp, &contract->demands, list)
 		kfree(promise);
 
-	list_for_each_entry(promise, &contract->completed_demands, list)
+	list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list)
 		kfree(promise);
 
 	kfree(contract);
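
Note: the plain iterator advances by reading the next pointer out of the node that was just freed; the _safe variant saves the successor first. The same bug and fix in plain C:

```c
#include <stdlib.h>

struct node {
	struct node *next;
};

/*
 * A walk that frees as it goes must not read p->next after free(p);
 * saving the successor first is exactly what
 * list_for_each_entry_safe() does for the driver.
 */
static void free_all(struct node *p)
{
	struct node *tmp;

	while (p) {
		tmp = p->next;	/* grab the successor before freeing */
		free(p);
		p = tmp;
	}
}
```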
...
@@ -59,7 +59,6 @@
 #define XGENE_DMA_RING_MEM_RAM_SHUTDOWN		0xD070
 #define XGENE_DMA_RING_BLK_MEM_RDY		0xD074
 #define XGENE_DMA_RING_BLK_MEM_RDY_VAL		0xFFFFFFFF
-#define XGENE_DMA_RING_DESC_CNT(v)		(((v) & 0x0001FFFE) >> 1)
 #define XGENE_DMA_RING_ID_GET(owner, num)	(((owner) << 6) | (num))
 #define XGENE_DMA_RING_DST_ID(v)		((1 << 10) | (v))
 #define XGENE_DMA_RING_CMD_OFFSET		0x2C
@@ -379,14 +378,6 @@ static u8 xgene_dma_encode_xor_flyby(u32 src_cnt)
 	return flyby_type[src_cnt];
 }
 
-static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring)
-{
-	u32 __iomem *cmd_base = ring->cmd_base;
-	u32 ring_state = ioread32(&cmd_base[1]);
-
-	return XGENE_DMA_RING_DESC_CNT(ring_state);
-}
-
 static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len,
 				     dma_addr_t *paddr)
 {
@@ -659,15 +650,12 @@ static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan,
 	dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
 }
 
-static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
-				   struct xgene_dma_desc_sw *desc_sw)
+static void xgene_chan_xfer_request(struct xgene_dma_chan *chan,
+				    struct xgene_dma_desc_sw *desc_sw)
 {
+	struct xgene_dma_ring *ring = &chan->tx_ring;
 	struct xgene_dma_desc_hw *desc_hw;
 
-	/* Check if can push more descriptor to hw for execution */
-	if (xgene_dma_ring_desc_cnt(ring) > (ring->slots - 2))
-		return -EBUSY;
-
 	/* Get hw descriptor from DMA tx ring */
 	desc_hw = &ring->desc_hw[ring->head];
@@ -694,11 +682,13 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
 		memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw));
 	}
 
+	/* Increment the pending transaction count */
+	chan->pending += ((desc_sw->flags &
+			  XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
+
 	/* Notify the hw that we have descriptor ready for execution */
 	iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ?
 		  2 : 1, ring->cmd);
-
-	return 0;
 }
 
 /**
@@ -710,7 +700,6 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
 static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
 {
 	struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
-	int ret;
 
 	/*
 	 * If the list of pending descriptors is empty, then we
@@ -735,18 +724,13 @@ static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
 		if (chan->pending >= chan->max_outstanding)
 			return;
 
-		ret = xgene_chan_xfer_request(&chan->tx_ring, desc_sw);
-		if (ret)
-			return;
+		xgene_chan_xfer_request(chan, desc_sw);
 
 		/*
 		 * Delete this element from ld pending queue and append it to
 		 * ld running queue
 		 */
 		list_move_tail(&desc_sw->node, &chan->ld_running);
-
-		/* Increment the pending transaction count */
-		chan->pending++;
 	}
 }
 
@@ -821,7 +805,8 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
 		 * Decrement the pending transaction count
 		 * as we have processed one
 		 */
-		chan->pending--;
+		chan->pending -= ((desc_sw->flags &
+				  XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
 
 		/*
 		 * Delete this node from ld running queue and append it to
@@ -1421,15 +1406,18 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
 				     struct xgene_dma_ring *ring,
 				     enum xgene_dma_ring_cfgsize cfgsize)
 {
+	int ret;
+
 	/* Setup DMA ring descriptor variables */
 	ring->pdma = chan->pdma;
 	ring->cfgsize = cfgsize;
 	ring->num = chan->pdma->ring_num++;
 	ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num);
 
-	ring->size = xgene_dma_get_ring_size(chan, cfgsize);
-	if (ring->size <= 0)
-		return ring->size;
+	ret = xgene_dma_get_ring_size(chan, cfgsize);
+	if (ret <= 0)
+		return ret;
+	ring->size = ret;
 
 	/* Allocate memory for DMA ring descriptor */
 	ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size,
@@ -1482,7 +1470,7 @@ static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan)
 		 tx_ring->id, tx_ring->num, tx_ring->desc_vaddr);
 
 	/* Set the max outstanding request possible to this channel */
-	chan->max_outstanding = rx_ring->slots;
+	chan->max_outstanding = tx_ring->slots;
 
 	return ret;
 }
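
Note: this fix replaces polling of the hardware ring state with software slot accounting against the tx ring, where a 64-byte descriptor must be charged two slots on submit and credited two slots on completion. A simplified model of that accounting (struct and names are illustrative):

```c
#include <stdbool.h>

#define FLAG_64B_DESC (1 << 0)	/* models XGENE_DMA_FLAG_64B_DESC */

struct chan_model {
	unsigned int pending;          /* slots currently queued to hw */
	unsigned int max_outstanding;  /* tx ring slots, not rx ring */
};

static unsigned int desc_slots(unsigned int flags)
{
	/* A 64-byte descriptor occupies two ring slots. */
	return (flags & FLAG_64B_DESC) ? 2 : 1;
}

static bool submit(struct chan_model *chan, unsigned int flags)
{
	if (chan->pending >= chan->max_outstanding)
		return false;               /* ring full: retry later */
	chan->pending += desc_slots(flags); /* charge what we enqueue */
	return true;
}

static void complete(struct chan_model *chan, unsigned int flags)
{
	chan->pending -= desc_slots(flags); /* credit the same amount */
}
```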
...
@@ -739,7 +739,7 @@ static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
 	struct dma_chan *chan;
 	struct zx_dma_chan *c;
 
-	if (request > d->dma_requests)
+	if (request >= d->dma_requests)
 		return NULL;
 
 	chan = dma_get_any_slave_channel(&d->slave);
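
Note: requestor lines are numbered 0 .. dma_requests - 1, so the value dma_requests itself is already out of range and the bound check must be ">=", not ">". The same check, written positively:

```c
#include <stdbool.h>

/* Valid requestor indices for n lines are exactly 0 .. n - 1. */
static bool request_is_valid(unsigned int request, unsigned int dma_requests)
{
	return request < dma_requests;	/* rejects request >= dma_requests */
}
```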
...