Commit 1175f83c authored by Kuninori Morimoto, committed by Vinod Koul

dmaengine: rcar-dmac: enable descriptor mode on 40bit

The SYS-DMAC supports 40-bit address transfers, and it also supports
Descriptor Mode. The current SYS-DMAC driver disables Descriptor Mode
whenever 40-bit addresses are used. However, Descriptor Mode can be
used with 40-bit addresses as long as the transfer's source and
destination addresses each stay within a single 4GiB region of the
40-bit address space.
This patch enables Descriptor Mode when those conditions are met.
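
For illustration, a minimal standalone sketch (not part of the patch; the
helper name and the flat address arrays are hypothetical) of the resulting
rule: the upper 32 address bits are programmed once per transfer, so every
chunk's source address must share one 4GiB region and every chunk's
destination address another:

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;	/* standalone stand-in for the kernel type */

/*
 * The upper 32 bits of a 40-bit address select its 4GiB region.
 * Descriptor mode stays usable only if those bits are constant
 * across all chunks, separately for source and for destination.
 */
static bool hwdescs_usable(const dma_addr_t *src, const dma_addr_t *dst,
			   unsigned int nchunks)
{
	unsigned int i;

	for (i = 1; i < nchunks; i++)
		if (src[i] >> 32 != src[0] >> 32 ||
		    dst[i] >> 32 != dst[0] >> 32)
			return false;	/* a 4GiB region boundary is crossed */

	return nchunks > 1;	/* hardware lists only pay off for >1 chunk */
}
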
Signed-off-by: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Reviewed-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
parent 6f3125ce
@@ -344,13 +344,19 @@ static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
 		rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid);
 
 	if (desc->hwdescs.use) {
-		struct rcar_dmac_xfer_chunk *chunk;
+		struct rcar_dmac_xfer_chunk *chunk =
+			list_first_entry(&desc->chunks,
+					 struct rcar_dmac_xfer_chunk, node);
 
 		dev_dbg(chan->chan.device->dev,
 			"chan%u: queue desc %p: %u@%pad\n",
 			chan->index, desc, desc->nchunks, &desc->hwdescs.dma);
 
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+		rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
+				     chunk->src_addr >> 32);
+		rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
+				     chunk->dst_addr >> 32);
 		rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE,
 				     desc->hwdescs.dma >> 32);
 #endif
@@ -368,8 +374,6 @@ static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
 		 * should. Initialize it manually with the destination address
 		 * of the first chunk.
 		 */
-		chunk = list_first_entry(&desc->chunks,
-					 struct rcar_dmac_xfer_chunk, node);
 		rcar_dmac_chan_write(chan, RCAR_DMADAR,
 				     chunk->dst_addr & 0xffffffff);
 
@@ -855,8 +859,12 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
 	unsigned int nchunks = 0;
 	unsigned int max_chunk_size;
 	unsigned int full_size = 0;
-	bool highmem = false;
+	bool cross_boundary = false;
 	unsigned int i;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	u32 high_dev_addr;
+	u32 high_mem_addr;
+#endif
 
 	desc = rcar_dmac_desc_get(chan);
 	if (!desc)
@@ -882,6 +890,16 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
 
 		full_size += len;
 
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+		if (i == 0) {
+			high_dev_addr = dev_addr >> 32;
+			high_mem_addr = mem_addr >> 32;
+		}
+
+		if ((dev_addr >> 32 != high_dev_addr) ||
+		    (mem_addr >> 32 != high_mem_addr))
+			cross_boundary = true;
+#endif
 		while (len) {
 			unsigned int size = min(len, max_chunk_size);
 
@@ -890,18 +908,14 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
 			 * Prevent individual transfers from crossing 4GB
 			 * boundaries.
 			 */
-			if (dev_addr >> 32 != (dev_addr + size - 1) >> 32)
+			if (dev_addr >> 32 != (dev_addr + size - 1) >> 32) {
 				size = ALIGN(dev_addr, 1ULL << 32) - dev_addr;
-			if (mem_addr >> 32 != (mem_addr + size - 1) >> 32)
-				size = ALIGN(mem_addr, 1ULL << 32) - mem_addr;
-
-			/*
-			 * Check if either of the source or destination address
-			 * can't be expressed in 32 bits. If so we can't use
-			 * hardware descriptor lists.
-			 */
-			if (dev_addr >> 32 || mem_addr >> 32)
-				highmem = true;
+				cross_boundary = true;
+			}
+			if (mem_addr >> 32 != (mem_addr + size - 1) >> 32) {
+				size = ALIGN(mem_addr, 1ULL << 32) - mem_addr;
+				cross_boundary = true;
+			}
 #endif
 
 			chunk = rcar_dmac_xfer_chunk_get(chan);
@@ -943,13 +957,11 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
 	 * Use hardware descriptor lists if possible when more than one chunk
 	 * needs to be transferred (otherwise they don't make much sense).
 	 *
-	 * The highmem check currently covers the whole transfer. As an
-	 * optimization we could use descriptor lists for consecutive lowmem
-	 * chunks and direct manual mode for highmem chunks. Whether the
-	 * performance improvement would be significant enough compared to the
-	 * additional complexity remains to be investigated.
+	 * Source and destination addresses must each stay within a single
+	 * 4GiB region of the 40-bit address space when hardware descriptors
+	 * are used; cross_boundary tracks whether that holds.
 	 */
-	desc->hwdescs.use = !highmem && nchunks > 1;
+	desc->hwdescs.use = !cross_boundary && nchunks > 1;
 	if (desc->hwdescs.use) {
 		if (rcar_dmac_fill_hwdesc(chan, desc) < 0)
 			desc->hwdescs.use = false;
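
For reference, the 4GiB boundary split in the prep_sg hunks above can be
exercised in isolation; a minimal sketch with assumed example values, where
ALIGN_4G stands in for the kernel's ALIGN(addr, 1ULL << 32):

#include <stdio.h>
#include <stdint.h>

/* Round addr up to the next 4GiB boundary (kernel: ALIGN(addr, 1ULL << 32)). */
#define ALIGN_4G(addr) (((addr) + (1ULL << 32) - 1) & ~((1ULL << 32) - 1))

int main(void)
{
	uint64_t mem_addr = 0xFFFFF000ULL;	/* 4KiB below the 4GiB line */
	uint64_t size = 0x4000;			/* 16KiB requested */

	/* Same test as the driver: would this chunk cross a boundary? */
	if (mem_addr >> 32 != (mem_addr + size - 1) >> 32)
		size = ALIGN_4G(mem_addr) - mem_addr;	/* truncate at boundary */

	printf("first chunk: 0x%llx bytes\n", (unsigned long long)size);
	/* prints 0x1000: the chunk ends exactly at the 4GiB boundary */
	return 0;
}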