Commit a06d568f authored by Dan Williams

async_xor: dma_map destination DMA_BIDIRECTIONAL

Mapping the destination multiple times is a misuse of the dma-api.
Since the destination may be reused as a source, ensure that it is only
mapped once and that it is mapped bidirectionally.  This appears to add
ugliness on the unmap side in that it always reads back the destination
address from the descriptor, but gcc can determine that dma_unmap is a
nop and not emit the code that calculates its arguments.

Cc: <stable@kernel.org>
Cc: Saeed Bishara <saeed@marvell.com>
Acked-by: Yuri Tikhonov <yur@emcraft.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent b0b42b16
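
Before the diff, a minimal C sketch of the mapping rule the patch adopts may help: the destination page is mapped exactly once, DMA_BIDIRECTIONAL, and any source slot that aliases it reuses that handle instead of being mapped again. The helper name sketch_map_xor and the dma_mapping_error() check are illustrative assumptions, not part of the patch; the real logic is the hunk in do_async_xor() below.

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Hypothetical helper illustrating the map-once, bidirectional rule. */
static int sketch_map_xor(struct device *dev, struct page *dest,
			  struct page **src_list, int src_cnt,
			  unsigned long offset, size_t len,
			  dma_addr_t *dma_dest, dma_addr_t *dma_src)
{
	int i;

	/* dest may later be read back as a source, so map it both ways */
	*dma_dest = dma_map_page(dev, dest, offset, len, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma_dest))
		return -ENOMEM;

	for (i = 0; i < src_cnt; i++) {
		/* never map the same page twice: reuse the dest mapping */
		if (src_list[i] == dest) {
			dma_src[i] = *dma_dest;
			continue;
		}
		dma_src[i] = dma_map_page(dev, src_list[i], offset, len,
					  DMA_TO_DEVICE);
	}
	return 0;
}

Mapping the destination only once is what the DMA API requires; making that single mapping bidirectional keeps it valid both when the engine writes the xor result and when the same page is handed back in as one of the sources.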
@@ -53,10 +53,17 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 	int xor_src_cnt;
 	dma_addr_t dma_dest;
 
-	dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_FROM_DEVICE);
-	for (i = 0; i < src_cnt; i++)
+	/* map the dest bidrectional in case it is re-used as a source */
+	dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);
+	for (i = 0; i < src_cnt; i++) {
+		/* only map the dest once */
+		if (unlikely(src_list[i] == dest)) {
+			dma_src[i] = dma_dest;
+			continue;
+		}
 		dma_src[i] = dma_map_page(dma->dev, src_list[i], offset,
 					  len, DMA_TO_DEVICE);
+	}
 
 	while (src_cnt) {
 		async_flags = flags;
@@ -85,18 +85,28 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
 			enum dma_ctrl_flags flags = desc->async_tx.flags;
 			u32 src_cnt;
 			dma_addr_t addr;
+			dma_addr_t dest;
 
+			src_cnt = unmap->unmap_src_cnt;
+			dest = iop_desc_get_dest_addr(unmap, iop_chan);
 			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-				addr = iop_desc_get_dest_addr(unmap, iop_chan);
-				dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
+				enum dma_data_direction dir;
+
+				if (src_cnt > 1) /* is xor? */
+					dir = DMA_BIDIRECTIONAL;
+				else
+					dir = DMA_FROM_DEVICE;
+
+				dma_unmap_page(dev, dest, len, dir);
 			}
 
 			if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-				src_cnt = unmap->unmap_src_cnt;
 				while (src_cnt--) {
 					addr = iop_desc_get_src_addr(unmap,
 								     iop_chan,
 								     src_cnt);
+					if (addr == dest)
+						continue;
 					dma_unmap_page(dev, addr, len,
 						       DMA_TO_DEVICE);
 				}
@@ -311,17 +311,26 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
 			enum dma_ctrl_flags flags = desc->async_tx.flags;
 			u32 src_cnt;
 			dma_addr_t addr;
+			dma_addr_t dest;
 
+			src_cnt = unmap->unmap_src_cnt;
+			dest = mv_desc_get_dest_addr(unmap);
 			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-				addr = mv_desc_get_dest_addr(unmap);
-				dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
+				enum dma_data_direction dir;
+
+				if (src_cnt > 1) /* is xor ? */
+					dir = DMA_BIDIRECTIONAL;
+				else
+					dir = DMA_FROM_DEVICE;
+				dma_unmap_page(dev, dest, len, dir);
 			}
 
 			if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-				src_cnt = unmap->unmap_src_cnt;
 				while (src_cnt--) {
 					addr = mv_desc_get_src_addr(unmap,
 								    src_cnt);
+					if (addr == dest)
+						continue;
 					dma_unmap_page(dev, addr, len,
 						       DMA_TO_DEVICE);
 				}
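
Both drivers apply the same rule on completion; the sketch below consolidates it into one hypothetical helper (sketch_unmap_xor is not in the patch, and the real drivers read the addresses back from their hardware descriptors via iop_desc_get_*_addr()/mv_desc_get_*_addr()). The destination is unmapped DMA_BIDIRECTIONAL when the operation was an xor (more than one source) and DMA_FROM_DEVICE otherwise, and any source address that aliases the destination is skipped so the single mapping is torn down only once. On configurations where dma_unmap_page() is a no-op, the compiler can drop the descriptor reads that compute its arguments, which is the point the commit message makes about the apparent ugliness on the unmap side.

#include <linux/types.h>
#include <linux/dma-mapping.h>

/* Hypothetical helper illustrating the unmap-side rule. */
static void sketch_unmap_xor(struct device *dev, dma_addr_t dest,
			     const dma_addr_t *src, u32 src_cnt, size_t len)
{
	enum dma_data_direction dir;

	/* an xor has more than one source; its dest was mapped both ways */
	dir = src_cnt > 1 ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	dma_unmap_page(dev, dest, len, dir);

	while (src_cnt--) {
		/* an aliased source was already unmapped as the dest */
		if (src[src_cnt] == dest)
			continue;
		dma_unmap_page(dev, src[src_cnt], len, DMA_TO_DEVICE);
	}
}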