Commit 4f005dbe authored by Maciej Sosnowski, committed by Dan Williams

ioatdma: fix "ioatdma frees DMA memory with wrong function"

As reported by Alexander Beregalov <a.beregalov@gmail.com>:

ioatdma 0000:00:08.0: DMA-API: device driver frees DMA memory with
wrong function [device address=0x000000007f76f800] [size=2000 bytes]
[mapped as single] [unmapped as page]

The ioatdma driver was unmapping all regions, whether mapped as page or as
single, with unmap_page.
This patch lets the DMA driver recognize whether unmap_single or unmap_page
should be used.
It introduces two new DMA control flags:
DMA_COMPL_SRC_UNMAP_SINGLE and DMA_COMPL_DEST_UNMAP_SINGLE.
They should be set to tell the DMA driver to unmap as single
(the former for the source, the latter for the destination).
If the respective flag is not set, the driver unmaps as page.
Signed-off-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Reported-by: Alexander Beregalov <a.beregalov@gmail.com>
Tested-by: Alexander Beregalov <a.beregalov@gmail.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent ca50a51e
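
For illustration only (not part of the patch): a minimal sketch of how a
dmaengine client that maps its buffers with dma_map_single() would pass the
new flags so the driver unmaps with the matching unmap_single call on
completion. It mirrors the dma_async_memcpy_buf_to_buf() change in the diff
below; the helper name issue_single_copy() is hypothetical.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static struct dma_async_tx_descriptor *
issue_single_copy(struct dma_chan *chan, void *dest, void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_src, dma_dest;
	unsigned long flags;

	/* Both regions are mapped as "single", not as page. */
	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);

	/* Tell the driver to unmap both sides with unmap_single on completion. */
	flags = DMA_CTRL_ACK |
		DMA_COMPL_SRC_UNMAP_SINGLE |
		DMA_COMPL_DEST_UNMAP_SINGLE;

	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
	if (!tx) {
		/* Prep failed: the driver will not unmap, so undo the mappings here. */
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return NULL;
	}
	return tx;
}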
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -804,11 +804,14 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int cpu;
+	unsigned long flags;
 
 	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
 	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
-	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
-					 DMA_CTRL_ACK);
+	flags = DMA_CTRL_ACK |
+		DMA_COMPL_SRC_UNMAP_SINGLE |
+		DMA_COMPL_DEST_UNMAP_SINGLE;
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
 
 	if (!tx) {
 		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
@@ -850,11 +853,12 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
 	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int cpu;
+	unsigned long flags;
 
 	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
 	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
-	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
-					 DMA_CTRL_ACK);
+	flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
 
 	if (!tx) {
 		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
@@ -898,12 +902,13 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int cpu;
+	unsigned long flags;
 
 	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
 	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
 				DMA_FROM_DEVICE);
-	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
-					 DMA_CTRL_ACK);
+	flags = DMA_CTRL_ACK;
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
 
 	if (!tx) {
 		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -1063,22 +1063,31 @@ static void ioat_dma_cleanup_tasklet(unsigned long data)
 static void
 ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
 {
-	/*
-	 * yes we are unmapping both _page and _single
-	 * alloc'd regions with unmap_page. Is this
-	 * *really* that bad?
-	 */
-	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP))
-		pci_unmap_page(ioat_chan->device->pdev,
-				pci_unmap_addr(desc, dst),
-				pci_unmap_len(desc, len),
-				PCI_DMA_FROMDEVICE);
-
-	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP))
-		pci_unmap_page(ioat_chan->device->pdev,
-				pci_unmap_addr(desc, src),
-				pci_unmap_len(desc, len),
-				PCI_DMA_TODEVICE);
+	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+		if (desc->async_tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+			pci_unmap_single(ioat_chan->device->pdev,
+					 pci_unmap_addr(desc, dst),
+					 pci_unmap_len(desc, len),
+					 PCI_DMA_FROMDEVICE);
+		else
+			pci_unmap_page(ioat_chan->device->pdev,
+				       pci_unmap_addr(desc, dst),
+				       pci_unmap_len(desc, len),
+				       PCI_DMA_FROMDEVICE);
+	}
+
+	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+		if (desc->async_tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+			pci_unmap_single(ioat_chan->device->pdev,
+					 pci_unmap_addr(desc, src),
+					 pci_unmap_len(desc, len),
+					 PCI_DMA_TODEVICE);
+		else
+			pci_unmap_page(ioat_chan->device->pdev,
+				       pci_unmap_addr(desc, src),
+				       pci_unmap_len(desc, len),
+				       PCI_DMA_TODEVICE);
+	}
 }
 
 /**
@@ -1363,6 +1372,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 	int err = 0;
 	struct completion cmp;
 	unsigned long tmo;
+	unsigned long flags;
 
 	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
 	if (!src)
@@ -1392,8 +1402,9 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 			   DMA_TO_DEVICE);
 	dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
 				  DMA_FROM_DEVICE);
+	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE;
 	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
-						   IOAT_TEST_SIZE, 0);
+						   IOAT_TEST_SIZE, flags);
 	if (!tx) {
 		dev_err(&device->pdev->dev,
 			"Self-test prep failed, disabling\n");
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -78,12 +78,18 @@ enum dma_transaction_type {
  *	dependency chains
  * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
  * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
+ * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
+ *	(if not set, do the source dma-unmapping as page)
+ * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single
+ *	(if not set, do the destination dma-unmapping as page)
  */
 enum dma_ctrl_flags {
 	DMA_PREP_INTERRUPT = (1 << 0),
 	DMA_CTRL_ACK = (1 << 1),
 	DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
 	DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
+	DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
+	DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
 };
 
 /**