Commit 6b7df3ce authored by Sebastian Ott, committed by Martin Schwidefsky

s390/pci: fix dma address calculation in map_sg

__s390_dma_map_sg maps a dma-contiguous area. Although we only map
whole pages we have to take into account that the area doesn't start
or stop at a page boundary because we use the dma address to loop
over the individual sg entries. Failing to do that might lead to an
access of the wrong sg entry.

Fixes: ee877b81 ("s390/pci_dma: improve map_sg")
Reported-and-tested-by: Christoph Raisch <raisch@de.ibm.com>
Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Reviewed-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: default avatarMartin Schwidefsky <schwidefsky@de.ibm.com>
parent 191ce9d1
...@@ -419,6 +419,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg, ...@@ -419,6 +419,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
size_t size, dma_addr_t *handle, size_t size, dma_addr_t *handle,
enum dma_data_direction dir) enum dma_data_direction dir)
{ {
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
dma_addr_t dma_addr_base, dma_addr; dma_addr_t dma_addr_base, dma_addr;
int flags = ZPCI_PTE_VALID; int flags = ZPCI_PTE_VALID;
...@@ -426,8 +427,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg, ...@@ -426,8 +427,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
unsigned long pa; unsigned long pa;
int ret; int ret;
size = PAGE_ALIGN(size); dma_addr_base = dma_alloc_address(dev, nr_pages);
dma_addr_base = dma_alloc_address(dev, size >> PAGE_SHIFT);
if (dma_addr_base == DMA_ERROR_CODE) if (dma_addr_base == DMA_ERROR_CODE)
return -ENOMEM; return -ENOMEM;
...@@ -436,26 +436,27 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg, ...@@ -436,26 +436,27 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
flags |= ZPCI_TABLE_PROTECTED; flags |= ZPCI_TABLE_PROTECTED;
for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) { for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
pa = page_to_phys(sg_page(s)) + s->offset; pa = page_to_phys(sg_page(s));
ret = __dma_update_trans(zdev, pa, dma_addr, s->length, flags); ret = __dma_update_trans(zdev, pa, dma_addr,
s->offset + s->length, flags);
if (ret) if (ret)
goto unmap; goto unmap;
dma_addr += s->length; dma_addr += s->offset + s->length;
} }
ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags); ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags);
if (ret) if (ret)
goto unmap; goto unmap;
*handle = dma_addr_base; *handle = dma_addr_base;
atomic64_add(size >> PAGE_SHIFT, &zdev->mapped_pages); atomic64_add(nr_pages, &zdev->mapped_pages);
return ret; return ret;
unmap: unmap:
dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base, dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
ZPCI_PTE_INVALID); ZPCI_PTE_INVALID);
dma_free_address(dev, dma_addr_base, size >> PAGE_SHIFT); dma_free_address(dev, dma_addr_base, nr_pages);
zpci_err("map error:\n"); zpci_err("map error:\n");
zpci_err_dma(ret, pa); zpci_err_dma(ret, pa);
return ret; return ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment