Commit ee877b81 authored by Sebastian Ott, committed by Martin Schwidefsky

s390/pci_dma: improve map_sg

Our map_sg implementation mapped sg entries independently of each other.
For ease of use and possible performance improvements this patch changes
the implementation to try to map as many (likely physically non-contiguous)
sglist entries as possible into a contiguous DMA segment.
Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Reviewed-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 8cb63b78
...@@ -388,37 +388,94 @@ static void s390_dma_free(struct device *dev, size_t size, ...@@ -388,37 +388,94 @@ static void s390_dma_free(struct device *dev, size_t size,
free_pages((unsigned long) pa, get_order(size)); free_pages((unsigned long) pa, get_order(size));
} }
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg, /* Map a segment into a contiguous dma address area */
int nr_elements, enum dma_data_direction dir, static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
unsigned long attrs) size_t size, dma_addr_t *handle,
enum dma_data_direction dir)
{ {
int mapped_elements = 0; struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
dma_addr_t dma_addr_base, dma_addr;
int flags = ZPCI_PTE_VALID;
struct scatterlist *s; struct scatterlist *s;
int i; unsigned long pa;
int ret;
for_each_sg(sg, s, nr_elements, i) { size = PAGE_ALIGN(size);
struct page *page = sg_page(s); dma_addr_base = dma_alloc_address(dev, size >> PAGE_SHIFT);
s->dma_address = s390_dma_map_pages(dev, page, s->offset, if (dma_addr_base == DMA_ERROR_CODE)
s->length, dir, 0); return -ENOMEM;
if (!dma_mapping_error(dev, s->dma_address)) {
s->dma_length = s->length; dma_addr = dma_addr_base;
mapped_elements++; if (dir == DMA_NONE || dir == DMA_TO_DEVICE)
} else flags |= ZPCI_TABLE_PROTECTED;
for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
pa = page_to_phys(sg_page(s)) + s->offset;
ret = dma_update_trans(zdev, pa, dma_addr, s->length, flags);
if (ret)
goto unmap; goto unmap;
dma_addr += s->length;
} }
out: *handle = dma_addr_base;
return mapped_elements; atomic64_add(size >> PAGE_SHIFT, &zdev->mapped_pages);
return ret;
unmap: unmap:
for_each_sg(sg, s, mapped_elements, i) { dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
if (s->dma_address) ZPCI_PTE_INVALID);
s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dma_free_address(dev, dma_addr_base, size >> PAGE_SHIFT);
dir, 0); zpci_err("map error:\n");
s->dma_address = 0; zpci_err_dma(ret, pa);
return ret;
}
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nr_elements, enum dma_data_direction dir,
unsigned long attrs)
{
struct scatterlist *s = sg, *start = sg, *dma = sg;
unsigned int max = dma_get_max_seg_size(dev);
unsigned int size = s->offset + s->length;
unsigned int offset = s->offset;
int count = 0, i;
for (i = 1; i < nr_elements; i++) {
s = sg_next(s);
s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0; s->dma_length = 0;
if (s->offset || (size & ~PAGE_MASK) ||
size + s->length > max) {
if (__s390_dma_map_sg(dev, start, size,
&dma->dma_address, dir))
goto unmap;
dma->dma_address += offset;
dma->dma_length = size - offset;
size = offset = s->offset;
start = s;
dma = sg_next(dma);
count++;
} }
mapped_elements = 0; size += s->length;
goto out; }
if (__s390_dma_map_sg(dev, start, size, &dma->dma_address, dir))
goto unmap;
dma->dma_address += offset;
dma->dma_length = size - offset;
return count + 1;
unmap:
for_each_sg(sg, s, count, i)
s390_dma_unmap_pages(dev, sg_dma_address(s), sg_dma_len(s),
dir, attrs);
return 0;
} }
static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg, static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
...@@ -429,8 +486,9 @@ static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg, ...@@ -429,8 +486,9 @@ static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int i; int i;
for_each_sg(sg, s, nr_elements, i) { for_each_sg(sg, s, nr_elements, i) {
s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, if (s->dma_length)
0); s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
dir, attrs);
s->dma_address = 0; s->dma_address = 0;
s->dma_length = 0; s->dma_length = 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment