Commit 4c360ace authored by Robin Murphy, committed by Joerg Roedel

iommu/dma: Factor out remapped pages lookup

Since we duplicate the find_vm_area() logic a few times in places where
we only care about the pages, factor out a helper to abstract it.
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
[hch: don't warn when not finding a region, as we'll rely on that later]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 796a08cf
@@ -554,6 +554,15 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
 	return pages;
 }
 
+static struct page **__iommu_dma_get_pages(void *cpu_addr)
+{
+	struct vm_struct *area = find_vm_area(cpu_addr);
+
+	if (!area || !area->pages)
+		return NULL;
+	return area->pages;
+}
+
 /**
  * iommu_dma_free - Free a buffer allocated by __iommu_dma_alloc()
  * @dev: Device which owns this buffer
@@ -1042,11 +1051,11 @@ static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
 		dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
 		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
 	} else if (is_vmalloc_addr(cpu_addr)){
-		struct vm_struct *area = find_vm_area(cpu_addr);
+		struct page **pages = __iommu_dma_get_pages(cpu_addr);
 
-		if (WARN_ON(!area || !area->pages))
+		if (!pages)
 			return;
-		__iommu_dma_free(dev, area->pages, iosize, &handle);
+		__iommu_dma_free(dev, pages, iosize, &handle);
 		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
 	} else {
 		__iommu_dma_unmap(dev, handle, iosize);
@@ -1078,7 +1087,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 {
 	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	unsigned long off = vma->vm_pgoff;
-	struct vm_struct *area;
+	struct page **pages;
 	int ret;
 
 	vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
@@ -1103,11 +1112,10 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		return __iommu_dma_mmap_pfn(vma, pfn, size);
 	}
 
-	area = find_vm_area(cpu_addr);
-	if (WARN_ON(!area || !area->pages))
+	pages = __iommu_dma_get_pages(cpu_addr);
+	if (!pages)
 		return -ENXIO;
-
-	return __iommu_dma_mmap(area->pages, size, vma);
+	return __iommu_dma_mmap(pages, size, vma);
 }
 
 static int __iommu_dma_get_sgtable_page(struct sg_table *sgt, struct page *page,
@@ -1125,7 +1133,7 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
 		unsigned long attrs)
 {
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	struct vm_struct *area = find_vm_area(cpu_addr);
+	struct page **pages;
 
 	if (!is_vmalloc_addr(cpu_addr)) {
 		struct page *page = virt_to_page(cpu_addr);
@@ -1141,10 +1149,10 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
 		return __iommu_dma_get_sgtable_page(sgt, page, size);
 	}
 
-	if (WARN_ON(!area || !area->pages))
+	pages = __iommu_dma_get_pages(cpu_addr);
+	if (!pages)
 		return -ENXIO;
-
-	return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
+	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
 			GFP_KERNEL);
 }
 
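
For context only, here is a minimal standalone userspace sketch of the pattern the patch applies: the open-coded "look up the region, then use its pages" sequence becomes one helper that returns NULL on a miss, so each caller chooses its own error handling rather than sharing a WARN_ON. This is not kernel code and not part of the patch; the names fake_area, fake_find_area and get_pages are invented stand-ins for struct vm_struct, find_vm_area() and __iommu_dma_get_pages().

/* Illustrative sketch only; names and data are made up. */
#include <stdio.h>
#include <stddef.h>

struct fake_area {              /* simplified stand-in for struct vm_struct */
	void *addr;
	void **pages;           /* NULL when the region has no page array */
};

/* stand-in for find_vm_area(): return the region covering cpu_addr, or NULL */
static struct fake_area *fake_find_area(struct fake_area *table, size_t n,
					void *cpu_addr)
{
	for (size_t i = 0; i < n; i++)
		if (table[i].addr == cpu_addr)
			return &table[i];
	return NULL;
}

/* the factored-out lookup: callers only care about the pages array */
static void **get_pages(struct fake_area *table, size_t n, void *cpu_addr)
{
	struct fake_area *area = fake_find_area(table, n, cpu_addr);

	if (!area || !area->pages)
		return NULL;    /* quiet miss: no WARN, the caller decides */
	return area->pages;
}

int main(void)
{
	static int dummy_page, other_buf;
	void *remapped = &dummy_page, *not_remapped = &other_buf;
	void *page_array[1] = { &dummy_page };
	struct fake_area table[] = {
		{ .addr = remapped, .pages = page_array },
	};

	/* free-style caller: silently skip when there is no page array */
	if (!get_pages(table, 1, not_remapped))
		printf("not_remapped: no pages, nothing page-wise to free\n");

	/* mmap-style caller: same helper, different failure value */
	printf("remapped: %s\n",
	       get_pages(table, 1, remapped) ? "got pages" : "-ENXIO");
	return 0;
}

The quiet NULL return mirrors the [hch] note above: callers are free to treat "no remapped page array" as a normal case rather than a warning-worthy error.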