Commit 7a5dbf3a authored by Christoph Hellwig, committed by Joerg Roedel

iommu/amd: Remove the leftover of bypass support

The AMD iommu dma_ops are only attached on a per-device basis when an
actual translation is needed.  Remove the leftover bypass support, which
in parts was already broken (e.g. it always returned 0 from ->map_sg).

Use the opportunity to remove a few local variables and move assignments
into the declaration line where they were previously separated by the
bypass check.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 83d18bdf
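
For context, before the diff: a stand-alone sketch (not part of the patch) of the error-pointer convention these entry points rely on. The ERR_PTR()/PTR_ERR()/IS_ERR() macros mirror the kernel's real ones; get_domain_old(), get_domain_new() and the_domain are made-up illustrative names. The point it demonstrates is why the -EINVAL bypass branch became dead code once dma_ops are only attached to devices that actually need translation.

/*
 * Stand-alone userspace sketch, not kernel code: models the ERR_PTR
 * convention used by get_domain() to show why the -EINVAL bypass
 * branch is dead once dma_ops are only installed on devices that
 * need translation.  All function and variable names are illustrative.
 */
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct protection_domain { int id; };
static struct protection_domain the_domain = { .id = 42 };

/* Old world: a device without translation got ERR_PTR(-EINVAL), so
 * every dma_ops entry point had to special-case it (the bypass). */
static struct protection_domain *get_domain_old(int translated)
{
	return translated ? &the_domain : ERR_PTR(-EINVAL);
}

/* New world: these entry points are only reachable through dma_ops
 * attached because a domain exists, so no bypass branch is needed. */
static struct protection_domain *get_domain_new(void)
{
	return &the_domain;
}

int main(void)
{
	struct protection_domain *d = get_domain_old(0);

	if (IS_ERR(d) && PTR_ERR(d) == -EINVAL)	/* old bypass check */
		puts("old: bypass, use the physical address 1:1");

	d = get_domain_new();			/* no error check needed */
	printf("new: translate through domain %d\n", d->id);
	return 0;
}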
drivers/iommu/amd_iommu.c
@@ -2481,20 +2481,10 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
 			   unsigned long attrs)
 {
 	phys_addr_t paddr = page_to_phys(page) + offset;
-	struct protection_domain *domain;
-	struct dma_ops_domain *dma_dom;
-	u64 dma_mask;
-
-	domain = get_domain(dev);
-	if (PTR_ERR(domain) == -EINVAL)
-		return (dma_addr_t)paddr;
-	else if (IS_ERR(domain))
-		return DMA_MAPPING_ERROR;
-
-	dma_mask = *dev->dma_mask;
-	dma_dom = to_dma_ops_domain(domain);
+	struct protection_domain *domain = get_domain(dev);
+	struct dma_ops_domain *dma_dom = to_dma_ops_domain(domain);
 
-	return __map_single(dev, dma_dom, paddr, size, dir, dma_mask);
+	return __map_single(dev, dma_dom, paddr, size, dir, *dev->dma_mask);
 }
 
 /*
@@ -2503,14 +2493,8 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
 static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 		       enum dma_data_direction dir, unsigned long attrs)
 {
-	struct protection_domain *domain;
-	struct dma_ops_domain *dma_dom;
-
-	domain = get_domain(dev);
-	if (IS_ERR(domain))
-		return;
-
-	dma_dom = to_dma_ops_domain(domain);
+	struct protection_domain *domain = get_domain(dev);
+	struct dma_ops_domain *dma_dom = to_dma_ops_domain(domain);
 
 	__unmap_single(dma_dom, dma_addr, size, dir);
 }
@@ -2550,20 +2534,13 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 		  unsigned long attrs)
 {
 	int mapped_pages = 0, npages = 0, prot = 0, i;
-	struct protection_domain *domain;
-	struct dma_ops_domain *dma_dom;
+	struct protection_domain *domain = get_domain(dev);
+	struct dma_ops_domain *dma_dom = to_dma_ops_domain(domain);
 	struct scatterlist *s;
 	unsigned long address;
-	u64 dma_mask;
+	u64 dma_mask = *dev->dma_mask;
 	int ret;
 
-	domain = get_domain(dev);
-	if (IS_ERR(domain))
-		return 0;
-
-	dma_dom = to_dma_ops_domain(domain);
-	dma_mask = *dev->dma_mask;
-
 	npages = sg_num_pages(dev, sglist, nelems);
 
 	address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask);
@@ -2635,20 +2612,11 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 		     int nelems, enum dma_data_direction dir,
 		     unsigned long attrs)
 {
-	struct protection_domain *domain;
-	struct dma_ops_domain *dma_dom;
-	unsigned long startaddr;
-	int npages = 2;
-
-	domain = get_domain(dev);
-	if (IS_ERR(domain))
-		return;
-
-	startaddr = sg_dma_address(sglist) & PAGE_MASK;
-	dma_dom = to_dma_ops_domain(domain);
-	npages = sg_num_pages(dev, sglist, nelems);
-
-	__unmap_single(dma_dom, startaddr, npages << PAGE_SHIFT, dir);
+	struct protection_domain *domain = get_domain(dev);
+	struct dma_ops_domain *dma_dom = to_dma_ops_domain(domain);
+
+	__unmap_single(dma_dom, sg_dma_address(sglist) & PAGE_MASK,
+		       sg_num_pages(dev, sglist, nelems) << PAGE_SHIFT, dir);
 }
 
 /*
@@ -2659,16 +2627,11 @@ static void *alloc_coherent(struct device *dev, size_t size,
 			    unsigned long attrs)
 {
 	u64 dma_mask = dev->coherent_dma_mask;
-	struct protection_domain *domain;
+	struct protection_domain *domain = get_domain(dev);
 	struct dma_ops_domain *dma_dom;
 	struct page *page;
 
-	domain = get_domain(dev);
-	if (PTR_ERR(domain) == -EINVAL) {
-		page = alloc_pages(flag, get_order(size));
-		*dma_addr = page_to_phys(page);
-		return page_address(page);
-	} else if (IS_ERR(domain))
+	if (IS_ERR(domain))
 		return NULL;
 
 	dma_dom = to_dma_ops_domain(domain);
@@ -2714,22 +2677,13 @@ static void free_coherent(struct device *dev, size_t size,
 			  void *virt_addr, dma_addr_t dma_addr,
 			  unsigned long attrs)
 {
-	struct protection_domain *domain;
-	struct dma_ops_domain *dma_dom;
-	struct page *page;
+	struct protection_domain *domain = get_domain(dev);
+	struct dma_ops_domain *dma_dom = to_dma_ops_domain(domain);
+	struct page *page = virt_to_page(virt_addr);
 
-	page = virt_to_page(virt_addr);
 	size = PAGE_ALIGN(size);
 
-	domain = get_domain(dev);
-	if (IS_ERR(domain))
-		goto free_mem;
-
-	dma_dom = to_dma_ops_domain(domain);
-
 	__unmap_single(dma_dom, dma_addr, size, DMA_BIDIRECTIONAL);
 
-free_mem:
 	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
 		__free_pages(page, get_order(size));
 }