Commit b468620f authored by Christoph Hellwig, committed by Ingo Molnar

iommu/amd_iommu: Use CONFIG_DMA_DIRECT_OPS=y and dma_direct_{alloc,free}()

This cleans up the code a lot by removing duplicate logic.
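
For context, the duplicated logic in question is the open-coded page allocation that both the direct-mapped path and the IOMMU path carried; dma_direct_alloc() already provides it. Below is a minimal, hypothetical sketch of that common path, simplified from the v4.17-era direct-mapping code (error handling and the CMA/GFP-zone retry logic are omitted; direct_alloc_sketch is an illustrative name, not a real kernel function):

#include <linux/dma-direct.h>	/* phys_to_dma() */
#include <linux/gfp.h>		/* alloc_pages(), get_order() */

/*
 * Hypothetical sketch of the common allocation path that
 * dma_direct_alloc() centralizes: allocate zeroed pages and
 * derive the bus address from their physical address.
 */
static void *direct_alloc_sketch(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;

	page = alloc_pages(gfp | __GFP_ZERO, get_order(PAGE_ALIGN(size)));
	if (!page)
		return NULL;

	*dma_handle = phys_to_dma(dev, page_to_phys(page));
	return page_address(page);
}

With that path centralized, the IOMMU driver only has to layer its own __map_single() translation on top, as the diff below shows.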
Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
Tested-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Joerg Roedel <jroedel@suse.de>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Jon Mason <jdmason@kudzu.us>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Muli Ben-Yehuda <mulix@mulix.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: iommu@lists.linux-foundation.org
Link: http://lkml.kernel.org/r/20180319103826.12853-8-hch@lst.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 51c7eeba
@@ -107,6 +107,7 @@ config IOMMU_PGTABLES_L2
 # AMD IOMMU support
 config AMD_IOMMU
 	bool "AMD IOMMU support"
+	select DMA_DIRECT_OPS
 	select SWIOTLB
 	select PCI_MSI
 	select PCI_ATS
@@ -2600,51 +2600,32 @@ static void *alloc_coherent(struct device *dev, size_t size,
 			    unsigned long attrs)
 {
 	u64 dma_mask = dev->coherent_dma_mask;
-	struct protection_domain *domain;
-	struct dma_ops_domain *dma_dom;
-	struct page *page;
-
-	domain = get_domain(dev);
-	if (PTR_ERR(domain) == -EINVAL) {
-		page = alloc_pages(flag, get_order(size));
-		*dma_addr = page_to_phys(page);
-		return page_address(page);
-	} else if (IS_ERR(domain))
-		return NULL;
+	struct protection_domain *domain = get_domain(dev);
+	bool is_direct = false;
+	void *virt_addr;
 
-	dma_dom   = to_dma_ops_domain(domain);
-	size	  = PAGE_ALIGN(size);
-	dma_mask  = dev->coherent_dma_mask;
-	flag	 &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
-	flag     |= __GFP_ZERO;
-
-	page = alloc_pages(flag | __GFP_NOWARN,  get_order(size));
-	if (!page) {
-		if (!gfpflags_allow_blocking(flag))
-			return NULL;
-
-		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
-						 get_order(size), flag);
-		if (!page)
-			return NULL;
+	if (IS_ERR(domain)) {
+		if (PTR_ERR(domain) != -EINVAL)
+			return NULL;
+		is_direct = true;
 	}
 
+	virt_addr = dma_direct_alloc(dev, size, dma_addr, flag, attrs);
+	if (!virt_addr || is_direct)
+		return virt_addr;
+
 	if (!dma_mask)
 		dma_mask = *dev->dma_mask;
 
-	*dma_addr = __map_single(dev, dma_dom, page_to_phys(page),
-				 size, DMA_BIDIRECTIONAL, dma_mask);
-
+	*dma_addr = __map_single(dev, to_dma_ops_domain(domain),
+			virt_to_phys(virt_addr), PAGE_ALIGN(size),
+			DMA_BIDIRECTIONAL, dma_mask);
 	if (*dma_addr == AMD_IOMMU_MAPPING_ERROR)
 		goto out_free;
 
-	return page_address(page);
+	return virt_addr;
 
 out_free:
 
-	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
-		__free_pages(page, get_order(size));
+	dma_direct_free(dev, size, virt_addr, *dma_addr, attrs);
 
 	return NULL;
 }
@@ -2655,24 +2636,17 @@ static void free_coherent(struct device *dev, size_t size,
 			  void *virt_addr, dma_addr_t dma_addr,
 			  unsigned long attrs)
 {
-	struct protection_domain *domain;
-	struct dma_ops_domain *dma_dom;
-	struct page *page;
+	struct protection_domain *domain = get_domain(dev);
 
-	page = virt_to_page(virt_addr);
 	size = PAGE_ALIGN(size);
 
-	domain = get_domain(dev);
-	if (IS_ERR(domain))
-		goto free_mem;
+	if (!IS_ERR(domain)) {
+		struct dma_ops_domain *dma_dom = to_dma_ops_domain(domain);
 
-	dma_dom = to_dma_ops_domain(domain);
-
-	__unmap_single(dma_dom, dma_addr, size, DMA_BIDIRECTIONAL);
+		__unmap_single(dma_dom, dma_addr, size, DMA_BIDIRECTIONAL);
+	}
 
-free_mem:
-	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
-		__free_pages(page, get_order(size));
+	dma_direct_free(dev, size, virt_addr, dma_addr, attrs);
 }
 
 /*