Commit 9cc0c2af authored by Christoph Hellwig, committed by Joerg Roedel

iommu/vt-d: Use dma_direct for bypass devices

The intel-iommu driver currently has a partial reimplementation
of the direct mapping code for devices that use pass through
mode.  Replace that code with calls to the relevant dma_direct
routines at the highest level.  This means we have exactly the
same behavior as the dma direct code itself, and can prepare for
eventually only attaching the intel_iommu ops to devices that
actually need dynamic iommu mappings.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 48b2c937
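The change itself is mechanical: each dma_map_ops entry point first asks iommu_need_mapping() and, for pass-through (bypass) devices, hands the request to the generic dma-direct helpers instead of open-coding a physical == DMA address mapping. As a rough illustration only, here is a condensed sketch of intel_map_page() after this patch (not a verbatim copy of the kernel source), showing the dispatch pattern:

	/* Sketch of the dispatch pattern introduced by this patch. */
	static dma_addr_t intel_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir,
					 unsigned long attrs)
	{
		if (iommu_need_mapping(dev))
			/* Device needs dynamic IOMMU mappings: keep the VT-d path. */
			return __intel_map_single(dev, page_to_phys(page) + offset,
						  size, dir, *dev->dma_mask);
		/* Bypass device: defer to the shared direct-mapping code. */
		return dma_direct_map_page(dev, page, offset, size, dir, attrs);
	}

The unmap, map_resource, scatterlist and coherent alloc/free paths follow the same pattern in the diff below, which is why the hand-rolled intel_nontranslate_map_sg() and the GFP zone selection logic can be removed.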
@@ -3657,9 +3657,6 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 
 	BUG_ON(dir == DMA_NONE);
 
-	if (!iommu_need_mapping(dev))
-		return paddr;
-
 	domain = get_valid_domain_for_dev(dev);
 	if (!domain)
 		return DMA_MAPPING_ERROR;
@@ -3708,15 +3705,20 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
 				 enum dma_data_direction dir,
 				 unsigned long attrs)
 {
-	return __intel_map_single(dev, page_to_phys(page) + offset, size,
-				  dir, *dev->dma_mask);
+	if (iommu_need_mapping(dev))
+		return __intel_map_single(dev, page_to_phys(page) + offset,
+				size, dir, *dev->dma_mask);
+	return dma_direct_map_page(dev, page, offset, size, dir, attrs);
 }
 
 static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr,
 				     size_t size, enum dma_data_direction dir,
 				     unsigned long attrs)
 {
-	return __intel_map_single(dev, phys_addr, size, dir, *dev->dma_mask);
+	if (iommu_need_mapping(dev))
+		return __intel_map_single(dev, phys_addr, size, dir,
+				*dev->dma_mask);
+	return dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
 }
 
 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
@@ -3728,9 +3730,6 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
 	struct intel_iommu *iommu;
 	struct page *freelist;
 
-	if (!iommu_need_mapping(dev))
-		return;
-
 	domain = find_domain(dev);
 	BUG_ON(!domain);
 
@@ -3766,7 +3765,17 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 			     size_t size, enum dma_data_direction dir,
 			     unsigned long attrs)
 {
-	intel_unmap(dev, dev_addr, size);
+	if (iommu_need_mapping(dev))
+		intel_unmap(dev, dev_addr, size);
+	else
+		dma_direct_unmap_page(dev, dev_addr, size, dir, attrs);
+}
+
+static void intel_unmap_resource(struct device *dev, dma_addr_t dev_addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	if (iommu_need_mapping(dev))
+		intel_unmap(dev, dev_addr, size);
 }
 
 static void *intel_alloc_coherent(struct device *dev, size_t size,
@@ -3776,28 +3785,18 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
 	struct page *page = NULL;
 	int order;
 
+	if (!iommu_need_mapping(dev))
+		return dma_direct_alloc(dev, size, dma_handle, flags, attrs);
+
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
-
-	if (iommu_need_mapping(dev))
-		flags &= ~(GFP_DMA | GFP_DMA32);
-	else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
-		if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
-			flags |= GFP_DMA;
-		else
-			flags |= GFP_DMA32;
-	}
+	flags &= ~(GFP_DMA | GFP_DMA32);
 
 	if (gfpflags_allow_blocking(flags)) {
 		unsigned int count = size >> PAGE_SHIFT;
 
 		page = dma_alloc_from_contiguous(dev, count, order,
 						 flags & __GFP_NOWARN);
-		if (page && !iommu_need_mapping(dev) &&
-		    page_to_phys(page) + size > dev->coherent_dma_mask) {
-			dma_release_from_contiguous(dev, page, count);
-			page = NULL;
-		}
 	}
 
 	if (!page)
@@ -3823,6 +3822,9 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
 	int order;
 	struct page *page = virt_to_page(vaddr);
 
+	if (!iommu_need_mapping(dev))
+		return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
 
@@ -3840,6 +3842,9 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	struct scatterlist *sg;
 	int i;
 
+	if (!iommu_need_mapping(dev))
+		return dma_direct_unmap_sg(dev, sglist, nelems, dir, attrs);
+
 	for_each_sg(sglist, sg, nelems, i) {
 		nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
 	}
@@ -3847,20 +3852,6 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
 }
 
-static int intel_nontranslate_map_sg(struct device *hddev,
-			struct scatterlist *sglist, int nelems, int dir)
-{
-	int i;
-	struct scatterlist *sg;
-
-	for_each_sg(sglist, sg, nelems, i) {
-		BUG_ON(!sg_page(sg));
-		sg->dma_address = sg_phys(sg);
-		sg->dma_length = sg->length;
-	}
-	return nelems;
-}
-
 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
 			enum dma_data_direction dir, unsigned long attrs)
 {
@@ -3876,7 +3867,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
 	BUG_ON(dir == DMA_NONE);
 
 	if (!iommu_need_mapping(dev))
-		return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
+		return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
 
 	domain = get_valid_domain_for_dev(dev);
 	if (!domain)
@@ -3926,7 +3917,7 @@ static const struct dma_map_ops intel_dma_ops = {
 	.map_page = intel_map_page,
 	.unmap_page = intel_unmap_page,
 	.map_resource = intel_map_resource,
-	.unmap_resource = intel_unmap_page,
+	.unmap_resource = intel_unmap_resource,
 	.dma_supported = dma_direct_supported,
 };