Commit 6fc7020c authored by Lu Baolu, committed by Joerg Roedel

iommu/vt-d: Apply per-device dma_ops

The current Intel IOMMU driver sets the system-level dma_ops. This causes
every DMA API call to go through the IOMMU driver even when a device is
using an identity-mapped domain. Set per-device dma_ops only if a device
is using a DMA domain; otherwise, use the default system-level dma_ops
for direct DMA.
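
In effect, the dma_ops decision moves from a per-call check (iommu_need_mapping())
to a one-time choice when the device is probed. A condensed view of the new
selection logic, taken from the probe_finalize hunk below (comments are added
annotation, not part of the patch):

	static void intel_iommu_probe_finalize(struct device *dev)
	{
		struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

		if (device_needs_bounce(dev))
			set_dma_ops(dev, &bounce_dma_ops);  /* e.g. untrusted device: bounce-page ops */
		else if (domain && domain->type == IOMMU_DOMAIN_DMA)
			set_dma_ops(dev, &intel_dma_ops);   /* translated DMA domain: IOMMU-backed ops */
		else
			set_dma_ops(dev, NULL);             /* identity domain: default direct DMA */
	}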
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Tested-by: Daniel Drake <drake@endlessm.com>
Reviewed-by: Jon Derrick <jonathan.derrick@intel.com>
Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
Link: https://lore.kernel.org/r/20200506015947.28662-4-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 14b3526d
drivers/iommu/intel-iommu.c

@@ -2720,17 +2720,6 @@ static int __init si_domain_init(int hw)
 	return 0;
 }
 
-static int identity_mapping(struct device *dev)
-{
-	struct device_domain_info *info;
-
-	info = dev->archdata.iommu;
-	if (info)
-		return (info->domain == si_domain);
-
-	return 0;
-}
-
 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
 {
 	struct dmar_domain *ndomain;
@@ -3315,18 +3304,6 @@ static unsigned long intel_alloc_iova(struct device *dev,
 	return iova_pfn;
 }
 
-/* Check if the dev needs to go through non-identity map and unmap process.*/
-static bool iommu_need_mapping(struct device *dev)
-{
-	if (iommu_dummy(dev))
-		return false;
-
-	if (unlikely(attach_deferred(dev)))
-		do_deferred_attach(dev);
-
-	return !identity_mapping(dev);
-}
-
 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 				     size_t size, int dir, u64 dma_mask)
 {
@@ -3340,6 +3317,9 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 
 	BUG_ON(dir == DMA_NONE);
 
+	if (unlikely(attach_deferred(dev)))
+		do_deferred_attach(dev);
+
 	domain = find_domain(dev);
 	if (!domain)
 		return DMA_MAPPING_ERROR;
@@ -3391,20 +3371,15 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
 				 enum dma_data_direction dir,
 				 unsigned long attrs)
 {
-	if (iommu_need_mapping(dev))
-		return __intel_map_single(dev, page_to_phys(page) + offset,
-				size, dir, *dev->dma_mask);
-	return dma_direct_map_page(dev, page, offset, size, dir, attrs);
+	return __intel_map_single(dev, page_to_phys(page) + offset,
+				  size, dir, *dev->dma_mask);
 }
 
 static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr,
 				     size_t size, enum dma_data_direction dir,
 				     unsigned long attrs)
 {
-	if (iommu_need_mapping(dev))
-		return __intel_map_single(dev, phys_addr, size, dir,
-				*dev->dma_mask);
-	return dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
+	return __intel_map_single(dev, phys_addr, size, dir, *dev->dma_mask);
 }
 
 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
@@ -3455,16 +3430,12 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 			     size_t size, enum dma_data_direction dir,
 			     unsigned long attrs)
 {
-	if (iommu_need_mapping(dev))
-		intel_unmap(dev, dev_addr, size);
-	else
-		dma_direct_unmap_page(dev, dev_addr, size, dir, attrs);
+	intel_unmap(dev, dev_addr, size);
 }
 
 static void intel_unmap_resource(struct device *dev, dma_addr_t dev_addr,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-	if (iommu_need_mapping(dev))
-		intel_unmap(dev, dev_addr, size);
+	intel_unmap(dev, dev_addr, size);
 }
 
@@ -3475,8 +3446,8 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
 	struct page *page = NULL;
 	int order;
 
-	if (!iommu_need_mapping(dev))
-		return dma_direct_alloc(dev, size, dma_handle, flags, attrs);
+	if (unlikely(attach_deferred(dev)))
+		do_deferred_attach(dev);
 
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
@@ -3511,9 +3482,6 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
 	int order;
 	struct page *page = virt_to_page(vaddr);
 
-	if (!iommu_need_mapping(dev))
-		return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
-
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
@@ -3531,9 +3499,6 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	struct scatterlist *sg;
 	int i;
 
-	if (!iommu_need_mapping(dev))
-		return dma_direct_unmap_sg(dev, sglist, nelems, dir, attrs);
-
 	for_each_sg(sglist, sg, nelems, i) {
 		nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
 	}
@@ -3557,8 +3522,9 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
 	struct intel_iommu *iommu;
 
 	BUG_ON(dir == DMA_NONE);
-	if (!iommu_need_mapping(dev))
-		return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
+
+	if (unlikely(attach_deferred(dev)))
+		do_deferred_attach(dev);
 
 	domain = find_domain(dev);
 	if (!domain)
@@ -3605,8 +3571,6 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
 
 static u64 intel_get_required_mask(struct device *dev)
 {
-	if (!iommu_need_mapping(dev))
-		return dma_direct_get_required_mask(dev);
 	return DMA_BIT_MASK(32);
 }
 
@@ -4888,8 +4852,6 @@ int __init intel_iommu_init(void)
 	}
 	up_write(&dmar_global_lock);
 
-	dma_ops = &intel_dma_ops;
-
 	init_iommu_pm_ops();
 
 	down_read(&dmar_global_lock);
@@ -5479,11 +5441,6 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
 	if (translation_pre_enabled(iommu))
 		dev->archdata.iommu = DEFER_DEVICE_DOMAIN_INFO;
 
-	if (device_needs_bounce(dev)) {
-		dev_info(dev, "Use Intel IOMMU bounce page dma_ops\n");
-		set_dma_ops(dev, &bounce_dma_ops);
-	}
-
 	return &iommu->iommu;
 }
@@ -5498,7 +5455,19 @@ static void intel_iommu_release_device(struct device *dev)
 
 	dmar_remove_one_dev_info(dev);
 
+	set_dma_ops(dev, NULL);
+}
+
+static void intel_iommu_probe_finalize(struct device *dev)
+{
+	struct iommu_domain *domain;
+
+	domain = iommu_get_domain_for_dev(dev);
 	if (device_needs_bounce(dev))
+		set_dma_ops(dev, &bounce_dma_ops);
+	else if (domain && domain->type == IOMMU_DOMAIN_DMA)
+		set_dma_ops(dev, &intel_dma_ops);
+	else
 		set_dma_ops(dev, NULL);
 }
@@ -5830,6 +5799,7 @@ const struct iommu_ops intel_iommu_ops = {
 	.unmap		= intel_iommu_unmap,
 	.iova_to_phys	= intel_iommu_iova_to_phys,
 	.probe_device	= intel_iommu_probe_device,
+	.probe_finalize	= intel_iommu_probe_finalize,
 	.release_device	= intel_iommu_release_device,
 	.get_resv_regions = intel_iommu_get_resv_regions,
 	.put_resv_regions = generic_iommu_put_resv_regions,