Commit a1ddcbe9 authored by Joerg Roedel

iommu/vt-d: Pass dmar_domain directly into iommu_flush_iotlb_psi

This function can figure out the domain-id to use by itself
from the iommu_did array. This is more reliable across
different domain types and brings us one step closer to
removing the domain->id field.
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent de24e553
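Before the diff, a minimal user-space sketch of the pattern the patch moves to: the flush helper receives the dmar_domain itself and derives the domain-id (DID) for a given IOMMU from a per-IOMMU table indexed by that IOMMU's seq_id, instead of every caller passing a DID. The struct layouts below are simplified, hypothetical stand-ins for illustration only, not the kernel definitions.

/* sketch: per-IOMMU DID lookup done inside the flush helper */
#include <stdint.h>
#include <stdio.h>

#define MAX_IOMMUS 4

struct intel_iommu {
	int seq_id;                      /* position of this IOMMU unit */
};

struct dmar_domain {
	uint16_t iommu_did[MAX_IOMMUS];  /* DID per attached IOMMU (stand-in) */
};

/* New-style helper: no "u16 did" parameter, the domain pointer is enough. */
static void flush_iotlb_psi(struct intel_iommu *iommu,
			    struct dmar_domain *domain,
			    unsigned long pfn, unsigned int pages)
{
	uint16_t did = domain->iommu_did[iommu->seq_id];

	printf("flush: iommu %d, did %u, pfn %lu, pages %u\n",
	       iommu->seq_id, did, pfn, pages);
}

int main(void)
{
	struct intel_iommu iommu0 = { .seq_id = 0 };
	struct intel_iommu iommu1 = { .seq_id = 1 };
	struct dmar_domain dom = { .iommu_did = { 3, 7 } };

	/* Same domain on two IOMMUs: each lookup yields that IOMMU's own DID. */
	flush_iotlb_psi(&iommu0, &dom, 0x1000, 1);
	flush_iotlb_psi(&iommu1, &dom, 0x1000, 1);
	return 0;
}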
@@ -1471,11 +1471,14 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
 	spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
-static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
-				  unsigned long pfn, unsigned int pages, int ih, int map)
+static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
+				  struct dmar_domain *domain,
+				  unsigned long pfn, unsigned int pages,
+				  int ih, int map)
 {
 	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
 	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
+	u16 did = domain->iommu_did[iommu->seq_id];
 
 	BUG_ON(pages == 0);
@@ -3422,7 +3425,9 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 	/* it's a non-present to present mapping. Only flush if caching mode */
 	if (cap_caching_mode(iommu->cap))
-		iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
+		iommu_flush_iotlb_psi(iommu, domain,
+				      mm_to_dma_pfn(iova->pfn_lo),
+				      size, 0, 1);
 	else
 		iommu_flush_write_buffer(iommu);
@@ -3473,7 +3478,7 @@ static void flush_unmaps(void)
 			/* On real hardware multiple invalidations are expensive */
 			if (cap_caching_mode(iommu->cap))
-				iommu_flush_iotlb_psi(iommu, domain->id,
+				iommu_flush_iotlb_psi(iommu, domain,
 					iova->pfn_lo, iova_size(iova),
 					!deferred_flush[i].freelist[j], 0);
 			else {
@@ -3557,7 +3562,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
 	freelist = domain_unmap(domain, start_pfn, last_pfn);
 
 	if (intel_iommu_strict) {
-		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
+		iommu_flush_iotlb_psi(iommu, domain, start_pfn,
 				      last_pfn - start_pfn + 1, !freelist, 0);
 		/* free iova */
 		__free_iova(&domain->iovad, iova);
@@ -3715,7 +3720,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
 	/* it's a non-present to present mapping. Only flush if caching mode */
 	if (cap_caching_mode(iommu->cap))
-		iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
+		iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
 	else
 		iommu_flush_write_buffer(iommu);
@@ -4421,7 +4426,7 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
 			rcu_read_lock();
 			for_each_active_iommu(iommu, drhd)
-				iommu_flush_iotlb_psi(iommu, si_domain->id,
+				iommu_flush_iotlb_psi(iommu, si_domain,
 					iova->pfn_lo, iova_size(iova),
 					!freelist, 0);
 			rcu_read_unlock();
@@ -4872,17 +4877,18 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
 	npages = last_pfn - start_pfn + 1;
 
 	for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
 		iommu = g_iommus[iommu_id];
 
 		/*
 		 * find bit position of dmar_domain
 		 */
 		ndomains = cap_ndoms(iommu->cap);
 		for_each_set_bit(num, iommu->domain_ids, ndomains) {
 			if (get_iommu_domain(iommu, num) == dmar_domain)
-				iommu_flush_iotlb_psi(iommu, num, start_pfn,
-						      npages, !freelist, 0);
+				iommu_flush_iotlb_psi(iommu, dmar_domain,
+						      start_pfn, npages,
+						      !freelist, 0);
 		}
 	}