Commit b371ddb9 authored by Linus Torvalds

Merge tag 'iommu-fixes-v5.5-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu fixes from Joerg Roedel:

 - Fix kmemleak warning in IOVA code

 - Fix compile warnings on ARM32/64 in dma-iommu code due to dma_mask
   type mismatches (see the truncation sketch after this list)

 - Make ISA reserved regions relaxable, so that VFIO can assign devices
   which have such regions defined

 - Fix mapping errors resulting in IO page-faults in the VT-d driver

 - Make sure direct mappings for a domain are created after the default
   domain is updated

 - Map ISA reserved regions in the VT-d driver with correct permissions

 - Remove unneeded check for PSI capability in the IOTLB flush code of
   the VT-d driver

 - Lockdep fix for iommu_dma_prepare_msi()

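A side note on the dma_mask bullet above: the warnings come from mixing the
32-bit dma_addr_t used on some ARM32 configurations with 64-bit mask values.
The userspace sketch below is illustrative only (the typedef and function
names are invented stand-ins, not kernel code); it shows how a 64-bit mask is
silently truncated when funnelled through a 32-bit type, which is the
mismatch the series below resolves by passing limits and masks around as u64:

    /*
     * Illustrative only: "fake_dma_addr_t" and "pass_as_dma_addr" are
     * invented stand-ins, not kernel definitions. Builds with any C compiler.
     */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t fake_dma_addr_t;       /* dma_addr_t on a 32-bit config */

    static fake_dma_addr_t pass_as_dma_addr(fake_dma_addr_t limit)
    {
            return limit;   /* upper 32 bits were already lost at the call */
    }

    int main(void)
    {
            uint64_t mask64 = (1ULL << 36) - 1;     /* e.g. a 36-bit DMA mask */

            printf("as u64:        %#llx\n", (unsigned long long)mask64);
            printf("as dma_addr_t: %#llx\n",
                   (unsigned long long)pass_as_dma_addr(mask64));
            return 0;
    }

Keeping the value in u64 until the final comparison, as the dma-iommu changes
in the diff do, avoids that truncation.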
* tag 'iommu-fixes-v5.5-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/dma: Relax locking in iommu_dma_prepare_msi()
  iommu/vt-d: Remove incorrect PSI capability check
  iommu/vt-d: Allocate reserved region for ISA with correct permission
  iommu: set group default domain before creating direct mappings
  iommu/vt-d: Fix dmar pte read access not set error
  iommu/vt-d: Set ISA bridge reserved region as relaxable
  iommu/dma: Rationalise types for DMA masks
  iommu/iova: Init the struct iova to fix the possible memleak
parents fce34dec c1864790
@@ -19,6 +19,7 @@
 #include <linux/iova.h>
 #include <linux/irq.h>
 #include <linux/mm.h>
+#include <linux/mutex.h>
 #include <linux/pci.h>
 #include <linux/scatterlist.h>
 #include <linux/vmalloc.h>
@@ -44,7 +45,6 @@ struct iommu_dma_cookie {
                 dma_addr_t              msi_iova;
         };
         struct list_head        msi_page_list;
-        spinlock_t              msi_lock;
 
         /* Domain for flush queue callback; NULL if flush queue not in use */
         struct iommu_domain     *fq_domain;
@@ -63,7 +63,6 @@ static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
 
         cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
         if (cookie) {
-                spin_lock_init(&cookie->msi_lock);
                 INIT_LIST_HEAD(&cookie->msi_page_list);
                 cookie->type = type;
         }
@@ -399,7 +398,7 @@ static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
 }
 
 static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
-                size_t size, dma_addr_t dma_limit, struct device *dev)
+                size_t size, u64 dma_limit, struct device *dev)
 {
         struct iommu_dma_cookie *cookie = domain->iova_cookie;
         struct iova_domain *iovad = &cookie->iovad;
@@ -424,7 +423,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
         dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);
 
         if (domain->geometry.force_aperture)
-                dma_limit = min(dma_limit, domain->geometry.aperture_end);
+                dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
 
         /* Try to get PCI devices a SAC address */
         if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
@@ -477,7 +476,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
 }
 
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
-                size_t size, int prot, dma_addr_t dma_mask)
+                size_t size, int prot, u64 dma_mask)
 {
         struct iommu_domain *domain = iommu_get_dma_domain(dev);
         struct iommu_dma_cookie *cookie = domain->iova_cookie;
@@ -1176,7 +1175,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
                 if (msi_page->phys == msi_addr)
                         return msi_page;
 
-        msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
+        msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
         if (!msi_page)
                 return NULL;
 
@@ -1206,7 +1205,7 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
         struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
         struct iommu_dma_cookie *cookie;
         struct iommu_dma_msi_page *msi_page;
-        unsigned long flags;
+        static DEFINE_MUTEX(msi_prepare_lock); /* see below */
 
         if (!domain || !domain->iova_cookie) {
                 desc->iommu_cookie = NULL;
@@ -1216,13 +1215,13 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
         cookie = domain->iova_cookie;
 
         /*
-         * We disable IRQs to rule out a possible inversion against
-         * irq_desc_lock if, say, someone tries to retarget the affinity
-         * of an MSI from within an IPI handler.
+         * In fact the whole prepare operation should already be serialised by
+         * irq_domain_mutex further up the callchain, but that's pretty subtle
+         * on its own, so consider this locking as failsafe documentation...
          */
-        spin_lock_irqsave(&cookie->msi_lock, flags);
+        mutex_lock(&msi_prepare_lock);
         msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
-        spin_unlock_irqrestore(&cookie->msi_lock, flags);
+        mutex_unlock(&msi_prepare_lock);
 
         msi_desc_set_iommu_cookie(desc, msi_page);
...
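A note on the iommu_dma_prepare_msi() hunks above: a function-local static
DEFINE_MUTEX gives every caller of that function the same lock, and because a
mutex (unlike the removed spin_lock_irqsave() section) may sleep, the
GFP_ATOMIC allocation earlier in the file could be relaxed to GFP_KERNEL.
Below is a minimal userspace analogue of the pattern, not kernel code: the
names are invented and pthreads stand in for the kernel primitives.

    /* Illustrative only: "prepare_one" and friends are stand-ins. */
    #include <pthread.h>
    #include <stdio.h>

    static int prepared_count;      /* shared state, like the msi_page list */

    static void prepare_one(long id)
    {
            /* one lock shared by all callers, like msi_prepare_lock */
            static pthread_mutex_t prepare_lock = PTHREAD_MUTEX_INITIALIZER;

            pthread_mutex_lock(&prepare_lock);
            prepared_count++;       /* critical section; sleeping is fine */
            printf("prepared %ld (total %d)\n", id, prepared_count);
            pthread_mutex_unlock(&prepare_lock);
    }

    static void *worker(void *arg)
    {
            prepare_one((long)arg);
            return NULL;
    }

    int main(void)
    {
            pthread_t t[4];
            long i;

            for (i = 0; i < 4; i++)
                    pthread_create(&t[i], NULL, worker, (void *)i);
            for (i = 0; i < 4; i++)
                    pthread_join(t[i], NULL);
            return 0;
    }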
@@ -5478,9 +5478,6 @@ static int intel_iommu_map(struct iommu_domain *domain,
         int prot = 0;
         int ret;
 
-        if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN)
-                return -EINVAL;
-
         if (iommu_prot & IOMMU_READ)
                 prot |= DMA_PTE_READ;
         if (iommu_prot & IOMMU_WRITE)
@@ -5523,8 +5520,6 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
         /* Cope with horrid API which requires us to unmap more than the
            size argument if it happens to be a large-page mapping. */
         BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
-        if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN)
-                return 0;
 
         if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
                 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
@@ -5556,9 +5551,6 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
         int level = 0;
         u64 phys = 0;
 
-        if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN)
-                return 0;
-
         pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
         if (pte)
                 phys = dma_pte_addr(pte);
@@ -5736,8 +5728,8 @@ static void intel_iommu_get_resv_regions(struct device *device,
                 struct pci_dev *pdev = to_pci_dev(device);
 
                 if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) {
-                        reg = iommu_alloc_resv_region(0, 1UL << 24, 0,
-                                                      IOMMU_RESV_DIRECT);
+                        reg = iommu_alloc_resv_region(0, 1UL << 24, prot,
+                                                      IOMMU_RESV_DIRECT_RELAXABLE);
                         if (reg)
                                 list_add_tail(&reg->list, head);
                 }
...
@@ -104,11 +104,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
 {
         struct qi_desc desc;
 
-        /*
-         * Do PASID granu IOTLB invalidation if page selective capability is
-         * not available.
-         */
-        if (pages == -1 || !cap_pgsel_inv(svm->iommu->cap)) {
+        if (pages == -1) {
                 desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
                         QI_EIOTLB_DID(sdev->did) |
                         QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
...
@@ -2282,13 +2282,13 @@ request_default_domain_for_dev(struct device *dev, unsigned long type)
                 goto out;
         }
 
-        iommu_group_create_direct_mappings(group, dev);
-
         /* Make the domain the default for this group */
         if (group->default_domain)
                 iommu_domain_free(group->default_domain);
         group->default_domain = domain;
 
+        iommu_group_create_direct_mappings(group, dev);
+
         dev_info(dev, "Using iommu %s mapping\n",
                  type == IOMMU_DOMAIN_DMA ? "dma" : "direct");
 
...
@@ -233,7 +233,7 @@ static DEFINE_MUTEX(iova_cache_mutex);
 
 struct iova *alloc_iova_mem(void)
 {
-        return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
+        return kmem_cache_zalloc(iova_cache, GFP_ATOMIC);
 }
 EXPORT_SYMBOL(alloc_iova_mem);
 
...