Commit 070c95d4 authored by Linus Torvalds

Merge tag 'iommu-fixes-v5.1-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu fixes from Joerg Roedel:

 - AMD IOMMU fix for sg-mapping with sg->offset > PAGE_SIZE

 - Fix for IOVA code to trigger the slow-path less often

 - Two fixes for Intel VT-d to avoid writing to read-only registers and
   to flush the right domain id for the default domains in scalable mode

* tag 'iommu-fixes-v5.1-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/vt-d: Save the right domain ID used by hardware
  iommu/vt-d: Check capability before disabling protected memory
  iommu/iova: Fix tracking of recently failed iova address
  iommu/amd: fix sg->dma_address for sg->offset bigger than PAGE_SIZE
parents dcacc486 84c11e4d
@@ -2608,7 +2608,12 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 	/* Everything is mapped - write the right values into s->dma_address */
 	for_each_sg(sglist, s, nelems, i) {
-		s->dma_address += address + s->offset;
+		/*
+		 * Add in the remaining piece of the scatter-gather offset that
+		 * was masked out when we were determining the physical address
+		 * via (sg_phys(s) & PAGE_MASK) earlier.
+		 */
+		s->dma_address += address + (s->offset & ~PAGE_MASK);
 		s->dma_length = s->length;
 	}
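A quick illustration of the arithmetic behind this fix (a standalone sketch, not kernel code; it assumes a 4 KiB page size and made-up address values). The mapping loop earlier in map_sg() works from the page-aligned physical address (sg_phys(s) & PAGE_MASK), so the whole pages contained in sg->offset are already part of the mapping; only the sub-page remainder may be added back. Adding the full offset lands one or more pages past the data whenever sg->offset > PAGE_SIZE:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long address = 0x100000UL;        /* hypothetical IOVA base */
	unsigned long offset  = PAGE_SIZE + 0x100; /* sg->offset > PAGE_SIZE */

	/* Pre-fix: re-adds the page-sized part already covered by the map. */
	unsigned long buggy = address + offset;

	/* Post-fix: only the piece masked out by (sg_phys(s) & PAGE_MASK). */
	unsigned long fixed = address + (offset & ~PAGE_MASK);

	printf("buggy: 0x%lx\n", buggy); /* 0x101100, one page too far */
	printf("fixed: 0x%lx\n", fixed); /* 0x100100 */
	return 0;
}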
@@ -1538,6 +1538,9 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
 	u32 pmen;
 	unsigned long flags;
 
+	if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
+		return;
+
 	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 	pmen = readl(iommu->reg + DMAR_PMEN_REG);
 	pmen &= ~DMA_PMEN_EPM;
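The guard added above is the usual capability-gated MMIO pattern: DMAR_PMEN_REG is only implemented when the IOMMU advertises protected low/high memory regions (cap_plmr()/cap_phmr()); on hardware without them the register is read-only, so the write and the wait for its status bit can never succeed. A minimal user-space model of the pattern (all names and bit positions here are invented for illustration, not VT-d's real layout):

#include <stdint.h>
#include <stdio.h>

#define CAP_PLMR (1ULL << 5)   /* hypothetical capability bits */
#define CAP_PHMR (1ULL << 6)
#define PMEN_EPM (1U << 31)    /* hypothetical enable bit */

struct fake_iommu {
	uint64_t cap;   /* cached capability register */
	uint32_t pmen;  /* stands in for DMAR_PMEN_REG */
};

static void disable_protect_mem_regions(struct fake_iommu *iommu)
{
	/* Without either capability the register is not writable; bail
	 * out before touching it, as the patch above does. */
	if (!(iommu->cap & CAP_PLMR) && !(iommu->cap & CAP_PHMR))
		return;

	iommu->pmen &= ~PMEN_EPM;
}

int main(void)
{
	struct fake_iommu no_prot = { .cap = 0,        .pmen = PMEN_EPM };
	struct fake_iommu prot    = { .cap = CAP_PLMR, .pmen = PMEN_EPM };

	disable_protect_mem_regions(&no_prot); /* early return, untouched */
	disable_protect_mem_regions(&prot);    /* actually clears the bit */

	printf("no_prot pmen: 0x%x, prot pmen: 0x%x\n",
	       no_prot.pmen, prot.pmen);
	return 0;
}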
@@ -5332,7 +5335,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
 	ctx_lo = context[0].lo;
 
-	sdev->did = domain->iommu_did[iommu->seq_id];
+	sdev->did = FLPT_DEFAULT_DID;
 	sdev->sid = PCI_DEVID(info->bus, info->devfn);
 
 	if (!(ctx_lo & CONTEXT_PASIDE)) {
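Why the one-line change in intel_iommu_enable_pasid() matters: the driver programs FLPT_DEFAULT_DID into the first-level-only PASID entry, but previously saved a different domain ID in sdev->did, and that saved value is what later cache invalidations are keyed on. Translation-cache flushes only hit entries tagged with the matching domain ID, so flushing with the wrong ID silently leaves stale entries behind. A toy model of that failure mode (plain C, not VT-d code; the cache layout is invented):

#include <stdio.h>

#define FLPT_DEFAULT_DID 0  /* the ID actually written to the PASID entry */

struct tlb_entry {
	unsigned int did;    /* domain ID tag */
	unsigned int valid;
};

static void flush_domain(struct tlb_entry *tlb, int n, unsigned int did)
{
	/* A flush only invalidates entries carrying the requested tag. */
	for (int i = 0; i < n; i++)
		if (tlb[i].did == did)
			tlb[i].valid = 0;
}

int main(void)
{
	/* Hardware cached a translation under FLPT_DEFAULT_DID. */
	struct tlb_entry tlb[1] = { { .did = FLPT_DEFAULT_DID, .valid = 1 } };
	unsigned int wrong_did = 5; /* e.g. the per-domain ID saved pre-fix */

	flush_domain(tlb, 1, wrong_did);
	printf("after wrong-DID flush: valid=%u (stale)\n", tlb[0].valid);

	flush_domain(tlb, 1, FLPT_DEFAULT_DID);
	printf("after right-DID flush: valid=%u\n", tlb[0].valid);
	return 0;
}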
@@ -207,8 +207,10 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 		curr_iova = rb_entry(curr, struct iova, node);
 	} while (curr && new_pfn <= curr_iova->pfn_hi);
 
-	if (limit_pfn < size || new_pfn < iovad->start_pfn)
+	if (limit_pfn < size || new_pfn < iovad->start_pfn) {
+		iovad->max32_alloc_size = size;
 		goto iova32_full;
+	}
 
 	/* pfn_lo will point to size aligned address if size_aligned is set */
 	new->pfn_lo = new_pfn;
@@ -222,7 +224,6 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 	return 0;
 
 iova32_full:
-	iovad->max32_alloc_size = size;
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 	return -ENOMEM;
 }
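The two IOVA hunks move the max32_alloc_size bookkeeping from the shared iova32_full exit label into the branch where the rbtree walk actually fails. max32_alloc_size caches the smallest request size known not to fit below the 32-bit boundary, so later requests at least that large can fail fast without walking the tree. The fast-fail path also jumps to iova32_full, and before this change it would overwrite the tracked value with its own (larger) size, re-enabling slow-path walks for sizes in between. A sketch of the corrected bookkeeping (plain C, not the kernel's iova.c; the "tree walk" is faked):

#include <stdbool.h>
#include <stdio.h>

static unsigned long max32_alloc_size = ~0UL; /* smallest size seen to fail */

/* Stand-in for the rbtree walk: pretend anything >= 4 pfns won't fit. */
static bool tree_walk_fits(unsigned long size)
{
	return size < 4;
}

static int alloc_range(unsigned long size)
{
	if (size >= max32_alloc_size)
		return -1; /* fast fail: a request this big already failed */

	if (!tree_walk_fits(size)) {
		/* Record the failure here, as the patch does, not at a
		 * shared exit label that the fast-fail path also reaches;
		 * otherwise a larger fast-failed request would overwrite
		 * the smaller tracked size. */
		max32_alloc_size = size;
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("size 4: %d (slow-path fail, records 4)\n", alloc_range(4));
	printf("size 8: %d (fast fail, tracking stays at 4)\n", alloc_range(8));
	printf("size 5: %d (still fast-fails thanks to the fix)\n", alloc_range(5));
	printf("size 2: %d (fits)\n", alloc_range(2));
	return 0;
}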