Commit 8744daf4 authored by Jacob Pan, committed by Joerg Roedel

iommu/vt-d: Remove global page flush support

Global page support is removed from VT-d spec 3.0. Since the global
page (G) flag only affects first-level paging structures, and DMA
requests with PASID are only supported by VT-d spec 3.0 and onward, we
can safely remove global page support.

For kernel shared virtual address IOTLB invalidation, PASID granularity
and page-selective-within-PASID granularity will be used; global
granularity is not supported. Without this fix, IOTLB invalidation
causes an invalid descriptor error in the queued invalidation (QI)
interface.

Fixes: 1c4f88b7 ("iommu/vt-d: Shared virtual address in scalable mode")
Reported-by: Sanjay K Kumar <sanjay.k.kumar@intel.com>
Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 0ce4a85f
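As context for the diff below: with global flush gone, a full flush of one address space is expressed as a PASID-selective (non-global) invalidation. Here is a minimal standalone sketch of the qw0 encoding, not kernel code; the QI_EIOTLB_* field encodings are copied from include/linux/intel-iommu.h as of this commit, and the pasid/did values are made up for illustration:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* Field encodings from include/linux/intel-iommu.h (VT-d extended
 * IOTLB invalidate descriptor, type 0x6). */
#define QI_EIOTLB_TYPE		0x6
#define QI_EIOTLB_PASID(pasid)	(((u64)pasid) << 32)
#define QI_EIOTLB_DID(did)	(((u64)did) << 16)
#define QI_EIOTLB_GRAN(gran)	(((u64)gran) << 4)
#define QI_GRAN_NONG_PASID	2

int main(void)
{
	u64 pasid = 5, did = 1;	/* made-up example IDs */

	/* After this patch a full per-address-space flush always uses
	 * PASID granularity; no QI_GRAN_ALL_ALL "global" encoding remains. */
	u64 qw0 = QI_EIOTLB_PASID(pasid) |
		  QI_EIOTLB_DID(did) |
		  QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
		  QI_EIOTLB_TYPE;

	printf("qw0 = 0x%llx\n", (unsigned long long)qw0);
	return 0;
}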
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -100,24 +100,19 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
 }
 
 static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
-				unsigned long address, unsigned long pages, int ih, int gl)
+				unsigned long address, unsigned long pages, int ih)
 {
 	struct qi_desc desc;
 
-	if (pages == -1) {
-		/* For global kernel pages we have to flush them in *all* PASIDs
-		 * because that's the only option the hardware gives us. Despite
-		 * the fact that they are actually only accessible through one. */
-		if (gl)
-			desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
-					QI_EIOTLB_DID(sdev->did) |
-					QI_EIOTLB_GRAN(QI_GRAN_ALL_ALL) |
-					QI_EIOTLB_TYPE;
-		else
-			desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
-					QI_EIOTLB_DID(sdev->did) |
-					QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
-					QI_EIOTLB_TYPE;
+	/*
+	 * Do PASID granu IOTLB invalidation if page selective capability is
+	 * not available.
+	 */
+	if (pages == -1 || !cap_pgsel_inv(svm->iommu->cap)) {
+		desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
+			QI_EIOTLB_DID(sdev->did) |
+			QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
+			QI_EIOTLB_TYPE;
 		desc.qw1 = 0;
 	} else {
 		int mask = ilog2(__roundup_pow_of_two(pages));
@@ -127,7 +122,6 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
 			QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
 			QI_EIOTLB_TYPE;
 		desc.qw1 = QI_EIOTLB_ADDR(address) |
-			QI_EIOTLB_GL(gl) |
 			QI_EIOTLB_IH(ih) |
 			QI_EIOTLB_AM(mask);
 	}
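In the page-selective branch above, the address-mask (AM) field encodes the flush size as a power-of-two number of 4 KiB pages. A quick standalone illustration of that rounding, using portable stand-ins for the kernel's __roundup_pow_of_two() and ilog2() helpers (the helper names here are hypothetical, not the kernel implementations):

#include <stdio.h>

/* Userspace stand-in for the kernel's __roundup_pow_of_two(). */
static unsigned long roundup_p2(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

/* Userspace stand-in for the kernel's ilog2(). */
static int log2_ul(unsigned long n)
{
	int log = 0;

	while (n >>= 1)
		log++;
	return log;
}

int main(void)
{
	/* A 5-page range rounds up to 8 pages, so AM = 3: the descriptor
	 * invalidates a 2^3 * 4 KiB = 32 KiB naturally aligned region. */
	unsigned long pages = 5;
	int mask = log2_ul(roundup_p2(pages));

	printf("pages=%lu -> AM=%d (covers %lu pages)\n",
	       pages, mask, 1UL << mask);
	return 0;
}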
@@ -162,13 +156,13 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
 }
 
 static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
-		unsigned long pages, int ih, int gl)
+		unsigned long pages, int ih)
 {
 	struct intel_svm_dev *sdev;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(sdev, &svm->devs, list)
-		intel_flush_svm_range_dev(svm, sdev, address, pages, ih, gl);
+		intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
 	rcu_read_unlock();
 }
@@ -180,7 +174,7 @@ static void intel_invalidate_range(struct mmu_notifier *mn,
 	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
 
 	intel_flush_svm_range(svm, start,
-			      (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0, 0);
+			      (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
 }
 
 static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
@@ -203,7 +197,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 	rcu_read_lock();
 	list_for_each_entry_rcu(sdev, &svm->devs, list) {
 		intel_pasid_tear_down_entry(svm->iommu, sdev->dev, svm->pasid);
-		intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
+		intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
 	}
 	rcu_read_unlock();
@@ -425,7 +419,7 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
 			 * large and has to be physically contiguous. So it's
 			 * hard to be as defensive as we might like. */
 			intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
-			intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
+			intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
 			kfree_rcu(sdev, rcu);
 
 			if (list_empty(&svm->devs)) {
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -346,7 +346,6 @@ enum {
 #define QI_PC_PASID_SEL		(QI_PC_TYPE | QI_PC_GRAN(1))
 
 #define QI_EIOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
-#define QI_EIOTLB_GL(gl)	(((u64)gl) << 7)
 #define QI_EIOTLB_IH(ih)	(((u64)ih) << 6)
 #define QI_EIOTLB_AM(am)	(((u64)am))
 #define QI_EIOTLB_PASID(pasid)	(((u64)pasid) << 32)
@@ -378,8 +377,6 @@ enum {
 #define QI_RESP_INVALID		0x1
 #define QI_RESP_FAILURE		0xf
 
-#define QI_GRAN_ALL_ALL		0
-#define QI_GRAN_NONG_ALL	1
 #define QI_GRAN_NONG_PASID	2
 #define QI_GRAN_PSI_PASID	3
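With QI_EIOTLB_GL gone, bit 7 of qw1 is simply left clear. A small sanity sketch of the remaining qw1 layout (AM in bits 5:0, IH in bit 6, the page-aligned address in the upper bits); VTD_PAGE_MASK is assumed to be the 4 KiB page mask here, and the address value is made up:

#include <assert.h>
#include <stdint.h>

typedef uint64_t u64;

#define VTD_PAGE_MASK		(~0xfffULL)	/* assumes 4 KiB pages */
#define QI_EIOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
#define QI_EIOTLB_IH(ih)	(((u64)ih) << 6)
#define QI_EIOTLB_AM(am)	((u64)(am))

int main(void)
{
	u64 qw1 = QI_EIOTLB_ADDR(0x12345000ULL) |	/* example address */
		  QI_EIOTLB_IH(1) |
		  QI_EIOTLB_AM(3);

	/* Bit 7, formerly QI_EIOTLB_GL(gl), must now stay clear. */
	assert((qw1 & (1ULL << 7)) == 0);
	return 0;
}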