Commit 1c4f88b7 authored by Lu Baolu, committed by Joerg Roedel

iommu/vt-d: Shared virtual address in scalable mode

This patch enables the current SVA (Shared Virtual Address)
implementation to work in scalable mode.

Cc: Ashok Raj <ashok.raj@intel.com>
Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
Cc: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Sanjay Kumar <sanjay.k.kumar@intel.com>
Signed-off-by: Liu Yi L <yi.l.liu@intel.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 437f35e1
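
For orientation before the diff: a hypothetical driver-side sketch of the SVA API whose internals this patch reworks. The function name and the device-programming step are illustrative placeholders, not code from the tree; the intel_svm_bind_mm()/intel_svm_unbind_mm() signatures are those visible in the hunks below.

#include <linux/device.h>
#include <linux/intel-svm.h>

static int example_enable_sva(struct device *dev)
{
        int pasid, ret;

        /* Bind current->mm to the device and get a PASID for it. */
        ret = intel_svm_bind_mm(dev, &pasid, 0, NULL);
        if (ret)
                return ret;

        /*
         * ... program "pasid" into the device here; DMA requests it
         * tags with that PASID are then translated through the CPU
         * page tables (first-level translation in scalable mode) ...
         */

        /* Tear the binding down when the device is done with it. */
        return intel_svm_unbind_mm(dev, pasid);
}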
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -5257,18 +5257,6 @@ static void intel_iommu_put_resv_regions(struct device *dev,
 }
 
 #ifdef CONFIG_INTEL_IOMMU_SVM
-static inline unsigned long intel_iommu_get_pts(struct device *dev)
-{
-        int pts, max_pasid;
-
-        max_pasid = intel_pasid_get_dev_max_id(dev);
-        pts = find_first_bit((unsigned long *)&max_pasid, MAX_NR_PASID_BITS);
-        if (pts < 5)
-                return 0;
-
-        return pts - 5;
-}
-
 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
 {
         struct device_domain_info *info;
@@ -5300,33 +5288,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
         sdev->sid = PCI_DEVID(info->bus, info->devfn);
 
         if (!(ctx_lo & CONTEXT_PASIDE)) {
-                if (iommu->pasid_state_table)
-                        context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
-                context[1].lo = (u64)virt_to_phys(info->pasid_table->table) |
-                        intel_iommu_get_pts(sdev->dev);
-
-                wmb();
-                /* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
-                 * extended to permit requests-with-PASID if the PASIDE bit
-                 * is set. which makes sense. For CONTEXT_TT_PASS_THROUGH,
-                 * however, the PASIDE bit is ignored and requests-with-PASID
-                 * are unconditionally blocked. Which makes less sense.
-                 * So convert from CONTEXT_TT_PASS_THROUGH to one of the new
-                 * "guest mode" translation types depending on whether ATS
-                 * is available or not. Annoyingly, we can't use the new
-                 * modes *unless* PASIDE is set. */
-                if ((ctx_lo & CONTEXT_TT_MASK) == (CONTEXT_TT_PASS_THROUGH << 2)) {
-                        ctx_lo &= ~CONTEXT_TT_MASK;
-                        if (info->ats_supported)
-                                ctx_lo |= CONTEXT_TT_PT_PASID_DEV_IOTLB << 2;
-                        else
-                                ctx_lo |= CONTEXT_TT_PT_PASID << 2;
-                }
                 ctx_lo |= CONTEXT_PASIDE;
-                if (iommu->pasid_state_table)
-                        ctx_lo |= CONTEXT_DINVE;
-                if (info->pri_supported)
-                        ctx_lo |= CONTEXT_PRS;
                 context[0].lo = ctx_lo;
                 wmb();
                 iommu->flush.flush_context(iommu, sdev->did, sdev->sid,
--- a/drivers/iommu/intel-pasid.c
+++ b/drivers/iommu/intel-pasid.c
@@ -286,7 +286,7 @@ static inline void pasid_clear_entry(struct pasid_entry *pe)
         WRITE_ONCE(pe->val[7], 0);
 }
 
-void intel_pasid_clear_entry(struct device *dev, int pasid)
+static void intel_pasid_clear_entry(struct device *dev, int pasid)
 {
         struct pasid_entry *pe;
--- a/drivers/iommu/intel-pasid.h
+++ b/drivers/iommu/intel-pasid.h
@@ -58,7 +58,6 @@ void intel_pasid_free_table(struct device *dev);
 struct pasid_table *intel_pasid_get_table(struct device *dev);
 int intel_pasid_get_dev_max_id(struct device *dev);
 struct pasid_entry *intel_pasid_get_entry(struct device *dev, int pasid);
-void intel_pasid_clear_entry(struct device *dev, int pasid);
 int intel_pasid_setup_first_level(struct intel_iommu *iommu,
                                   struct device *dev, pgd_t *pgd,
                                   int pasid, u16 did, int flags);
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -29,10 +29,6 @@
 
 #include "intel-pasid.h"
 
-#define PASID_ENTRY_P                   BIT_ULL(0)
-#define PASID_ENTRY_FLPM_5LP            BIT_ULL(9)
-#define PASID_ENTRY_SRE                 BIT_ULL(11)
-
 static irqreturn_t prq_event_thread(int irq, void *d);
 
 struct pasid_state_entry {
@@ -248,20 +244,6 @@ static void intel_invalidate_range(struct mmu_notifier *mn,
                               (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0, 0);
 }
 
-static void intel_flush_pasid_dev(struct intel_svm *svm, struct intel_svm_dev *sdev, int pasid)
-{
-        struct qi_desc desc;
-
-        desc.qw0 = QI_PC_TYPE | QI_PC_DID(sdev->did) |
-                QI_PC_PASID_SEL | QI_PC_PASID(pasid);
-        desc.qw1 = 0;
-        desc.qw2 = 0;
-        desc.qw3 = 0;
-        qi_submit_sync(&desc, svm->iommu);
-}
-
 static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 {
         struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
@@ -281,8 +263,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
          */
         rcu_read_lock();
         list_for_each_entry_rcu(sdev, &svm->devs, list) {
-                intel_pasid_clear_entry(sdev->dev, svm->pasid);
-                intel_flush_pasid_dev(svm, sdev, svm->pasid);
+                intel_pasid_tear_down_entry(svm->iommu, sdev->dev, svm->pasid);
                 intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
         }
         rcu_read_unlock();
@@ -301,11 +282,9 @@ static LIST_HEAD(global_svm_list);
 int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
 {
         struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
-        struct pasid_entry *entry;
         struct intel_svm_dev *sdev;
         struct intel_svm *svm = NULL;
         struct mm_struct *mm = NULL;
-        u64 pasid_entry_val;
         int pasid_max;
         int ret;
@@ -414,22 +393,22 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
                                 kfree(sdev);
                                 goto out;
                         }
-                        pasid_entry_val = (u64)__pa(mm->pgd) | PASID_ENTRY_P;
-                } else
-                        pasid_entry_val = (u64)__pa(init_mm.pgd) |
-                                          PASID_ENTRY_P | PASID_ENTRY_SRE;
-                if (cpu_feature_enabled(X86_FEATURE_LA57))
-                        pasid_entry_val |= PASID_ENTRY_FLPM_5LP;
-
-                entry = intel_pasid_get_entry(dev, svm->pasid);
-                WRITE_ONCE(entry->val[0], pasid_entry_val);
-
-                /*
-                 * Flush PASID cache when a PASID table entry becomes
-                 * present.
-                 */
-                if (cap_caching_mode(iommu->cap))
-                        intel_flush_pasid_dev(svm, sdev, svm->pasid);
+                }
+
+                spin_lock(&iommu->lock);
+                ret = intel_pasid_setup_first_level(iommu, dev,
+                                mm ? mm->pgd : init_mm.pgd,
+                                svm->pasid, FLPT_DEFAULT_DID,
+                                mm ? 0 : PASID_FLAG_SUPERVISOR_MODE);
+                spin_unlock(&iommu->lock);
+                if (ret) {
+                        if (mm)
+                                mmu_notifier_unregister(&svm->notifier, mm);
+                        intel_pasid_free_id(svm->pasid);
+                        kfree(svm);
+                        kfree(sdev);
+                        goto out;
+                }
 
                 list_add_tail(&svm->list, &global_svm_list);
         }
@@ -475,10 +454,9 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
                          * to use. We have a *shared* PASID table, because it's
                          * large and has to be physically contiguous. So it's
                          * hard to be as defensive as we might like. */
-                        intel_flush_pasid_dev(svm, sdev, svm->pasid);
+                        intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
                         intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
                         kfree_rcu(sdev, rcu);
-                        intel_pasid_clear_entry(dev, svm->pasid);
 
                         if (list_empty(&svm->devs)) {
                                 intel_pasid_free_id(svm->pasid);
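
The heart of the rework is in the bind path above: instead of composing a PASID table entry by hand in intel-svm.c, the code now defers to the scalable-mode helper. Repeating that excerpt with our own gloss as comments (the descriptions of FLPT_DEFAULT_DID and the supervisor flag are our reading of the series, not wording from the patch):

spin_lock(&iommu->lock);
ret = intel_pasid_setup_first_level(iommu, dev,
                /* user bind walks mm->pgd; kernel bind walks init_mm.pgd */
                mm ? mm->pgd : init_mm.pgd,
                /* PASID, plus the default domain ID for first-level use */
                svm->pasid, FLPT_DEFAULT_DID,
                /* kernel binds need supervisor requests enabled (SRE) */
                mm ? 0 : PASID_FLAG_SUPERVISOR_MODE);
spin_unlock(&iommu->lock);

This also absorbs the removed open-coded PASID_ENTRY_SRE and 5-level-paging (LA57) handling, which now lives in intel-pasid.c.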
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -54,14 +54,7 @@
 #define CONTEXT_TT_MULTI_LEVEL  0
 #define CONTEXT_TT_DEV_IOTLB    1
 #define CONTEXT_TT_PASS_THROUGH 2
-/* Extended context entry types */
-#define CONTEXT_TT_PT_PASID     4
-#define CONTEXT_TT_PT_PASID_DEV_IOTLB 5
-#define CONTEXT_TT_MASK (7ULL << 2)
-
-#define CONTEXT_DINVE           (1ULL << 8)
-#define CONTEXT_PRS             (1ULL << 9)
-#define CONTEXT_PASIDE          (1ULL << 11)
+#define CONTEXT_PASIDE          BIT_ULL(3)
 
 /*
  * Intel IOMMU register specification per version 1.0 public spec.
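
One more note on the header change: the legacy extended-context (ECS) format kept PASIDE at bit 11 of the context entry's low quadword, while the scalable-mode context entry defines it at bit 3, hence BIT_ULL(3). A minimal sketch, assuming that layout, of what the slimmed-down intel_iommu_enable_pasid() path now does:

u64 ctx_lo = context[0].lo;

if (!(ctx_lo & CONTEXT_PASIDE)) {
        ctx_lo |= CONTEXT_PASIDE;       /* BIT_ULL(3) in scalable mode */
        context[0].lo = ctx_lo;
        wmb();                          /* publish the entry before flushing */
        /* ...then flush the context cache, as in the first hunk above */
}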