Commit 4c82b886 authored by Lu Baolu, committed by Joerg Roedel

iommu/vt-d: Allocate/register iopf queue for sva devices

This allocates and registers the iopf queue infrastructure for devices
which want to support IO page fault for SVA.
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Link: https://lore.kernel.org/r/20210520031531.712333-1-baolu.lu@linux.intel.com
Link: https://lore.kernel.org/r/20210610020115.1637656-11-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent ae7f09b1
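
For context, the sketch below shows how a device driver would exercise the path this patch wires up. It is an illustration, not part of the commit: the function name and the NULL drvdata argument are hypothetical placeholders, while iommu_dev_enable_feature(), iommu_dev_disable_feature() and iommu_sva_bind_device() are the generic IOMMU entry points that, on VT-d, now reach intel_iommu_enable_sva() and thus iopf_queue_add_device().

/* Hypothetical consumer sketch (not part of this patch). */
#include <linux/err.h>
#include <linux/iommu.h>

static int example_bind_process(struct device *dev, struct mm_struct *mm,
                                struct iommu_sva **handle)
{
        int ret;

        /* Recoverable IO page faults must be enabled before SVA. */
        ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF);
        if (ret)
                return ret;

        /*
         * On VT-d this ends up in intel_iommu_enable_sva(), which adds
         * the device to iommu->iopf_queue via iopf_queue_add_device().
         */
        ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
        if (ret)
                goto disable_iopf;

        /*
         * Bind the process address space; device faults on its addresses
         * are reported as page requests and resolved via the iopf queue.
         */
        *handle = iommu_sva_bind_device(dev, mm, NULL);
        if (IS_ERR(*handle)) {
                ret = PTR_ERR(*handle);
                goto disable_sva;
        }

        return 0;

disable_sva:
        iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
disable_iopf:
        iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_IOPF);
        return ret;
}
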
@@ -46,6 +46,7 @@
 #include <asm/iommu.h>
 
 #include "../irq_remapping.h"
+#include "../iommu-sva-lib.h"
 #include "pasid.h"
 #include "cap_audit.h"
@@ -5338,6 +5339,34 @@ static int intel_iommu_disable_auxd(struct device *dev)
 	return 0;
 }
 
+static int intel_iommu_enable_sva(struct device *dev)
+{
+	struct device_domain_info *info = get_domain_info(dev);
+	struct intel_iommu *iommu = info->iommu;
+
+	if (!info || !iommu || dmar_disabled)
+		return -EINVAL;
+
+	if (!(iommu->flags & VTD_FLAG_SVM_CAPABLE))
+		return -ENODEV;
+
+	if (intel_iommu_enable_pasid(iommu, dev))
+		return -ENODEV;
+
+	if (!info->pasid_enabled || !info->pri_enabled || !info->ats_enabled)
+		return -EINVAL;
+
+	return iopf_queue_add_device(iommu->iopf_queue, dev);
+}
+
+static int intel_iommu_disable_sva(struct device *dev)
+{
+	struct device_domain_info *info = get_domain_info(dev);
+	struct intel_iommu *iommu = info->iommu;
+
+	return iopf_queue_remove_device(iommu->iopf_queue, dev);
+}
+
 /*
  * A PCI express designated vendor specific extended capability is defined
  * in the section 3.7 of Intel scalable I/O virtualization technical spec
@@ -5399,38 +5428,37 @@ intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
 static int
 intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
 {
-	if (feat == IOMMU_DEV_FEAT_AUX)
+	switch (feat) {
+	case IOMMU_DEV_FEAT_AUX:
 		return intel_iommu_enable_auxd(dev);
 
-	if (feat == IOMMU_DEV_FEAT_IOPF)
+	case IOMMU_DEV_FEAT_IOPF:
 		return intel_iommu_dev_has_feat(dev, feat) ? 0 : -ENODEV;
 
-	if (feat == IOMMU_DEV_FEAT_SVA) {
-		struct device_domain_info *info = get_domain_info(dev);
-
-		if (!info)
-			return -EINVAL;
-
-		if (intel_iommu_enable_pasid(info->iommu, dev))
-			return -ENODEV;
-
-		if (!info->pasid_enabled || !info->pri_enabled || !info->ats_enabled)
-			return -EINVAL;
-
-		if (info->iommu->flags & VTD_FLAG_SVM_CAPABLE)
-			return 0;
-	}
-
-	return -ENODEV;
+	case IOMMU_DEV_FEAT_SVA:
+		return intel_iommu_enable_sva(dev);
+
+	default:
+		return -ENODEV;
+	}
 }
 
 static int
 intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
 {
-	if (feat == IOMMU_DEV_FEAT_AUX)
+	switch (feat) {
+	case IOMMU_DEV_FEAT_AUX:
 		return intel_iommu_disable_auxd(dev);
 
-	return -ENODEV;
+	case IOMMU_DEV_FEAT_IOPF:
+		return 0;
+
+	case IOMMU_DEV_FEAT_SVA:
+		return intel_iommu_disable_sva(dev);
+
+	default:
+		return -ENODEV;
+	}
 }
 
 static bool
@@ -84,6 +84,7 @@ svm_lookup_device_by_dev(struct intel_svm *svm, struct device *dev)
 
 int intel_svm_enable_prq(struct intel_iommu *iommu)
 {
+	struct iopf_queue *iopfq;
 	struct page *pages;
 	int irq, ret;
@@ -100,13 +101,20 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
 		pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n",
 		       iommu->name);
 		ret = -EINVAL;
-	err:
-		free_pages((unsigned long)iommu->prq, PRQ_ORDER);
-		iommu->prq = NULL;
-		return ret;
+		goto free_prq;
 	}
 	iommu->pr_irq = irq;
 
+	snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name),
+		 "dmar%d-iopfq", iommu->seq_id);
+	iopfq = iopf_queue_alloc(iommu->iopfq_name);
+	if (!iopfq) {
+		pr_err("IOMMU: %s: Failed to allocate iopf queue\n", iommu->name);
+		ret = -ENOMEM;
+		goto free_hwirq;
+	}
+	iommu->iopf_queue = iopfq;
+
 	snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id);
 
 	ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
@@ -114,9 +122,7 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
 	if (ret) {
 		pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
 		       iommu->name);
-		dmar_free_hwirq(irq);
-		iommu->pr_irq = 0;
-		goto err;
+		goto free_iopfq;
 	}
 	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
 	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
@@ -125,6 +131,18 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
 	init_completion(&iommu->prq_complete);
 
 	return 0;
+
+free_iopfq:
+	iopf_queue_free(iommu->iopf_queue);
+	iommu->iopf_queue = NULL;
+free_hwirq:
+	dmar_free_hwirq(irq);
+	iommu->pr_irq = 0;
+free_prq:
+	free_pages((unsigned long)iommu->prq, PRQ_ORDER);
+	iommu->prq = NULL;
+
+	return ret;
 }
 
 int intel_svm_finish_prq(struct intel_iommu *iommu)
@@ -139,6 +157,11 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
 		iommu->pr_irq = 0;
 	}
 
+	if (iommu->iopf_queue) {
+		iopf_queue_free(iommu->iopf_queue);
+		iommu->iopf_queue = NULL;
+	}
+
 	free_pages((unsigned long)iommu->prq, PRQ_ORDER);
 	iommu->prq = NULL;
@@ -606,6 +606,8 @@ struct intel_iommu {
 	struct completion prq_complete;
 	struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */
 #endif
+	struct iopf_queue *iopf_queue;
+	unsigned char iopfq_name[16];
 	struct q_inval *qi;		/* Queued invalidation info */
 	u32 *iommu_state;		/* Store iommu states between suspend and resume.*/