Commit 067e95fc authored by Joerg Roedel

Merge branch 'core' into x86/vt-d

parents f266c11b 5b61343b
@@ -133,7 +133,7 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
* or equal to the system's PAGE_SIZE, with a preference if
* both are equal.
*/
pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
pgsize_bitmap = tdev->iommu.domain->pgsize_bitmap;
if (pgsize_bitmap & PAGE_SIZE) {
tdev->iommu.pgshift = PAGE_SHIFT;
} else {
...
@@ -116,8 +116,7 @@ void amd_iommu_domain_clr_pt_root(struct protection_domain *domain)
extern bool translation_pre_enabled(struct amd_iommu *iommu);
extern bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
struct device *dev);
extern bool amd_iommu_is_attach_deferred(struct device *dev);
extern int __init add_special_device(u8 type, u8 id, u16 *devid,
bool cmd_line);
...
@@ -2215,8 +2215,7 @@ static void amd_iommu_get_resv_regions(struct device *dev,
list_add_tail(&region->list, head);
}
bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
struct device *dev)
bool amd_iommu_is_attach_deferred(struct device *dev)
{
struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
@@ -2269,13 +2268,6 @@ static int amd_iommu_def_domain_type(struct device *dev)
const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
.domain_alloc = amd_iommu_domain_alloc,
.domain_free = amd_iommu_domain_free,
.attach_dev = amd_iommu_attach_device,
.detach_dev = amd_iommu_detach_device,
.map = amd_iommu_map,
.iotlb_sync_map = amd_iommu_iotlb_sync_map,
.unmap = amd_iommu_unmap,
.iova_to_phys = amd_iommu_iova_to_phys,
.probe_device = amd_iommu_probe_device,
.release_device = amd_iommu_release_device,
.probe_finalize = amd_iommu_probe_finalize,
@@ -2284,9 +2276,18 @@ const struct iommu_ops amd_iommu_ops = {
.put_resv_regions = generic_iommu_put_resv_regions,
.is_attach_deferred = amd_iommu_is_attach_deferred,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
.flush_iotlb_all = amd_iommu_flush_iotlb_all,
.iotlb_sync = amd_iommu_iotlb_sync,
.def_domain_type = amd_iommu_def_domain_type,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = amd_iommu_attach_device,
.detach_dev = amd_iommu_detach_device,
.map = amd_iommu_map,
.unmap = amd_iommu_unmap,
.iotlb_sync_map = amd_iommu_iotlb_sync_map,
.iova_to_phys = amd_iommu_iova_to_phys,
.flush_iotlb_all = amd_iommu_flush_iotlb_all,
.iotlb_sync = amd_iommu_iotlb_sync,
.free = amd_iommu_domain_free,
}
};
/*****************************************************************************
...
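The same refactoring repeats for every driver below: callbacks that operate on an iommu_domain move out of struct iommu_ops and into a struct iommu_domain_ops that the core reaches through .default_domain_ops. A minimal sketch of the resulting shape, using a hypothetical "foo" driver (the foo_* functions are placeholders, not code from this series):

```c
#include <linux/iommu.h>
#include <linux/sizes.h>

/* Hypothetical driver, shown only to illustrate the split applied above:
 * device-level callbacks stay in iommu_ops, domain-level callbacks move
 * into the iommu_domain_ops referenced by .default_domain_ops. */
static const struct iommu_ops foo_iommu_ops = {
	.domain_alloc	= foo_iommu_domain_alloc,	/* driver/device level */
	.probe_device	= foo_iommu_probe_device,
	.release_device	= foo_iommu_release_device,
	.device_group	= generic_device_group,
	.pgsize_bitmap	= SZ_4K,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= foo_iommu_attach_dev,	/* domain level */
		.map		= foo_iommu_map,
		.unmap		= foo_iommu_unmap,
		.iova_to_phys	= foo_iommu_iova_to_phys,
		.free		= foo_iommu_domain_free,
	}
};
```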
@@ -537,7 +537,7 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
ret = NOTIFY_DONE;
/* In kdump kernel pci dev is not initialized yet -> send INVALID */
if (amd_iommu_is_attach_deferred(NULL, &pdev->dev)) {
if (amd_iommu_is_attach_deferred(&pdev->dev)) {
amd_iommu_complete_ppr(pdev, iommu_fault->pasid,
PPR_INVALID, tag);
goto out;
...
@@ -765,15 +765,6 @@ static void apple_dart_get_resv_regions(struct device *dev,
static const struct iommu_ops apple_dart_iommu_ops = {
.domain_alloc = apple_dart_domain_alloc,
.domain_free = apple_dart_domain_free,
.attach_dev = apple_dart_attach_dev,
.detach_dev = apple_dart_detach_dev,
.map_pages = apple_dart_map_pages,
.unmap_pages = apple_dart_unmap_pages,
.flush_iotlb_all = apple_dart_flush_iotlb_all,
.iotlb_sync = apple_dart_iotlb_sync,
.iotlb_sync_map = apple_dart_iotlb_sync_map,
.iova_to_phys = apple_dart_iova_to_phys,
.probe_device = apple_dart_probe_device,
.release_device = apple_dart_release_device,
.device_group = apple_dart_device_group,
@@ -782,6 +773,17 @@ static const struct iommu_ops apple_dart_iommu_ops = {
.get_resv_regions = apple_dart_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
.pgsize_bitmap = -1UL, /* Restricted during dart probe */
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = apple_dart_attach_dev,
.detach_dev = apple_dart_detach_dev,
.map_pages = apple_dart_map_pages,
.unmap_pages = apple_dart_unmap_pages,
.flush_iotlb_all = apple_dart_flush_iotlb_all,
.iotlb_sync = apple_dart_iotlb_sync,
.iotlb_sync_map = apple_dart_iotlb_sync_map,
.iova_to_phys = apple_dart_iova_to_phys,
.free = apple_dart_domain_free,
}
};
static irqreturn_t apple_dart_irq(int irq, void *dev)
...
@@ -2841,17 +2841,9 @@ static int arm_smmu_dev_disable_feature(struct device *dev,
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
.domain_free = arm_smmu_domain_free,
.attach_dev = arm_smmu_attach_dev,
.map_pages = arm_smmu_map_pages,
.unmap_pages = arm_smmu_unmap_pages,
.flush_iotlb_all = arm_smmu_flush_iotlb_all,
.iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
.probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device,
.device_group = arm_smmu_device_group,
.enable_nesting = arm_smmu_enable_nesting,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
@@ -2865,6 +2857,16 @@ static struct iommu_ops arm_smmu_ops = {
.page_response = arm_smmu_page_response,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = arm_smmu_attach_dev,
.map_pages = arm_smmu_map_pages,
.unmap_pages = arm_smmu_unmap_pages,
.flush_iotlb_all = arm_smmu_flush_iotlb_all,
.iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
.enable_nesting = arm_smmu_enable_nesting,
.free = arm_smmu_domain_free,
}
};
/* Probing and initialisation functions */
...
@@ -1583,25 +1583,27 @@ static int arm_smmu_def_domain_type(struct device *dev)
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
.domain_free = arm_smmu_domain_free,
.attach_dev = arm_smmu_attach_dev,
.map_pages = arm_smmu_map_pages,
.unmap_pages = arm_smmu_unmap_pages,
.flush_iotlb_all = arm_smmu_flush_iotlb_all,
.iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
.probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device,
.probe_finalize = arm_smmu_probe_finalize,
.device_group = arm_smmu_device_group,
.enable_nesting = arm_smmu_enable_nesting,
.set_pgtable_quirks = arm_smmu_set_pgtable_quirks,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
.def_domain_type = arm_smmu_def_domain_type,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = arm_smmu_attach_dev,
.map_pages = arm_smmu_map_pages,
.unmap_pages = arm_smmu_unmap_pages,
.flush_iotlb_all = arm_smmu_flush_iotlb_all,
.iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
.enable_nesting = arm_smmu_enable_nesting,
.set_pgtable_quirks = arm_smmu_set_pgtable_quirks,
.free = arm_smmu_domain_free,
}
};
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
...
@@ -590,19 +590,21 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
static const struct iommu_ops qcom_iommu_ops = {
.capable = qcom_iommu_capable,
.domain_alloc = qcom_iommu_domain_alloc,
.domain_free = qcom_iommu_domain_free,
.attach_dev = qcom_iommu_attach_dev,
.detach_dev = qcom_iommu_detach_dev,
.map = qcom_iommu_map,
.unmap = qcom_iommu_unmap,
.flush_iotlb_all = qcom_iommu_flush_iotlb_all,
.iotlb_sync = qcom_iommu_iotlb_sync,
.iova_to_phys = qcom_iommu_iova_to_phys,
.probe_device = qcom_iommu_probe_device,
.release_device = qcom_iommu_release_device,
.device_group = generic_device_group,
.of_xlate = qcom_iommu_of_xlate,
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = qcom_iommu_attach_dev,
.detach_dev = qcom_iommu_detach_dev,
.map = qcom_iommu_map,
.unmap = qcom_iommu_unmap,
.flush_iotlb_all = qcom_iommu_flush_iotlb_all,
.iotlb_sync = qcom_iommu_iotlb_sync,
.iova_to_phys = qcom_iommu_iova_to_phys,
.free = qcom_iommu_domain_free,
}
};
static int qcom_iommu_sec_ptbl_init(struct device *dev)
...
@@ -525,6 +525,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
struct iommu_dma_cookie *cookie = domain->iova_cookie;
unsigned long order, base_pfn;
struct iova_domain *iovad;
int ret;
if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
return -EINVAL;
@@ -559,6 +560,9 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
}
init_iova_domain(iovad, 1UL << order, base_pfn);
ret = iova_domain_init_rcaches(iovad);
if (ret)
return ret;
/* If the FQ fails we can simply fall back to strict mode */
if (domain->type == IOMMU_DOMAIN_DMA_FQ && iommu_dma_init_fq(domain))
...
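init_iova_domain() itself still cannot fail, but the rcaches are now allocated separately, so every caller has to check the return value of iova_domain_init_rcaches(); the vduse hunk near the end of this diff follows the same pattern. A sketch of the caller-side contract, with a hypothetical wrapper name:

```c
#include <linux/iova.h>

/* Hypothetical helper illustrating the pattern used by iommu_dma_init_domain()
 * above and vduse_domain_create() later in this diff. */
static int example_iovad_init(struct iova_domain *iovad,
			      unsigned long granule, unsigned long start_pfn)
{
	int ret;

	init_iova_domain(iovad, granule, start_pfn);	/* no failure path */
	ret = iova_domain_init_rcaches(iovad);		/* allocates per-CPU magazines */
	if (ret)
		return ret;				/* typically -ENOMEM */

	return 0;
}
```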
@@ -1309,17 +1309,19 @@ static int exynos_iommu_of_xlate(struct device *dev,
static const struct iommu_ops exynos_iommu_ops = {
.domain_alloc = exynos_iommu_domain_alloc,
.domain_free = exynos_iommu_domain_free,
.attach_dev = exynos_iommu_attach_device,
.detach_dev = exynos_iommu_detach_device,
.map = exynos_iommu_map,
.unmap = exynos_iommu_unmap,
.iova_to_phys = exynos_iommu_iova_to_phys,
.device_group = generic_device_group,
.probe_device = exynos_iommu_probe_device,
.release_device = exynos_iommu_release_device,
.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
.of_xlate = exynos_iommu_of_xlate,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = exynos_iommu_attach_device,
.detach_dev = exynos_iommu_detach_device,
.map = exynos_iommu_map,
.unmap = exynos_iommu_unmap,
.iova_to_phys = exynos_iommu_iova_to_phys,
.free = exynos_iommu_domain_free,
}
};
static int __init exynos_iommu_init(void)
...
@@ -453,13 +453,15 @@ static void fsl_pamu_release_device(struct device *dev)
static const struct iommu_ops fsl_pamu_ops = {
.capable = fsl_pamu_capable,
.domain_alloc = fsl_pamu_domain_alloc,
.domain_free = fsl_pamu_domain_free,
.attach_dev = fsl_pamu_attach_device,
.detach_dev = fsl_pamu_detach_device,
.iova_to_phys = fsl_pamu_iova_to_phys,
.probe_device = fsl_pamu_probe_device,
.release_device = fsl_pamu_release_device,
.device_group = fsl_pamu_device_group,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = fsl_pamu_attach_device,
.detach_dev = fsl_pamu_detach_device,
.iova_to_phys = fsl_pamu_iova_to_phys,
.free = fsl_pamu_domain_free,
}
};
int __init pamu_domain_init(void)
...
@@ -351,8 +351,7 @@ static int show_device_domain_translation(struct device *dev, void *data)
if (!domain)
return 0;
seq_printf(m, "Device %s with pasid %d @0x%llx\n",
dev_name(dev), domain->default_pasid,
seq_printf(m, "Device %s @0x%llx\n", dev_name(dev),
(u64)virt_to_phys(domain->pgd));
seq_puts(m, "IOVA_PFN\t\tPML5E\t\t\tPML4E\t\t\tPDPE\t\t\tPDE\t\t\tPTE\n");
...
@@ -762,164 +762,3 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
return 0;
}
static int
intel_pasid_setup_bind_data(struct intel_iommu *iommu, struct pasid_entry *pte,
struct iommu_gpasid_bind_data_vtd *pasid_data)
{
/*
* Not all guest PASID table entry fields are passed down during bind,
* here we only set up the ones that are dependent on guest settings.
* Execution related bits such as NXE, SMEP are not supported.
* Other fields, such as snoop related, are set based on host needs
* regardless of guest settings.
*/
if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_SRE) {
if (!ecap_srs(iommu->ecap)) {
pr_err_ratelimited("No supervisor request support on %s\n",
iommu->name);
return -EINVAL;
}
pasid_set_sre(pte);
/* Enable write protect WP if guest requested */
if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_WPE)
pasid_set_wpe(pte);
}
if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_EAFE) {
if (!ecap_eafs(iommu->ecap)) {
pr_err_ratelimited("No extended access flag support on %s\n",
iommu->name);
return -EINVAL;
}
pasid_set_eafe(pte);
}
/*
* Memory type is only applicable to devices inside processor coherent
* domain. Will add MTS support once coherent devices are available.
*/
if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_MTS_MASK) {
pr_warn_ratelimited("No memory type support %s\n",
iommu->name);
return -EINVAL;
}
return 0;
}
/**
* intel_pasid_setup_nested() - Set up PASID entry for nested translation.
* This could be used for guest shared virtual address. In this case, the
* first level page tables are used for GVA-GPA translation in the guest,
* second level page tables are used for GPA-HPA translation.
*
* @iommu: IOMMU which the device belong to
* @dev: Device to be set up for translation
* @gpgd: FLPTPTR: First Level Page translation pointer in GPA
* @pasid: PASID to be programmed in the device PASID table
* @pasid_data: Additional PASID info from the guest bind request
* @domain: Domain info for setting up second level page tables
* @addr_width: Address width of the first level (guest)
*/
int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
pgd_t *gpgd, u32 pasid,
struct iommu_gpasid_bind_data_vtd *pasid_data,
struct dmar_domain *domain, int addr_width)
{
struct pasid_entry *pte;
struct dma_pte *pgd;
int ret = 0;
u64 pgd_val;
int agaw;
u16 did;
if (!ecap_nest(iommu->ecap)) {
pr_err_ratelimited("IOMMU: %s: No nested translation support\n",
iommu->name);
return -EINVAL;
}
if (!(domain->flags & DOMAIN_FLAG_NESTING_MODE)) {
pr_err_ratelimited("Domain is not in nesting mode, %x\n",
domain->flags);
return -EINVAL;
}
pte = intel_pasid_get_entry(dev, pasid);
if (WARN_ON(!pte))
return -EINVAL;
/*
* Caller must ensure PASID entry is not in use, i.e. not bind the
* same PASID to the same device twice.
*/
if (pasid_pte_is_present(pte))
return -EBUSY;
pasid_clear_entry(pte);
/* Sanity checking performed by caller to make sure address
* width matching in two dimensions:
* 1. CPU vs. IOMMU
* 2. Guest vs. Host.
*/
switch (addr_width) {
#ifdef CONFIG_X86
case ADDR_WIDTH_5LEVEL:
if (!cpu_feature_enabled(X86_FEATURE_LA57) ||
!cap_5lp_support(iommu->cap)) {
dev_err_ratelimited(dev,
"5-level paging not supported\n");
return -EINVAL;
}
pasid_set_flpm(pte, 1);
break;
#endif
case ADDR_WIDTH_4LEVEL:
pasid_set_flpm(pte, 0);
break;
default:
dev_err_ratelimited(dev, "Invalid guest address width %d\n",
addr_width);
return -EINVAL;
}
/* First level PGD is in GPA, must be supported by the second level */
if ((uintptr_t)gpgd > domain->max_addr) {
dev_err_ratelimited(dev,
"Guest PGD %lx not supported, max %llx\n",
(uintptr_t)gpgd, domain->max_addr);
return -EINVAL;
}
pasid_set_flptr(pte, (uintptr_t)gpgd);
ret = intel_pasid_setup_bind_data(iommu, pte, pasid_data);
if (ret)
return ret;
/* Setup the second level based on the given domain */
pgd = domain->pgd;
agaw = iommu_skip_agaw(domain, iommu, &pgd);
if (agaw < 0) {
dev_err_ratelimited(dev, "Invalid domain page table\n");
return -EINVAL;
}
pgd_val = virt_to_phys(pgd);
pasid_set_slptr(pte, pgd_val);
pasid_set_fault_enable(pte);
did = domain->iommu_did[iommu->seq_id];
pasid_set_domain_id(pte, did);
pasid_set_address_width(pte, agaw);
pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
pasid_set_translation_type(pte, PASID_ENTRY_PGTT_NESTED);
pasid_set_present(pte);
pasid_flush_caches(iommu, pte, pasid, did);
return ret;
}
@@ -118,10 +118,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
struct dmar_domain *domain,
struct device *dev, u32 pasid);
int intel_pasid_setup_nested(struct intel_iommu *iommu,
struct device *dev, pgd_t *pgd, u32 pasid,
struct iommu_gpasid_bind_data_vtd *pasid_data,
struct dmar_domain *domain, int addr_width);
void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
struct device *dev, u32 pasid,
bool fault_ignore);
...
@@ -318,193 +318,6 @@ static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
return 0;
}
int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
struct iommu_gpasid_bind_data *data)
{
struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
struct intel_svm_dev *sdev = NULL;
struct dmar_domain *dmar_domain;
struct device_domain_info *info;
struct intel_svm *svm = NULL;
unsigned long iflags;
int ret = 0;
if (WARN_ON(!iommu) || !data)
return -EINVAL;
if (data->format != IOMMU_PASID_FORMAT_INTEL_VTD)
return -EINVAL;
/* IOMMU core ensures argsz is more than the start of the union */
if (data->argsz < offsetofend(struct iommu_gpasid_bind_data, vendor.vtd))
return -EINVAL;
/* Make sure no undefined flags are used in vendor data */
if (data->vendor.vtd.flags & ~(IOMMU_SVA_VTD_GPASID_LAST - 1))
return -EINVAL;
if (!dev_is_pci(dev))
return -ENOTSUPP;
/* VT-d supports devices with full 20 bit PASIDs only */
if (pci_max_pasids(to_pci_dev(dev)) != PASID_MAX)
return -EINVAL;
/*
* We only check host PASID range, we have no knowledge to check
* guest PASID range.
*/
if (data->hpasid <= 0 || data->hpasid >= PASID_MAX)
return -EINVAL;
info = get_domain_info(dev);
if (!info)
return -EINVAL;
dmar_domain = to_dmar_domain(domain);
mutex_lock(&pasid_mutex);
ret = pasid_to_svm_sdev(dev, data->hpasid, &svm, &sdev);
if (ret)
goto out;
if (sdev) {
/*
* Do not allow multiple bindings of the same device-PASID since
* there is only one SL page tables per PASID. We may revisit
* once sharing PGD across domains are supported.
*/
dev_warn_ratelimited(dev, "Already bound with PASID %u\n",
svm->pasid);
ret = -EBUSY;
goto out;
}
if (!svm) {
/* We come here when PASID has never been bond to a device. */
svm = kzalloc(sizeof(*svm), GFP_KERNEL);
if (!svm) {
ret = -ENOMEM;
goto out;
}
/* REVISIT: upper layer/VFIO can track host process that bind
* the PASID. ioasid_set = mm might be sufficient for vfio to
* check pasid VMM ownership. We can drop the following line
* once VFIO and IOASID set check is in place.
*/
svm->mm = get_task_mm(current);
svm->pasid = data->hpasid;
if (data->flags & IOMMU_SVA_GPASID_VAL) {
svm->gpasid = data->gpasid;
svm->flags |= SVM_FLAG_GUEST_PASID;
}
pasid_private_add(data->hpasid, svm);
INIT_LIST_HEAD_RCU(&svm->devs);
mmput(svm->mm);
}
sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
if (!sdev) {
ret = -ENOMEM;
goto out;
}
sdev->dev = dev;
sdev->sid = PCI_DEVID(info->bus, info->devfn);
sdev->iommu = iommu;
/* Only count users if device has aux domains */
if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
sdev->users = 1;
/* Set up device context entry for PASID if not enabled already */
ret = intel_iommu_enable_pasid(iommu, sdev->dev);
if (ret) {
dev_err_ratelimited(dev, "Failed to enable PASID capability\n");
kfree(sdev);
goto out;
}
/*
* PASID table is per device for better security. Therefore, for
* each bind of a new device even with an existing PASID, we need to
* call the nested mode setup function here.
*/
spin_lock_irqsave(&iommu->lock, iflags);
ret = intel_pasid_setup_nested(iommu, dev,
(pgd_t *)(uintptr_t)data->gpgd,
data->hpasid, &data->vendor.vtd, dmar_domain,
data->addr_width);
spin_unlock_irqrestore(&iommu->lock, iflags);
if (ret) {
dev_err_ratelimited(dev, "Failed to set up PASID %llu in nested mode, Err %d\n",
data->hpasid, ret);
/*
* PASID entry should be in cleared state if nested mode
* set up failed. So we only need to clear IOASID tracking
* data such that free call will succeed.
*/
kfree(sdev);
goto out;
}
svm->flags |= SVM_FLAG_GUEST_MODE;
init_rcu_head(&sdev->rcu);
list_add_rcu(&sdev->list, &svm->devs);
out:
if (!IS_ERR_OR_NULL(svm) && list_empty(&svm->devs)) {
pasid_private_remove(data->hpasid);
kfree(svm);
}
mutex_unlock(&pasid_mutex);
return ret;
}
int intel_svm_unbind_gpasid(struct device *dev, u32 pasid)
{
struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
struct intel_svm_dev *sdev;
struct intel_svm *svm;
int ret;
if (WARN_ON(!iommu))
return -EINVAL;
mutex_lock(&pasid_mutex);
ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
if (ret)
goto out;
if (sdev) {
if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
sdev->users--;
if (!sdev->users) {
list_del_rcu(&sdev->list);
intel_pasid_tear_down_entry(iommu, dev,
svm->pasid, false);
intel_svm_drain_prq(dev, svm->pasid);
kfree_rcu(sdev, rcu);
if (list_empty(&svm->devs)) {
/*
* We do not free the IOASID here in that
* IOMMU driver did not allocate it.
* Unlike native SVM, IOASID for guest use was
* allocated prior to the bind call.
* In any case, if the free call comes before
* the unbind, IOMMU driver will get notified
* and perform cleanup.
*/
pasid_private_remove(pasid);
kfree(svm);
}
}
}
out:
mutex_unlock(&pasid_mutex);
return ret;
}
static int intel_svm_alloc_pasid(struct device *dev, struct mm_struct *mm,
unsigned int flags)
{
@@ -1125,28 +938,6 @@ int intel_svm_page_response(struct device *dev,
goto out;
}
/*
* For responses from userspace, need to make sure that the
* pasid has been bound to its mm.
*/
if (svm->flags & SVM_FLAG_GUEST_MODE) {
struct mm_struct *mm;
mm = get_task_mm(current);
if (!mm) {
ret = -EINVAL;
goto out;
}
if (mm != svm->mm) {
ret = -ENODEV;
mmput(mm);
goto out;
}
mmput(mm);
}
/*
* Per VT-d spec. v3.0 ch7.7, system software must respond
* with page group response if private data is present (PDP)
...
@@ -15,13 +15,14 @@
/* The anchor node sits above the top of the usable address space */
#define IOVA_ANCHOR ~0UL
#define IOVA_RANGE_CACHE_MAX_SIZE 6 /* log of max cached IOVA range size (in pages) */
static bool iova_rcache_insert(struct iova_domain *iovad,
unsigned long pfn,
unsigned long size);
static unsigned long iova_rcache_get(struct iova_domain *iovad,
unsigned long size,
unsigned long limit_pfn);
static void init_iova_rcaches(struct iova_domain *iovad);
static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
@@ -64,8 +65,6 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD, &iovad->cpuhp_dead);
init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);
@@ -95,10 +94,11 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
cached_iova = to_iova(iovad->cached32_node);
if (free == cached_iova ||
(free->pfn_hi < iovad->dma_32bit_pfn &&
free->pfn_lo >= cached_iova->pfn_lo)) {
free->pfn_lo >= cached_iova->pfn_lo))
iovad->cached32_node = rb_next(&free->node);
if (free->pfn_lo < iovad->dma_32bit_pfn)
iovad->max32_alloc_size = iovad->dma_32bit_pfn;
}
cached_iova = to_iova(iovad->cached_node);
if (free->pfn_lo >= cached_iova->pfn_lo)
@@ -488,6 +488,13 @@ free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
}
EXPORT_SYMBOL_GPL(free_iova_fast);
static void iova_domain_free_rcaches(struct iova_domain *iovad)
{
cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
&iovad->cpuhp_dead);
free_iova_rcaches(iovad);
}
/**
* put_iova_domain - destroys the iova domain
* @iovad: - iova domain in question.
@@ -497,9 +504,9 @@ void put_iova_domain(struct iova_domain *iovad)
{
struct iova *iova, *tmp;
cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
&iovad->cpuhp_dead);
free_iova_rcaches(iovad);
if (iovad->rcaches)
iova_domain_free_rcaches(iovad);
rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
free_iova_mem(iova);
}
@@ -608,6 +615,7 @@ EXPORT_SYMBOL_GPL(reserve_iova);
*/
#define IOVA_MAG_SIZE 128
#define MAX_GLOBAL_MAGS 32 /* magazines per bin */
struct iova_magazine {
unsigned long size;
@@ -620,6 +628,13 @@ struct iova_cpu_rcache {
struct iova_magazine *prev;
};
struct iova_rcache {
spinlock_t lock;
unsigned long depot_size;
struct iova_magazine *depot[MAX_GLOBAL_MAGS];
struct iova_cpu_rcache __percpu *cpu_rcaches;
};
static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
{
return kzalloc(sizeof(struct iova_magazine), flags);
@@ -693,28 +708,54 @@ static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
mag->pfns[mag->size++] = pfn;
}
static void init_iova_rcaches(struct iova_domain *iovad)
int iova_domain_init_rcaches(struct iova_domain *iovad)
{
struct iova_cpu_rcache *cpu_rcache;
struct iova_rcache *rcache;
unsigned int cpu;
int i;
int i, ret;
iovad->rcaches = kcalloc(IOVA_RANGE_CACHE_MAX_SIZE,
sizeof(struct iova_rcache),
GFP_KERNEL);
if (!iovad->rcaches)
return -ENOMEM;
for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
struct iova_cpu_rcache *cpu_rcache;
struct iova_rcache *rcache;
rcache = &iovad->rcaches[i];
spin_lock_init(&rcache->lock);
rcache->depot_size = 0;
rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
if (WARN_ON(!rcache->cpu_rcaches))
continue;
rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache),
cache_line_size());
if (!rcache->cpu_rcaches) {
ret = -ENOMEM;
goto out_err;
}
for_each_possible_cpu(cpu) {
cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
spin_lock_init(&cpu_rcache->lock);
cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
if (!cpu_rcache->loaded || !cpu_rcache->prev) {
ret = -ENOMEM;
goto out_err;
}
}
}
ret = cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
&iovad->cpuhp_dead);
if (ret)
goto out_err;
return 0;
out_err:
free_iova_rcaches(iovad);
return ret;
} }
EXPORT_SYMBOL_GPL(iova_domain_init_rcaches);
/*
* Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
@@ -831,7 +872,7 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
{
unsigned int log_size = order_base_2(size);
if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE || !iovad->rcaches)
return 0;
return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
@@ -849,6 +890,8 @@ static void free_iova_rcaches(struct iova_domain *iovad)
for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
rcache = &iovad->rcaches[i];
if (!rcache->cpu_rcaches)
break;
for_each_possible_cpu(cpu) {
cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
iova_magazine_free(cpu_rcache->loaded);
@@ -858,6 +901,9 @@ static void free_iova_rcaches(struct iova_domain *iovad)
for (j = 0; j < rcache->depot_size; ++j)
iova_magazine_free(rcache->depot[j]);
}
kfree(iovad->rcaches);
iovad->rcaches = NULL;
}
/*
...
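For reference when reading the rcache hunks above: the cache bin for a range is still order_base_2(size), and both the lookup and insert paths bail out once that value reaches IOVA_RANGE_CACHE_MAX_SIZE (6), so only ranges of up to 32 pages go through the magazines. A quick stand-alone check of the bin math (plain userspace C; order_base_2() reimplemented here as an assumption about the kernel macro's behaviour):

```c
#include <stdio.h>

/* Stand-in for the kernel's order_base_2(): ceil(log2(n)), with order_base_2(1) == 0. */
static unsigned int order_base_2(unsigned long n)
{
	unsigned int order = 0;

	while ((1UL << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned long sizes[] = { 1, 4, 32, 33, 64 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned int log = order_base_2(sizes[i]);

		/* IOVA_RANGE_CACHE_MAX_SIZE is 6 in the hunk above. */
		printf("size %lu pages -> bin %u, cached: %s\n",
		       sizes[i], log, log < 6 ? "yes" : "no");
	}
	return 0;
}
```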
@@ -868,14 +868,6 @@ static struct iommu_group *ipmmu_find_group(struct device *dev)
static const struct iommu_ops ipmmu_ops = {
.domain_alloc = ipmmu_domain_alloc,
.domain_free = ipmmu_domain_free,
.attach_dev = ipmmu_attach_device,
.detach_dev = ipmmu_detach_device,
.map = ipmmu_map,
.unmap = ipmmu_unmap,
.flush_iotlb_all = ipmmu_flush_iotlb_all,
.iotlb_sync = ipmmu_iotlb_sync,
.iova_to_phys = ipmmu_iova_to_phys,
.probe_device = ipmmu_probe_device,
.release_device = ipmmu_release_device,
.probe_finalize = ipmmu_probe_finalize,
@@ -883,6 +875,16 @@ static const struct iommu_ops ipmmu_ops = {
? generic_device_group : ipmmu_find_group,
.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
.of_xlate = ipmmu_of_xlate,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = ipmmu_attach_device,
.detach_dev = ipmmu_detach_device,
.map = ipmmu_map,
.unmap = ipmmu_unmap,
.flush_iotlb_all = ipmmu_flush_iotlb_all,
.iotlb_sync = ipmmu_iotlb_sync,
.iova_to_phys = ipmmu_iova_to_phys,
.free = ipmmu_domain_free,
}
};
/* -----------------------------------------------------------------------------
...
@@ -558,11 +558,6 @@ static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
return ret;
}
static bool msm_iommu_capable(enum iommu_cap cap)
{
return false;
}
static void print_ctx_regs(void __iomem *base, int ctx)
{
unsigned int fsr = GET_FSR(base, ctx);
@@ -672,27 +667,28 @@ irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
}
static struct iommu_ops msm_iommu_ops = {
.capable = msm_iommu_capable,
.domain_alloc = msm_iommu_domain_alloc,
.domain_free = msm_iommu_domain_free,
.attach_dev = msm_iommu_attach_dev,
.detach_dev = msm_iommu_detach_dev,
.map = msm_iommu_map,
.unmap = msm_iommu_unmap,
/*
* Nothing is needed here, the barrier to guarantee
* completion of the tlb sync operation is implicitly
* taken care when the iommu client does a writel before
* kick starting the other master.
*/
.iotlb_sync = NULL,
.iotlb_sync_map = msm_iommu_sync_map,
.iova_to_phys = msm_iommu_iova_to_phys,
.probe_device = msm_iommu_probe_device,
.release_device = msm_iommu_release_device,
.device_group = generic_device_group,
.pgsize_bitmap = MSM_IOMMU_PGSIZES,
.of_xlate = qcom_iommu_of_xlate,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = msm_iommu_attach_dev,
.detach_dev = msm_iommu_detach_dev,
.map = msm_iommu_map,
.unmap = msm_iommu_unmap,
/*
* Nothing is needed here, the barrier to guarantee
* completion of the tlb sync operation is implicitly
* taken care when the iommu client does a writel before
* kick starting the other master.
*/
.iotlb_sync = NULL,
.iotlb_sync_map = msm_iommu_sync_map,
.iova_to_phys = msm_iommu_iova_to_phys,
.free = msm_iommu_domain_free,
}
};
static int msm_iommu_probe(struct platform_device *pdev)
...
@@ -658,15 +658,6 @@ static void mtk_iommu_get_resv_regions(struct device *dev,
static const struct iommu_ops mtk_iommu_ops = {
.domain_alloc = mtk_iommu_domain_alloc,
.domain_free = mtk_iommu_domain_free,
.attach_dev = mtk_iommu_attach_device,
.detach_dev = mtk_iommu_detach_device,
.map = mtk_iommu_map,
.unmap = mtk_iommu_unmap,
.flush_iotlb_all = mtk_iommu_flush_iotlb_all,
.iotlb_sync = mtk_iommu_iotlb_sync,
.iotlb_sync_map = mtk_iommu_sync_map,
.iova_to_phys = mtk_iommu_iova_to_phys,
.probe_device = mtk_iommu_probe_device,
.release_device = mtk_iommu_release_device,
.device_group = mtk_iommu_device_group,
@@ -675,6 +666,17 @@ static const struct iommu_ops mtk_iommu_ops = {
.put_resv_regions = generic_iommu_put_resv_regions,
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = mtk_iommu_attach_device,
.detach_dev = mtk_iommu_detach_device,
.map = mtk_iommu_map,
.unmap = mtk_iommu_unmap,
.flush_iotlb_all = mtk_iommu_flush_iotlb_all,
.iotlb_sync = mtk_iommu_iotlb_sync,
.iotlb_sync_map = mtk_iommu_sync_map,
.iova_to_phys = mtk_iommu_iova_to_phys,
.free = mtk_iommu_domain_free,
}
};
static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
...
@@ -514,12 +514,6 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
static const struct iommu_ops mtk_iommu_ops = {
.domain_alloc = mtk_iommu_domain_alloc,
.domain_free = mtk_iommu_domain_free,
.attach_dev = mtk_iommu_attach_device,
.detach_dev = mtk_iommu_detach_device,
.map = mtk_iommu_map,
.unmap = mtk_iommu_unmap,
.iova_to_phys = mtk_iommu_iova_to_phys,
.probe_device = mtk_iommu_probe_device,
.probe_finalize = mtk_iommu_probe_finalize,
.release_device = mtk_iommu_release_device,
@@ -527,6 +521,14 @@ static const struct iommu_ops mtk_iommu_ops = {
.device_group = generic_device_group,
.pgsize_bitmap = ~0UL << MT2701_IOMMU_PAGE_SHIFT,
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = mtk_iommu_attach_device,
.detach_dev = mtk_iommu_detach_device,
.map = mtk_iommu_map,
.unmap = mtk_iommu_unmap,
.iova_to_phys = mtk_iommu_iova_to_phys,
.free = mtk_iommu_domain_free,
}
};
static const struct of_device_id mtk_iommu_of_ids[] = {
...
@@ -1734,16 +1734,18 @@ static struct iommu_group *omap_iommu_device_group(struct device *dev)
static const struct iommu_ops omap_iommu_ops = {
.domain_alloc = omap_iommu_domain_alloc,
.domain_free = omap_iommu_domain_free,
.attach_dev = omap_iommu_attach_dev,
.detach_dev = omap_iommu_detach_dev,
.map = omap_iommu_map,
.unmap = omap_iommu_unmap,
.iova_to_phys = omap_iommu_iova_to_phys,
.probe_device = omap_iommu_probe_device,
.release_device = omap_iommu_release_device,
.device_group = omap_iommu_device_group,
.pgsize_bitmap = OMAP_IOMMU_PGSIZES,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = omap_iommu_attach_dev,
.detach_dev = omap_iommu_detach_dev,
.map = omap_iommu_map,
.unmap = omap_iommu_unmap,
.iova_to_phys = omap_iommu_iova_to_phys,
.free = omap_iommu_domain_free,
}
};
static int __init omap_iommu_init(void)
...
@@ -1187,17 +1187,19 @@ static int rk_iommu_of_xlate(struct device *dev,
static const struct iommu_ops rk_iommu_ops = {
.domain_alloc = rk_iommu_domain_alloc,
.domain_free = rk_iommu_domain_free,
.attach_dev = rk_iommu_attach_device,
.detach_dev = rk_iommu_detach_device,
.map = rk_iommu_map,
.unmap = rk_iommu_unmap,
.probe_device = rk_iommu_probe_device,
.release_device = rk_iommu_release_device,
.iova_to_phys = rk_iommu_iova_to_phys,
.device_group = rk_iommu_device_group,
.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
.of_xlate = rk_iommu_of_xlate,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = rk_iommu_attach_device,
.detach_dev = rk_iommu_detach_device,
.map = rk_iommu_map,
.unmap = rk_iommu_unmap,
.iova_to_phys = rk_iommu_iova_to_phys,
.free = rk_iommu_domain_free,
}
};
static int rk_iommu_probe(struct platform_device *pdev)
...
@@ -363,16 +363,18 @@ void zpci_destroy_iommu(struct zpci_dev *zdev)
static const struct iommu_ops s390_iommu_ops = {
.capable = s390_iommu_capable,
.domain_alloc = s390_domain_alloc,
.domain_free = s390_domain_free,
.attach_dev = s390_iommu_attach_device,
.detach_dev = s390_iommu_detach_device,
.map = s390_iommu_map,
.unmap = s390_iommu_unmap,
.iova_to_phys = s390_iommu_iova_to_phys,
.probe_device = s390_iommu_probe_device,
.release_device = s390_iommu_release_device,
.device_group = generic_device_group,
.pgsize_bitmap = S390_IOMMU_PGSIZES,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = s390_iommu_attach_device,
.detach_dev = s390_iommu_detach_device,
.map = s390_iommu_map,
.unmap = s390_iommu_unmap,
.iova_to_phys = s390_iommu_iova_to_phys,
.free = s390_domain_free,
}
};
static int __init s390_iommu_init(void)
...
@@ -416,20 +416,22 @@ static int sprd_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
static const struct iommu_ops sprd_iommu_ops = {
.domain_alloc = sprd_iommu_domain_alloc,
.domain_free = sprd_iommu_domain_free,
.attach_dev = sprd_iommu_attach_device,
.detach_dev = sprd_iommu_detach_device,
.map = sprd_iommu_map,
.unmap = sprd_iommu_unmap,
.iotlb_sync_map = sprd_iommu_sync_map,
.iotlb_sync = sprd_iommu_sync,
.iova_to_phys = sprd_iommu_iova_to_phys,
.probe_device = sprd_iommu_probe_device,
.release_device = sprd_iommu_release_device,
.device_group = sprd_iommu_device_group,
.of_xlate = sprd_iommu_of_xlate,
.pgsize_bitmap = ~0UL << SPRD_IOMMU_PAGE_SHIFT,
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = sprd_iommu_attach_device,
.detach_dev = sprd_iommu_detach_device,
.map = sprd_iommu_map,
.unmap = sprd_iommu_unmap,
.iotlb_sync_map = sprd_iommu_sync_map,
.iotlb_sync = sprd_iommu_sync,
.iova_to_phys = sprd_iommu_iova_to_phys,
.free = sprd_iommu_domain_free,
}
};
static const struct of_device_id sprd_iommu_of_match[] = {
...
@@ -760,19 +760,21 @@ static int sun50i_iommu_of_xlate(struct device *dev,
static const struct iommu_ops sun50i_iommu_ops = {
.pgsize_bitmap = SZ_4K,
.attach_dev = sun50i_iommu_attach_device,
.detach_dev = sun50i_iommu_detach_device,
.device_group = sun50i_iommu_device_group,
.domain_alloc = sun50i_iommu_domain_alloc,
.domain_free = sun50i_iommu_domain_free,
.flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
.iotlb_sync = sun50i_iommu_iotlb_sync,
.iova_to_phys = sun50i_iommu_iova_to_phys,
.map = sun50i_iommu_map,
.of_xlate = sun50i_iommu_of_xlate,
.probe_device = sun50i_iommu_probe_device,
.release_device = sun50i_iommu_release_device,
.unmap = sun50i_iommu_unmap,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = sun50i_iommu_attach_device,
.detach_dev = sun50i_iommu_detach_device,
.flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
.iotlb_sync = sun50i_iommu_iotlb_sync,
.iova_to_phys = sun50i_iommu_iova_to_phys,
.map = sun50i_iommu_map,
.unmap = sun50i_iommu_unmap,
.free = sun50i_iommu_domain_free,
}
};
static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu,
...
@@ -238,11 +238,6 @@ static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
return pte & GART_PAGE_MASK;
}
static bool gart_iommu_capable(enum iommu_cap cap)
{
return false;
}
static struct iommu_device *gart_iommu_probe_device(struct device *dev)
{
if (!dev_iommu_fwspec_get(dev))
@@ -276,21 +271,22 @@ static void gart_iommu_sync(struct iommu_domain *domain,
}
static const struct iommu_ops gart_iommu_ops = {
.capable = gart_iommu_capable,
.domain_alloc = gart_iommu_domain_alloc,
.domain_free = gart_iommu_domain_free,
.attach_dev = gart_iommu_attach_dev,
.detach_dev = gart_iommu_detach_dev,
.probe_device = gart_iommu_probe_device,
.release_device = gart_iommu_release_device,
.device_group = generic_device_group,
.map = gart_iommu_map,
.unmap = gart_iommu_unmap,
.iova_to_phys = gart_iommu_iova_to_phys,
.pgsize_bitmap = GART_IOMMU_PGSIZES,
.of_xlate = gart_iommu_of_xlate,
.iotlb_sync_map = gart_iommu_sync_map,
.iotlb_sync = gart_iommu_sync,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = gart_iommu_attach_dev,
.detach_dev = gart_iommu_detach_dev,
.map = gart_iommu_map,
.unmap = gart_iommu_unmap,
.iova_to_phys = gart_iommu_iova_to_phys,
.iotlb_sync_map = gart_iommu_sync_map,
.iotlb_sync = gart_iommu_sync,
.free = gart_iommu_domain_free,
}
};
int tegra_gart_suspend(struct gart_device *gart)
...
@@ -272,11 +272,6 @@ static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
clear_bit(id, smmu->asids);
}
static bool tegra_smmu_capable(enum iommu_cap cap)
{
return false;
}
static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
struct tegra_smmu_as *as;
@@ -967,19 +962,20 @@ static int tegra_smmu_of_xlate(struct device *dev,
}
static const struct iommu_ops tegra_smmu_ops = {
.capable = tegra_smmu_capable,
.domain_alloc = tegra_smmu_domain_alloc,
.domain_free = tegra_smmu_domain_free,
.attach_dev = tegra_smmu_attach_dev,
.detach_dev = tegra_smmu_detach_dev,
.probe_device = tegra_smmu_probe_device,
.release_device = tegra_smmu_release_device,
.device_group = tegra_smmu_device_group,
.map = tegra_smmu_map,
.unmap = tegra_smmu_unmap,
.iova_to_phys = tegra_smmu_iova_to_phys,
.of_xlate = tegra_smmu_of_xlate,
.pgsize_bitmap = SZ_4K,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = tegra_smmu_attach_dev,
.detach_dev = tegra_smmu_detach_dev,
.map = tegra_smmu_map,
.unmap = tegra_smmu_unmap,
.iova_to_phys = tegra_smmu_iova_to_phys,
.free = tegra_smmu_domain_free,
}
};
static void tegra_smmu_ahb_enable(void)
...
@@ -1008,12 +1008,6 @@ static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
static struct iommu_ops viommu_ops = {
.domain_alloc = viommu_domain_alloc,
.domain_free = viommu_domain_free,
.attach_dev = viommu_attach_dev,
.map = viommu_map,
.unmap = viommu_unmap,
.iova_to_phys = viommu_iova_to_phys,
.iotlb_sync = viommu_iotlb_sync,
.probe_device = viommu_probe_device,
.probe_finalize = viommu_probe_finalize,
.release_device = viommu_release_device,
@@ -1022,6 +1016,14 @@ static struct iommu_ops viommu_ops = {
.put_resv_regions = generic_iommu_put_resv_regions,
.of_xlate = viommu_of_xlate,
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = viommu_attach_dev,
.map = viommu_map,
.unmap = viommu_unmap,
.iova_to_phys = viommu_iova_to_phys,
.iotlb_sync = viommu_iotlb_sync,
.free = viommu_domain_free,
}
};
static int viommu_init_vqs(struct viommu_dev *viommu)
...
@@ -480,6 +480,7 @@ vduse_domain_create(unsigned long iova_limit, size_t bounce_size)
struct file *file;
struct vduse_bounce_map *map;
unsigned long pfn, bounce_pfns;
int ret;
bounce_pfns = PAGE_ALIGN(bounce_size) >> PAGE_SHIFT;
if (iova_limit <= bounce_size)
@@ -513,10 +514,20 @@ vduse_domain_create(unsigned long iova_limit, size_t bounce_size)
spin_lock_init(&domain->iotlb_lock);
init_iova_domain(&domain->stream_iovad,
PAGE_SIZE, IOVA_START_PFN);
ret = iova_domain_init_rcaches(&domain->stream_iovad);
if (ret)
goto err_iovad_stream;
init_iova_domain(&domain->consistent_iovad,
PAGE_SIZE, bounce_pfns);
ret = iova_domain_init_rcaches(&domain->consistent_iovad);
if (ret)
goto err_iovad_consistent;
return domain;
err_iovad_consistent:
put_iova_domain(&domain->stream_iovad);
err_iovad_stream:
fput(file);
err_file:
vfree(domain->bounce_maps);
err_map:
...
...@@ -525,12 +525,6 @@ struct context_entry {
*/
#define DOMAIN_FLAG_USE_FIRST_LEVEL BIT(1)
/*
* Domain represents a virtual machine which demands iommu nested
* translation mode support.
*/
#define DOMAIN_FLAG_NESTING_MODE BIT(2)
struct dmar_domain {
int nid; /* node id */
...@@ -548,7 +542,6 @@ struct dmar_domain {
u8 iommu_snooping: 1; /* indicate snooping control feature */
struct list_head devices; /* all devices' list */
struct list_head subdevices; /* all subdevices' list */
struct iova_domain iovad; /* iova's that belong to this domain */
struct dma_pte *pgd; /* virtual address */
...@@ -563,11 +556,6 @@ struct dmar_domain {
2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
u64 max_addr; /* maximum mapped address */
u32 default_pasid; /*
* The default pasid used for non-SVM
* traffic on mediated devices.
*/
struct iommu_domain domain; /* generic domain data structure for
iommu core */
};
...@@ -620,21 +608,11 @@ struct intel_iommu {
void *perf_statistic;
};
/* Per subdevice private data */
struct subdev_domain_info {
struct list_head link_phys; /* link to phys device siblings */
struct list_head link_domain; /* link to domain siblings */
struct device *pdev; /* physical device derived from */
struct dmar_domain *domain; /* aux-domain */
int users; /* user count */
};
/* PCI domain-device relationship */
struct device_domain_info {
struct list_head link; /* link to domain siblings */
struct list_head global; /* link to global list */
struct list_head table; /* link to pasid table */
struct list_head subdevices; /* subdevices sibling */
u32 segment; /* PCI segment number */
u8 bus; /* PCI bus number */
u8 devfn; /* PCI devfn number */
...@@ -645,7 +623,6 @@ struct device_domain_info {
u8 pri_enabled:1;
u8 ats_supported:1;
u8 ats_enabled:1;
u8 auxd_enabled:1; /* Multiple domains per device */
u8 ats_qdep;
struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
struct intel_iommu *iommu; /* IOMMU used by this device */
...@@ -765,9 +742,6 @@ struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
extern void intel_svm_check(struct intel_iommu *iommu);
extern int intel_svm_enable_prq(struct intel_iommu *iommu);
extern int intel_svm_finish_prq(struct intel_iommu *iommu);
int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
struct iommu_gpasid_bind_data *data);
int intel_svm_unbind_gpasid(struct device *dev, u32 pasid);
struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm,
void *drvdata);
void intel_svm_unbind(struct iommu_sva *handle);
...@@ -795,7 +769,6 @@ struct intel_svm {
unsigned int flags;
u32 pasid;
int gpasid; /* In case that guest PASID is different from host PASID */
struct list_head devs;
};
#else
...
...@@ -25,17 +25,5 @@
* do such IOTLB flushes automatically.
*/
#define SVM_FLAG_SUPERVISOR_MODE BIT(0)
/*
* The SVM_FLAG_GUEST_MODE flag is used when a PASID bind is for guest
* processes. Compared to the host bind, the primary differences are:
* 1. mm life cycle management
* 2. fault reporting
*/
#define SVM_FLAG_GUEST_MODE BIT(1)
/*
* The SVM_FLAG_GUEST_PASID flag is used when a guest has its own PASID space,
* which requires guest and host PASID translation at both directions.
*/
#define SVM_FLAG_GUEST_PASID BIT(2)
#endif /* __INTEL_SVM_H__ */
...@@ -21,18 +21,8 @@ struct iova {
unsigned long pfn_lo; /* Lowest allocated pfn */
};
struct iova_magazine;
struct iova_cpu_rcache;
#define IOVA_RANGE_CACHE_MAX_SIZE 6 /* log of max cached IOVA range size (in pages) */
struct iova_rcache;
#define MAX_GLOBAL_MAGS 32 /* magazines per bin */
struct iova_rcache {
spinlock_t lock;
unsigned long depot_size;
struct iova_magazine *depot[MAX_GLOBAL_MAGS];
struct iova_cpu_rcache __percpu *cpu_rcaches;
};
/* holds all the iova translations for a domain */
struct iova_domain {
...@@ -46,7 +36,7 @@ struct iova_domain {
unsigned long max32_alloc_size; /* Size of last failed allocation */
struct iova anchor; /* rbtree lookup anchor */
struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */
struct iova_rcache *rcaches;
struct hlist_node cpuhp_dead;
};
...@@ -102,6 +92,7 @@ struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
unsigned long pfn_hi);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
unsigned long start_pfn);
int iova_domain_init_rcaches(struct iova_domain *iovad);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
#else
...
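Given the header changes above (the fixed rcaches array becomes an opaque pointer allocated by the new iova_domain_init_rcaches()), callers now perform a two-step initialization that can fail, as the vduse hunk earlier in this diff shows. A minimal sketch, assuming a hypothetical example_setup_iovad() helper and arbitrary granule/start_pfn values; this is not code from the commit:

/* Illustrative sketch only; example_setup_iovad() is not part of this commit. */
static int example_setup_iovad(struct iova_domain *iovad)
{
	int ret;

	/* rbtree and bookkeeping setup, as before (granule/start_pfn arbitrary here) */
	init_iova_domain(iovad, PAGE_SIZE, 1);

	/* rcaches are now allocated separately, and that allocation may fail */
	ret = iova_domain_init_rcaches(iovad);
	if (ret)
		return ret;

	return 0;
}

Mirroring the vduse error path above, an iova_domain whose rcaches were successfully initialized is torn down with put_iova_domain(); when iova_domain_init_rcaches() itself fails, only previously completed domains need unwinding.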
...@@ -158,185 +158,4 @@ struct iommu_page_response {
__u32 code;
};
/* defines the granularity of the invalidation */
enum iommu_inv_granularity {
IOMMU_INV_GRANU_DOMAIN, /* domain-selective invalidation */
IOMMU_INV_GRANU_PASID, /* PASID-selective invalidation */
IOMMU_INV_GRANU_ADDR, /* page-selective invalidation */
IOMMU_INV_GRANU_NR, /* number of invalidation granularities */
};
/**
* struct iommu_inv_addr_info - Address Selective Invalidation Structure
*
* @flags: indicates the granularity of the address-selective invalidation
* - If the PASID bit is set, the @pasid field is populated and the invalidation
* relates to cache entries tagged with this PASID and matching the address
* range.
* - If ARCHID bit is set, @archid is populated and the invalidation relates
* to cache entries tagged with this architecture specific ID and matching
* the address range.
* - Both PASID and ARCHID can be set as they may tag different caches.
* - If neither PASID or ARCHID is set, global addr invalidation applies.
* - The LEAF flag indicates whether only the leaf PTE caching needs to be
* invalidated and other paging structure caches can be preserved.
* @pasid: process address space ID
* @archid: architecture-specific ID
* @addr: first stage/level input address
* @granule_size: page/block size of the mapping in bytes
* @nb_granules: number of contiguous granules to be invalidated
*/
struct iommu_inv_addr_info {
#define IOMMU_INV_ADDR_FLAGS_PASID (1 << 0)
#define IOMMU_INV_ADDR_FLAGS_ARCHID (1 << 1)
#define IOMMU_INV_ADDR_FLAGS_LEAF (1 << 2)
__u32 flags;
__u32 archid;
__u64 pasid;
__u64 addr;
__u64 granule_size;
__u64 nb_granules;
};
/**
* struct iommu_inv_pasid_info - PASID Selective Invalidation Structure
*
* @flags: indicates the granularity of the PASID-selective invalidation
* - If the PASID bit is set, the @pasid field is populated and the invalidation
* relates to cache entries tagged with this PASID and matching the address
* range.
* - If the ARCHID bit is set, the @archid is populated and the invalidation
* relates to cache entries tagged with this architecture specific ID and
* matching the address range.
* - Both PASID and ARCHID can be set as they may tag different caches.
* - At least one of PASID or ARCHID must be set.
* @pasid: process address space ID
* @archid: architecture-specific ID
*/
struct iommu_inv_pasid_info {
#define IOMMU_INV_PASID_FLAGS_PASID (1 << 0)
#define IOMMU_INV_PASID_FLAGS_ARCHID (1 << 1)
__u32 flags;
__u32 archid;
__u64 pasid;
};
/**
* struct iommu_cache_invalidate_info - First level/stage invalidation
* information
* @argsz: User filled size of this data
* @version: API version of this structure
* @cache: bitfield that allows to select which caches to invalidate
* @granularity: defines the lowest granularity used for the invalidation:
* domain > PASID > addr
* @padding: reserved for future use (should be zero)
* @pasid_info: invalidation data when @granularity is %IOMMU_INV_GRANU_PASID
* @addr_info: invalidation data when @granularity is %IOMMU_INV_GRANU_ADDR
*
* Not all the combinations of cache/granularity are valid:
*
* +--------------+---------------+---------------+---------------+
* | type / | DEV_IOTLB | IOTLB | PASID |
* | granularity | | | cache |
* +==============+===============+===============+===============+
* | DOMAIN | N/A | Y | Y |
* +--------------+---------------+---------------+---------------+
* | PASID | Y | Y | Y |
* +--------------+---------------+---------------+---------------+
* | ADDR | Y | Y | N/A |
* +--------------+---------------+---------------+---------------+
*
* Invalidations by %IOMMU_INV_GRANU_DOMAIN don't take any argument other than
* @version and @cache.
*
* If multiple cache types are invalidated simultaneously, they all
* must support the used granularity.
*/
struct iommu_cache_invalidate_info {
__u32 argsz;
#define IOMMU_CACHE_INVALIDATE_INFO_VERSION_1 1
__u32 version;
/* IOMMU paging structure cache */
#define IOMMU_CACHE_INV_TYPE_IOTLB (1 << 0) /* IOMMU IOTLB */
#define IOMMU_CACHE_INV_TYPE_DEV_IOTLB (1 << 1) /* Device IOTLB */
#define IOMMU_CACHE_INV_TYPE_PASID (1 << 2) /* PASID cache */
#define IOMMU_CACHE_INV_TYPE_NR (3)
__u8 cache;
__u8 granularity;
__u8 padding[6];
union {
struct iommu_inv_pasid_info pasid_info;
struct iommu_inv_addr_info addr_info;
} granu;
};
/**
* struct iommu_gpasid_bind_data_vtd - Intel VT-d specific data on device and guest
* SVA binding.
*
* @flags: VT-d PASID table entry attributes
* @pat: Page attribute table data to compute effective memory type
* @emt: Extended memory type
*
* Only guest vIOMMU selectable and effective options are passed down to
* the host IOMMU.
*/
struct iommu_gpasid_bind_data_vtd {
#define IOMMU_SVA_VTD_GPASID_SRE (1 << 0) /* supervisor request */
#define IOMMU_SVA_VTD_GPASID_EAFE (1 << 1) /* extended access enable */
#define IOMMU_SVA_VTD_GPASID_PCD (1 << 2) /* page-level cache disable */
#define IOMMU_SVA_VTD_GPASID_PWT (1 << 3) /* page-level write through */
#define IOMMU_SVA_VTD_GPASID_EMTE (1 << 4) /* extended mem type enable */
#define IOMMU_SVA_VTD_GPASID_CD (1 << 5) /* PASID-level cache disable */
#define IOMMU_SVA_VTD_GPASID_WPE (1 << 6) /* Write protect enable */
#define IOMMU_SVA_VTD_GPASID_LAST (1 << 7)
__u64 flags;
__u32 pat;
__u32 emt;
};
#define IOMMU_SVA_VTD_GPASID_MTS_MASK (IOMMU_SVA_VTD_GPASID_CD | \
IOMMU_SVA_VTD_GPASID_EMTE | \
IOMMU_SVA_VTD_GPASID_PCD | \
IOMMU_SVA_VTD_GPASID_PWT)
/**
* struct iommu_gpasid_bind_data - Information about device and guest PASID binding
* @argsz: User filled size of this data
* @version: Version of this data structure
* @format: PASID table entry format
* @flags: Additional information on guest bind request
* @gpgd: Guest page directory base of the guest mm to bind
* @hpasid: Process address space ID used for the guest mm in host IOMMU
* @gpasid: Process address space ID used for the guest mm in guest IOMMU
* @addr_width: Guest virtual address width
* @padding: Reserved for future use (should be zero)
* @vtd: Intel VT-d specific data
*
* Guest to host PASID mapping can be an identity or non-identity, where guest
* has its own PASID space. For non-identify mapping, guest to host PASID lookup
* is needed when VM programs guest PASID into an assigned device. VMM may
* trap such PASID programming then request host IOMMU driver to convert guest
* PASID to host PASID based on this bind data.
*/
struct iommu_gpasid_bind_data {
__u32 argsz;
#define IOMMU_GPASID_BIND_VERSION_1 1
__u32 version;
#define IOMMU_PASID_FORMAT_INTEL_VTD 1
#define IOMMU_PASID_FORMAT_LAST 2
__u32 format;
__u32 addr_width;
#define IOMMU_SVA_GPASID_VAL (1 << 0) /* guest PASID valid */
__u64 flags;
__u64 gpgd;
__u64 hpasid;
__u64 gpasid;
__u8 padding[8];
/* Vendor specific data */
union {
struct iommu_gpasid_bind_data_vtd vtd;
} vendor;
};
#endif /* _UAPI_IOMMU_H */