Commit fb4e3bee authored by Linus Torvalds

Merge tag 'iommu-updates-v4.13' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU updates from Joerg Roedel:
 "This update comes with:

   - Support for lockless operation in the ARM io-pgtable code.

     This is an important step to solve the scalability problems in the
     common dma-iommu code for ARM

   - Some Errata workarounds for ARM SMMU implementations

   - Rewrite of the deferred IO/TLB flush code in the AMD IOMMU driver.

     The code suffered from very high flush rates; with the new
     implementation the flush rate is down to ~1% of what it was before

   - Support for amd_iommu=off when booting with kexec.

     The problem here was that the IOMMU driver bailed out early without
     disabling the iommu hardware if it was enabled in the old kernel

   - The Rockchip IOMMU driver is now available on ARM64

   - Align the return value of the iommu_ops->device_group call-backs to
     not miss error values (a short sketch of this convention follows the
     commit log below)

   - Preempt-disable optimizations in the Intel VT-d and common IOVA
     code to help Linux-RT

   - Various other small cleanups and fixes"

* tag 'iommu-updates-v4.13' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (60 commits)
  iommu/vt-d: Constify intel_dma_ops
  iommu: Warn once when device_group callback returns NULL
  iommu/omap: Return ERR_PTR in device_group call-back
  iommu: Return ERR_PTR() values from device_group call-backs
  iommu/s390: Use iommu_group_get_for_dev() in s390_iommu_add_device()
  iommu/vt-d: Don't disable preemption while accessing deferred_flush()
  iommu/iova: Don't disable preempt around this_cpu_ptr()
  iommu/arm-smmu-v3: Add workaround for Cavium ThunderX2 erratum #126
  iommu/arm-smmu-v3: Enable ACPI based HiSilicon CMD_PREFETCH quirk(erratum 161010701)
  iommu/arm-smmu-v3: Add workaround for Cavium ThunderX2 erratum #74
  ACPI/IORT: Fixup SMMUv3 resource size for Cavium ThunderX2 SMMUv3 model
  iommu/arm-smmu-v3, acpi: Add temporary Cavium SMMU-V3 IORT model number definitions
  iommu/io-pgtable-arm: Use dma_wmb() instead of wmb() when publishing table
  iommu/io-pgtable: depend on !GENERIC_ATOMIC64 when using COMPILE_TEST with LPAE
  iommu/arm-smmu-v3: Remove io-pgtable spinlock
  iommu/arm-smmu: Remove io-pgtable spinlock
  iommu/io-pgtable-arm-v7s: Support lockless operation
  iommu/io-pgtable-arm: Support lockless operation
  iommu/io-pgtable: Introduce explicit coherency
  iommu/io-pgtable-arm-v7s: Refactor split_blk_unmap
  ...
parents 6b1c776d 6a708643
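To make the device_group item in the pull message concrete, here is a minimal sketch of the convention this series moves drivers toward: the iommu_ops->device_group callback returns either a valid group or an ERR_PTR() value, never NULL, so iommu_group_get_for_dev() can propagate errors instead of silently dropping them. The callback name foo_device_group is hypothetical; dev_is_pci(), pci_device_group() and generic_device_group() are existing kernel helpers.

    /*
     * Sketch only: a driver device_group callback under the new convention.
     * The helpers below return a group or an ERR_PTR() value on failure,
     * which the core now checks instead of treating NULL as "no group".
     */
    static struct iommu_group *foo_device_group(struct device *dev)
    {
        if (dev_is_pci(dev))
            return pci_device_group(dev);     /* handles PCI DMA aliases */

        return generic_device_group(dev);     /* now just iommu_group_alloc() */
    }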
@@ -61,12 +61,15 @@ stable kernels.
 | Cavium         | ThunderX ITS    | #23144     | CAVIUM_ERRATUM_23144        |
 | Cavium         | ThunderX GICv3  | #23154     | CAVIUM_ERRATUM_23154        |
 | Cavium         | ThunderX Core   | #27456     | CAVIUM_ERRATUM_27456        |
-| Cavium         | ThunderX SMMUv2 | #27704     | N/A                         |
 | Cavium         | ThunderX Core   | #30115     | CAVIUM_ERRATUM_30115        |
+| Cavium         | ThunderX SMMUv2 | #27704     | N/A                         |
+| Cavium         | ThunderX2 SMMUv3| #74        | N/A                         |
+| Cavium         | ThunderX2 SMMUv3| #126       | N/A                         |
 |                |                 |            |                             |
 | Freescale/NXP  | LS2080A/LS1043A | A-008585   | FSL_ERRATUM_A008585         |
 |                |                 |            |                             |
 | Hisilicon      | Hip0{5,6,7}     | #161010101 | HISILICON_ERRATUM_161010101 |
+| Hisilicon      | Hip0{6,7}       | #161010701 | N/A                         |
 |                |                 |            |                             |
 | Qualcomm Tech. | Falkor v1       | E1003      | QCOM_FALKOR_ERRATUM_1003    |
 | Qualcomm Tech. | Falkor v1       | E1009      | QCOM_FALKOR_ERRATUM_1009    |
......
@@ -26,6 +26,12 @@ the PCIe specification.
                      * "priq"      - PRI Queue not empty
                      * "cmdq-sync" - CMD_SYNC complete
                      * "gerror"    - Global Error activated
+                     * "combined"  - The combined interrupt is optional,
+                                     and should only be provided if the
+                                     hardware supports just a single,
+                                     combined interrupt line.
+                                     If provided, then the combined interrupt
+                                     will be used in preference to any others.
 - #iommu-cells      : See the generic IOMMU binding described in
                       devicetree/bindings/pci/pci-iommu.txt
@@ -49,6 +55,12 @@ the PCIe specification.
 - hisilicon,broken-prefetch-cmd
                     : Avoid sending CMD_PREFETCH_* commands to the SMMU.
+- cavium,cn9900-broken-page1-regspace
+                    : Replaces all page 1 offsets used for EVTQ_PROD/CONS,
+                      PRIQ_PROD/CONS register access with page 0 offsets.
+                      Set for Cavium ThunderX2 silicon that doesn't support
+                      SMMU page1 register space.
 ** Example
     smmu@2b400000 {
......
@@ -31,6 +31,11 @@
 #define IORT_IOMMU_TYPE ((1 << ACPI_IORT_NODE_SMMU) | \
                          (1 << ACPI_IORT_NODE_SMMU_V3))
+/* Until ACPICA headers cover IORT rev. C */
+#ifndef ACPI_IORT_SMMU_V3_CAVIUM_CN99XX
+#define ACPI_IORT_SMMU_V3_CAVIUM_CN99XX 0x2
+#endif
 struct iort_its_msi_chip {
     struct list_head list;
     struct fwnode_handle *fw_node;
@@ -819,6 +824,36 @@ static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
     return num_res;
 }
+static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu)
+{
+    /*
+     * Cavium ThunderX2 implementation doesn't support unique irq line.
+     * Use single irq line for all the SMMUv3 interrupts.
+     */
+    if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
+        return false;
+    /*
+     * ThunderX2 doesn't support MSIs from the SMMU, so we're checking
+     * SPI numbers here.
+     */
+    return smmu->event_gsiv == smmu->pri_gsiv &&
+           smmu->event_gsiv == smmu->gerr_gsiv &&
+           smmu->event_gsiv == smmu->sync_gsiv;
+}
+static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu)
+{
+    /*
+     * Override the size, for Cavium ThunderX2 implementation
+     * which doesn't support the page 1 SMMU register space.
+     */
+    if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
+        return SZ_64K;
+    return SZ_128K;
+}
 static void __init arm_smmu_v3_init_resources(struct resource *res,
                                               struct acpi_iort_node *node)
 {
@@ -829,30 +864,38 @@ static void __init arm_smmu_v3_init_resources(struct resource *res,
     smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
     res[num_res].start = smmu->base_address;
-    res[num_res].end = smmu->base_address + SZ_128K - 1;
+    res[num_res].end = smmu->base_address +
+                       arm_smmu_v3_resource_size(smmu) - 1;
     res[num_res].flags = IORESOURCE_MEM;
     num_res++;
+    if (arm_smmu_v3_is_combined_irq(smmu)) {
+        if (smmu->event_gsiv)
+            acpi_iort_register_irq(smmu->event_gsiv, "combined",
+                                   ACPI_EDGE_SENSITIVE,
+                                   &res[num_res++]);
+    } else {
         if (smmu->event_gsiv)
             acpi_iort_register_irq(smmu->event_gsiv, "eventq",
                                    ACPI_EDGE_SENSITIVE,
                                    &res[num_res++]);
         if (smmu->pri_gsiv)
             acpi_iort_register_irq(smmu->pri_gsiv, "priq",
                                    ACPI_EDGE_SENSITIVE,
                                    &res[num_res++]);
         if (smmu->gerr_gsiv)
             acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
                                    ACPI_EDGE_SENSITIVE,
                                    &res[num_res++]);
         if (smmu->sync_gsiv)
             acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
                                    ACPI_EDGE_SENSITIVE,
                                    &res[num_res++]);
+    }
 }
 static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
......
@@ -23,7 +23,7 @@ config IOMMU_IO_PGTABLE
 config IOMMU_IO_PGTABLE_LPAE
     bool "ARMv7/v8 Long Descriptor Format"
     select IOMMU_IO_PGTABLE
-    depends on HAS_DMA && (ARM || ARM64 || COMPILE_TEST)
+    depends on HAS_DMA && (ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64))
     help
       Enable support for the ARM long descriptor pagetable format.
       This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
@@ -219,7 +219,7 @@ config OMAP_IOMMU_DEBUG
 config ROCKCHIP_IOMMU
     bool "Rockchip IOMMU Support"
-    depends on ARM
+    depends on ARM || ARM64
     depends on ARCH_ROCKCHIP || COMPILE_TEST
     select IOMMU_API
     select ARM_DMA_USE_IOMMU
@@ -274,7 +274,7 @@ config EXYNOS_IOMMU_DEBUG
 config IPMMU_VMSA
     bool "Renesas VMSA-compatible IPMMU"
-    depends on ARM_LPAE
+    depends on ARM || IOMMU_DMA
     depends on ARCH_RENESAS || COMPILE_TEST
     select IOMMU_API
     select IOMMU_IO_PGTABLE_LPAE
......
This diff is collapsed.
@@ -29,6 +29,7 @@
 #include <linux/export.h>
 #include <linux/iommu.h>
 #include <linux/kmemleak.h>
+#include <linux/crash_dump.h>
 #include <asm/pci-direct.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
@@ -236,6 +237,7 @@ enum iommu_init_state {
     IOMMU_INITIALIZED,
     IOMMU_NOT_FOUND,
     IOMMU_INIT_ERROR,
+    IOMMU_CMDLINE_DISABLED,
 };
 /* Early ioapic and hpet maps from kernel command line */
@@ -588,6 +590,8 @@ void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
     writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
     writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+    iommu->cmd_buf_head = 0;
+    iommu->cmd_buf_tail = 0;
     iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
 }
@@ -1898,6 +1902,14 @@ static void init_device_table_dma(void)
     for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
         set_dev_entry_bit(devid, DEV_ENTRY_VALID);
         set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
+        /*
+         * In kdump kernels in-flight DMA from the old kernel might
+         * cause IO_PAGE_FAULTs. There are no reports that a kdump
+         * actually failed because of that, so just disable fault
+         * reporting in the hardware to get rid of the messages
+         */
+        if (is_kdump_kernel())
+            set_dev_entry_bit(devid, DEV_ENTRY_NO_PAGE_FAULT);
     }
 }
@@ -2097,23 +2109,27 @@ static struct syscore_ops amd_iommu_syscore_ops = {
     .resume = amd_iommu_resume,
 };
-static void __init free_on_init_error(void)
+static void __init free_iommu_resources(void)
 {
     kmemleak_free(irq_lookup_table);
     free_pages((unsigned long)irq_lookup_table,
                get_order(rlookup_table_size));
+    irq_lookup_table = NULL;
     kmem_cache_destroy(amd_iommu_irq_cache);
     amd_iommu_irq_cache = NULL;
     free_pages((unsigned long)amd_iommu_rlookup_table,
                get_order(rlookup_table_size));
+    amd_iommu_rlookup_table = NULL;
     free_pages((unsigned long)amd_iommu_alias_table,
                get_order(alias_table_size));
+    amd_iommu_alias_table = NULL;
     free_pages((unsigned long)amd_iommu_dev_table,
                get_order(dev_table_size));
+    amd_iommu_dev_table = NULL;
     free_iommu_all();
@@ -2183,6 +2199,7 @@ static void __init free_dma_resources(void)
 {
     free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
                get_order(MAX_DOMAIN_ID/8));
+    amd_iommu_pd_alloc_bitmap = NULL;
     free_unity_maps();
 }
@@ -2307,6 +2324,9 @@ static int __init early_amd_iommu_init(void)
     if (ret)
         goto out;
+    /* Disable any previously enabled IOMMUs */
+    disable_iommus();
     if (amd_iommu_irq_remap)
         amd_iommu_irq_remap = check_ioapic_information();
@@ -2410,6 +2430,13 @@ static int __init state_next(void)
     case IOMMU_IVRS_DETECTED:
         ret = early_amd_iommu_init();
         init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
+        if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
+            pr_info("AMD-Vi: AMD IOMMU disabled on kernel command-line\n");
+            free_dma_resources();
+            free_iommu_resources();
+            init_state = IOMMU_CMDLINE_DISABLED;
+            ret = -EINVAL;
+        }
         break;
     case IOMMU_ACPI_FINISHED:
         early_enable_iommus();
@@ -2438,6 +2465,7 @@ static int __init state_next(void)
         break;
     case IOMMU_NOT_FOUND:
     case IOMMU_INIT_ERROR:
+    case IOMMU_CMDLINE_DISABLED:
         /* Error states => do nothing */
         ret = -EINVAL;
         break;
@@ -2451,13 +2479,14 @@ static int __init state_next(void)
 static int __init iommu_go_to_state(enum iommu_init_state state)
 {
-    int ret = 0;
+    int ret = -EINVAL;
     while (init_state != state) {
-        ret = state_next();
-        if (init_state == IOMMU_NOT_FOUND ||
-            init_state == IOMMU_INIT_ERROR)
+        if (init_state == IOMMU_NOT_FOUND ||
+            init_state == IOMMU_INIT_ERROR ||
+            init_state == IOMMU_CMDLINE_DISABLED)
             break;
+        ret = state_next();
     }
     return ret;
@@ -2522,7 +2551,7 @@ static int __init amd_iommu_init(void)
         free_dma_resources();
         if (!irq_remapping_enabled) {
             disable_iommus();
-            free_on_init_error();
+            free_iommu_resources();
         } else {
             struct amd_iommu *iommu;
@@ -2549,9 +2578,6 @@ int __init amd_iommu_detect(void)
     if (no_iommu || (iommu_detected && !gart_iommu_aperture))
         return -ENODEV;
-    if (amd_iommu_disabled)
-        return -ENODEV;
     ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
     if (ret)
         return ret;
......
@@ -322,6 +322,7 @@
 #define IOMMU_PTE_IW (1ULL << 62)
 #define DTE_FLAG_IOTLB (1ULL << 32)
+#define DTE_FLAG_SA (1ULL << 34)
 #define DTE_FLAG_GV (1ULL << 55)
 #define DTE_FLAG_MASK (0x3ffULL << 32)
 #define DTE_GLX_SHIFT (56)
@@ -516,6 +517,8 @@ struct amd_iommu {
     /* command buffer virtual address */
     u8 *cmd_buf;
+    u32 cmd_buf_head;
+    u32 cmd_buf_tail;
     /* event buffer virtual address */
     u8 *evt_buf;
......
This diff is collapsed.
@@ -312,6 +312,14 @@ enum arm_smmu_implementation {
     CAVIUM_SMMUV2,
 };
+/* Until ACPICA headers cover IORT rev. C */
+#ifndef ACPI_IORT_SMMU_CORELINK_MMU401
+#define ACPI_IORT_SMMU_CORELINK_MMU401 0x4
+#endif
+#ifndef ACPI_IORT_SMMU_CAVIUM_THUNDERX
+#define ACPI_IORT_SMMU_CAVIUM_THUNDERX 0x5
+#endif
 struct arm_smmu_s2cr {
     struct iommu_group *group;
     int count;
@@ -425,10 +433,10 @@ enum arm_smmu_domain_stage {
 struct arm_smmu_domain {
     struct arm_smmu_device *smmu;
     struct io_pgtable_ops *pgtbl_ops;
-    spinlock_t pgtbl_lock;
     struct arm_smmu_cfg cfg;
     enum arm_smmu_domain_stage stage;
     struct mutex init_mutex; /* Protects smmu pointer */
+    spinlock_t cb_lock; /* Serialises ATS1* ops */
     struct iommu_domain domain;
 };
@@ -1010,6 +1018,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
         .iommu_dev = smmu->dev,
     };
+    if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+        pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;
     smmu_domain->smmu = smmu;
     pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
     if (!pgtbl_ops) {
@@ -1102,7 +1113,7 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
     }
     mutex_init(&smmu_domain->init_mutex);
-    spin_lock_init(&smmu_domain->pgtbl_lock);
+    spin_lock_init(&smmu_domain->cb_lock);
     return &smmu_domain->domain;
 }
@@ -1380,35 +1391,23 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
                         phys_addr_t paddr, size_t size, int prot)
 {
-    int ret;
-    unsigned long flags;
-    struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-    struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
+    struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
     if (!ops)
         return -ENODEV;
-    spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
-    ret = ops->map(ops, iova, paddr, size, prot);
-    spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
-    return ret;
+    return ops->map(ops, iova, paddr, size, prot);
 }
 static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
                              size_t size)
 {
-    size_t ret;
-    unsigned long flags;
-    struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-    struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
+    struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
     if (!ops)
         return 0;
-    spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
-    ret = ops->unmap(ops, iova, size);
-    spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
-    return ret;
+    return ops->unmap(ops, iova, size);
 }
 static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
@@ -1422,10 +1421,11 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
     void __iomem *cb_base;
     u32 tmp;
     u64 phys;
-    unsigned long va;
+    unsigned long va, flags;
     cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
+    spin_lock_irqsave(&smmu_domain->cb_lock, flags);
     /* ATS1 registers can only be written atomically */
     va = iova & ~0xfffUL;
     if (smmu->version == ARM_SMMU_V2)
@@ -1435,6 +1435,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
     if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
                                   !(tmp & ATSR_ACTIVE), 5, 50)) {
+        spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
         dev_err(dev,
                 "iova to phys timed out on %pad. Falling back to software table walk.\n",
                 &iova);
@@ -1442,6 +1443,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
     }
     phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
+    spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
     if (phys & CB_PAR_F) {
         dev_err(dev, "translation fault!\n");
         dev_err(dev, "PAR = 0x%llx\n", phys);
@@ -1454,10 +1456,8 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
 static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
                                          dma_addr_t iova)
 {
-    phys_addr_t ret;
-    unsigned long flags;
     struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-    struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
+    struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
     if (domain->type == IOMMU_DOMAIN_IDENTITY)
         return iova;
@@ -1465,17 +1465,11 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
     if (!ops)
         return 0;
-    spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
     if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
-        smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
-        ret = arm_smmu_iova_to_phys_hard(domain, iova);
-    } else {
-        ret = ops->iova_to_phys(ops, iova);
-    }
-    spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
-    return ret;
+        smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
+        return arm_smmu_iova_to_phys_hard(domain, iova);
+    return ops->iova_to_phys(ops, iova);
 }
 static bool arm_smmu_capable(enum iommu_cap cap)
@@ -2073,6 +2067,10 @@ static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
         smmu->version = ARM_SMMU_V1;
         smmu->model = GENERIC_SMMU;
         break;
+    case ACPI_IORT_SMMU_CORELINK_MMU401:
+        smmu->version = ARM_SMMU_V1_64K;
+        smmu->model = GENERIC_SMMU;
+        break;
     case ACPI_IORT_SMMU_V2:
         smmu->version = ARM_SMMU_V2;
         smmu->model = GENERIC_SMMU;
@@ -2081,6 +2079,10 @@ static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
         smmu->version = ARM_SMMU_V2;
         smmu->model = ARM_MMU500;
         break;
+    case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
+        smmu->version = ARM_SMMU_V2;
+        smmu->model = CAVIUM_SMMUV2;
+        break;
     default:
         ret = -ENODEV;
     }
......
@@ -316,7 +316,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
      * If we have devices with different DMA masks, move the free
      * area cache limit down for the benefit of the smaller one.
      */
-    iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn);
+    iovad->dma_32bit_pfn = min(end_pfn + 1, iovad->dma_32bit_pfn);
     return 0;
 }
......
@@ -481,7 +481,7 @@ struct deferred_flush_data {
     struct deferred_flush_table *tables;
 };
-DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
+static DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
 /* bitmap for indexing intel_iommus */
 static int g_num_of_iommus;
@@ -2390,7 +2390,7 @@ static struct dmar_domain *find_domain(struct device *dev)
     /* No lock here, assumes no domain exit in normal case */
     info = dev->archdata.iommu;
-    if (info)
+    if (likely(info))
         return info->domain;
     return NULL;
 }
@@ -3478,7 +3478,7 @@ static unsigned long intel_alloc_iova(struct device *dev,
     return iova_pfn;
 }
-static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
+static struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
 {
     struct dmar_domain *domain, *tmp;
     struct dmar_rmrr_unit *rmrr;
@@ -3525,18 +3525,6 @@ static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
     return domain;
 }
-static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
-{
-    struct device_domain_info *info;
-    /* No lock here, assumes no domain exit in normal case */
-    info = dev->archdata.iommu;
-    if (likely(info))
-        return info->domain;
-    return __get_valid_domain_for_dev(dev);
-}
 /* Check if the dev needs to go through non-identity map and unmap process.*/
 static int iommu_no_mapping(struct device *dev)
 {
@@ -3725,10 +3713,8 @@ static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
     struct intel_iommu *iommu;
     struct deferred_flush_entry *entry;
     struct deferred_flush_data *flush_data;
-    unsigned int cpuid;
-    cpuid = get_cpu();
-    flush_data = per_cpu_ptr(&deferred_flush, cpuid);
+    flush_data = raw_cpu_ptr(&deferred_flush);
     /* Flush all CPUs' entries to avoid deferring too much. If
      * this becomes a bottleneck, can just flush us, and rely on
@@ -3761,8 +3747,6 @@ static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
     }
     flush_data->size++;
     spin_unlock_irqrestore(&flush_data->lock, flags);
-    put_cpu();
 }
 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
@@ -3973,7 +3957,7 @@ static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
     return !dma_addr;
 }
-struct dma_map_ops intel_dma_ops = {
+const struct dma_map_ops intel_dma_ops = {
     .alloc = intel_alloc_coherent,
     .free = intel_free_coherent,
     .map_sg = intel_map_sg,
......
@@ -489,6 +489,36 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
 }
 EXPORT_SYMBOL_GPL(intel_svm_unbind_mm);
+int intel_svm_is_pasid_valid(struct device *dev, int pasid)
+{
+    struct intel_iommu *iommu;
+    struct intel_svm *svm;
+    int ret = -EINVAL;
+    mutex_lock(&pasid_mutex);
+    iommu = intel_svm_device_to_iommu(dev);
+    if (!iommu || !iommu->pasid_table)
+        goto out;
+    svm = idr_find(&iommu->pasid_idr, pasid);
+    if (!svm)
+        goto out;
+    /* init_mm is used in this case */
+    if (!svm->mm)
+        ret = 1;
+    else if (atomic_read(&svm->mm->mm_users) > 0)
+        ret = 1;
+    else
+        ret = 0;
+out:
+    mutex_unlock(&pasid_mutex);
+    return ret;
+}
+EXPORT_SYMBOL_GPL(intel_svm_is_pasid_valid);
 /* Page request queue descriptor */
 struct page_req_dsc {
     u64 srr:1;
......
@@ -76,7 +76,7 @@ static struct hpet_scope ir_hpet[MAX_HPET_TBS];
  * the dmar_global_lock.
  */
 static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
-static struct irq_domain_ops intel_ir_domain_ops;
+static const struct irq_domain_ops intel_ir_domain_ops;
 static void iommu_disable_irq_remapping(struct intel_iommu *iommu);
 static int __init parse_ioapics_under_ir(void);
@@ -1407,7 +1407,7 @@ static void intel_irq_remapping_deactivate(struct irq_domain *domain,
     modify_irte(&data->irq_2_iommu, &entry);
 }
-static struct irq_domain_ops intel_ir_domain_ops = {
+static const struct irq_domain_ops intel_ir_domain_ops = {
     .alloc = intel_irq_remapping_alloc,
     .free = intel_irq_remapping_free,
     .activate = intel_irq_remapping_activate,
......
This diff is collapsed.
This diff is collapsed.
@@ -65,11 +65,17 @@ struct io_pgtable_cfg {
      *  PTEs, for Mediatek IOMMUs which treat it as a 33rd address bit
      *  when the SoC is in "4GB mode" and they can only access the high
      *  remap of DRAM (0x1_00000000 to 0x1_ffffffff).
+     *
+     * IO_PGTABLE_QUIRK_NO_DMA: Guarantees that the tables will only ever
+     *  be accessed by a fully cache-coherent IOMMU or CPU (e.g. for a
+     *  software-emulated IOMMU), such that pagetable updates need not
+     *  be treated as explicit DMA data.
      */
     #define IO_PGTABLE_QUIRK_ARM_NS     BIT(0)
     #define IO_PGTABLE_QUIRK_NO_PERMS   BIT(1)
     #define IO_PGTABLE_QUIRK_TLBI_ON_MAP    BIT(2)
     #define IO_PGTABLE_QUIRK_ARM_MTK_4GB    BIT(3)
+    #define IO_PGTABLE_QUIRK_NO_DMA     BIT(4)
     unsigned long quirks;
     unsigned long pgsize_bitmap;
     unsigned int ias;
......
@@ -915,13 +915,7 @@ static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
  */
 struct iommu_group *generic_device_group(struct device *dev)
 {
-    struct iommu_group *group;
-    group = iommu_group_alloc();
-    if (IS_ERR(group))
-        return NULL;
-    return group;
+    return iommu_group_alloc();
 }
 /*
@@ -988,11 +982,7 @@ struct iommu_group *pci_device_group(struct device *dev)
         return group;
     /* No shared group found, allocate new */
-    group = iommu_group_alloc();
-    if (IS_ERR(group))
-        return NULL;
-    return group;
+    return iommu_group_alloc();
 }
 /**
@@ -1020,6 +1010,9 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
     if (ops && ops->device_group)
         group = ops->device_group(dev);
+    if (WARN_ON_ONCE(group == NULL))
+        return ERR_PTR(-EINVAL);
     if (IS_ERR(group))
         return group;
......
@@ -22,6 +22,7 @@
 #include <linux/slab.h>
 #include <linux/smp.h>
 #include <linux/bitops.h>
+#include <linux/cpu.h>
 static bool iova_rcache_insert(struct iova_domain *iovad,
                                unsigned long pfn,
@@ -48,7 +49,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
     iovad->cached32_node = NULL;
     iovad->granule = granule;
     iovad->start_pfn = start_pfn;
-    iovad->dma_32bit_pfn = pfn_32bit;
+    iovad->dma_32bit_pfn = pfn_32bit + 1;
     init_iova_rcaches(iovad);
 }
 EXPORT_SYMBOL_GPL(init_iova_domain);
@@ -63,7 +64,7 @@ __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
         struct rb_node *prev_node = rb_prev(iovad->cached32_node);
         struct iova *curr_iova =
             rb_entry(iovad->cached32_node, struct iova, node);
-        *limit_pfn = curr_iova->pfn_lo - 1;
+        *limit_pfn = curr_iova->pfn_lo;
         return prev_node;
     }
 }
@@ -135,7 +136,7 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova,
 static unsigned int
 iova_get_pad_size(unsigned int size, unsigned int limit_pfn)
 {
-    return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1);
+    return (limit_pfn - size) & (__roundup_pow_of_two(size) - 1);
 }
 static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
@@ -155,18 +156,15 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
     while (curr) {
         struct iova *curr_iova = rb_entry(curr, struct iova, node);
-        if (limit_pfn < curr_iova->pfn_lo)
+        if (limit_pfn <= curr_iova->pfn_lo) {
             goto move_left;
-        else if (limit_pfn < curr_iova->pfn_hi)
-            goto adjust_limit_pfn;
-        else {
+        } else if (limit_pfn > curr_iova->pfn_hi) {
             if (size_aligned)
                 pad_size = iova_get_pad_size(size, limit_pfn);
-            if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
+            if ((curr_iova->pfn_hi + size + pad_size) < limit_pfn)
                 break;  /* found a free slot */
         }
-adjust_limit_pfn:
-        limit_pfn = curr_iova->pfn_lo ? (curr_iova->pfn_lo - 1) : 0;
+        limit_pfn = curr_iova->pfn_lo;
 move_left:
         prev = curr;
         curr = rb_prev(curr);
@@ -182,7 +180,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
     }
     /* pfn_lo will point to size aligned address if size_aligned is set */
-    new->pfn_lo = limit_pfn - (size + pad_size) + 1;
+    new->pfn_lo = limit_pfn - (size + pad_size);
     new->pfn_hi = new->pfn_lo + size - 1;
     /* If we have 'prev', it's a valid place to start the insertion. */
@@ -269,7 +267,7 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
     if (!new_iova)
         return NULL;
-    ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
+    ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
                                         new_iova, size_aligned);
     if (ret) {
@@ -398,10 +396,8 @@ alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
         /* Try replenishing IOVAs by flushing rcache. */
         flushed_rcache = true;
-        preempt_disable();
         for_each_online_cpu(cpu)
             free_cpu_cached_iovas(cpu, iovad);
-        preempt_enable();
         goto retry;
     }
@@ -729,7 +725,7 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
     bool can_insert = false;
     unsigned long flags;
-    cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
+    cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
     spin_lock_irqsave(&cpu_rcache->lock, flags);
     if (!iova_magazine_full(cpu_rcache->loaded)) {
@@ -759,7 +755,6 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
     iova_magazine_push(cpu_rcache->loaded, iova_pfn);
     spin_unlock_irqrestore(&cpu_rcache->lock, flags);
-    put_cpu_ptr(rcache->cpu_rcaches);
     if (mag_to_free) {
         iova_magazine_free_pfns(mag_to_free, iovad);
@@ -793,7 +788,7 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
     bool has_pfn = false;
     unsigned long flags;
-    cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
+    cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
     spin_lock_irqsave(&cpu_rcache->lock, flags);
     if (!iova_magazine_empty(cpu_rcache->loaded)) {
@@ -815,7 +810,6 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
     iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);
     spin_unlock_irqrestore(&cpu_rcache->lock, flags);
-    put_cpu_ptr(rcache->cpu_rcaches);
     return iova_pfn;
 }
......
This diff is collapsed.
@@ -1309,7 +1309,7 @@ static void omap_iommu_remove_device(struct device *dev)
 static struct iommu_group *omap_iommu_device_group(struct device *dev)
 {
     struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
-    struct iommu_group *group = NULL;
+    struct iommu_group *group = ERR_PTR(-EINVAL);
     if (arch_data->iommu_dev)
         group = arch_data->iommu_dev->group;
......
@@ -165,20 +165,14 @@ static void s390_iommu_detach_device(struct iommu_domain *domain,
 static int s390_iommu_add_device(struct device *dev)
 {
-    struct iommu_group *group;
-    int rc;
-    group = iommu_group_get(dev);
-    if (!group) {
-        group = iommu_group_alloc();
-        if (IS_ERR(group))
-            return PTR_ERR(group);
-    }
-    rc = iommu_group_add_device(group, dev);
+    struct iommu_group *group = iommu_group_get_for_dev(dev);
+    if (IS_ERR(group))
+        return PTR_ERR(group);
     iommu_group_put(group);
-    return rc;
+    return 0;
 }
 static void s390_iommu_remove_device(struct device *dev)
@@ -344,6 +338,7 @@ static struct iommu_ops s390_iommu_ops = {
     .iova_to_phys = s390_iommu_iova_to_phys,
     .add_device = s390_iommu_add_device,
     .remove_device = s390_iommu_remove_device,
+    .device_group = generic_device_group,
     .pgsize_bitmap = S390_IOMMU_PGSIZES,
 };
......
@@ -102,6 +102,21 @@ extern int intel_svm_bind_mm(struct device *dev, int *pasid, int flags,
  */
 extern int intel_svm_unbind_mm(struct device *dev, int pasid);
+/**
+ * intel_svm_is_pasid_valid() - check if pasid is valid
+ * @dev:   Device for which PASID was allocated
+ * @pasid: PASID value to be checked
+ *
+ * This function checks if the specified pasid is still valid. A
+ * valid pasid means the backing mm still has a valid user.
+ * For kernel callers init_mm is always valid. For other mms, the pasid
+ * is valid if mm->mm_users is non-zero.
+ *
+ * Returns -EINVAL for an invalid pasid, 0 if the pasid ref count is
+ * invalid, and 1 if the pasid is valid.
+ */
+extern int intel_svm_is_pasid_valid(struct device *dev, int pasid);
 #else /* CONFIG_INTEL_IOMMU_SVM */
 static inline int intel_svm_bind_mm(struct device *dev, int *pasid,
@@ -114,6 +129,11 @@ static inline int intel_svm_unbind_mm(struct device *dev, int pasid)
 {
     BUG();
 }
+static int intel_svm_is_pasid_valid(struct device *dev, int pasid)
+{
+    return -EINVAL;
+}
 #endif /* CONFIG_INTEL_IOMMU_SVM */
 #define intel_svm_available(dev) (!intel_svm_bind_mm((dev), NULL, 0, NULL))
......
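As a usage note for the intel_svm_is_pasid_valid() helper declared above, here is a minimal, hedged sketch of a caller; the dev and pasid variables and the foo_release_pasid_resources() cleanup helper are placeholders for illustration, not part of the patch:

    /* Sketch: check whether the mm behind a PASID still has users before reusing it. */
    if (intel_svm_is_pasid_valid(dev, pasid) != 1) {
        /* hypothetical driver-side cleanup for a stale or unknown PASID */
        foo_release_pasid_resources(pasid);
    }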