Commit 3f583bc2 authored by Linus Torvalds

Merge tag 'iommu-updates-v3.15' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU updates from Joerg Roedel:
 "This time a few more updates queued up.

   - Rework VT-d code to support ACPI devices

   - Improvements for memory and PCI hotplug support in the VT-d driver

   - Device-tree support for OMAP IOMMU

   - Convert OMAP IOMMU to use devm_* interfaces

   - Fixed PASID support for AMD IOMMU

   - Other random cleanups and fixes for OMAP, ARM-SMMU and SHMOBILE
     IOMMU

  Most of the changes are in the VT-d driver because some rework was
  necessary for better hotplug and ACPI device support"

* tag 'iommu-updates-v3.15' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (75 commits)
  iommu/vt-d: Fix error handling in ANDD processing
  iommu/vt-d: returning free pointer in get_domain_for_dev()
  iommu/vt-d: Only call dmar_acpi_dev_scope_init() if DRHD units present
  iommu/vt-d: Check for NULL pointer in dmar_acpi_dev_scope_init()
  iommu/amd: Fix logic to determine and checking max PASID
  iommu/vt-d: Include ACPI devices in iommu=pt
  iommu/vt-d: Finally enable translation for non-PCI devices
  iommu/vt-d: Remove to_pci_dev() in intel_map_page()
  iommu/vt-d: Remove pdev from intel_iommu_attach_device()
  iommu/vt-d: Remove pdev from iommu_no_mapping()
  iommu/vt-d: Make domain_add_dev_info() take struct device
  iommu/vt-d: Make domain_remove_one_dev_info() take struct device
  iommu/vt-d: Rename 'hwdev' variables to 'dev' now that that's the norm
  iommu/vt-d: Remove some pointless to_pci_dev() calls
  iommu/vt-d: Make get_valid_domain_for_dev() take struct device
  iommu/vt-d: Make iommu_should_identity_map() take struct device
  iommu/vt-d: Handle RMRRs for non-PCI devices
  iommu/vt-d: Make get_domain_for_dev() take struct device
  iommu/vt-d: Make domain_context_mapp{ed,ing}() take struct device
  iommu/vt-d: Make device_to_iommu() cope with non-PCI devices
  ...
parents 3e76b749 e172b812
@@ -48,6 +48,12 @@ conditions.
                   from the mmu-masters towards memory) node for this
                   SMMU.

+- calxeda,smmu-secure-config-access : Enable proper handling of buggy
+                  implementations that always use secure access to
+                  SMMU configuration registers. In this case non-secure
+                  aliases of secure registers have to be used during
+                  SMMU configuration.
+
 Example:

         smmu {
......
OMAP2+ IOMMU

Required properties:
- compatible : Should be one of,
        "ti,omap2-iommu" for OMAP2/OMAP3 IOMMU instances
        "ti,omap4-iommu" for OMAP4/OMAP5 IOMMU instances
        "ti,dra7-iommu" for DRA7xx IOMMU instances
- ti,hwmods : Name of the hwmod associated with the IOMMU instance
- reg : Address space for the configuration registers
- interrupts : Interrupt specifier for the IOMMU instance

Optional properties:
- ti,#tlb-entries : Number of entries in the translation look-aside buffer.
                    Should be either 8 or 32 (default: 32)
- ti,iommu-bus-err-back : Indicates the IOMMU instance supports throwing
                          back a bus error response on MMU faults.

Example:
        /* OMAP3 ISP MMU */
        mmu_isp: mmu@480bd400 {
                compatible = "ti,omap2-iommu";
                reg = <0x480bd400 0x80>;
                interrupts = <24>;
                ti,hwmods = "mmu_isp";
                ti,#tlb-entries = <8>;
        };
@@ -10,6 +10,7 @@
  * published by the Free Software Foundation.
  */

+#include <linux/of.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/err.h>
@@ -58,6 +59,10 @@ static int __init omap_iommu_dev_init(struct omap_hwmod *oh, void *unused)

 static int __init omap_iommu_init(void)
 {
+        /* If dtb is there, the devices will be created dynamically */
+        if (of_have_populated_dt())
+                return -ENODEV;
+
         return omap_hwmod_for_each_by_class("mmu", omap_iommu_dev_init, NULL);
 }

 /* must be ready before omap3isp is probed */
......
@@ -207,7 +207,7 @@ config SHMOBILE_IOMMU
         bool "IOMMU for Renesas IPMMU/IPMMUI"
         default n
         depends on ARM
-        depends on SH_MOBILE || COMPILE_TEST
+        depends on ARCH_SHMOBILE || COMPILE_TEST
         select IOMMU_API
         select ARM_DMA_USE_IOMMU
         select SHMOBILE_IPMMU
......
@@ -963,7 +963,7 @@ static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,

         address &= ~(0xfffULL);

-        cmd->data[0]  = pasid & PASID_MASK;
+        cmd->data[0]  = pasid;
         cmd->data[1]  = domid;
         cmd->data[2]  = lower_32_bits(address);
         cmd->data[3]  = upper_32_bits(address);
@@ -982,10 +982,10 @@ static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,

         address &= ~(0xfffULL);

         cmd->data[0]  = devid;
-        cmd->data[0] |= (pasid & 0xff) << 16;
+        cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
         cmd->data[0] |= (qdep & 0xff) << 24;
         cmd->data[1]  = devid;
-        cmd->data[1] |= ((pasid >> 8) & 0xfff) << 16;
+        cmd->data[1] |= (pasid & 0xff) << 16;
         cmd->data[2]  = lower_32_bits(address);
         cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
         cmd->data[3]  = upper_32_bits(address);
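For reference, a minimal sketch (an illustration, not code from the patch) of the corrected INVALIDATE_IOTLB_PAGES packing shown above: the upper PASID byte lands in data[0] and the lower byte in data[1], both at bit offset 16, assuming a 16-bit PASID as enforced by the new PASID_MASK:

        /* Sketch: corrected PASID placement for INVALIDATE_IOTLB_PAGES. */
        static void pack_iotlb_pasid(u32 *data, int pasid)
        {
                data[0] |= ((pasid >> 8) & 0xff) << 16;        /* PASID[15:8] */
                data[1] |= (pasid & 0xff) << 16;               /* PASID[7:0]  */
        }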
@@ -1001,7 +1001,7 @@ static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid,

         cmd->data[0]  = devid;
         if (gn) {
-                cmd->data[1]  = pasid & PASID_MASK;
+                cmd->data[1]  = pasid;
                 cmd->data[2]  = CMD_INV_IOMMU_PAGES_GN_MASK;
         }
         cmd->data[3]  = tag & 0x1ff;
......
@@ -150,7 +150,7 @@ int amd_iommus_present;

 bool amd_iommu_np_cache __read_mostly;
 bool amd_iommu_iotlb_sup __read_mostly = true;

-u32 amd_iommu_max_pasids __read_mostly = ~0;
+u32 amd_iommu_max_pasid __read_mostly = ~0;

 bool amd_iommu_v2_present __read_mostly;
 bool amd_iommu_pc_present __read_mostly;
@@ -1231,14 +1231,16 @@ static int iommu_init_pci(struct amd_iommu *iommu)

         if (iommu_feature(iommu, FEATURE_GT)) {
                 int glxval;
-                u32 pasids;
-                u64 shift;
+                u32 max_pasid;
+                u64 pasmax;

-                shift = iommu->features & FEATURE_PASID_MASK;
-                shift >>= FEATURE_PASID_SHIFT;
-                pasids = (1 << shift);
+                pasmax = iommu->features & FEATURE_PASID_MASK;
+                pasmax >>= FEATURE_PASID_SHIFT;
+                max_pasid = (1 << (pasmax + 1)) - 1;

-                amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids);
+                amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);
+
+                BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);

                 glxval = iommu->features & FEATURE_GLXVAL_MASK;
                 glxval >>= FEATURE_GLXVAL_SHIFT;
......
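As a worked example of the corrected computation (an illustration, not from the patch): a PASmax capability field of 15 yields max_pasid = (1 << (15 + 1)) - 1 = 0xffff, the largest valid PASID when 16 bits are implemented. The old code computed 1 << 15 = 0x8000, conflating the number of supported PASIDs with the maximum PASID value.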
@@ -99,7 +99,12 @@
 #define FEATURE_GLXVAL_SHIFT        14
 #define FEATURE_GLXVAL_MASK        (0x03ULL << FEATURE_GLXVAL_SHIFT)

-#define PASID_MASK                0x000fffff
+/* Note:
+ * The current driver only supports 16-bit PASIDs.
+ * Current hardware implements at most 16 PASID bits,
+ * even though the spec allows up to 20 bits.
+ */
+#define PASID_MASK                0x0000ffff

 /* MMIO status bits */
 #define MMIO_STATUS_EVT_INT_MASK        (1 << 1)
@@ -697,8 +702,8 @@ extern unsigned long *amd_iommu_pd_alloc_bitmap;
  */
 extern u32 amd_iommu_unmap_flush;

-/* Smallest number of PASIDs supported by any IOMMU in the system */
-extern u32 amd_iommu_max_pasids;
+/* Smallest max PASID supported by any IOMMU in the system */
+extern u32 amd_iommu_max_pasid;

 extern bool amd_iommu_v2_present;
......
@@ -48,7 +48,7 @@
 #include <asm/pgalloc.h>

 /* Maximum number of stream IDs assigned to a single device */
-#define MAX_MASTER_STREAMIDS                8
+#define MAX_MASTER_STREAMIDS                MAX_PHANDLE_ARGS

 /* Maximum number of context banks per SMMU */
 #define ARM_SMMU_MAX_CBS                128
@@ -60,6 +60,16 @@
 #define ARM_SMMU_GR0(smmu)                ((smmu)->base)
 #define ARM_SMMU_GR1(smmu)                ((smmu)->base + (smmu)->pagesize)

+/*
+ * SMMU global address space with conditional offset to access secure
+ * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
+ * nsGFSYNR0: 0x450)
+ */
+#define ARM_SMMU_GR0_NS(smmu)                                        \
+        ((smmu)->base +                                                \
+                ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)        \
+                        ? 0x400 : 0))
+
 /* Page table bits */
 #define ARM_SMMU_PTE_XN                        (((pteval_t)3) << 53)
 #define ARM_SMMU_PTE_CONT                (((pteval_t)1) << 52)
@@ -351,6 +361,9 @@ struct arm_smmu_device {
 #define ARM_SMMU_FEAT_TRANS_S2                (1 << 3)
 #define ARM_SMMU_FEAT_TRANS_NESTED        (1 << 4)
         u32                                features;
+
+#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
+        u32                                options;
         int                                version;

         u32                                num_context_banks;
@@ -401,6 +414,29 @@ struct arm_smmu_domain {
 static DEFINE_SPINLOCK(arm_smmu_devices_lock);
 static LIST_HEAD(arm_smmu_devices);
+struct arm_smmu_option_prop {
+        u32 opt;
+        const char *prop;
+};
+
+static struct arm_smmu_option_prop arm_smmu_options[] = {
+        { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
+        { 0, NULL},
+};
+
+static void parse_driver_options(struct arm_smmu_device *smmu)
+{
+        int i = 0;
+
+        do {
+                if (of_property_read_bool(smmu->dev->of_node,
+                                                arm_smmu_options[i].prop)) {
+                        smmu->options |= arm_smmu_options[i].opt;
+                        dev_notice(smmu->dev, "option %s\n",
+                                arm_smmu_options[i].prop);
+                }
+        } while (arm_smmu_options[++i].opt);
+}
+
 static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
                                                 struct device_node *dev_node)
 {
@@ -614,16 +650,16 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
 {
         u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
         struct arm_smmu_device *smmu = dev;
-        void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+        void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

         gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
+        if (!gfsr)
+                return IRQ_NONE;
+
         gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
         gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
         gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

-        if (!gfsr)
-                return IRQ_NONE;
-
         dev_err_ratelimited(smmu->dev,
                 "Unexpected global fault, this could be serious\n");
         dev_err_ratelimited(smmu->dev,
@@ -642,7 +678,7 @@ static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,

         /* Ensure new page tables are visible to the hardware walker */
         if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
-                dsb();
+                dsb(ishst);
         } else {
                 /*
                  * If the SMMU can't walk tables in the CPU caches, treat them
@@ -990,9 +1026,8 @@ static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain)

         /*
          * Recursively free the page tables for this domain. We don't
-         * care about speculative TLB filling, because the TLB will be
-         * nuked next time this context bank is re-allocated and no devices
-         * currently map to these tables.
+         * care about speculative TLB filling because the tables should
+         * not be active in any context bank at this point (SCTLR.M is 0).
          */
         pgd = pgd_base;
         for (i = 0; i < PTRS_PER_PGD; ++i) {
@@ -1218,7 +1253,7 @@ static bool arm_smmu_pte_is_contiguous_range(unsigned long addr,
 static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
                                    unsigned long addr, unsigned long end,
-                                   unsigned long pfn, int flags, int stage)
+                                   unsigned long pfn, int prot, int stage)
 {
         pte_t *pte, *start;
         pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN;
@@ -1240,28 +1275,28 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,

         if (stage == 1) {
                 pteval |= ARM_SMMU_PTE_AP_UNPRIV | ARM_SMMU_PTE_nG;
-                if (!(flags & IOMMU_WRITE) && (flags & IOMMU_READ))
+                if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
                         pteval |= ARM_SMMU_PTE_AP_RDONLY;

-                if (flags & IOMMU_CACHE)
+                if (prot & IOMMU_CACHE)
                         pteval |= (MAIR_ATTR_IDX_CACHE <<
                                    ARM_SMMU_PTE_ATTRINDX_SHIFT);
         } else {
                 pteval |= ARM_SMMU_PTE_HAP_FAULT;
-                if (flags & IOMMU_READ)
+                if (prot & IOMMU_READ)
                         pteval |= ARM_SMMU_PTE_HAP_READ;

-                if (flags & IOMMU_WRITE)
+                if (prot & IOMMU_WRITE)
                         pteval |= ARM_SMMU_PTE_HAP_WRITE;

-                if (flags & IOMMU_CACHE)
+                if (prot & IOMMU_CACHE)
                         pteval |= ARM_SMMU_PTE_MEMATTR_OIWB;
                 else
                         pteval |= ARM_SMMU_PTE_MEMATTR_NC;
         }

         /* If no access, create a faulting entry to avoid TLB fills */
-        if (flags & IOMMU_EXEC)
+        if (prot & IOMMU_EXEC)
                 pteval &= ~ARM_SMMU_PTE_XN;
-        else if (!(flags & (IOMMU_READ | IOMMU_WRITE)))
+        else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
                 pteval &= ~ARM_SMMU_PTE_PAGE;

         pteval |= ARM_SMMU_PTE_SH_IS;
@@ -1323,7 +1358,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,

 static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
                                    unsigned long addr, unsigned long end,
-                                   phys_addr_t phys, int flags, int stage)
+                                   phys_addr_t phys, int prot, int stage)
 {
         int ret;
         pmd_t *pmd;
@@ -1347,7 +1382,7 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
         do {
                 next = pmd_addr_end(addr, end);
                 ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn,
-                                              flags, stage);
+                                              prot, stage);
                 phys += next - addr;
         } while (pmd++, addr = next, addr < end);

@@ -1356,7 +1391,7 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,

 static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
                                    unsigned long addr, unsigned long end,
-                                   phys_addr_t phys, int flags, int stage)
+                                   phys_addr_t phys, int prot, int stage)
 {
         int ret = 0;
         pud_t *pud;
@@ -1380,7 +1415,7 @@ static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
         do {
                 next = pud_addr_end(addr, end);
                 ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys,
-                                              flags, stage);
+                                              prot, stage);
                 phys += next - addr;
         } while (pud++, addr = next, addr < end);
static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain, static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
unsigned long iova, phys_addr_t paddr, unsigned long iova, phys_addr_t paddr,
size_t size, int flags) size_t size, int prot)
{ {
int ret, stage; int ret, stage;
unsigned long end; unsigned long end;
...@@ -1397,7 +1432,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain, ...@@ -1397,7 +1432,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
pgd_t *pgd = root_cfg->pgd; pgd_t *pgd = root_cfg->pgd;
struct arm_smmu_device *smmu = root_cfg->smmu; struct arm_smmu_device *smmu = root_cfg->smmu;
unsigned long irqflags; unsigned long flags;
if (root_cfg->cbar == CBAR_TYPE_S2_TRANS) { if (root_cfg->cbar == CBAR_TYPE_S2_TRANS) {
stage = 2; stage = 2;
...@@ -1420,14 +1455,14 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain, ...@@ -1420,14 +1455,14 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
if (paddr & ~output_mask) if (paddr & ~output_mask)
return -ERANGE; return -ERANGE;
spin_lock_irqsave(&smmu_domain->lock, irqflags); spin_lock_irqsave(&smmu_domain->lock, flags);
pgd += pgd_index(iova); pgd += pgd_index(iova);
end = iova + size; end = iova + size;
do { do {
unsigned long next = pgd_addr_end(iova, end); unsigned long next = pgd_addr_end(iova, end);
ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr, ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr,
flags, stage); prot, stage);
if (ret) if (ret)
goto out_unlock; goto out_unlock;
...@@ -1436,13 +1471,13 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain, ...@@ -1436,13 +1471,13 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
} while (pgd++, iova != end); } while (pgd++, iova != end);
out_unlock: out_unlock:
spin_unlock_irqrestore(&smmu_domain->lock, irqflags); spin_unlock_irqrestore(&smmu_domain->lock, flags);
return ret; return ret;
} }
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int flags) phys_addr_t paddr, size_t size, int prot)
{ {
struct arm_smmu_domain *smmu_domain = domain->priv; struct arm_smmu_domain *smmu_domain = domain->priv;
...@@ -1453,7 +1488,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, ...@@ -1453,7 +1488,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
if ((phys_addr_t)iova & ~smmu_domain->output_mask) if ((phys_addr_t)iova & ~smmu_domain->output_mask)
return -ERANGE; return -ERANGE;
return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, flags); return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, prot);
} }
static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
@@ -1597,9 +1632,9 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
         int i = 0;
         u32 reg;

-        /* Clear Global FSR */
-        reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
-        writel(reg, gr0_base + ARM_SMMU_GR0_sGFSR);
+        /* clear global FSR */
+        reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
+        writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

         /* Mark all SMRn as invalid and all S2CRn as bypass */
         for (i = 0; i < smmu->num_mapping_groups; ++i) {
@@ -1619,7 +1654,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
         writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
         writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

-        reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0);
+        reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

         /* Enable fault reporting */
         reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
@@ -1638,7 +1673,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)

         /* Push the button */
         arm_smmu_tlb_sync(smmu);
-        writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sCR0);
+        writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
 }

 static int arm_smmu_id_size_to_bits(int size)
@@ -1885,6 +1920,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
         if (err)
                 goto out_put_parent;

+        parse_driver_options(smmu);
+
         if (smmu->version > 1 &&
             smmu->num_context_banks != smmu->num_context_irqs) {
                 dev_err(dev,
@@ -1969,7 +2006,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
                 free_irq(smmu->irqs[i], smmu);

         /* Turn the thing off */
-        writel_relaxed(sCR0_CLIENTPD, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_sCR0);
+        writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
         return 0;
 }

......
@@ -38,6 +38,17 @@ static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
 static struct hpet_scope ir_hpet[MAX_HPET_TBS];
 static int ir_ioapic_num, ir_hpet_num;

+/*
+ * Lock ordering:
+ * ->dmar_global_lock
+ *        ->irq_2_ir_lock
+ *                ->qi->q_lock
+ *        ->iommu->register_lock
+ * Note:
+ * intel_irq_remap_ops.{supported,prepare,enable,disable,reenable} are called
+ * in a single-threaded environment with interrupts disabled, so no need to
+ * take the dmar_global_lock.
+ */
 static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);

 static int __init parse_ioapics_under_ir(void);
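As an illustration of the documented ordering (a hypothetical caller, not code from the patch), dmar_global_lock is the outermost lock and irq_2_ir_lock nests inside it:

        down_read(&dmar_global_lock);           /* outermost */
        raw_spin_lock(&irq_2_ir_lock);          /* nests inside */
        /* qi->q_lock nests inside irq_2_ir_lock; iommu->register_lock
         * likewise nests inside dmar_global_lock. */
        raw_spin_unlock(&irq_2_ir_lock);
        up_read(&dmar_global_lock);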
@@ -307,12 +318,14 @@ static int set_ioapic_sid(struct irte *irte, int apic)
         if (!irte)
                 return -1;

+        down_read(&dmar_global_lock);
         for (i = 0; i < MAX_IO_APICS; i++) {
                 if (ir_ioapic[i].id == apic) {
                         sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
                         break;
                 }
         }
+        up_read(&dmar_global_lock);

         if (sid == 0) {
                 pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
@@ -332,12 +345,14 @@ static int set_hpet_sid(struct irte *irte, u8 id)
         if (!irte)
                 return -1;

+        down_read(&dmar_global_lock);
         for (i = 0; i < MAX_HPET_TBS; i++) {
                 if (ir_hpet[i].id == id) {
                         sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
                         break;
                 }
         }
+        up_read(&dmar_global_lock);

         if (sid == 0) {
                 pr_warning("Failed to set source-id of HPET block (%d)\n", id);
static int __init ir_dev_scope_init(void) static int __init ir_dev_scope_init(void)
{ {
int ret;
if (!irq_remapping_enabled) if (!irq_remapping_enabled)
return 0; return 0;
return dmar_dev_scope_init(); down_write(&dmar_global_lock);
ret = dmar_dev_scope_init();
up_write(&dmar_global_lock);
return ret;
} }
rootfs_initcall(ir_dev_scope_init); rootfs_initcall(ir_dev_scope_init);
@@ -878,23 +899,27 @@ static int intel_setup_ioapic_entry(int irq,
                                     struct io_apic_irq_attr *attr)
 {
         int ioapic_id = mpc_ioapic_id(attr->ioapic);
-        struct intel_iommu *iommu = map_ioapic_to_ir(ioapic_id);
+        struct intel_iommu *iommu;
         struct IR_IO_APIC_route_entry *entry;
         struct irte irte;
         int index;

+        down_read(&dmar_global_lock);
+        iommu = map_ioapic_to_ir(ioapic_id);
         if (!iommu) {
                 pr_warn("No mapping iommu for ioapic %d\n", ioapic_id);
-                return -ENODEV;
-        }
-
-        entry = (struct IR_IO_APIC_route_entry *)route_entry;
-
-        index = alloc_irte(iommu, irq, 1);
-        if (index < 0) {
-                pr_warn("Failed to allocate IRTE for ioapic %d\n", ioapic_id);
-                return -ENOMEM;
+                index = -ENODEV;
+        } else {
+                index = alloc_irte(iommu, irq, 1);
+                if (index < 0) {
+                        pr_warn("Failed to allocate IRTE for ioapic %d\n",
+                                ioapic_id);
+                        index = -ENOMEM;
+                }
         }
+        up_read(&dmar_global_lock);
+        if (index < 0)
+                return index;

         prepare_irte(&irte, vector, destination);
@@ -913,6 +938,7 @@ static int intel_setup_ioapic_entry(int irq,
                         irte.avail, irte.vector, irte.dest_id,
                         irte.sid, irte.sq, irte.svt);

+        entry = (struct IR_IO_APIC_route_entry *)route_entry;
         memset(entry, 0, sizeof(*entry));

         entry->index2        = (index >> 15) & 0x1;
@@ -1043,20 +1069,23 @@ static int intel_msi_alloc_irq(struct pci_dev *dev, int irq, int nvec)
         struct intel_iommu *iommu;
         int index;

+        down_read(&dmar_global_lock);
         iommu = map_dev_to_ir(dev);
         if (!iommu) {
                 printk(KERN_ERR
                        "Unable to map PCI %s to iommu\n", pci_name(dev));
-                return -ENOENT;
+                index = -ENOENT;
+        } else {
+                index = alloc_irte(iommu, irq, nvec);
+                if (index < 0) {
+                        printk(KERN_ERR
+                               "Unable to allocate %d IRTE for PCI %s\n",
+                               nvec, pci_name(dev));
+                        index = -ENOSPC;
+                }
         }
-
-        index = alloc_irte(iommu, irq, nvec);
-        if (index < 0) {
-                printk(KERN_ERR
-                       "Unable to allocate %d IRTE for PCI %s\n", nvec,
-                       pci_name(dev));
-                return -ENOSPC;
-        }
+        up_read(&dmar_global_lock);

         return index;
 }
@@ -1064,33 +1093,40 @@ static int intel_msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
                                int index, int sub_handle)
 {
         struct intel_iommu *iommu;
+        int ret = -ENOENT;

+        down_read(&dmar_global_lock);
         iommu = map_dev_to_ir(pdev);
-        if (!iommu)
-                return -ENOENT;
-        /*
-         * setup the mapping between the irq and the IRTE
-         * base index, the sub_handle pointing to the
-         * appropriate interrupt remap table entry.
-         */
-        set_irte_irq(irq, iommu, index, sub_handle);
+        if (iommu) {
+                /*
+                 * setup the mapping between the irq and the IRTE
+                 * base index, the sub_handle pointing to the
+                 * appropriate interrupt remap table entry.
+                 */
+                set_irte_irq(irq, iommu, index, sub_handle);
+                ret = 0;
+        }
+        up_read(&dmar_global_lock);

-        return 0;
+        return ret;
 }

 static int intel_setup_hpet_msi(unsigned int irq, unsigned int id)
 {
-        struct intel_iommu *iommu = map_hpet_to_ir(id);
+        int ret = -1;
+        struct intel_iommu *iommu;
         int index;

-        if (!iommu)
-                return -1;
-
-        index = alloc_irte(iommu, irq, 1);
-        if (index < 0)
-                return -1;
+        down_read(&dmar_global_lock);
+        iommu = map_hpet_to_ir(id);
+        if (iommu) {
+                index = alloc_irte(iommu, irq, 1);
+                if (index >= 0)
+                        ret = 0;
+        }
+        up_read(&dmar_global_lock);

-        return 0;
+        return ret;
 }

 struct irq_remap_ops intel_irq_remap_ops = {
......
@@ -342,19 +342,30 @@ __is_range_overlap(struct rb_node *node,
         return 0;
 }

+static inline struct iova *
+alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
+{
+        struct iova *iova;
+
+        iova = alloc_iova_mem();
+        if (iova) {
+                iova->pfn_lo = pfn_lo;
+                iova->pfn_hi = pfn_hi;
+        }
+
+        return iova;
+}
+
 static struct iova *
 __insert_new_range(struct iova_domain *iovad,
         unsigned long pfn_lo, unsigned long pfn_hi)
 {
         struct iova *iova;

-        iova = alloc_iova_mem();
-        if (!iova)
-                return iova;
-        iova->pfn_hi = pfn_hi;
-        iova->pfn_lo = pfn_lo;
-        iova_insert_rbtree(&iovad->rbroot, iova);
+        iova = alloc_and_init_iova(pfn_lo, pfn_hi);
+        if (iova)
+                iova_insert_rbtree(&iovad->rbroot, iova);

         return iova;
 }
@@ -433,3 +444,44 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
         }
         spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
 }
+
+struct iova *
+split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
+                      unsigned long pfn_lo, unsigned long pfn_hi)
+{
+        unsigned long flags;
+        struct iova *prev = NULL, *next = NULL;
+
+        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+        if (iova->pfn_lo < pfn_lo) {
+                prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
+                if (prev == NULL)
+                        goto error;
+        }
+        if (iova->pfn_hi > pfn_hi) {
+                next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
+                if (next == NULL)
+                        goto error;
+        }
+
+        __cached_rbnode_delete_update(iovad, iova);
+        rb_erase(&iova->node, &iovad->rbroot);
+
+        if (prev) {
+                iova_insert_rbtree(&iovad->rbroot, prev);
+                iova->pfn_lo = pfn_lo;
+        }
+        if (next) {
+                iova_insert_rbtree(&iovad->rbroot, next);
+                iova->pfn_hi = pfn_hi;
+        }
+        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+
+        return iova;
+
+error:
+        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+        if (prev)
+                free_iova_mem(prev);
+        return NULL;
+}
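A usage sketch (hypothetical caller and pfn values, not from the patch): carving the sub-range [0x120, 0x12f] out of an iova covering [0x100, 0x1ff] leaves the two surrounding pieces in the rbtree and hands the middle piece back to the caller:

        /* tree before: one node covering [0x100, 0x1ff] */
        struct iova *mid = split_and_remove_iova(iovad, iova, 0x120, 0x12f);
        /* tree after: [0x100, 0x11f] and [0x130, 0x1ff]; mid covers
         * [0x120, 0x12f] and has been erased from the tree, so the
         * caller owns it and frees it when done. */
        if (mid)
                free_iova_mem(mid);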
@@ -23,6 +23,9 @@
 #include <linux/spinlock.h>
 #include <linux/io.h>
 #include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_iommu.h>
+#include <linux/of_irq.h>

 #include <asm/cacheflush.h>

@@ -146,13 +149,10 @@ static int iommu_enable(struct omap_iommu *obj)
         struct platform_device *pdev = to_platform_device(obj->dev);
         struct iommu_platform_data *pdata = pdev->dev.platform_data;

-        if (!pdata)
-                return -EINVAL;
-
         if (!arch_iommu)
                 return -ENODEV;

-        if (pdata->deassert_reset) {
+        if (pdata && pdata->deassert_reset) {
                 err = pdata->deassert_reset(pdev, pdata->reset_name);
                 if (err) {
                         dev_err(obj->dev, "deassert_reset failed: %d\n", err);
@@ -172,14 +172,11 @@ static void iommu_disable(struct omap_iommu *obj)
         struct platform_device *pdev = to_platform_device(obj->dev);
         struct iommu_platform_data *pdata = pdev->dev.platform_data;

-        if (!pdata)
-                return;
-
         arch_iommu->disable(obj);

         pm_runtime_put_sync(obj->dev);

-        if (pdata->assert_reset)
+        if (pdata && pdata->assert_reset)
                 pdata->assert_reset(pdev, pdata->reset_name);
 }

@@ -523,7 +520,8 @@ static void flush_iopte_range(u32 *first, u32 *last)
 static void iopte_free(u32 *iopte)
 {
         /* Note: freed iopte's must be clean ready for re-use */
-        kmem_cache_free(iopte_cachep, iopte);
+        if (iopte)
+                kmem_cache_free(iopte_cachep, iopte);
 }

 static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
@@ -863,7 +861,7 @@ static int device_match_by_alias(struct device *dev, void *data)
 **/
 static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
 {
-        int err = -ENOMEM;
+        int err;
         struct device *dev;
         struct omap_iommu *obj;

@@ -871,7 +869,7 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
                          (void *)name,
                          device_match_by_alias);
         if (!dev)
-                return NULL;
+                return ERR_PTR(-ENODEV);

         obj = to_iommu(dev);

@@ -890,8 +888,10 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
                 goto err_enable;
         flush_iotlb_all(obj);

-        if (!try_module_get(obj->owner))
+        if (!try_module_get(obj->owner)) {
+                err = -ENODEV;
                 goto err_module;
+        }

         spin_unlock(&obj->iommu_lock);
@@ -940,17 +940,41 @@ static int omap_iommu_probe(struct platform_device *pdev)
         struct omap_iommu *obj;
         struct resource *res;
         struct iommu_platform_data *pdata = pdev->dev.platform_data;
+        struct device_node *of = pdev->dev.of_node;

-        obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
+        obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
         if (!obj)
                 return -ENOMEM;

-        obj->nr_tlb_entries = pdata->nr_tlb_entries;
-        obj->name = pdata->name;
+        if (of) {
+                obj->name = dev_name(&pdev->dev);
+                obj->nr_tlb_entries = 32;
+                err = of_property_read_u32(of, "ti,#tlb-entries",
+                                           &obj->nr_tlb_entries);
+                if (err && err != -EINVAL)
+                        return err;
+                if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
+                        return -EINVAL;
+                /*
+                 * da_start and da_end are needed for omap-iovmm, so hardcode
+                 * these values as used by OMAP3 ISP - the only user for
+                 * omap-iovmm
+                 */
+                obj->da_start = 0;
+                obj->da_end = 0xfffff000;
+                if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
+                        obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;
+        } else {
+                obj->nr_tlb_entries = pdata->nr_tlb_entries;
+                obj->name = pdata->name;
+                obj->da_start = pdata->da_start;
+                obj->da_end = pdata->da_end;
+        }
+        if (obj->da_end <= obj->da_start)
+                return -EINVAL;
+
         obj->dev = &pdev->dev;
         obj->ctx = (void *)obj + sizeof(*obj);
-        obj->da_start = pdata->da_start;
-        obj->da_end = pdata->da_end;

         spin_lock_init(&obj->iommu_lock);
         mutex_init(&obj->mmap_lock);
@@ -958,33 +982,18 @@ static int omap_iommu_probe(struct platform_device *pdev)
         INIT_LIST_HEAD(&obj->mmap);

         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-        if (!res) {
-                err = -ENODEV;
-                goto err_mem;
-        }
-
-        res = request_mem_region(res->start, resource_size(res),
-                                 dev_name(&pdev->dev));
-        if (!res) {
-                err = -EIO;
-                goto err_mem;
-        }
-
-        obj->regbase = ioremap(res->start, resource_size(res));
-        if (!obj->regbase) {
-                err = -ENOMEM;
-                goto err_ioremap;
-        }
+        obj->regbase = devm_ioremap_resource(obj->dev, res);
+        if (IS_ERR(obj->regbase))
+                return PTR_ERR(obj->regbase);

         irq = platform_get_irq(pdev, 0);
-        if (irq < 0) {
-                err = -ENODEV;
-                goto err_irq;
-        }
-        err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
-                          dev_name(&pdev->dev), obj);
+        if (irq < 0)
+                return -ENODEV;
+
+        err = devm_request_irq(obj->dev, irq, iommu_fault_handler, IRQF_SHARED,
+                               dev_name(obj->dev), obj);
         if (err < 0)
-                goto err_irq;
+                return err;
         platform_set_drvdata(pdev, obj);

         pm_runtime_irq_safe(obj->dev);
@@ -992,42 +1001,34 @@ static int omap_iommu_probe(struct platform_device *pdev)
         dev_info(&pdev->dev, "%s registered\n", obj->name);
         return 0;

-err_irq:
-        iounmap(obj->regbase);
-err_ioremap:
-        release_mem_region(res->start, resource_size(res));
-err_mem:
-        kfree(obj);
-        return err;
 }
 static int omap_iommu_remove(struct platform_device *pdev)
 {
-        int irq;
-        struct resource *res;
         struct omap_iommu *obj = platform_get_drvdata(pdev);

         iopgtable_clear_entry_all(obj);

-        irq = platform_get_irq(pdev, 0);
-        free_irq(irq, obj);
-        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-        release_mem_region(res->start, resource_size(res));
-        iounmap(obj->regbase);
-
         pm_runtime_disable(obj->dev);

         dev_info(&pdev->dev, "%s removed\n", obj->name);
-        kfree(obj);
         return 0;
 }

+static struct of_device_id omap_iommu_of_match[] = {
+        { .compatible = "ti,omap2-iommu" },
+        { .compatible = "ti,omap4-iommu" },
+        { .compatible = "ti,dra7-iommu" },
+        {},
+};
+MODULE_DEVICE_TABLE(of, omap_iommu_of_match);
+
 static struct platform_driver omap_iommu_driver = {
         .probe        = omap_iommu_probe,
         .remove        = omap_iommu_remove,
         .driver        = {
                 .name        = "omap-iommu",
+                .of_match_table = of_match_ptr(omap_iommu_of_match),
         },
 };
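The shrinking of omap_iommu_remove() above follows directly from the devm_* conversion in the probe path: the IRQ, the MMIO mapping and region, and the omap_iommu allocation itself are now released automatically when the device is unbound, so the explicit error labels and the manual teardown drop away.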
@@ -1253,6 +1254,49 @@ static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
         return 0;
 }

+static int omap_iommu_add_device(struct device *dev)
+{
+        struct omap_iommu_arch_data *arch_data;
+        struct device_node *np;
+
+        /*
+         * Allocate the archdata iommu structure for DT-based devices.
+         *
+         * TODO: Simplify this when removing non-DT support completely from the
+         * IOMMU users.
+         */
+        if (!dev->of_node)
+                return 0;
+
+        np = of_parse_phandle(dev->of_node, "iommus", 0);
+        if (!np)
+                return 0;
+
+        arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL);
+        if (!arch_data) {
+                of_node_put(np);
+                return -ENOMEM;
+        }
+
+        arch_data->name = kstrdup(dev_name(dev), GFP_KERNEL);
+        dev->archdata.iommu = arch_data;
+
+        of_node_put(np);
+
+        return 0;
+}
+
+static void omap_iommu_remove_device(struct device *dev)
+{
+        struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+
+        if (!dev->of_node || !arch_data)
+                return;
+
+        kfree(arch_data->name);
+        kfree(arch_data);
+}
+
 static struct iommu_ops omap_iommu_ops = {
         .domain_init        = omap_iommu_domain_init,
         .domain_destroy        = omap_iommu_domain_destroy,
@@ -1262,6 +1306,8 @@ static struct iommu_ops omap_iommu_ops = {
         .unmap                = omap_iommu_unmap,
         .iova_to_phys        = omap_iommu_iova_to_phys,
         .domain_has_cap        = omap_iommu_domain_has_cap,
+        .add_device        = omap_iommu_add_device,
+        .remove_device        = omap_iommu_remove_device,
         .pgsize_bitmap        = OMAP_IOMMU_PGSIZES,
 };
......
@@ -52,6 +52,8 @@ struct omap_iommu {
         void                *ctx; /* iommu context: registers saved area */
         u32                da_start;
         u32                da_end;
+
+        int                has_bus_err_back;
 };

 struct cr_regs {
@@ -130,6 +132,7 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
 #define MMU_READ_CAM        0x68
 #define MMU_READ_RAM        0x6c
 #define MMU_EMU_FAULT_AD        0x70
+#define MMU_GP_REG        0x88

 #define MMU_REG_SIZE        256

@@ -163,6 +166,8 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
 #define MMU_RAM_MIXED_MASK        (1 << MMU_RAM_MIXED_SHIFT)
 #define MMU_RAM_MIXED                MMU_RAM_MIXED_MASK

+#define MMU_GP_REG_BUS_ERR_BACK_EN        0x1
+
 /*
  * utilities for super page(16MB, 1MB, 64KB and 4KB)
  */
......
@@ -98,6 +98,9 @@ static int omap2_iommu_enable(struct omap_iommu *obj)

         iommu_write_reg(obj, pa, MMU_TTB);

+        if (obj->has_bus_err_back)
+                iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);
+
         __iommu_set_twl(obj, true);

         return 0;
......
@@ -424,7 +424,8 @@ enum acpi_dmar_type {
         ACPI_DMAR_TYPE_RESERVED_MEMORY = 1,
         ACPI_DMAR_TYPE_ATSR = 2,
         ACPI_DMAR_HARDWARE_AFFINITY = 3,
-        ACPI_DMAR_TYPE_RESERVED = 4        /* 4 and greater are reserved */
+        ACPI_DMAR_TYPE_ANDD = 4,
+        ACPI_DMAR_TYPE_RESERVED = 5        /* 5 and greater are reserved */
 };

 /* DMAR Device Scope structure */
@@ -445,7 +446,8 @@ enum acpi_dmar_scope_type {
         ACPI_DMAR_SCOPE_TYPE_BRIDGE = 2,
         ACPI_DMAR_SCOPE_TYPE_IOAPIC = 3,
         ACPI_DMAR_SCOPE_TYPE_HPET = 4,
-        ACPI_DMAR_SCOPE_TYPE_RESERVED = 5        /* 5 and greater are reserved */
+        ACPI_DMAR_SCOPE_TYPE_ACPI = 5,
+        ACPI_DMAR_SCOPE_TYPE_RESERVED = 6        /* 6 and greater are reserved */
 };

 struct acpi_dmar_pci_path {
@@ -507,6 +509,15 @@ struct acpi_dmar_rhsa {
         u32 proximity_domain;
 };

+/* 4: ACPI Namespace Device Declaration Structure */
+
+struct acpi_dmar_andd {
+        struct acpi_dmar_header header;
+        u8 reserved[3];
+        u8 device_number;
+        u8 object_name[];
+};
+
 /*******************************************************************************
  *
  * HPET - High Precision Event Timer table
......
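A consumer-side sketch (hypothetical, not from this series): given a struct acpi_dmar_andd parsed out of the DMAR table, object_name is the flexible array member holding the device's ACPI namepath, which the ACPI specification defines as a NUL-terminated ASCII string:

        pr_info("ANDD device %u: %s\n",
                andd->device_number, (const char *)andd->object_name);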
@@ -25,6 +25,8 @@
 #include <linux/types.h>
 #include <linux/msi.h>
 #include <linux/irqreturn.h>
+#include <linux/rwsem.h>
+#include <linux/rcupdate.h>

 struct acpi_dmar_header;

@@ -34,13 +36,19 @@ struct acpi_dmar_header;

 struct intel_iommu;

+struct dmar_dev_scope {
+        struct device __rcu *dev;
+        u8 bus;
+        u8 devfn;
+};
+
 #ifdef CONFIG_DMAR_TABLE
 extern struct acpi_table_header *dmar_tbl;
 struct dmar_drhd_unit {
         struct list_head list;                /* list of drhd units        */
         struct acpi_dmar_header *hdr;        /* ACPI header                */
         u64        reg_base_addr;                /* register base address*/
-        struct        pci_dev **devices;        /* target device array        */
+        struct        dmar_dev_scope *devices;/* target device array        */
         int        devices_cnt;                /* target device count        */
         u16        segment;                /* PCI domain                */
         u8        ignored:1;                /* ignore drhd                */
@@ -48,33 +56,66 @@ struct dmar_drhd_unit {
         struct intel_iommu *iommu;
 };

+struct dmar_pci_notify_info {
+        struct pci_dev                        *dev;
+        unsigned long                        event;
+        int                                bus;
+        u16                                seg;
+        u16                                level;
+        struct acpi_dmar_pci_path        path[];
+}  __attribute__((packed));
+
+extern struct rw_semaphore dmar_global_lock;
 extern struct list_head dmar_drhd_units;

 #define for_each_drhd_unit(drhd) \
-        list_for_each_entry(drhd, &dmar_drhd_units, list)
+        list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)

 #define for_each_active_drhd_unit(drhd)                                        \
-        list_for_each_entry(drhd, &dmar_drhd_units, list)                \
+        list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)                \
                 if (drhd->ignored) {} else

 #define for_each_active_iommu(i, drhd)                                        \
-        list_for_each_entry(drhd, &dmar_drhd_units, list)                \
+        list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)                \
                 if (i=drhd->iommu, drhd->ignored) {} else

 #define for_each_iommu(i, drhd)                                                \
-        list_for_each_entry(drhd, &dmar_drhd_units, list)                \
+        list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)                \
                 if (i=drhd->iommu, 0) {} else

+static inline bool dmar_rcu_check(void)
+{
+        return rwsem_is_locked(&dmar_global_lock) ||
+               system_state == SYSTEM_BOOTING;
+}
+
+#define dmar_rcu_dereference(p)        rcu_dereference_check((p), dmar_rcu_check())
+
+#define for_each_dev_scope(a, c, p, d)        \
+        for ((p) = 0; ((d) = (p) < (c) ? dmar_rcu_dereference((a)[(p)].dev) : \
+                        NULL, (p) < (c)); (p)++)
+
+#define for_each_active_dev_scope(a, c, p, d)        \
+        for_each_dev_scope((a), (c), (p), (d))        if (!(d)) { continue; } else
+
 extern int dmar_table_init(void);
 extern int dmar_dev_scope_init(void);
 extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
-                                struct pci_dev ***devices, u16 segment);
-extern void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt);
+                                struct dmar_dev_scope **devices, u16 segment);
+extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt);
+extern void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt);
+extern int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
+                                 void *start, void*end, u16 segment,
+                                 struct dmar_dev_scope *devices,
+                                 int devices_cnt);
+extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
+                                 u16 segment, struct dmar_dev_scope *devices,
+                                 int count);

 /* Intel IOMMU detection */
 extern int detect_intel_iommu(void);
 extern int enable_drhd_fault_handling(void);
 #else
+struct dmar_pci_notify_info;
+
 static inline int detect_intel_iommu(void)
 {
         return -ENODEV;
@@ -138,30 +179,9 @@ extern int arch_setup_dmar_msi(unsigned int irq);

 #ifdef CONFIG_INTEL_IOMMU
 extern int iommu_detected, no_iommu;
-extern struct list_head dmar_rmrr_units;
-struct dmar_rmrr_unit {
-        struct list_head list;                /* list of rmrr units        */
-        struct acpi_dmar_header *hdr;        /* ACPI header                */
-        u64        base_address;                /* reserved base address*/
-        u64        end_address;                /* reserved end address */
-        struct pci_dev **devices;        /* target devices        */
-        int        devices_cnt;                /* target device count        */
-};
-
-#define for_each_rmrr_units(rmrr) \
-        list_for_each_entry(rmrr, &dmar_rmrr_units, list)
-
-struct dmar_atsr_unit {
-        struct list_head list;                /* list of ATSR units        */
-        struct acpi_dmar_header *hdr;        /* ACPI header                */
-        struct pci_dev **devices;        /* target devices        */
-        int        devices_cnt;                /* target device count        */
-        u8        include_all:1;                /* include all ports        */
-};
-
-int dmar_parse_rmrr_atsr_dev(void);
 extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header);
 extern int dmar_parse_one_atsr(struct acpi_dmar_header *header);
+extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
 extern int intel_iommu_init(void);
 #else /* !CONFIG_INTEL_IOMMU: */
 static inline int intel_iommu_init(void) { return -ENODEV; }
@@ -173,7 +193,7 @@ static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header)
 {
         return 0;
 }
-static inline int dmar_parse_rmrr_atsr_dev(void)
+static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
 {
         return 0;
 }
......
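A usage sketch of the new device-scope iterators (a hypothetical walker; it assumes the caller holds dmar_global_lock or an RCU read-side critical section, which is what dmar_rcu_check() enforces):

        struct dmar_drhd_unit *drhd;
        struct device *dev;
        int i;

        for_each_active_drhd_unit(drhd) {
                /* skips empty slots: (d) is NULL for unset entries */
                for_each_active_dev_scope(drhd->devices,
                                          drhd->devices_cnt, i, dev)
                        dev_info(dev, "within this DRHD's scope\n");
        }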
@@ -319,6 +319,7 @@ struct intel_iommu {
         int                agaw; /* agaw of this iommu */
         int                msagaw; /* max sagaw of this iommu */
         unsigned int        irq;
+        u16                segment;     /* PCI segment# */
         unsigned char        name[13];    /* Device Name */

 #ifdef CONFIG_INTEL_IOMMU
......
@@ -47,5 +47,7 @@ void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
 void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit);
 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
 void put_iova_domain(struct iova_domain *iovad);
+struct iova *split_and_remove_iova(struct iova_domain *iovad,
+        struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);

 #endif