Commit b3a4bcaa authored by Linus Torvalds

Merge tag 'iommu-updates-v3.14' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU Updates from Joerg Roedel:
 "A few patches have been queued up for this merge window:

   - improvements for the ARM-SMMU driver (IOMMU_EXEC support, IOMMU
     group support)
   - updates and fixes for the shmobile IOMMU driver
   - various fixes to generic IOMMU code and the Intel IOMMU driver
   - some cleanups in IOMMU drivers (dev_is_pci() usage)"

* tag 'iommu-updates-v3.14' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (36 commits)
  iommu/vt-d: Fix signedness bug in alloc_irte()
  iommu/vt-d: free all resources if failed to initialize DMARs
  iommu/vt-d, trivial: clean sparse warnings
  iommu/vt-d: fix wrong return value of dmar_table_init()
  iommu/vt-d: release invalidation queue when destroying IOMMU unit
  iommu/vt-d: fix access after free issue in function free_dmar_iommu()
  iommu/vt-d: keep shared resources when failed to initialize iommu devices
  iommu/vt-d: fix invalid memory access when freeing DMAR irq
  iommu/vt-d, trivial: simplify code with existing macros
  iommu/vt-d, trivial: use defined macro instead of hardcoding
  iommu/vt-d: mark internal functions as static
  iommu/vt-d, trivial: clean up unused code
  iommu/vt-d, trivial: check suitable flag in function detect_intel_iommu()
  iommu/vt-d, trivial: print correct domain id of static identity domain
  iommu/vt-d, trivial: refine support of 64bit guest address
  iommu/vt-d: fix resource leakage on error recovery path in iommu_init_domains()
  iommu/vt-d: fix a race window in allocating domain ID for virtual machines
  iommu/vt-d: fix PCI device reference leakage on error recovery path
  drm/msm: Fix link error with !MSM_IOMMU
  iommu/vt-d: use dedicated bitmap to track remapping entry allocation status
  ...
parents 17c7f854 dd1a1756
@@ -4,6 +4,7 @@ config DRM_MSM
 	depends on DRM
 	depends on ARCH_MSM
 	depends on ARCH_MSM8960
+	depends on MSM_IOMMU
 	select DRM_KMS_HELPER
 	select SHMEM
 	select TMPFS
...
@@ -207,6 +207,7 @@ config SHMOBILE_IOMMU
 	bool "IOMMU for Renesas IPMMU/IPMMUI"
 	default n
 	depends on ARM
+	depends on SH_MOBILE || COMPILE_TEST
 	select IOMMU_API
 	select ARM_DMA_USE_IOMMU
 	select SHMOBILE_IPMMU
...
@@ -248,8 +248,8 @@ static bool check_device(struct device *dev)
 	if (!dev || !dev->dma_mask)
 		return false;
 
-	/* No device or no PCI device */
-	if (dev->bus != &pci_bus_type)
+	/* No PCI device */
+	if (!dev_is_pci(dev))
 		return false;
 
 	devid = get_device_id(dev);
...
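The dev_is_pci() conversions here (and in fsl_pamu further down) lean on the small helper from include/linux/pci.h, which replaces the open-coded bus comparison removed above. A minimal sketch of what the helper expands to, paraphrased from the header:

    /* compares the device's bus against the global PCI bus type;
     * equivalent to the open-coded "dev->bus == &pci_bus_type" test */
    #define dev_is_pci(d) ((d)->bus == &pci_bus_type)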
@@ -24,7 +24,7 @@
  * - v7/v8 long-descriptor format
  * - Non-secure access to the SMMU
  * - 4k and 64k pages, with contiguous pte hints.
- * - Up to 39-bit addressing
+ * - Up to 42-bit addressing (dependent on VA_BITS)
  * - Context fault reporting
  */
@@ -61,12 +61,13 @@
 #define ARM_SMMU_GR1(smmu)	((smmu)->base + (smmu)->pagesize)
 
 /* Page table bits */
-#define ARM_SMMU_PTE_PAGE	(((pteval_t)3) << 0)
+#define ARM_SMMU_PTE_XN		(((pteval_t)3) << 53)
 #define ARM_SMMU_PTE_CONT	(((pteval_t)1) << 52)
 #define ARM_SMMU_PTE_AF		(((pteval_t)1) << 10)
 #define ARM_SMMU_PTE_SH_NS	(((pteval_t)0) << 8)
 #define ARM_SMMU_PTE_SH_OS	(((pteval_t)2) << 8)
 #define ARM_SMMU_PTE_SH_IS	(((pteval_t)3) << 8)
+#define ARM_SMMU_PTE_PAGE	(((pteval_t)3) << 0)
 
 #if PAGE_SIZE == SZ_4K
 #define ARM_SMMU_PTE_CONT_ENTRIES	16
@@ -1205,7 +1206,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
 			       unsigned long pfn, int flags, int stage)
 {
 	pte_t *pte, *start;
-	pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF;
+	pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN;
 
 	if (pmd_none(*pmd)) {
 		/* Allocate a new set of tables */
@@ -1244,7 +1245,9 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
 	}
 
 	/* If no access, create a faulting entry to avoid TLB fills */
-	if (!(flags & (IOMMU_READ | IOMMU_WRITE)))
+	if (flags & IOMMU_EXEC)
+		pteval &= ~ARM_SMMU_PTE_XN;
+	else if (!(flags & (IOMMU_READ | IOMMU_WRITE)))
 		pteval &= ~ARM_SMMU_PTE_PAGE;
 
 	pteval |= ARM_SMMU_PTE_SH_IS;
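A consolidated view of the execute-never policy introduced by these two hunks: every new PTE starts life with the XN bits set, so mappings are non-executable by default, and only an explicit IOMMU_EXEC request clears them. A minimal restatement, using the names from the hunks above:

    /* PTEs are born execute-never */
    pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN;

    if (flags & IOMMU_EXEC)
        pteval &= ~ARM_SMMU_PTE_XN;             /* executable only on request */
    else if (!(flags & (IOMMU_READ | IOMMU_WRITE)))
        pteval &= ~ARM_SMMU_PTE_PAGE;           /* no access: faulting entry */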
@@ -1494,6 +1497,13 @@ static int arm_smmu_add_device(struct device *dev)
 {
 	struct arm_smmu_device *child, *parent, *smmu;
 	struct arm_smmu_master *master = NULL;
+	struct iommu_group *group;
+	int ret;
+
+	if (dev->archdata.iommu) {
+		dev_warn(dev, "IOMMU driver already assigned to device\n");
+		return -EINVAL;
+	}
 
 	spin_lock(&arm_smmu_devices_lock);
 	list_for_each_entry(parent, &arm_smmu_devices, list) {
@@ -1526,13 +1536,23 @@ static int arm_smmu_add_device(struct device *dev)
 	if (!master)
 		return -ENODEV;
 
+	group = iommu_group_alloc();
+	if (IS_ERR(group)) {
+		dev_err(dev, "Failed to allocate IOMMU group\n");
+		return PTR_ERR(group);
+	}
+
+	ret = iommu_group_add_device(group, dev);
+	iommu_group_put(group);
 	dev->archdata.iommu = smmu;
-	return 0;
+
+	return ret;
 }
 
 static void arm_smmu_remove_device(struct device *dev)
 {
 	dev->archdata.iommu = NULL;
+	iommu_group_remove_device(dev);
 }
 
 static struct iommu_ops arm_smmu_ops = {
@@ -1730,7 +1750,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 	 * allocation (PTRS_PER_PGD).
 	 */
 #ifdef CONFIG_64BIT
-	/* Current maximum output size of 39 bits */
 	smmu->s1_output_size = min(39UL, size);
 #else
 	smmu->s1_output_size = min(32UL, size);
@@ -1745,7 +1764,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 	} else {
 #ifdef CONFIG_64BIT
 		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
-		size = min(39, arm_smmu_id_size_to_bits(size));
+		size = min(VA_BITS, arm_smmu_id_size_to_bits(size));
 #else
 		size = 32;
 #endif
...
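The group handling added to arm_smmu_add_device() follows the standard IOMMU group life cycle: iommu_group_add_device() takes its own reference on the group, so the reference obtained from iommu_group_alloc() is dropped immediately afterwards and the group stays alive through the device. A minimal sketch of the pattern (example_add_device is a hypothetical name):

    static int example_add_device(struct device *dev)
    {
        struct iommu_group *group;
        int ret;

        group = iommu_group_alloc();
        if (IS_ERR(group))
            return PTR_ERR(group);

        ret = iommu_group_add_device(group, dev);
        iommu_group_put(group);     /* group lives on via the device */
        return ret;
    }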
@@ -52,6 +52,9 @@ LIST_HEAD(dmar_drhd_units);
 struct acpi_table_header * __initdata dmar_tbl;
 static acpi_size dmar_tbl_size;
 
+static int alloc_iommu(struct dmar_drhd_unit *drhd);
+static void free_iommu(struct intel_iommu *iommu);
+
 static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
 {
 	/*
@@ -100,7 +103,6 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
 	if (!pdev) {
 		pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
 			segment, scope->bus, path->device, path->function);
-		*dev = NULL;
 		return 0;
 	}
 	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \
@@ -151,7 +153,7 @@ int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
 			ret = dmar_parse_one_dev_scope(scope,
 				&(*devices)[index], segment);
 			if (ret) {
-				kfree(*devices);
+				dmar_free_dev_scope(devices, cnt);
 				return ret;
 			}
 			index ++;
@@ -162,6 +164,17 @@ int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
 	return 0;
 }
 
+void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt)
+{
+	if (*devices && *cnt) {
+		while (--*cnt >= 0)
+			pci_dev_put((*devices)[*cnt]);
+		kfree(*devices);
+		*devices = NULL;
+		*cnt = 0;
+	}
+}
+
 /**
  * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
  * structure which uniquely represent one DMA remapping hardware unit
@@ -193,25 +206,28 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
 	return 0;
 }
 
+static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
+{
+	if (dmaru->devices && dmaru->devices_cnt)
+		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
+	if (dmaru->iommu)
+		free_iommu(dmaru->iommu);
+	kfree(dmaru);
+}
+
 static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
 {
 	struct acpi_dmar_hardware_unit *drhd;
-	int ret = 0;
 
 	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
 
 	if (dmaru->include_all)
 		return 0;
 
-	ret = dmar_parse_dev_scope((void *)(drhd + 1),
-				((void *)drhd) + drhd->header.length,
-				&dmaru->devices_cnt, &dmaru->devices,
-				drhd->segment);
-	if (ret) {
-		list_del(&dmaru->list);
-		kfree(dmaru);
-	}
-
-	return ret;
+	return dmar_parse_dev_scope((void *)(drhd + 1),
+				    ((void *)drhd) + drhd->header.length,
+				    &dmaru->devices_cnt, &dmaru->devices,
+				    drhd->segment);
 }
 
 #ifdef CONFIG_ACPI_NUMA
@@ -423,7 +439,7 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev)
 int __init dmar_dev_scope_init(void)
 {
 	static int dmar_dev_scope_initialized;
-	struct dmar_drhd_unit *drhd, *drhd_n;
+	struct dmar_drhd_unit *drhd;
 	int ret = -ENODEV;
 
 	if (dmar_dev_scope_initialized)
@@ -432,7 +448,7 @@ int __init dmar_dev_scope_init(void)
 	if (list_empty(&dmar_drhd_units))
 		goto fail;
 
-	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
+	list_for_each_entry(drhd, &dmar_drhd_units, list) {
 		ret = dmar_parse_dev(drhd);
 		if (ret)
 			goto fail;
@@ -456,24 +472,23 @@ int __init dmar_table_init(void)
 	static int dmar_table_initialized;
 	int ret;
 
-	if (dmar_table_initialized)
-		return 0;
-
-	dmar_table_initialized = 1;
-
-	ret = parse_dmar_table();
-	if (ret) {
-		if (ret != -ENODEV)
-			pr_info("parse DMAR table failure.\n");
-		return ret;
-	}
-
-	if (list_empty(&dmar_drhd_units)) {
-		pr_info("No DMAR devices found\n");
-		return -ENODEV;
+	if (dmar_table_initialized == 0) {
+		ret = parse_dmar_table();
+		if (ret < 0) {
+			if (ret != -ENODEV)
+				pr_info("parse DMAR table failure.\n");
+		} else if (list_empty(&dmar_drhd_units)) {
+			pr_info("No DMAR devices found\n");
+			ret = -ENODEV;
+		}
+
+		if (ret < 0)
+			dmar_table_initialized = ret;
+		else
+			dmar_table_initialized = 1;
 	}
 
-	return 0;
+	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
 }
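The rewrite turns the static flag into a tri-state memo: 0 means the table has not been parsed yet, 1 means success, and a negative value caches the errno of a failed parse, so repeated callers no longer see a spurious success after an earlier failure. A generic sketch of the pattern (do_init_once and expensive_parse are hypothetical names):

    static int init_state;  /* 0: not run, 1: ok, <0: cached errno */

    int do_init_once(void)
    {
        if (init_state == 0) {
            int ret = expensive_parse();    /* hypothetical worker */
            init_state = (ret < 0) ? ret : 1;
        }
        return init_state < 0 ? init_state : 0;
    }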
@@ -488,7 +503,7 @@ static void warn_invalid_dmar(u64 addr, const char *message)
 		dmi_get_system_info(DMI_PRODUCT_VERSION));
 }
 
-int __init check_zero_address(void)
+static int __init check_zero_address(void)
 {
 	struct acpi_table_dmar *dmar;
 	struct acpi_dmar_header *entry_header;
@@ -546,14 +561,6 @@ int __init detect_intel_iommu(void)
 	if (ret)
 		ret = check_zero_address();
 	{
-		struct acpi_table_dmar *dmar;
-
-		dmar = (struct acpi_table_dmar *) dmar_tbl;
-
-		if (ret && irq_remapping_enabled && cpu_has_x2apic &&
-		    dmar->flags & 0x1)
-			pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
-
 		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
 			iommu_detected = 1;
 			/* Make sure ACS will be enabled */
@@ -565,7 +572,7 @@ int __init detect_intel_iommu(void)
 		x86_init.iommu.iommu_init = intel_iommu_init;
 #endif
 	}
-	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
+	early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
 	dmar_tbl = NULL;
 
 	return ret ? 1 : -ENODEV;
@@ -647,7 +654,7 @@ static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
 	return err;
 }
 
-int alloc_iommu(struct dmar_drhd_unit *drhd)
+static int alloc_iommu(struct dmar_drhd_unit *drhd)
 {
 	struct intel_iommu *iommu;
 	u32 ver, sts;
@@ -721,12 +728,19 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	return err;
 }
 
-void free_iommu(struct intel_iommu *iommu)
+static void free_iommu(struct intel_iommu *iommu)
 {
-	if (!iommu)
-		return;
+	if (iommu->irq) {
+		free_irq(iommu->irq, iommu);
+		irq_set_handler_data(iommu->irq, NULL);
+		destroy_irq(iommu->irq);
+	}
 
-	free_dmar_iommu(iommu);
+	if (iommu->qi) {
+		free_page((unsigned long)iommu->qi->desc);
+		kfree(iommu->qi->desc_status);
+		kfree(iommu->qi);
+	}
 
 	if (iommu->reg)
 		unmap_iommu(iommu);
@@ -1050,7 +1064,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
 	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
 	if (!desc_page) {
 		kfree(qi);
-		iommu->qi = 0;
+		iommu->qi = NULL;
 		return -ENOMEM;
 	}
 
@@ -1060,7 +1074,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
 	if (!qi->desc_status) {
 		free_page((unsigned long) qi->desc);
 		kfree(qi);
-		iommu->qi = 0;
+		iommu->qi = NULL;
 		return -ENOMEM;
 	}
@@ -1111,9 +1125,7 @@ static const char *irq_remap_fault_reasons[] =
 	"Blocked an interrupt request due to source-id verification failure",
 };
 
-#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(fault_reason_strings) - 1)
-
-const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
+static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
 {
 	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
 					ARRAY_SIZE(irq_remap_fault_reasons))) {
@@ -1303,15 +1315,14 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
 int __init enable_drhd_fault_handling(void)
 {
 	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
 
 	/*
 	 * Enable fault control interrupt.
 	 */
-	for_each_drhd_unit(drhd) {
-		int ret;
-		struct intel_iommu *iommu = drhd->iommu;
+	for_each_iommu(iommu, drhd) {
 		u32 fault_status;
+		int ret = dmar_set_interrupt(iommu);
 
-		ret = dmar_set_interrupt(iommu);
 		if (ret) {
 			pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",
@@ -1366,4 +1377,22 @@ int __init dmar_ir_support(void)
 		return 0;
 	return dmar->flags & 0x1;
 }
+
+static int __init dmar_free_unused_resources(void)
+{
+	struct dmar_drhd_unit *dmaru, *dmaru_n;
+
+	/* DMAR units are in use */
+	if (irq_remapping_enabled || intel_iommu_enabled)
+		return 0;
+
+	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
+		list_del(&dmaru->list);
+		dmar_free_drhd(dmaru);
+	}
+
+	return 0;
+}
+
+late_initcall(dmar_free_unused_resources);
+
 IOMMU_INIT_POST(detect_intel_iommu);
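A theme of the error-path fixes in this file is reference balance: each pci_dev stored in a device-scope array was obtained with a reference held, so teardown must call pci_dev_put() once per entry before freeing the array, and should poison the pointers to guard against double frees. Restated as a standalone sketch (example_free_scope is a hypothetical name for the pattern dmar_free_dev_scope() implements):

    void example_free_scope(struct pci_dev ***devices, int *cnt)
    {
        if (*devices && *cnt) {
            while (--*cnt >= 0)
                pci_dev_put((*devices)[*cnt]);  /* drop each held ref */
            kfree(*devices);
            *devices = NULL;    /* guard against double free */
            *cnt = 0;
        }
    }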
@@ -691,7 +691,7 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain,
 	 * Use LIODN of the PCI controller while attaching a
 	 * PCI device.
 	 */
-	if (dev->bus == &pci_bus_type) {
+	if (dev_is_pci(dev)) {
 		pdev = to_pci_dev(dev);
 		pci_ctl = pci_bus_to_host(pdev->bus);
 		/*
@@ -729,7 +729,7 @@ static void fsl_pamu_detach_device(struct iommu_domain *domain,
 	 * Use LIODN of the PCI controller while detaching a
 	 * PCI device.
 	 */
-	if (dev->bus == &pci_bus_type) {
+	if (dev_is_pci(dev)) {
 		pdev = to_pci_dev(dev);
 		pci_ctl = pci_bus_to_host(pdev->bus);
 		/*
@@ -1056,7 +1056,7 @@ static int fsl_pamu_add_device(struct device *dev)
 	 * For platform devices we allocate a separate group for
 	 * each of the devices.
 	 */
-	if (dev->bus == &pci_bus_type) {
+	if (dev_is_pci(dev)) {
 		pdev = to_pci_dev(dev);
 		/* Don't create device groups for virtual PCI bridges */
 		if (pdev->subordinate)
...
This diff is collapsed.
@@ -40,13 +40,15 @@ static int ir_ioapic_num, ir_hpet_num;
 
 static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
 
+static int __init parse_ioapics_under_ir(void);
+
 static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
 {
 	struct irq_cfg *cfg = irq_get_chip_data(irq);
 	return cfg ? &cfg->irq_2_iommu : NULL;
 }
 
-int get_irte(int irq, struct irte *entry)
+static int get_irte(int irq, struct irte *entry)
 {
 	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	unsigned long flags;
@@ -69,19 +71,13 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	struct ir_table *table = iommu->ir_table;
 	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	struct irq_cfg *cfg = irq_get_chip_data(irq);
-	u16 index, start_index;
 	unsigned int mask = 0;
 	unsigned long flags;
-	int i;
+	int index;
 
 	if (!count || !irq_iommu)
 		return -1;
 
-	/*
-	 * start the IRTE search from index 0.
-	 */
-	index = start_index = 0;
-
 	if (count > 1) {
 		count = __roundup_pow_of_two(count);
 		mask = ilog2(count);
@@ -96,32 +92,17 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	}
 
 	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
-	do {
-		for (i = index; i < index + count; i++)
-			if (table->base[i].present)
-				break;
-		/* empty index found */
-		if (i == index + count)
-			break;
-
-		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
-
-		if (index == start_index) {
-			raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-			printk(KERN_ERR "can't allocate an IRTE\n");
-			return -1;
-		}
-	} while (1);
-
-	for (i = index; i < index + count; i++)
-		table->base[i].present = 1;
-
-	cfg->remapped = 1;
-	irq_iommu->iommu = iommu;
-	irq_iommu->irte_index = index;
-	irq_iommu->sub_handle = 0;
-	irq_iommu->irte_mask = mask;
-
+	index = bitmap_find_free_region(table->bitmap,
+					INTR_REMAP_TABLE_ENTRIES, mask);
+	if (index < 0) {
+		pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
+	} else {
+		cfg->remapped = 1;
+		irq_iommu->iommu = iommu;
+		irq_iommu->irte_index = index;
+		irq_iommu->sub_handle = 0;
+		irq_iommu->irte_mask = mask;
+	}
 	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return index;
@@ -254,6 +235,8 @@ static int clear_entries(struct irq_2_iommu *irq_iommu)
 		set_64bit(&entry->low, 0);
 		set_64bit(&entry->high, 0);
 	}
+	bitmap_release_region(iommu->ir_table->bitmap, index,
+			      irq_iommu->irte_mask);
 
 	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 }
@@ -336,7 +319,7 @@ static int set_ioapic_sid(struct irte *irte, int apic)
 		return -1;
 	}
 
-	set_irte_sid(irte, 1, 0, sid);
+	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);
 
 	return 0;
 }
@@ -453,6 +436,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
 {
 	struct ir_table *ir_table;
 	struct page *pages;
+	unsigned long *bitmap;
 
 	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
 					     GFP_ATOMIC);
@@ -464,13 +448,23 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
 				 INTR_REMAP_PAGE_ORDER);
 
 	if (!pages) {
-		printk(KERN_ERR "failed to allocate pages of order %d\n",
-		       INTR_REMAP_PAGE_ORDER);
+		pr_err("IR%d: failed to allocate pages of order %d\n",
+		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
 		kfree(iommu->ir_table);
 		return -ENOMEM;
 	}
 
+	bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
+			 sizeof(long), GFP_ATOMIC);
+	if (bitmap == NULL) {
+		pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
+		__free_pages(pages, INTR_REMAP_PAGE_ORDER);
+		kfree(ir_table);
+		return -ENOMEM;
+	}
+
 	ir_table->base = page_address(pages);
+	ir_table->bitmap = bitmap;
 
 	iommu_set_irq_remapping(iommu, mode);
 	return 0;
@@ -521,6 +515,7 @@ static int __init dmar_x2apic_optout(void)
 static int __init intel_irq_remapping_supported(void)
 {
 	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
 
 	if (disable_irq_remap)
 		return 0;
@@ -539,12 +534,9 @@ static int __init intel_irq_remapping_supported(void)
 	if (!dmar_ir_support())
 		return 0;
 
-	for_each_drhd_unit(drhd) {
-		struct intel_iommu *iommu = drhd->iommu;
-
+	for_each_iommu(iommu, drhd)
 		if (!ecap_ir_support(iommu->ecap))
 			return 0;
-	}
 
 	return 1;
 }
@@ -552,6 +544,7 @@ static int __init intel_irq_remapping_supported(void)
 static int __init intel_enable_irq_remapping(void)
 {
 	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
 	bool x2apic_present;
 	int setup = 0;
 	int eim = 0;
@@ -564,6 +557,8 @@ static int __init intel_enable_irq_remapping(void)
 	}
 
 	if (x2apic_present) {
+		pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
+
 		eim = !dmar_x2apic_optout();
 		if (!eim)
 			printk(KERN_WARNING
@@ -572,9 +567,7 @@ static int __init intel_enable_irq_remapping(void)
 			"Use 'intremap=no_x2apic_optout' to override BIOS request.\n");
 	}
 
-	for_each_drhd_unit(drhd) {
-		struct intel_iommu *iommu = drhd->iommu;
-
+	for_each_iommu(iommu, drhd) {
 		/*
 		 * If the queued invalidation is already initialized,
 		 * shouldn't disable it.
@@ -599,9 +592,7 @@ static int __init intel_enable_irq_remapping(void)
 	/*
 	 * check for the Interrupt-remapping support
 	 */
-	for_each_drhd_unit(drhd) {
-		struct intel_iommu *iommu = drhd->iommu;
-
+	for_each_iommu(iommu, drhd) {
 		if (!ecap_ir_support(iommu->ecap))
 			continue;
@@ -615,10 +606,8 @@ static int __init intel_enable_irq_remapping(void)
 	/*
 	 * Enable queued invalidation for all the DRHD's.
 	 */
-	for_each_drhd_unit(drhd) {
-		int ret;
-		struct intel_iommu *iommu = drhd->iommu;
-
-		ret = dmar_enable_qi(iommu);
+	for_each_iommu(iommu, drhd) {
+		int ret = dmar_enable_qi(iommu);
 
 		if (ret) {
 			printk(KERN_ERR "DRHD %Lx: failed to enable queued, "
@@ -631,9 +620,7 @@ static int __init intel_enable_irq_remapping(void)
 	/*
 	 * Setup Interrupt-remapping for all the DRHD's now.
 	 */
-	for_each_drhd_unit(drhd) {
-		struct intel_iommu *iommu = drhd->iommu;
-
+	for_each_iommu(iommu, drhd) {
 		if (!ecap_ir_support(iommu->ecap))
 			continue;
@@ -774,22 +761,20 @@ static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
  * Finds the association between IOAPIC's and its Interrupt-remapping
  * hardware unit.
  */
-int __init parse_ioapics_under_ir(void)
+static int __init parse_ioapics_under_ir(void)
 {
 	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
 	int ir_supported = 0;
 	int ioapic_idx;
 
-	for_each_drhd_unit(drhd) {
-		struct intel_iommu *iommu = drhd->iommu;
-
+	for_each_iommu(iommu, drhd)
 		if (ecap_ir_support(iommu->ecap)) {
 			if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
 				return -1;
 
 			ir_supported = 1;
 		}
-	}
 
 	if (!ir_supported)
 		return 0;
@@ -807,7 +792,7 @@ int __init parse_ioapics_under_ir(void)
 	return 1;
 }
 
-int __init ir_dev_scope_init(void)
+static int __init ir_dev_scope_init(void)
 {
 	if (!irq_remapping_enabled)
 		return 0;
...
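The new IRTE allocator replaces the hand-rolled linear scan with the kernel's bitmap-region API, which hands out power-of-two-aligned runs of bits: bitmap_find_free_region() returns the start index of a free region (or a negative value on exhaustion) and marks it busy, while bitmap_release_region() frees it again. A minimal usage sketch (a table size of 256 entries is chosen purely for illustration):

    #include <linux/bitmap.h>

    static DECLARE_BITMAP(table_bitmap, 256);

    /* order = log2 of the number of contiguous entries wanted */
    static int alloc_entries(unsigned int order)
    {
        return bitmap_find_free_region(table_bitmap, 256, order);
    }

    static void free_entries(int index, unsigned int order)
    {
        bitmap_release_region(table_bitmap, index, order);
    }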
@@ -150,7 +150,7 @@ static int irq_remapping_setup_msi_irqs(struct pci_dev *dev,
 	return do_setup_msix_irqs(dev, nvec);
 }
 
-void eoi_ioapic_pin_remapped(int apic, int pin, int vector)
+static void eoi_ioapic_pin_remapped(int apic, int pin, int vector)
 {
 	/*
 	 * Intr-remapping uses pin number as the virtual vector
@@ -295,8 +295,8 @@ int setup_ioapic_remapped_entry(int irq,
 					     vector, attr);
 }
 
-int set_remapped_irq_affinity(struct irq_data *data, const struct cpumask *mask,
-			      bool force)
+static int set_remapped_irq_affinity(struct irq_data *data,
+				     const struct cpumask *mask, bool force)
 {
 	if (!config_enabled(CONFIG_SMP) || !remap_ops ||
 	    !remap_ops->set_affinity)
...
@@ -20,6 +20,7 @@
 #include <linux/export.h>
 #include <linux/limits.h>
 #include <linux/of.h>
+#include <linux/of_iommu.h>
 
 /**
  * of_get_dma_window - Parse *dma-window property and returns 0 if found.
...
@@ -380,14 +380,13 @@ int ipmmu_iommu_init(struct shmobile_ipmmu *ipmmu)
 		kmem_cache_destroy(l1cache);
 		return -ENOMEM;
 	}
-	archdata = kmalloc(sizeof(*archdata), GFP_KERNEL);
+	archdata = kzalloc(sizeof(*archdata), GFP_KERNEL);
 	if (!archdata) {
 		kmem_cache_destroy(l1cache);
 		kmem_cache_destroy(l2cache);
 		return -ENOMEM;
 	}
 	spin_lock_init(&archdata->attach_lock);
-	archdata->attached = NULL;
 	archdata->ipmmu = ipmmu;
 	ipmmu_archdata = archdata;
 	bus_set_iommu(&platform_bus_type, &shmobile_iommu_ops);
...
@@ -35,12 +35,12 @@ void ipmmu_tlb_flush(struct shmobile_ipmmu *ipmmu)
 	if (!ipmmu)
 		return;
 
-	mutex_lock(&ipmmu->flush_lock);
+	spin_lock(&ipmmu->flush_lock);
 	if (ipmmu->tlb_enabled)
 		ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH | IMCTR1_TLBEN);
 	else
 		ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH);
-	mutex_unlock(&ipmmu->flush_lock);
+	spin_unlock(&ipmmu->flush_lock);
 }
 
 void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
@@ -49,7 +49,7 @@ void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
 	if (!ipmmu)
 		return;
 
-	mutex_lock(&ipmmu->flush_lock);
+	spin_lock(&ipmmu->flush_lock);
 	switch (size) {
 	default:
 		ipmmu->tlb_enabled = 0;
@@ -85,7 +85,7 @@ void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
 	}
 	ipmmu_reg_write(ipmmu, IMTTBR, phys);
 	ipmmu_reg_write(ipmmu, IMASID, asid);
-	mutex_unlock(&ipmmu->flush_lock);
+	spin_unlock(&ipmmu->flush_lock);
 }
 
 static int ipmmu_probe(struct platform_device *pdev)
@@ -104,7 +104,7 @@ static int ipmmu_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev, "cannot allocate device data\n");
 		return -ENOMEM;
 	}
-	mutex_init(&ipmmu->flush_lock);
+	spin_lock_init(&ipmmu->flush_lock);
 	ipmmu->dev = &pdev->dev;
 	ipmmu->ipmmu_base = devm_ioremap_nocache(&pdev->dev, res->start,
 						 resource_size(res));
...
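The lock-type change matters because the TLB flush and set paths can be reached from IOMMU map/unmap callbacks running in atomic context, where a mutex (which may sleep) is not allowed; a spinlock busy-waits and is safe there as long as the critical section stays short and never sleeps. A minimal sketch of the resulting pattern, with a hypothetical flush body:

    static DEFINE_SPINLOCK(flush_lock);

    static void tlb_flush_atomic_safe(void)
    {
        spin_lock(&flush_lock);
        /* program the IMCTR1 flush registers here; must not sleep */
        spin_unlock(&flush_lock);
    }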
@@ -14,7 +14,7 @@ struct shmobile_ipmmu {
 	struct device *dev;
 	void __iomem *ipmmu_base;
 	int tlb_enabled;
-	struct mutex flush_lock;
+	spinlock_t flush_lock;
 	const char * const *dev_names;
 	unsigned int num_dev_names;
 };
...
@@ -27,7 +27,6 @@ struct root_entry;
 
 #ifdef CONFIG_INTEL_IOMMU
-extern void free_dmar_iommu(struct intel_iommu *iommu);
 extern int iommu_calculate_agaw(struct intel_iommu *iommu);
 extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
 extern int dmar_disabled;
@@ -41,9 +40,6 @@ static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
 {
 	return 0;
 }
-static inline void free_dmar_iommu(struct intel_iommu *iommu)
-{
-}
 #define dmar_disabled	(1)
 #define intel_iommu_enabled (0)
 #endif
...
@@ -33,6 +33,7 @@ struct acpi_dmar_header;
 #define DMAR_X2APIC_OPT_OUT	0x2
 
 struct intel_iommu;
+
 #ifdef CONFIG_DMAR_TABLE
 extern struct acpi_table_header *dmar_tbl;
 struct dmar_drhd_unit {
@@ -52,6 +53,10 @@ extern struct list_head dmar_drhd_units;
 #define for_each_drhd_unit(drhd) \
 	list_for_each_entry(drhd, &dmar_drhd_units, list)
 
+#define for_each_active_drhd_unit(drhd)				\
+	list_for_each_entry(drhd, &dmar_drhd_units, list)	\
+		if (drhd->ignored) {} else
+
 #define for_each_active_iommu(i, drhd)				\
 	list_for_each_entry(drhd, &dmar_drhd_units, list)	\
 		if (i=drhd->iommu, drhd->ignored) {} else
@@ -62,13 +67,13 @@ extern struct list_head dmar_drhd_units;
 
 extern int dmar_table_init(void);
 extern int dmar_dev_scope_init(void);
+extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
+				struct pci_dev ***devices, u16 segment);
+extern void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt);
 
 /* Intel IOMMU detection */
 extern int detect_intel_iommu(void);
 extern int enable_drhd_fault_handling(void);
-extern int parse_ioapics_under_ir(void);
-extern int alloc_iommu(struct dmar_drhd_unit *);
 #else
 static inline int detect_intel_iommu(void)
 {
@@ -157,8 +162,6 @@ struct dmar_atsr_unit {
 int dmar_parse_rmrr_atsr_dev(void);
 extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header);
 extern int dmar_parse_one_atsr(struct acpi_dmar_header *header);
-extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
-				struct pci_dev ***devices, u16 segment);
 extern int intel_iommu_init(void);
 #else /* !CONFIG_INTEL_IOMMU: */
 static inline int intel_iommu_init(void) { return -ENODEV; }
...
@@ -288,6 +288,7 @@ struct q_inval {
 
 struct ir_table {
 	struct irte *base;
+	unsigned long *bitmap;
 };
 #endif
 
@@ -347,8 +348,6 @@ static inline void __iommu_flush_cache(
 extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
 extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);
 
-extern int alloc_iommu(struct dmar_drhd_unit *drhd);
-extern void free_iommu(struct intel_iommu *iommu);
 extern int dmar_enable_qi(struct intel_iommu *iommu);
 extern void dmar_disable_qi(struct intel_iommu *iommu);
 extern int dmar_reenable_qi(struct intel_iommu *iommu);
...
@@ -24,9 +24,10 @@
 #include <linux/types.h>
 #include <trace/events/iommu.h>
 
-#define IOMMU_READ	(1)
-#define IOMMU_WRITE	(2)
-#define IOMMU_CACHE	(4) /* DMA cache coherency */
+#define IOMMU_READ	(1 << 0)
+#define IOMMU_WRITE	(1 << 1)
+#define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
+#define IOMMU_EXEC	(1 << 3)
 
 struct iommu_ops;
 struct iommu_group;
@@ -247,6 +248,11 @@ static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
 	return NULL;
 }
 
+static inline struct iommu_group *iommu_group_get_by_id(int id)
+{
+	return NULL;
+}
+
 static inline void iommu_domain_free(struct iommu_domain *domain)
 {
 }
@@ -291,8 +297,8 @@ static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
 	return 0;
 }
 
-static inline int domain_has_cap(struct iommu_domain *domain,
-				 unsigned long cap)
+static inline int iommu_domain_has_cap(struct iommu_domain *domain,
+				       unsigned long cap)
 {
 	return 0;
 }
...
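With the prot values redefined as a proper bitmask, callers can OR the new IOMMU_EXEC bit into the existing flags they pass to iommu_map(). A hypothetical caller mapping a read-only, executable region might look like this (map_code_region is an illustrative name, not kernel API):

    #include <linux/iommu.h>

    static int map_code_region(struct iommu_domain *domain, unsigned long iova,
                               phys_addr_t paddr, size_t size)
    {
        return iommu_map(domain, iova, paddr, size,
                         IOMMU_READ | IOMMU_EXEC);
    }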