Commit 23971bdf authored by Linus Torvalds

Merge tag 'iommu-updates-v3.18' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU updates from Joerg Roedel:
 "This pull-request includes:

   - change in the IOMMU-API to convert the former iommu_domain_capable
     function to just iommu_capable

   - various fixes in handling RMRR ranges for the VT-d driver (one fix
     requires a device driver core change which was acked by Greg KH)

   - the AMD IOMMU driver now assigns and deassigns complete alias
     groups to fix issues with devices using the wrong PCI request-id

   - MMU-401 support for the ARM SMMU driver

   - multi-master IOMMU group support for the ARM SMMU driver

   - various other small fixes all over the place"
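
A minimal sketch of the capability-API conversion carried out by several of
the commits below (vfio, kvm, usnic, and the per-driver conversions): the
per-domain iommu_domain_has_cap() query is replaced by the bus-wide
iommu_capable() query. The has_coherent_dma() wrapper here is illustrative
only, not a function from this series.

    #include <linux/device.h>
    #include <linux/iommu.h>

    /* Illustrative wrapper, not part of this series. */
    static bool has_coherent_dma(struct device *dev)
    {
            /*
             * Old form, removed in this merge:
             *   iommu_domain_has_cap(domain, IOMMU_CAP_CACHE_COHERENCY)
             * New form, a bus-wide query:
             */
            return iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY);
    }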

* tag 'iommu-updates-v3.18' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (41 commits)
  iommu/vt-d: Work around broken RMRR firmware entries
  iommu/vt-d: Store bus information in RMRR PCI device path
  iommu/vt-d: Only remove domain when device is removed
  driver core: Add BUS_NOTIFY_REMOVED_DEVICE event
  iommu/amd: Fix devid mapping for ivrs_ioapic override
  iommu/irq_remapping: Fix the regression of hpet irq remapping
  iommu: Fix bus notifier breakage
  iommu/amd: Split init_iommu_group() from iommu_init_device()
  iommu: Rework iommu_group_get_for_pci_dev()
  iommu: Make of_device_id array const
  amd_iommu: do not dereference a NULL pointer address.
  iommu/omap: Remove omap_iommu unused owner field
  iommu: Remove iommu_domain_has_cap() API function
  IB/usnic: Convert to use new iommu_capable() API function
  vfio: Convert to use new iommu_capable() API function
  kvm: iommu: Convert to use new iommu_capable() API function
  iommu/tegra: Convert to iommu_capable() API function
  iommu/msm: Convert to iommu_capable() API function
  iommu/vt-d: Convert to iommu_capable() API function
  iommu/fsl: Convert to iommu_capable() API function
  ...
parents c0fa2373 09b5269a
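
The driver-core hunk below adds a BUS_NOTIFY_REMOVED_DEVICE event that fires
after device_del() has finished tearing the device down, and the intel-iommu
device notifier is switched over to it. A minimal sketch of a bus notifier
using the new event; the handler name is illustrative, not from this series.

    #include <linux/device.h>
    #include <linux/notifier.h>

    /* Illustrative notifier callback, not part of this series. */
    static int example_bus_notifier(struct notifier_block *nb,
                                    unsigned long action, void *data)
    {
            struct device *dev = data;

            /*
             * BUS_NOTIFY_DEL_DEVICE now fires before the device is removed;
             * BUS_NOTIFY_REMOVED_DEVICE fires once device_del() is complete,
             * so per-device IOMMU state can be torn down safely here.
             */
            if (action != BUS_NOTIFY_REMOVED_DEVICE)
                    return NOTIFY_DONE;

            dev_info(dev, "device fully removed\n");
            return NOTIFY_OK;
    }
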
...@@ -14,6 +14,7 @@ conditions. ...@@ -14,6 +14,7 @@ conditions.
"arm,smmu-v1" "arm,smmu-v1"
"arm,smmu-v2" "arm,smmu-v2"
"arm,mmu-400" "arm,mmu-400"
"arm,mmu-401"
"arm,mmu-500" "arm,mmu-500"
depending on the particular implementation and/or the depending on the particular implementation and/or the
......
...@@ -1211,6 +1211,9 @@ void device_del(struct device *dev) ...@@ -1211,6 +1211,9 @@ void device_del(struct device *dev)
*/ */
if (platform_notify_remove) if (platform_notify_remove)
platform_notify_remove(dev); platform_notify_remove(dev);
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_REMOVED_DEVICE, dev);
kobject_uevent(&dev->kobj, KOBJ_REMOVE); kobject_uevent(&dev->kobj, KOBJ_REMOVE);
cleanup_device_parent(dev); cleanup_device_parent(dev);
kobject_del(&dev->kobj); kobject_del(&dev->kobj);
......
...@@ -507,7 +507,7 @@ int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev) ...@@ -507,7 +507,7 @@ int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
if (err) if (err)
goto out_free_dev; goto out_free_dev;
if (!iommu_domain_has_cap(pd->domain, IOMMU_CAP_CACHE_COHERENCY)) { if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) {
usnic_err("IOMMU of %s does not support cache coherency\n", usnic_err("IOMMU of %s does not support cache coherency\n",
dev_name(dev)); dev_name(dev));
err = -EINVAL; err = -EINVAL;
......
...@@ -87,6 +87,27 @@ int amd_iommu_max_glx_val = -1; ...@@ -87,6 +87,27 @@ int amd_iommu_max_glx_val = -1;
static struct dma_map_ops amd_iommu_dma_ops; static struct dma_map_ops amd_iommu_dma_ops;
/*
* This struct contains device specific data for the IOMMU
*/
struct iommu_dev_data {
struct list_head list; /* For domain->dev_list */
struct list_head dev_data_list; /* For global dev_data_list */
struct list_head alias_list; /* Link alias-groups together */
struct iommu_dev_data *alias_data;/* The alias dev_data */
struct protection_domain *domain; /* Domain the device is bound to */
u16 devid; /* PCI Device ID */
bool iommu_v2; /* Device can make use of IOMMUv2 */
bool passthrough; /* Default for device is pt_domain */
struct {
bool enabled;
int qdep;
} ats; /* ATS state */
bool pri_tlp; /* PASID TLB required for
PPR completions */
u32 errata; /* Bitmap for errata to apply */
};
/* /*
* general struct to manage commands send to an IOMMU * general struct to manage commands send to an IOMMU
*/ */
...@@ -114,8 +135,9 @@ static struct iommu_dev_data *alloc_dev_data(u16 devid) ...@@ -114,8 +135,9 @@ static struct iommu_dev_data *alloc_dev_data(u16 devid)
if (!dev_data) if (!dev_data)
return NULL; return NULL;
INIT_LIST_HEAD(&dev_data->alias_list);
dev_data->devid = devid; dev_data->devid = devid;
atomic_set(&dev_data->bind, 0);
spin_lock_irqsave(&dev_data_list_lock, flags); spin_lock_irqsave(&dev_data_list_lock, flags);
list_add_tail(&dev_data->dev_data_list, &dev_data_list); list_add_tail(&dev_data->dev_data_list, &dev_data_list);
...@@ -260,17 +282,13 @@ static bool check_device(struct device *dev) ...@@ -260,17 +282,13 @@ static bool check_device(struct device *dev)
return true; return true;
} }
static int init_iommu_group(struct device *dev) static void init_iommu_group(struct device *dev)
{ {
struct iommu_group *group; struct iommu_group *group;
group = iommu_group_get_for_dev(dev); group = iommu_group_get_for_dev(dev);
if (!IS_ERR(group))
if (IS_ERR(group))
return PTR_ERR(group);
iommu_group_put(group); iommu_group_put(group);
return 0;
} }
static int __last_alias(struct pci_dev *pdev, u16 alias, void *data) static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
...@@ -340,7 +358,6 @@ static int iommu_init_device(struct device *dev) ...@@ -340,7 +358,6 @@ static int iommu_init_device(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev); struct pci_dev *pdev = to_pci_dev(dev);
struct iommu_dev_data *dev_data; struct iommu_dev_data *dev_data;
u16 alias; u16 alias;
int ret;
if (dev->archdata.iommu) if (dev->archdata.iommu)
return 0; return 0;
...@@ -362,12 +379,9 @@ static int iommu_init_device(struct device *dev) ...@@ -362,12 +379,9 @@ static int iommu_init_device(struct device *dev)
return -ENOTSUPP; return -ENOTSUPP;
} }
dev_data->alias_data = alias_data; dev_data->alias_data = alias_data;
}
ret = init_iommu_group(dev); /* Add device to the alias_list */
if (ret) { list_add(&dev_data->alias_list, &alias_data->alias_list);
free_dev_data(dev_data);
return ret;
} }
if (pci_iommuv2_capable(pdev)) { if (pci_iommuv2_capable(pdev)) {
...@@ -455,6 +469,15 @@ int __init amd_iommu_init_devices(void) ...@@ -455,6 +469,15 @@ int __init amd_iommu_init_devices(void)
goto out_free; goto out_free;
} }
/*
* Initialize IOMMU groups only after iommu_init_device() has
* had a chance to populate any IVRS defined aliases.
*/
for_each_pci_dev(pdev) {
if (check_device(&pdev->dev))
init_iommu_group(&pdev->dev);
}
return 0; return 0;
out_free: out_free:
...@@ -1368,6 +1391,9 @@ static int iommu_map_page(struct protection_domain *dom, ...@@ -1368,6 +1391,9 @@ static int iommu_map_page(struct protection_domain *dom,
count = PAGE_SIZE_PTE_COUNT(page_size); count = PAGE_SIZE_PTE_COUNT(page_size);
pte = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL); pte = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
if (!pte)
return -ENOMEM;
for (i = 0; i < count; ++i) for (i = 0; i < count; ++i)
if (IOMMU_PTE_PRESENT(pte[i])) if (IOMMU_PTE_PRESENT(pte[i]))
return -EBUSY; return -EBUSY;
...@@ -2122,35 +2148,29 @@ static void do_detach(struct iommu_dev_data *dev_data) ...@@ -2122,35 +2148,29 @@ static void do_detach(struct iommu_dev_data *dev_data)
static int __attach_device(struct iommu_dev_data *dev_data, static int __attach_device(struct iommu_dev_data *dev_data,
struct protection_domain *domain) struct protection_domain *domain)
{ {
struct iommu_dev_data *head, *entry;
int ret; int ret;
/* lock domain */ /* lock domain */
spin_lock(&domain->lock); spin_lock(&domain->lock);
if (dev_data->alias_data != NULL) { head = dev_data;
struct iommu_dev_data *alias_data = dev_data->alias_data;
/* Some sanity checks */
ret = -EBUSY;
if (alias_data->domain != NULL &&
alias_data->domain != domain)
goto out_unlock;
if (dev_data->domain != NULL && if (head->alias_data != NULL)
dev_data->domain != domain) head = head->alias_data;
goto out_unlock;
/* Do real assignment */ /* Now we have the root of the alias group, if any */
if (alias_data->domain == NULL)
do_attach(alias_data, domain);
atomic_inc(&alias_data->bind); ret = -EBUSY;
} if (head->domain != NULL)
goto out_unlock;
if (dev_data->domain == NULL) /* Attach alias group root */
do_attach(dev_data, domain); do_attach(head, domain);
atomic_inc(&dev_data->bind); /* Attach other devices in the alias group */
list_for_each_entry(entry, &head->alias_list, alias_list)
do_attach(entry, domain);
ret = 0; ret = 0;
...@@ -2298,6 +2318,7 @@ static int attach_device(struct device *dev, ...@@ -2298,6 +2318,7 @@ static int attach_device(struct device *dev,
*/ */
static void __detach_device(struct iommu_dev_data *dev_data) static void __detach_device(struct iommu_dev_data *dev_data)
{ {
struct iommu_dev_data *head, *entry;
struct protection_domain *domain; struct protection_domain *domain;
unsigned long flags; unsigned long flags;
...@@ -2307,15 +2328,14 @@ static void __detach_device(struct iommu_dev_data *dev_data) ...@@ -2307,15 +2328,14 @@ static void __detach_device(struct iommu_dev_data *dev_data)
spin_lock_irqsave(&domain->lock, flags); spin_lock_irqsave(&domain->lock, flags);
if (dev_data->alias_data != NULL) { head = dev_data;
struct iommu_dev_data *alias_data = dev_data->alias_data; if (head->alias_data != NULL)
head = head->alias_data;
if (atomic_dec_and_test(&alias_data->bind)) list_for_each_entry(entry, &head->alias_list, alias_list)
do_detach(alias_data); do_detach(entry);
}
if (atomic_dec_and_test(&dev_data->bind)) do_detach(head);
do_detach(dev_data);
spin_unlock_irqrestore(&domain->lock, flags); spin_unlock_irqrestore(&domain->lock, flags);
...@@ -2415,6 +2435,7 @@ static int device_change_notifier(struct notifier_block *nb, ...@@ -2415,6 +2435,7 @@ static int device_change_notifier(struct notifier_block *nb,
case BUS_NOTIFY_ADD_DEVICE: case BUS_NOTIFY_ADD_DEVICE:
iommu_init_device(dev); iommu_init_device(dev);
init_iommu_group(dev);
/* /*
* dev_data is still NULL and * dev_data is still NULL and
...@@ -3158,7 +3179,6 @@ static void cleanup_domain(struct protection_domain *domain) ...@@ -3158,7 +3179,6 @@ static void cleanup_domain(struct protection_domain *domain)
entry = list_first_entry(&domain->dev_list, entry = list_first_entry(&domain->dev_list,
struct iommu_dev_data, list); struct iommu_dev_data, list);
__detach_device(entry); __detach_device(entry);
atomic_set(&entry->bind, 0);
} }
write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
...@@ -3384,20 +3404,20 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, ...@@ -3384,20 +3404,20 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
return paddr; return paddr;
} }
static int amd_iommu_domain_has_cap(struct iommu_domain *domain, static bool amd_iommu_capable(enum iommu_cap cap)
unsigned long cap)
{ {
switch (cap) { switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY: case IOMMU_CAP_CACHE_COHERENCY:
return 1; return true;
case IOMMU_CAP_INTR_REMAP: case IOMMU_CAP_INTR_REMAP:
return irq_remapping_enabled; return (irq_remapping_enabled == 1);
} }
return 0; return false;
} }
static const struct iommu_ops amd_iommu_ops = { static const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
.domain_init = amd_iommu_domain_init, .domain_init = amd_iommu_domain_init,
.domain_destroy = amd_iommu_domain_destroy, .domain_destroy = amd_iommu_domain_destroy,
.attach_dev = amd_iommu_attach_device, .attach_dev = amd_iommu_attach_device,
...@@ -3405,7 +3425,6 @@ static const struct iommu_ops amd_iommu_ops = { ...@@ -3405,7 +3425,6 @@ static const struct iommu_ops amd_iommu_ops = {
.map = amd_iommu_map, .map = amd_iommu_map,
.unmap = amd_iommu_unmap, .unmap = amd_iommu_unmap,
.iova_to_phys = amd_iommu_iova_to_phys, .iova_to_phys = amd_iommu_iova_to_phys,
.domain_has_cap = amd_iommu_domain_has_cap,
.pgsize_bitmap = AMD_IOMMU_PGSIZES, .pgsize_bitmap = AMD_IOMMU_PGSIZES,
}; };
...@@ -4235,7 +4254,7 @@ static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq, ...@@ -4235,7 +4254,7 @@ static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
return 0; return 0;
} }
static int setup_hpet_msi(unsigned int irq, unsigned int id) static int alloc_hpet_msi(unsigned int irq, unsigned int id)
{ {
struct irq_2_irte *irte_info; struct irq_2_irte *irte_info;
struct irq_cfg *cfg; struct irq_cfg *cfg;
...@@ -4274,6 +4293,6 @@ struct irq_remap_ops amd_iommu_irq_ops = { ...@@ -4274,6 +4293,6 @@ struct irq_remap_ops amd_iommu_irq_ops = {
.compose_msi_msg = compose_msi_msg, .compose_msi_msg = compose_msi_msg,
.msi_alloc_irq = msi_alloc_irq, .msi_alloc_irq = msi_alloc_irq,
.msi_setup_irq = msi_setup_irq, .msi_setup_irq = msi_setup_irq,
.setup_hpet_msi = setup_hpet_msi, .alloc_hpet_msi = alloc_hpet_msi,
}; };
#endif #endif
...@@ -712,7 +712,7 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu, ...@@ -712,7 +712,7 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
set_iommu_for_device(iommu, devid); set_iommu_for_device(iommu, devid);
} }
static int __init add_special_device(u8 type, u8 id, u16 devid, bool cmd_line) static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
{ {
struct devid_map *entry; struct devid_map *entry;
struct list_head *list; struct list_head *list;
...@@ -731,6 +731,8 @@ static int __init add_special_device(u8 type, u8 id, u16 devid, bool cmd_line) ...@@ -731,6 +731,8 @@ static int __init add_special_device(u8 type, u8 id, u16 devid, bool cmd_line)
pr_info("AMD-Vi: Command-line override present for %s id %d - ignoring\n", pr_info("AMD-Vi: Command-line override present for %s id %d - ignoring\n",
type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id); type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);
*devid = entry->devid;
return 0; return 0;
} }
...@@ -739,7 +741,7 @@ static int __init add_special_device(u8 type, u8 id, u16 devid, bool cmd_line) ...@@ -739,7 +741,7 @@ static int __init add_special_device(u8 type, u8 id, u16 devid, bool cmd_line)
return -ENOMEM; return -ENOMEM;
entry->id = id; entry->id = id;
entry->devid = devid; entry->devid = *devid;
entry->cmd_line = cmd_line; entry->cmd_line = cmd_line;
list_add_tail(&entry->list, list); list_add_tail(&entry->list, list);
...@@ -754,7 +756,7 @@ static int __init add_early_maps(void) ...@@ -754,7 +756,7 @@ static int __init add_early_maps(void)
for (i = 0; i < early_ioapic_map_size; ++i) { for (i = 0; i < early_ioapic_map_size; ++i) {
ret = add_special_device(IVHD_SPECIAL_IOAPIC, ret = add_special_device(IVHD_SPECIAL_IOAPIC,
early_ioapic_map[i].id, early_ioapic_map[i].id,
early_ioapic_map[i].devid, &early_ioapic_map[i].devid,
early_ioapic_map[i].cmd_line); early_ioapic_map[i].cmd_line);
if (ret) if (ret)
return ret; return ret;
...@@ -763,7 +765,7 @@ static int __init add_early_maps(void) ...@@ -763,7 +765,7 @@ static int __init add_early_maps(void)
for (i = 0; i < early_hpet_map_size; ++i) { for (i = 0; i < early_hpet_map_size; ++i) {
ret = add_special_device(IVHD_SPECIAL_HPET, ret = add_special_device(IVHD_SPECIAL_HPET,
early_hpet_map[i].id, early_hpet_map[i].id,
early_hpet_map[i].devid, &early_hpet_map[i].devid,
early_hpet_map[i].cmd_line); early_hpet_map[i].cmd_line);
if (ret) if (ret)
return ret; return ret;
...@@ -978,10 +980,17 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, ...@@ -978,10 +980,17 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
PCI_SLOT(devid), PCI_SLOT(devid),
PCI_FUNC(devid)); PCI_FUNC(devid));
set_dev_entry_from_acpi(iommu, devid, e->flags, 0); ret = add_special_device(type, handle, &devid, false);
ret = add_special_device(type, handle, devid, false);
if (ret) if (ret)
return ret; return ret;
/*
* add_special_device might update the devid in case a
* command-line override is present. So call
* set_dev_entry_from_acpi after add_special_device.
*/
set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
break; break;
} }
default: default:
......
...@@ -417,27 +417,6 @@ struct protection_domain { ...@@ -417,27 +417,6 @@ struct protection_domain {
}; };
/*
* This struct contains device specific data for the IOMMU
*/
struct iommu_dev_data {
struct list_head list; /* For domain->dev_list */
struct list_head dev_data_list; /* For global dev_data_list */
struct iommu_dev_data *alias_data;/* The alias dev_data */
struct protection_domain *domain; /* Domain the device is bound to */
atomic_t bind; /* Domain attach reference count */
u16 devid; /* PCI Device ID */
bool iommu_v2; /* Device can make use of IOMMUv2 */
bool passthrough; /* Default for device is pt_domain */
struct {
bool enabled;
int qdep;
} ats; /* ATS state */
bool pri_tlp; /* PASID TLB required for
PPR completions */
u32 errata; /* Bitmap for errata to apply */
};
/* /*
* For dynamic growth the aperture size is split into ranges of 128MB of * For dynamic growth the aperture size is split into ranges of 128MB of
* DMA address space each. This struct represents one such range. * DMA address space each. This struct represents one such range.
......
...@@ -155,6 +155,7 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event) ...@@ -155,6 +155,7 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
if (event == BUS_NOTIFY_ADD_DEVICE) { if (event == BUS_NOTIFY_ADD_DEVICE) {
for (tmp = dev; tmp; tmp = tmp->bus->self) { for (tmp = dev; tmp; tmp = tmp->bus->self) {
level--; level--;
info->path[level].bus = tmp->bus->number;
info->path[level].device = PCI_SLOT(tmp->devfn); info->path[level].device = PCI_SLOT(tmp->devfn);
info->path[level].function = PCI_FUNC(tmp->devfn); info->path[level].function = PCI_FUNC(tmp->devfn);
if (pci_is_root_bus(tmp->bus)) if (pci_is_root_bus(tmp->bus))
...@@ -177,17 +178,33 @@ static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus, ...@@ -177,17 +178,33 @@ static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
int i; int i;
if (info->bus != bus) if (info->bus != bus)
return false; goto fallback;
if (info->level != count) if (info->level != count)
return false; goto fallback;
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
if (path[i].device != info->path[i].device || if (path[i].device != info->path[i].device ||
path[i].function != info->path[i].function) path[i].function != info->path[i].function)
return false; goto fallback;
} }
return true; return true;
fallback:
if (count != 1)
return false;
i = info->level - 1;
if (bus == info->path[i].bus &&
path[0].device == info->path[i].device &&
path[0].function == info->path[i].function) {
pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
bus, path[0].device, path[0].function);
return true;
}
return false;
} }
/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */ /* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
...@@ -247,7 +264,7 @@ int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment, ...@@ -247,7 +264,7 @@ int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
for_each_active_dev_scope(devices, count, index, tmp) for_each_active_dev_scope(devices, count, index, tmp)
if (tmp == &info->dev->dev) { if (tmp == &info->dev->dev) {
rcu_assign_pointer(devices[index].dev, NULL); RCU_INIT_POINTER(devices[index].dev, NULL);
synchronize_rcu(); synchronize_rcu();
put_device(tmp); put_device(tmp);
return 1; return 1;
......
...@@ -32,7 +32,7 @@ ...@@ -32,7 +32,7 @@
typedef u32 sysmmu_iova_t; typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t; typedef u32 sysmmu_pte_t;
/* We does not consider super section mapping (16MB) */ /* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20 #define SECT_ORDER 20
#define LPAGE_ORDER 16 #define LPAGE_ORDER 16
#define SPAGE_ORDER 12 #define SPAGE_ORDER 12
...@@ -307,7 +307,7 @@ static void show_fault_information(const char *name, ...@@ -307,7 +307,7 @@ static void show_fault_information(const char *name,
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id) static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{ {
/* SYSMMU is in blocked when interrupt occurred. */ /* SYSMMU is in blocked state when interrupt occurred. */
struct sysmmu_drvdata *data = dev_id; struct sysmmu_drvdata *data = dev_id;
enum exynos_sysmmu_inttype itype; enum exynos_sysmmu_inttype itype;
sysmmu_iova_t addr = -1; sysmmu_iova_t addr = -1;
...@@ -567,8 +567,8 @@ static void sysmmu_tlb_invalidate_entry(struct device *dev, sysmmu_iova_t iova, ...@@ -567,8 +567,8 @@ static void sysmmu_tlb_invalidate_entry(struct device *dev, sysmmu_iova_t iova,
/* /*
* L2TLB invalidation required * L2TLB invalidation required
* 4KB page: 1 invalidation * 4KB page: 1 invalidation
* 64KB page: 16 invalidation * 64KB page: 16 invalidations
* 1MB page: 64 invalidation * 1MB page: 64 invalidations
* because it is set-associative TLB * because it is set-associative TLB
* with 8-way and 64 sets. * with 8-way and 64 sets.
* 1MB page can be cached in one of all sets. * 1MB page can be cached in one of all sets.
...@@ -714,7 +714,7 @@ static int exynos_iommu_domain_init(struct iommu_domain *domain) ...@@ -714,7 +714,7 @@ static int exynos_iommu_domain_init(struct iommu_domain *domain)
if (!priv->lv2entcnt) if (!priv->lv2entcnt)
goto err_counter; goto err_counter;
/* w/a of System MMU v3.3 to prevent caching 1MiB mapping */ /* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
for (i = 0; i < NUM_LV1ENTRIES; i += 8) { for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
priv->pgtable[i + 0] = ZERO_LV2LINK; priv->pgtable[i + 0] = ZERO_LV2LINK;
priv->pgtable[i + 1] = ZERO_LV2LINK; priv->pgtable[i + 1] = ZERO_LV2LINK;
...@@ -861,14 +861,14 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *priv, ...@@ -861,14 +861,14 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *priv,
pgtable_flush(sent, sent + 1); pgtable_flush(sent, sent + 1);
/* /*
* If pretched SLPD is a fault SLPD in zero_l2_table, FLPD cache * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
* may caches the address of zero_l2_table. This function * FLPD cache may cache the address of zero_l2_table. This
* replaces the zero_l2_table with new L2 page table to write * function replaces the zero_l2_table with new L2 page table
* valid mappings. * to write valid mappings.
* Accessing the valid area may cause page fault since FLPD * Accessing the valid area may cause page fault since FLPD
* cache may still caches zero_l2_table for the valid area * cache may still cache zero_l2_table for the valid area
* instead of new L2 page table that have the mapping * instead of new L2 page table that has the mapping
* information of the valid area * information of the valid area.
* Thus any replacement of zero_l2_table with other valid L2 * Thus any replacement of zero_l2_table with other valid L2
* page table must involve FLPD cache invalidation for System * page table must involve FLPD cache invalidation for System
* MMU v3.3. * MMU v3.3.
...@@ -963,27 +963,27 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size, ...@@ -963,27 +963,27 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
/* /*
* *CAUTION* to the I/O virtual memory managers that support exynos-iommu: * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
* *
* System MMU v3.x have an advanced logic to improve address translation * System MMU v3.x has advanced logic to improve address translation
* performance with caching more page table entries by a page table walk. * performance with caching more page table entries by a page table walk.
* However, the logic has a bug that caching fault page table entries and System * However, the logic has a bug that while caching faulty page table entries,
* MMU reports page fault if the cached fault entry is hit even though the fault * System MMU reports page fault if the cached fault entry is hit even though
* entry is updated to a valid entry after the entry is cached. * the fault entry is updated to a valid entry after the entry is cached.
* To prevent caching fault page table entries which may be updated to valid * To prevent caching faulty page table entries which may be updated to valid
* entries later, the virtual memory manager should care about the w/a about the * entries later, the virtual memory manager should care about the workaround
* problem. The followings describe w/a. * for the problem. The following describes the workaround.
* *
* Any two consecutive I/O virtual address regions must have a hole of 128KiB * Any two consecutive I/O virtual address regions must have a hole of 128KiB
* in maximum to prevent misbehavior of System MMU 3.x. (w/a of h/w bug) * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
* *
* Precisely, any start address of I/O virtual region must be aligned by * Precisely, any start address of I/O virtual region must be aligned with
* the following sizes for System MMU v3.1 and v3.2. * the following sizes for System MMU v3.1 and v3.2.
* System MMU v3.1: 128KiB * System MMU v3.1: 128KiB
* System MMU v3.2: 256KiB * System MMU v3.2: 256KiB
* *
* Because System MMU v3.3 caches page table entries more aggressively, it needs * Because System MMU v3.3 caches page table entries more aggressively, it needs
* more w/a. * more workarounds.
* - Any two consecutive I/O virtual regions must be have a hole of larger size * - Any two consecutive I/O virtual regions must have a hole of size larger
* than or equal size to 128KiB. * than or equal to 128KiB.
* - Start address of an I/O virtual region must be aligned by 128KiB. * - Start address of an I/O virtual region must be aligned by 128KiB.
*/ */
static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova, static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova,
...@@ -1061,7 +1061,8 @@ static size_t exynos_iommu_unmap(struct iommu_domain *domain, ...@@ -1061,7 +1061,8 @@ static size_t exynos_iommu_unmap(struct iommu_domain *domain,
goto err; goto err;
} }
*ent = ZERO_LV2LINK; /* w/a for h/w bug in Sysmem MMU v3.3 */ /* workaround for h/w bug in System MMU v3.3 */
*ent = ZERO_LV2LINK;
pgtable_flush(ent, ent + 1); pgtable_flush(ent, ent + 1);
size = SECT_SIZE; size = SECT_SIZE;
goto done; goto done;
......
...@@ -411,8 +411,7 @@ static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain, ...@@ -411,8 +411,7 @@ static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
return get_phys_addr(dma_domain, iova); return get_phys_addr(dma_domain, iova);
} }
static int fsl_pamu_domain_has_cap(struct iommu_domain *domain, static bool fsl_pamu_capable(enum iommu_cap cap)
unsigned long cap)
{ {
return cap == IOMMU_CAP_CACHE_COHERENCY; return cap == IOMMU_CAP_CACHE_COHERENCY;
} }
...@@ -1080,6 +1079,7 @@ static u32 fsl_pamu_get_windows(struct iommu_domain *domain) ...@@ -1080,6 +1079,7 @@ static u32 fsl_pamu_get_windows(struct iommu_domain *domain)
} }
static const struct iommu_ops fsl_pamu_ops = { static const struct iommu_ops fsl_pamu_ops = {
.capable = fsl_pamu_capable,
.domain_init = fsl_pamu_domain_init, .domain_init = fsl_pamu_domain_init,
.domain_destroy = fsl_pamu_domain_destroy, .domain_destroy = fsl_pamu_domain_destroy,
.attach_dev = fsl_pamu_attach_device, .attach_dev = fsl_pamu_attach_device,
...@@ -1089,7 +1089,6 @@ static const struct iommu_ops fsl_pamu_ops = { ...@@ -1089,7 +1089,6 @@ static const struct iommu_ops fsl_pamu_ops = {
.domain_get_windows = fsl_pamu_get_windows, .domain_get_windows = fsl_pamu_get_windows,
.domain_set_windows = fsl_pamu_set_windows, .domain_set_windows = fsl_pamu_set_windows,
.iova_to_phys = fsl_pamu_iova_to_phys, .iova_to_phys = fsl_pamu_iova_to_phys,
.domain_has_cap = fsl_pamu_domain_has_cap,
.domain_set_attr = fsl_pamu_set_domain_attr, .domain_set_attr = fsl_pamu_set_domain_attr,
.domain_get_attr = fsl_pamu_get_domain_attr, .domain_get_attr = fsl_pamu_get_domain_attr,
.add_device = fsl_pamu_add_device, .add_device = fsl_pamu_add_device,
......
...@@ -3865,8 +3865,7 @@ static int device_notifier(struct notifier_block *nb, ...@@ -3865,8 +3865,7 @@ static int device_notifier(struct notifier_block *nb,
if (iommu_dummy(dev)) if (iommu_dummy(dev))
return 0; return 0;
if (action != BUS_NOTIFY_UNBOUND_DRIVER && if (action != BUS_NOTIFY_REMOVED_DEVICE)
action != BUS_NOTIFY_DEL_DEVICE)
return 0; return 0;
/* /*
...@@ -4415,17 +4414,14 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, ...@@ -4415,17 +4414,14 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
return phys; return phys;
} }
static int intel_iommu_domain_has_cap(struct iommu_domain *domain, static bool intel_iommu_capable(enum iommu_cap cap)
unsigned long cap)
{ {
struct dmar_domain *dmar_domain = domain->priv;
if (cap == IOMMU_CAP_CACHE_COHERENCY) if (cap == IOMMU_CAP_CACHE_COHERENCY)
return dmar_domain->iommu_snooping; return domain_update_iommu_snooping(NULL) == 1;
if (cap == IOMMU_CAP_INTR_REMAP) if (cap == IOMMU_CAP_INTR_REMAP)
return irq_remapping_enabled; return irq_remapping_enabled == 1;
return 0; return false;
} }
static int intel_iommu_add_device(struct device *dev) static int intel_iommu_add_device(struct device *dev)
...@@ -4464,6 +4460,7 @@ static void intel_iommu_remove_device(struct device *dev) ...@@ -4464,6 +4460,7 @@ static void intel_iommu_remove_device(struct device *dev)
} }
static const struct iommu_ops intel_iommu_ops = { static const struct iommu_ops intel_iommu_ops = {
.capable = intel_iommu_capable,
.domain_init = intel_iommu_domain_init, .domain_init = intel_iommu_domain_init,
.domain_destroy = intel_iommu_domain_destroy, .domain_destroy = intel_iommu_domain_destroy,
.attach_dev = intel_iommu_attach_device, .attach_dev = intel_iommu_attach_device,
...@@ -4471,7 +4468,6 @@ static const struct iommu_ops intel_iommu_ops = { ...@@ -4471,7 +4468,6 @@ static const struct iommu_ops intel_iommu_ops = {
.map = intel_iommu_map, .map = intel_iommu_map,
.unmap = intel_iommu_unmap, .unmap = intel_iommu_unmap,
.iova_to_phys = intel_iommu_iova_to_phys, .iova_to_phys = intel_iommu_iova_to_phys,
.domain_has_cap = intel_iommu_domain_has_cap,
.add_device = intel_iommu_add_device, .add_device = intel_iommu_add_device,
.remove_device = intel_iommu_remove_device, .remove_device = intel_iommu_remove_device,
.pgsize_bitmap = INTEL_IOMMU_PGSIZES, .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
......
...@@ -438,8 +438,7 @@ static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode) ...@@ -438,8 +438,7 @@ static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
(addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE); (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
/* Set interrupt-remapping table pointer */ /* Set interrupt-remapping table pointer */
iommu->gcmd |= DMA_GCMD_SIRTP; writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG);
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_IRTPS), sts); readl, (sts & DMA_GSTS_IRTPS), sts);
...@@ -1139,7 +1138,7 @@ static int intel_msi_setup_irq(struct pci_dev *pdev, unsigned int irq, ...@@ -1139,7 +1138,7 @@ static int intel_msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
return ret; return ret;
} }
static int intel_setup_hpet_msi(unsigned int irq, unsigned int id) static int intel_alloc_hpet_msi(unsigned int irq, unsigned int id)
{ {
int ret = -1; int ret = -1;
struct intel_iommu *iommu; struct intel_iommu *iommu;
...@@ -1170,5 +1169,5 @@ struct irq_remap_ops intel_irq_remap_ops = { ...@@ -1170,5 +1169,5 @@ struct irq_remap_ops intel_irq_remap_ops = {
.compose_msi_msg = intel_compose_msi_msg, .compose_msi_msg = intel_compose_msi_msg,
.msi_alloc_irq = intel_msi_alloc_irq, .msi_alloc_irq = intel_msi_alloc_irq,
.msi_setup_irq = intel_msi_setup_irq, .msi_setup_irq = intel_msi_setup_irq,
.setup_hpet_msi = intel_setup_hpet_msi, .alloc_hpet_msi = intel_alloc_hpet_msi,
}; };
...@@ -30,6 +30,7 @@ ...@@ -30,6 +30,7 @@
#include <linux/notifier.h> #include <linux/notifier.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/bitops.h>
#include <trace/events/iommu.h> #include <trace/events/iommu.h>
static struct kset *iommu_group_kset; static struct kset *iommu_group_kset;
...@@ -519,6 +520,9 @@ int iommu_group_id(struct iommu_group *group) ...@@ -519,6 +520,9 @@ int iommu_group_id(struct iommu_group *group)
} }
EXPORT_SYMBOL_GPL(iommu_group_id); EXPORT_SYMBOL_GPL(iommu_group_id);
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
unsigned long *devfns);
/* /*
* To consider a PCI device isolated, we require ACS to support Source * To consider a PCI device isolated, we require ACS to support Source
* Validation, Request Redirection, Completer Redirection, and Upstream * Validation, Request Redirection, Completer Redirection, and Upstream
...@@ -529,6 +533,86 @@ EXPORT_SYMBOL_GPL(iommu_group_id); ...@@ -529,6 +533,86 @@ EXPORT_SYMBOL_GPL(iommu_group_id);
*/ */
#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF) #define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
/*
* For multifunction devices which are not isolated from each other, find
* all the other non-isolated functions and look for existing groups. For
* each function, we also need to look for aliases to or from other devices
* that may already have a group.
*/
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
unsigned long *devfns)
{
struct pci_dev *tmp = NULL;
struct iommu_group *group;
if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
return NULL;
for_each_pci_dev(tmp) {
if (tmp == pdev || tmp->bus != pdev->bus ||
PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
pci_acs_enabled(tmp, REQ_ACS_FLAGS))
continue;
group = get_pci_alias_group(tmp, devfns);
if (group) {
pci_dev_put(tmp);
return group;
}
}
return NULL;
}
/*
* Look for aliases to or from the given device for existing groups. The
* dma_alias_devfn only supports aliases on the same bus, therefore the search
* space is quite small (especially since we're really only looking at PCIe
* devices, and therefore only expect multiple slots on the root complex or
* downstream switch ports). It's conceivable though that a pair of
* multifunction devices could have aliases between them that would cause a
* loop. To prevent this, we use a bitmap to track where we've been.
*/
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
unsigned long *devfns)
{
struct pci_dev *tmp = NULL;
struct iommu_group *group;
if (test_and_set_bit(pdev->devfn & 0xff, devfns))
return NULL;
group = iommu_group_get(&pdev->dev);
if (group)
return group;
for_each_pci_dev(tmp) {
if (tmp == pdev || tmp->bus != pdev->bus)
continue;
/* We alias them or they alias us */
if (((pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) &&
pdev->dma_alias_devfn == tmp->devfn) ||
((tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) &&
tmp->dma_alias_devfn == pdev->devfn)) {
group = get_pci_alias_group(tmp, devfns);
if (group) {
pci_dev_put(tmp);
return group;
}
group = get_pci_function_alias_group(tmp, devfns);
if (group) {
pci_dev_put(tmp);
return group;
}
}
}
return NULL;
}
struct group_for_pci_data { struct group_for_pci_data {
struct pci_dev *pdev; struct pci_dev *pdev;
struct iommu_group *group; struct iommu_group *group;
...@@ -557,7 +641,7 @@ static struct iommu_group *iommu_group_get_for_pci_dev(struct pci_dev *pdev) ...@@ -557,7 +641,7 @@ static struct iommu_group *iommu_group_get_for_pci_dev(struct pci_dev *pdev)
struct group_for_pci_data data; struct group_for_pci_data data;
struct pci_bus *bus; struct pci_bus *bus;
struct iommu_group *group = NULL; struct iommu_group *group = NULL;
struct pci_dev *tmp; u64 devfns[4] = { 0 };
/* /*
* Find the upstream DMA alias for the device. A device must not * Find the upstream DMA alias for the device. A device must not
...@@ -591,76 +675,21 @@ static struct iommu_group *iommu_group_get_for_pci_dev(struct pci_dev *pdev) ...@@ -591,76 +675,21 @@ static struct iommu_group *iommu_group_get_for_pci_dev(struct pci_dev *pdev)
} }
/* /*
* Next we need to consider DMA alias quirks. If one device aliases * Look for existing groups on device aliases. If we alias another
* to another, they should be grouped together. It's theoretically * device or another device aliases us, use the same group.
* possible that aliases could create chains of devices where each
* device aliases another device. If we then factor in multifunction
* ACS grouping requirements, each alias could incorporate a new slot
* with multiple functions, each with aliases. This is all extremely
* unlikely as DMA alias quirks are typically only used for PCIe
* devices where we usually have a single slot per bus. Furthermore,
* the alias quirk is usually to another function within the slot
* (and ACS multifunction is not supported) or to a different slot
* that doesn't physically exist. The likely scenario is therefore
* that everything on the bus gets grouped together. To reduce the
* problem space, share the IOMMU group for all devices on the bus
* if a DMA alias quirk is present on the bus.
*/
tmp = NULL;
for_each_pci_dev(tmp) {
if (tmp->bus != pdev->bus ||
!(tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN))
continue;
pci_dev_put(tmp);
tmp = NULL;
/* We have an alias quirk, search for an existing group */
for_each_pci_dev(tmp) {
struct iommu_group *group_tmp;
if (tmp->bus != pdev->bus)
continue;
group_tmp = iommu_group_get(&tmp->dev);
if (!group) {
group = group_tmp;
continue;
}
if (group_tmp) {
WARN_ON(group != group_tmp);
iommu_group_put(group_tmp);
}
}
return group ? group : iommu_group_alloc();
}
/*
* Non-multifunction devices or multifunction devices supporting
* ACS get their own group.
*/ */
if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS)) group = get_pci_alias_group(pdev, (unsigned long *)devfns);
return iommu_group_alloc(); if (group)
return group;
/* /*
* Multifunction devices not supporting ACS share a group with other * Look for existing groups on non-isolated functions on the same
* similar devices in the same slot. * slot and aliases of those functions, if any. No need to clear
* the search bitmap, the tested devfns are still valid.
*/ */
tmp = NULL; group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
for_each_pci_dev(tmp) { if (group)
if (tmp == pdev || tmp->bus != pdev->bus ||
PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
pci_acs_enabled(tmp, REQ_ACS_FLAGS))
continue;
group = iommu_group_get(&tmp->dev);
if (group) {
pci_dev_put(tmp);
return group; return group;
}
}
/* No shared group found, allocate new */ /* No shared group found, allocate new */
return iommu_group_alloc(); return iommu_group_alloc();
...@@ -770,18 +799,26 @@ static int iommu_bus_notifier(struct notifier_block *nb, ...@@ -770,18 +799,26 @@ static int iommu_bus_notifier(struct notifier_block *nb,
return 0; return 0;
} }
static struct notifier_block iommu_bus_nb = { static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
.notifier_call = iommu_bus_notifier,
};
static void iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{ {
int err;
struct notifier_block *nb;
struct iommu_callback_data cb = { struct iommu_callback_data cb = {
.ops = ops, .ops = ops,
}; };
bus_register_notifier(bus, &iommu_bus_nb); nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
bus_for_each_dev(bus, NULL, &cb, add_iommu_group); if (!nb)
return -ENOMEM;
nb->notifier_call = iommu_bus_notifier;
err = bus_register_notifier(bus, nb);
if (err) {
kfree(nb);
return err;
}
return bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
} }
/** /**
...@@ -805,9 +842,7 @@ int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops) ...@@ -805,9 +842,7 @@ int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
bus->iommu_ops = ops; bus->iommu_ops = ops;
/* Do IOMMU specific setup for this bus-type */ /* Do IOMMU specific setup for this bus-type */
iommu_bus_init(bus, ops); return iommu_bus_init(bus, ops);
return 0;
} }
EXPORT_SYMBOL_GPL(bus_set_iommu); EXPORT_SYMBOL_GPL(bus_set_iommu);
...@@ -817,6 +852,15 @@ bool iommu_present(struct bus_type *bus) ...@@ -817,6 +852,15 @@ bool iommu_present(struct bus_type *bus)
} }
EXPORT_SYMBOL_GPL(iommu_present); EXPORT_SYMBOL_GPL(iommu_present);
bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
if (!bus->iommu_ops || !bus->iommu_ops->capable)
return false;
return bus->iommu_ops->capable(cap);
}
EXPORT_SYMBOL_GPL(iommu_capable);
/** /**
* iommu_set_fault_handler() - set a fault handler for an iommu domain * iommu_set_fault_handler() - set a fault handler for an iommu domain
* @domain: iommu domain * @domain: iommu domain
...@@ -947,16 +991,6 @@ phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) ...@@ -947,16 +991,6 @@ phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
} }
EXPORT_SYMBOL_GPL(iommu_iova_to_phys); EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
int iommu_domain_has_cap(struct iommu_domain *domain,
unsigned long cap)
{
if (unlikely(domain->ops->domain_has_cap == NULL))
return 0;
return domain->ops->domain_has_cap(domain, cap);
}
EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
static size_t iommu_pgsize(struct iommu_domain *domain, static size_t iommu_pgsize(struct iommu_domain *domain,
unsigned long addr_merge, size_t size) unsigned long addr_merge, size_t size)
{ {
......
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/x86_init.h> #include <asm/x86_init.h>
#include <asm/apic.h> #include <asm/apic.h>
#include <asm/hpet.h>
#include "irq_remapping.h" #include "irq_remapping.h"
...@@ -345,10 +346,16 @@ static int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq, ...@@ -345,10 +346,16 @@ static int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
int setup_hpet_msi_remapped(unsigned int irq, unsigned int id) int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
{ {
if (!remap_ops || !remap_ops->setup_hpet_msi) int ret;
if (!remap_ops || !remap_ops->alloc_hpet_msi)
return -ENODEV; return -ENODEV;
return remap_ops->setup_hpet_msi(irq, id); ret = remap_ops->alloc_hpet_msi(irq, id);
if (ret)
return -EINVAL;
return default_setup_hpet_msi(irq, id);
} }
void panic_if_irq_remap(const char *msg) void panic_if_irq_remap(const char *msg)
......
...@@ -80,7 +80,7 @@ struct irq_remap_ops { ...@@ -80,7 +80,7 @@ struct irq_remap_ops {
int (*msi_setup_irq)(struct pci_dev *, unsigned int, int, int); int (*msi_setup_irq)(struct pci_dev *, unsigned int, int, int);
/* Setup interrupt remapping for an HPET MSI */ /* Setup interrupt remapping for an HPET MSI */
int (*setup_hpet_msi)(unsigned int, unsigned int); int (*alloc_hpet_msi)(unsigned int, unsigned int);
}; };
extern struct irq_remap_ops intel_irq_remap_ops; extern struct irq_remap_ops intel_irq_remap_ops;
......
...@@ -603,10 +603,9 @@ static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain, ...@@ -603,10 +603,9 @@ static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
return ret; return ret;
} }
static int msm_iommu_domain_has_cap(struct iommu_domain *domain, static bool msm_iommu_capable(enum iommu_cap cap)
unsigned long cap)
{ {
return 0; return false;
} }
static void print_ctx_regs(void __iomem *base, int ctx) static void print_ctx_regs(void __iomem *base, int ctx)
...@@ -675,6 +674,7 @@ irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id) ...@@ -675,6 +674,7 @@ irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
} }
static const struct iommu_ops msm_iommu_ops = { static const struct iommu_ops msm_iommu_ops = {
.capable = msm_iommu_capable,
.domain_init = msm_iommu_domain_init, .domain_init = msm_iommu_domain_init,
.domain_destroy = msm_iommu_domain_destroy, .domain_destroy = msm_iommu_domain_destroy,
.attach_dev = msm_iommu_attach_dev, .attach_dev = msm_iommu_attach_dev,
...@@ -682,7 +682,6 @@ static const struct iommu_ops msm_iommu_ops = { ...@@ -682,7 +682,6 @@ static const struct iommu_ops msm_iommu_ops = {
.map = msm_iommu_map, .map = msm_iommu_map,
.unmap = msm_iommu_unmap, .unmap = msm_iommu_unmap,
.iova_to_phys = msm_iommu_iova_to_phys, .iova_to_phys = msm_iommu_iova_to_phys,
.domain_has_cap = msm_iommu_domain_has_cap,
.pgsize_bitmap = MSM_IOMMU_PGSIZES, .pgsize_bitmap = MSM_IOMMU_PGSIZES,
}; };
......
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#include <linux/of.h> #include <linux/of.h>
#include <linux/of_iommu.h> #include <linux/of_iommu.h>
#include <linux/of_irq.h> #include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
...@@ -892,19 +893,11 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd) ...@@ -892,19 +893,11 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
goto err_enable; goto err_enable;
flush_iotlb_all(obj); flush_iotlb_all(obj);
if (!try_module_get(obj->owner)) {
err = -ENODEV;
goto err_module;
}
spin_unlock(&obj->iommu_lock); spin_unlock(&obj->iommu_lock);
dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
return obj; return obj;
err_module:
if (obj->refcount == 1)
iommu_disable(obj);
err_enable: err_enable:
obj->refcount--; obj->refcount--;
spin_unlock(&obj->iommu_lock); spin_unlock(&obj->iommu_lock);
...@@ -925,8 +918,6 @@ static void omap_iommu_detach(struct omap_iommu *obj) ...@@ -925,8 +918,6 @@ static void omap_iommu_detach(struct omap_iommu *obj)
if (--obj->refcount == 0) if (--obj->refcount == 0)
iommu_disable(obj); iommu_disable(obj);
module_put(obj->owner);
obj->iopgd = NULL; obj->iopgd = NULL;
spin_unlock(&obj->iommu_lock); spin_unlock(&obj->iommu_lock);
...@@ -1006,7 +997,7 @@ static int omap_iommu_remove(struct platform_device *pdev) ...@@ -1006,7 +997,7 @@ static int omap_iommu_remove(struct platform_device *pdev)
return 0; return 0;
} }
static struct of_device_id omap_iommu_of_match[] = { static const struct of_device_id omap_iommu_of_match[] = {
{ .compatible = "ti,omap2-iommu" }, { .compatible = "ti,omap2-iommu" },
{ .compatible = "ti,omap4-iommu" }, { .compatible = "ti,omap4-iommu" },
{ .compatible = "ti,dra7-iommu" }, { .compatible = "ti,dra7-iommu" },
...@@ -1091,6 +1082,11 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) ...@@ -1091,6 +1082,11 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
int ret = 0; int ret = 0;
if (!arch_data || !arch_data->name) {
dev_err(dev, "device doesn't have an associated iommu\n");
return -EINVAL;
}
spin_lock(&omap_domain->lock); spin_lock(&omap_domain->lock);
/* only a single device is supported per domain for now */ /* only a single device is supported per domain for now */
...@@ -1239,6 +1235,7 @@ static int omap_iommu_add_device(struct device *dev) ...@@ -1239,6 +1235,7 @@ static int omap_iommu_add_device(struct device *dev)
{ {
struct omap_iommu_arch_data *arch_data; struct omap_iommu_arch_data *arch_data;
struct device_node *np; struct device_node *np;
struct platform_device *pdev;
/* /*
* Allocate the archdata iommu structure for DT-based devices. * Allocate the archdata iommu structure for DT-based devices.
...@@ -1253,13 +1250,19 @@ static int omap_iommu_add_device(struct device *dev) ...@@ -1253,13 +1250,19 @@ static int omap_iommu_add_device(struct device *dev)
if (!np) if (!np)
return 0; return 0;
pdev = of_find_device_by_node(np);
if (WARN_ON(!pdev)) {
of_node_put(np);
return -EINVAL;
}
arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL); arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL);
if (!arch_data) { if (!arch_data) {
of_node_put(np); of_node_put(np);
return -ENOMEM; return -ENOMEM;
} }
arch_data->name = kstrdup(dev_name(dev), GFP_KERNEL); arch_data->name = kstrdup(dev_name(&pdev->dev), GFP_KERNEL);
dev->archdata.iommu = arch_data; dev->archdata.iommu = arch_data;
of_node_put(np); of_node_put(np);
......
...@@ -28,7 +28,6 @@ struct iotlb_entry { ...@@ -28,7 +28,6 @@ struct iotlb_entry {
struct omap_iommu { struct omap_iommu {
const char *name; const char *name;
struct module *owner;
void __iomem *regbase; void __iomem *regbase;
struct device *dev; struct device *dev;
void *isr_priv; void *isr_priv;
......
...@@ -303,13 +303,13 @@ static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain, ...@@ -303,13 +303,13 @@ static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
return pa; return pa;
} }
static int gart_iommu_domain_has_cap(struct iommu_domain *domain, static bool gart_iommu_capable(enum iommu_cap cap)
unsigned long cap)
{ {
return 0; return false;
} }
static const struct iommu_ops gart_iommu_ops = { static const struct iommu_ops gart_iommu_ops = {
.capable = gart_iommu_capable,
.domain_init = gart_iommu_domain_init, .domain_init = gart_iommu_domain_init,
.domain_destroy = gart_iommu_domain_destroy, .domain_destroy = gart_iommu_domain_destroy,
.attach_dev = gart_iommu_attach_dev, .attach_dev = gart_iommu_attach_dev,
...@@ -317,7 +317,6 @@ static const struct iommu_ops gart_iommu_ops = { ...@@ -317,7 +317,6 @@ static const struct iommu_ops gart_iommu_ops = {
.map = gart_iommu_map, .map = gart_iommu_map,
.unmap = gart_iommu_unmap, .unmap = gart_iommu_unmap,
.iova_to_phys = gart_iommu_iova_to_phys, .iova_to_phys = gart_iommu_iova_to_phys,
.domain_has_cap = gart_iommu_domain_has_cap,
.pgsize_bitmap = GART_IOMMU_PGSIZES, .pgsize_bitmap = GART_IOMMU_PGSIZES,
}; };
...@@ -416,7 +415,7 @@ static const struct dev_pm_ops tegra_gart_pm_ops = { ...@@ -416,7 +415,7 @@ static const struct dev_pm_ops tegra_gart_pm_ops = {
.resume = tegra_gart_resume, .resume = tegra_gart_resume,
}; };
static struct of_device_id tegra_gart_of_match[] = { static const struct of_device_id tegra_gart_of_match[] = {
{ .compatible = "nvidia,tegra20-gart", }, { .compatible = "nvidia,tegra20-gart", },
{ }, { },
}; };
......
...@@ -780,10 +780,9 @@ static phys_addr_t smmu_iommu_iova_to_phys(struct iommu_domain *domain, ...@@ -780,10 +780,9 @@ static phys_addr_t smmu_iommu_iova_to_phys(struct iommu_domain *domain,
return PFN_PHYS(pfn); return PFN_PHYS(pfn);
} }
static int smmu_iommu_domain_has_cap(struct iommu_domain *domain, static bool smmu_iommu_capable(enum iommu_cap cap)
unsigned long cap)
{ {
return 0; return false;
} }
static int smmu_iommu_attach_dev(struct iommu_domain *domain, static int smmu_iommu_attach_dev(struct iommu_domain *domain,
...@@ -949,6 +948,7 @@ static void smmu_iommu_domain_destroy(struct iommu_domain *domain) ...@@ -949,6 +948,7 @@ static void smmu_iommu_domain_destroy(struct iommu_domain *domain)
} }
static const struct iommu_ops smmu_iommu_ops = { static const struct iommu_ops smmu_iommu_ops = {
.capable = smmu_iommu_capable,
.domain_init = smmu_iommu_domain_init, .domain_init = smmu_iommu_domain_init,
.domain_destroy = smmu_iommu_domain_destroy, .domain_destroy = smmu_iommu_domain_destroy,
.attach_dev = smmu_iommu_attach_dev, .attach_dev = smmu_iommu_attach_dev,
...@@ -956,7 +956,6 @@ static const struct iommu_ops smmu_iommu_ops = { ...@@ -956,7 +956,6 @@ static const struct iommu_ops smmu_iommu_ops = {
.map = smmu_iommu_map, .map = smmu_iommu_map,
.unmap = smmu_iommu_unmap, .unmap = smmu_iommu_unmap,
.iova_to_phys = smmu_iommu_iova_to_phys, .iova_to_phys = smmu_iommu_iova_to_phys,
.domain_has_cap = smmu_iommu_domain_has_cap,
.pgsize_bitmap = SMMU_IOMMU_PGSIZES, .pgsize_bitmap = SMMU_IOMMU_PGSIZES,
}; };
...@@ -1260,7 +1259,7 @@ static const struct dev_pm_ops tegra_smmu_pm_ops = { ...@@ -1260,7 +1259,7 @@ static const struct dev_pm_ops tegra_smmu_pm_ops = {
.resume = tegra_smmu_resume, .resume = tegra_smmu_resume,
}; };
static struct of_device_id tegra_smmu_of_match[] = { static const struct of_device_id tegra_smmu_of_match[] = {
{ .compatible = "nvidia,tegra30-smmu", }, { .compatible = "nvidia,tegra30-smmu", },
{ }, { },
}; };
......
...@@ -723,14 +723,14 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, ...@@ -723,14 +723,14 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
list_add(&group->next, &domain->group_list); list_add(&group->next, &domain->group_list);
if (!allow_unsafe_interrupts && if (!allow_unsafe_interrupts &&
!iommu_domain_has_cap(domain->domain, IOMMU_CAP_INTR_REMAP)) { !iommu_capable(bus, IOMMU_CAP_INTR_REMAP)) {
pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n", pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
__func__); __func__);
ret = -EPERM; ret = -EPERM;
goto out_detach; goto out_detach;
} }
if (iommu_domain_has_cap(domain->domain, IOMMU_CAP_CACHE_COHERENCY)) if (iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
domain->prot |= IOMMU_CACHE; domain->prot |= IOMMU_CACHE;
/* /*
......
...@@ -181,13 +181,14 @@ extern int bus_unregister_notifier(struct bus_type *bus, ...@@ -181,13 +181,14 @@ extern int bus_unregister_notifier(struct bus_type *bus,
* with the device lock held in the core, so be careful. * with the device lock held in the core, so be careful.
*/ */
#define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */ #define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */
#define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device removed */ #define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device to be removed */
#define BUS_NOTIFY_BIND_DRIVER 0x00000003 /* driver about to be #define BUS_NOTIFY_REMOVED_DEVICE 0x00000003 /* device removed */
#define BUS_NOTIFY_BIND_DRIVER 0x00000004 /* driver about to be
bound */ bound */
#define BUS_NOTIFY_BOUND_DRIVER 0x00000004 /* driver bound to device */ #define BUS_NOTIFY_BOUND_DRIVER 0x00000005 /* driver bound to device */
#define BUS_NOTIFY_UNBIND_DRIVER 0x00000005 /* driver about to be #define BUS_NOTIFY_UNBIND_DRIVER 0x00000006 /* driver about to be
unbound */ unbound */
#define BUS_NOTIFY_UNBOUND_DRIVER 0x00000006 /* driver is unbound #define BUS_NOTIFY_UNBOUND_DRIVER 0x00000007 /* driver is unbound
from the device */ from the device */
extern struct kset *bus_get_kset(struct bus_type *bus); extern struct kset *bus_get_kset(struct bus_type *bus);
......
...@@ -56,13 +56,19 @@ struct dmar_drhd_unit { ...@@ -56,13 +56,19 @@ struct dmar_drhd_unit {
struct intel_iommu *iommu; struct intel_iommu *iommu;
}; };
struct dmar_pci_path {
u8 bus;
u8 device;
u8 function;
};
struct dmar_pci_notify_info { struct dmar_pci_notify_info {
struct pci_dev *dev; struct pci_dev *dev;
unsigned long event; unsigned long event;
int bus; int bus;
u16 seg; u16 seg;
u16 level; u16 level;
struct acpi_dmar_pci_path path[]; struct dmar_pci_path path[];
} __attribute__((packed)); } __attribute__((packed));
extern struct rw_semaphore dmar_global_lock; extern struct rw_semaphore dmar_global_lock;
......
...@@ -57,8 +57,11 @@ struct iommu_domain { ...@@ -57,8 +57,11 @@ struct iommu_domain {
struct iommu_domain_geometry geometry; struct iommu_domain_geometry geometry;
}; };
#define IOMMU_CAP_CACHE_COHERENCY 0x1 enum iommu_cap {
#define IOMMU_CAP_INTR_REMAP 0x2 /* isolates device intrs */ IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA
transactions */
IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */
};
/* /*
* Following constraints are specifc to FSL_PAMUV1: * Following constraints are specifc to FSL_PAMUV1:
...@@ -95,7 +98,6 @@ enum iommu_attr { ...@@ -95,7 +98,6 @@ enum iommu_attr {
* @map: map a physically contiguous memory region to an iommu domain * @map: map a physically contiguous memory region to an iommu domain
* @unmap: unmap a physically contiguous memory region from an iommu domain * @unmap: unmap a physically contiguous memory region from an iommu domain
* @iova_to_phys: translate iova to physical address * @iova_to_phys: translate iova to physical address
* @domain_has_cap: domain capabilities query
* @add_device: add device to iommu grouping * @add_device: add device to iommu grouping
* @remove_device: remove device from iommu grouping * @remove_device: remove device from iommu grouping
* @domain_get_attr: Query domain attributes * @domain_get_attr: Query domain attributes
...@@ -103,6 +105,7 @@ enum iommu_attr { ...@@ -103,6 +105,7 @@ enum iommu_attr {
* @pgsize_bitmap: bitmap of supported page sizes * @pgsize_bitmap: bitmap of supported page sizes
*/ */
struct iommu_ops { struct iommu_ops {
bool (*capable)(enum iommu_cap);
int (*domain_init)(struct iommu_domain *domain); int (*domain_init)(struct iommu_domain *domain);
void (*domain_destroy)(struct iommu_domain *domain); void (*domain_destroy)(struct iommu_domain *domain);
int (*attach_dev)(struct iommu_domain *domain, struct device *dev); int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
...@@ -112,8 +115,6 @@ struct iommu_ops { ...@@ -112,8 +115,6 @@ struct iommu_ops {
size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
size_t size); size_t size);
phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
int (*domain_has_cap)(struct iommu_domain *domain,
unsigned long cap);
int (*add_device)(struct device *dev); int (*add_device)(struct device *dev);
void (*remove_device)(struct device *dev); void (*remove_device)(struct device *dev);
int (*device_group)(struct device *dev, unsigned int *groupid); int (*device_group)(struct device *dev, unsigned int *groupid);
...@@ -143,6 +144,7 @@ struct iommu_ops { ...@@ -143,6 +144,7 @@ struct iommu_ops {
extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops); extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
extern bool iommu_present(struct bus_type *bus); extern bool iommu_present(struct bus_type *bus);
extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus); extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
extern struct iommu_group *iommu_group_get_by_id(int id); extern struct iommu_group *iommu_group_get_by_id(int id);
extern void iommu_domain_free(struct iommu_domain *domain); extern void iommu_domain_free(struct iommu_domain *domain);
...@@ -155,8 +157,6 @@ extern int iommu_map(struct iommu_domain *domain, unsigned long iova, ...@@ -155,8 +157,6 @@ extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size); size_t size);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova); extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern int iommu_domain_has_cap(struct iommu_domain *domain,
unsigned long cap);
extern void iommu_set_fault_handler(struct iommu_domain *domain, extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token); iommu_fault_handler_t handler, void *token);
...@@ -251,6 +251,11 @@ static inline bool iommu_present(struct bus_type *bus) ...@@ -251,6 +251,11 @@ static inline bool iommu_present(struct bus_type *bus)
return false; return false;
} }
static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
return false;
}
static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus) static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{ {
return NULL; return NULL;
...@@ -305,12 +310,6 @@ static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_ad ...@@ -305,12 +310,6 @@ static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_ad
return 0; return 0;
} }
static inline int iommu_domain_has_cap(struct iommu_domain *domain,
unsigned long cap)
{
return 0;
}
static inline void iommu_set_fault_handler(struct iommu_domain *domain, static inline void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token) iommu_fault_handler_t handler, void *token)
{ {
......
...@@ -191,8 +191,7 @@ int kvm_assign_device(struct kvm *kvm, ...@@ -191,8 +191,7 @@ int kvm_assign_device(struct kvm *kvm,
return r; return r;
} }
noncoherent = !iommu_domain_has_cap(kvm->arch.iommu_domain, noncoherent = !iommu_capable(&pci_bus_type, IOMMU_CAP_CACHE_COHERENCY);
IOMMU_CAP_CACHE_COHERENCY);
/* Check if need to update IOMMU page table for guest memory */ /* Check if need to update IOMMU page table for guest memory */
if (noncoherent != kvm->arch.iommu_noncoherent) { if (noncoherent != kvm->arch.iommu_noncoherent) {
...@@ -254,8 +253,7 @@ int kvm_iommu_map_guest(struct kvm *kvm) ...@@ -254,8 +253,7 @@ int kvm_iommu_map_guest(struct kvm *kvm)
} }
if (!allow_unsafe_assigned_interrupts && if (!allow_unsafe_assigned_interrupts &&
!iommu_domain_has_cap(kvm->arch.iommu_domain, !iommu_capable(&pci_bus_type, IOMMU_CAP_INTR_REMAP)) {
IOMMU_CAP_INTR_REMAP)) {
printk(KERN_WARNING "%s: No interrupt remapping support," printk(KERN_WARNING "%s: No interrupt remapping support,"
" disallowing device assignment." " disallowing device assignment."
" Re-enble with \"allow_unsafe_assigned_interrupts=1\"" " Re-enble with \"allow_unsafe_assigned_interrupts=1\""
......