Commit 8c1d70b2 authored by Linus Torvalds

Merge tag 'vfio-v4.14-rc1' of git://github.com/awilliam/linux-vfio

Pull VFIO updates from Alex Williamson:

 - Base MSI remapping on either IOMMU domain or IRQ domain support
   (Robin Murphy)

 - Prioritize hardware MSI regions over software defined regions (Robin
   Murphy)

 - Fix no-iommu reference counting (Eric Auger)

 - Stall removing last device from group for container cleanup (Alex
   Williamson)

 - Constify amba_id (Arvind Yadav)

* tag 'vfio-v4.14-rc1' of git://github.com/awilliam/linux-vfio:
  vfio: platform: constify amba_id
  vfio: Stall vfio_del_group_dev() for container group detach
  vfio: fix noiommu vfio_iommu_group_get reference count
  vfio/type1: Give hardware MSI regions precedence
  vfio/type1: Cope with hardware MSI reserved regions
parents d2d8f51e 417fb50d
@@ -93,7 +93,7 @@ static int vfio_amba_remove(struct amba_device *adev)
 	return -EINVAL;
 }
 
-static struct amba_id pl330_ids[] = {
+static const struct amba_id pl330_ids[] = {
 	{ 0, 0 },
 };
......
@@ -85,6 +85,7 @@ struct vfio_group {
 	struct list_head		unbound_list;
 	struct mutex			unbound_lock;
 	atomic_t			opened;
+	wait_queue_head_t		container_q;
 	bool				noiommu;
 	struct kvm			*kvm;
 	struct blocking_notifier_head	notifier;
@@ -138,9 +139,10 @@ struct iommu_group *vfio_iommu_group_get(struct device *dev)
 	iommu_group_set_name(group, "vfio-noiommu");
 	iommu_group_set_iommudata(group, &noiommu, NULL);
 	ret = iommu_group_add_device(group, dev);
-	iommu_group_put(group);
-	if (ret)
+	if (ret) {
+		iommu_group_put(group);
 		return NULL;
+	}
 
 	/*
 	 * Where to taint?  At this point we've added an IOMMU group for a
@@ -337,6 +339,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
 	mutex_init(&group->unbound_lock);
 	atomic_set(&group->container_users, 0);
 	atomic_set(&group->opened, 0);
+	init_waitqueue_head(&group->container_q);
 	group->iommu_group = iommu_group;
 #ifdef CONFIG_VFIO_NOIOMMU
 	group->noiommu = (iommu_group_get_iommudata(iommu_group) == &noiommu);
@@ -993,6 +996,23 @@ void *vfio_del_group_dev(struct device *dev)
 		}
 	} while (ret <= 0);
 
+	/*
+	 * In order to support multiple devices per group, devices can be
+	 * plucked from the group while other devices in the group are still
+	 * in use.  The container persists with this group and those remaining
+	 * devices still attached.  If the user creates an isolation violation
+	 * by binding this device to another driver while the group is still in
+	 * use, that's their fault.  However, in the case of removing the last,
+	 * or potentially the only, device in the group there can be no other
+	 * in-use devices in the group.  The user has done their due diligence
+	 * and we should lay no claims to those devices.  In order to do that,
+	 * we need to make sure the group is detached from the container.
+	 * Without this stall, we're potentially racing with a user process
+	 * that may attempt to immediately bind this device to another driver.
+	 */
+	if (list_empty(&group->device_list))
+		wait_event(group->container_q, !group->container);
+
 	vfio_group_put(group);
 
 	return device_data;
@@ -1298,6 +1318,7 @@ static void __vfio_group_unset_container(struct vfio_group *group)
 					  group->iommu_group);
 
 	group->container = NULL;
+	wake_up(&group->container_q);
 	list_del(&group->container_next);
 
 	/* Detaching the last group deprivileges a container, remove iommu */
......
@@ -1169,13 +1169,21 @@ static bool vfio_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base)
 	INIT_LIST_HEAD(&group_resv_regions);
 	iommu_get_group_resv_regions(group, &group_resv_regions);
 	list_for_each_entry(region, &group_resv_regions, list) {
+		/*
+		 * The presence of any 'real' MSI regions should take
+		 * precedence over the software-managed one if the
+		 * IOMMU driver happens to advertise both types.
+		 */
+		if (region->type == IOMMU_RESV_MSI) {
+			ret = false;
+			break;
+		}
+
 		if (region->type == IOMMU_RESV_SW_MSI) {
 			*base = region->start;
 			ret = true;
-			goto out;
+			break;
 		}
 	}
-out:
+
 	list_for_each_entry_safe(region, next, &group_resv_regions, list)
 		kfree(region);
 	return ret;
@@ -1265,8 +1273,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 	INIT_LIST_HEAD(&domain->group_list);
 	list_add(&group->next, &domain->group_list);
 
-	msi_remap = resv_msi ? irq_domain_check_msi_remap() :
-				iommu_capable(bus, IOMMU_CAP_INTR_REMAP);
+	msi_remap = irq_domain_check_msi_remap() ||
+			iommu_capable(bus, IOMMU_CAP_INTR_REMAP);
 
 	if (!allow_unsafe_interrupts && !msi_remap) {
 		pr_warn("%s: No interrupt remapping support.  Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment