Commit 8c6f5e73 authored by Linus Torvalds

Merge tag 'vfio-v4.13-rc1' of git://github.com/awilliam/linux-vfio

Pull VFIO updates from Alex Williamson:

 - Include Intel XXV710 in INTx workaround (Alex Williamson)

 - Make use of ERR_CAST() for error return (Dan Carpenter)

 - Fix vfio_group release deadlock from iommu notifier (Alex Williamson)

 - Unset KVM-VFIO attributes only on group match (Alex Williamson)

 - Fix release path group/file matching with KVM-VFIO (Alex Williamson)

 - Remove unnecessary lock uses triggering lockdep splat (Alex Williamson)

* tag 'vfio-v4.13-rc1' of git://github.com/awilliam/linux-vfio:
  vfio: Remove unnecessary uses of vfio_container.group_lock
  vfio: New external user group/file match
  kvm-vfio: Decouple only when we match a group
  vfio: Fix group release deadlock
  vfio: Use ERR_CAST() instead of open coding it
  vfio/pci: Add Intel XXV710 to hidden INTx devices
parents 3a00be19 7f56c30b
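
Note on the group-release deadlock fix below: dropping what may be the last group reference from inside the iommu group notifier would have to unregister that same notifier, and unregistering needs the write side of a lock whose read side the notifier call chain already holds, so the final put is deferred to a workqueue. A minimal, generic sketch of that deferral pattern is shown here for reference; the foo/foo_schedule_put names are illustrative only and are not part of the VFIO code, which applies the same idea in vfio_group_schedule_put() further down.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Illustrative refcounted object whose release path needs a lock that the
 * caller dropping the reference may already hold on the read side. */
struct foo {
	struct kref kref;
};

struct foo_put_work {
	struct work_struct work;
	struct foo *foo;
};

static void foo_release(struct kref *kref)
{
	struct foo *foo = container_of(kref, struct foo, kref);

	/* ...unregister notifiers etc. (needs the write side)... */
	kfree(foo);
}

static void foo_put_bg(struct work_struct *work)
{
	struct foo_put_work *do_work =
		container_of(work, struct foo_put_work, work);

	/* Runs later in process context, with no caller-held locks. */
	kref_put(&do_work->foo->kref, foo_release);
	kfree(do_work);
}

/* Drop a reference asynchronously so a possibly-final release never runs
 * under locks held by the current call chain. */
static void foo_schedule_put(struct foo *foo)
{
	struct foo_put_work *do_work;

	do_work = kmalloc(sizeof(*do_work), GFP_KERNEL);
	if (WARN_ON(!do_work))
		return;	/* best effort: leak the reference rather than risk the deadlock */

	INIT_WORK(&do_work->work, foo_put_bg);
	do_work->foo = foo;
	schedule_work(&do_work->work);
}

The trade-off is that the release becomes asynchronous; that appears acceptable here since nothing on the notifier path relies on the group being torn down synchronously.
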
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -195,11 +195,11 @@ static bool vfio_pci_nointx(struct pci_dev *pdev)
 	switch (pdev->vendor) {
 	case PCI_VENDOR_ID_INTEL:
 		switch (pdev->device) {
-		/* All i40e (XL710/X710) 10/20/40GbE NICs */
+		/* All i40e (XL710/X710/XXV710) 10/20/25/40GbE NICs */
 		case 0x1572:
 		case 0x1574:
 		case 0x1580 ... 0x1581:
-		case 0x1583 ... 0x1589:
+		case 0x1583 ... 0x158b:
 		case 0x37d0 ... 0x37d2:
 			return true;
 		default:
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -382,7 +382,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
 	if (IS_ERR(dev)) {
 		vfio_free_group_minor(minor);
 		vfio_group_unlock_and_free(group);
-		return (struct vfio_group *)dev; /* ERR_PTR */
+		return ERR_CAST(dev);
 	}
 
 	group->minor = minor;
@@ -423,6 +423,34 @@ static void vfio_group_put(struct vfio_group *group)
 	kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
 }
 
+struct vfio_group_put_work {
+	struct work_struct work;
+	struct vfio_group *group;
+};
+
+static void vfio_group_put_bg(struct work_struct *work)
+{
+	struct vfio_group_put_work *do_work;
+
+	do_work = container_of(work, struct vfio_group_put_work, work);
+
+	vfio_group_put(do_work->group);
+	kfree(do_work);
+}
+
+static void vfio_group_schedule_put(struct vfio_group *group)
+{
+	struct vfio_group_put_work *do_work;
+
+	do_work = kmalloc(sizeof(*do_work), GFP_KERNEL);
+	if (WARN_ON(!do_work))
+		return;
+
+	INIT_WORK(&do_work->work, vfio_group_put_bg);
+	do_work->group = group;
+	schedule_work(&do_work->work);
+}
+
 /* Assume group_lock or group reference is held */
 static void vfio_group_get(struct vfio_group *group)
 {
@@ -762,7 +790,14 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
 		break;
 	}
 
-	vfio_group_put(group);
+	/*
+	 * If we're the last reference to the group, the group will be
+	 * released, which includes unregistering the iommu group notifier.
+	 * We hold a read-lock on that notifier list, unregistering needs
+	 * a write-lock... deadlock.  Release our reference asynchronously
+	 * to avoid that situation.
+	 */
+	vfio_group_schedule_put(group);
 
 	return NOTIFY_OK;
 }
@@ -1140,15 +1175,11 @@ static long vfio_fops_unl_ioctl(struct file *filep,
 		ret = vfio_ioctl_set_iommu(container, arg);
 		break;
 	default:
-		down_read(&container->group_lock);
-
 		driver = container->iommu_driver;
 		data = container->iommu_data;
 
 		if (driver) /* passthrough all unrecognized ioctls */
 			ret = driver->ops->ioctl(data, cmd, arg);
-
-		up_read(&container->group_lock);
 	}
 
 	return ret;
@@ -1202,15 +1233,11 @@ static ssize_t vfio_fops_read(struct file *filep, char __user *buf,
 	struct vfio_iommu_driver *driver;
 	ssize_t ret = -EINVAL;
 
-	down_read(&container->group_lock);
-
 	driver = container->iommu_driver;
 	if (likely(driver && driver->ops->read))
 		ret = driver->ops->read(container->iommu_data,
 					buf, count, ppos);
 
-	up_read(&container->group_lock);
-
 	return ret;
 }
@@ -1221,15 +1248,11 @@ static ssize_t vfio_fops_write(struct file *filep, const char __user *buf,
 	struct vfio_iommu_driver *driver;
 	ssize_t ret = -EINVAL;
 
-	down_read(&container->group_lock);
-
 	driver = container->iommu_driver;
 	if (likely(driver && driver->ops->write))
 		ret = driver->ops->write(container->iommu_data,
 					 buf, count, ppos);
 
-	up_read(&container->group_lock);
-
 	return ret;
 }
@@ -1239,14 +1262,10 @@ static int vfio_fops_mmap(struct file *filep, struct vm_area_struct *vma)
 	struct vfio_iommu_driver *driver;
 	int ret = -EINVAL;
 
-	down_read(&container->group_lock);
-
 	driver = container->iommu_driver;
 	if (likely(driver && driver->ops->mmap))
 		ret = driver->ops->mmap(container->iommu_data, vma);
 
-	up_read(&container->group_lock);
-
 	return ret;
 }
@@ -1741,6 +1760,15 @@ void vfio_group_put_external_user(struct vfio_group *group)
 }
 EXPORT_SYMBOL_GPL(vfio_group_put_external_user);
 
+bool vfio_external_group_match_file(struct vfio_group *test_group,
+				    struct file *filep)
+{
+	struct vfio_group *group = filep->private_data;
+
+	return (filep->f_op == &vfio_group_fops) && (group == test_group);
+}
+EXPORT_SYMBOL_GPL(vfio_external_group_match_file);
+
 int vfio_external_user_iommu_id(struct vfio_group *group)
 {
 	return iommu_group_id(group->iommu_group);
@@ -1949,8 +1977,6 @@ int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
 		goto err_pin_pages;
 
 	container = group->container;
-	down_read(&container->group_lock);
-
 	driver = container->iommu_driver;
 	if (likely(driver && driver->ops->pin_pages))
 		ret = driver->ops->pin_pages(container->iommu_data, user_pfn,
@@ -1958,7 +1984,6 @@ int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
 	else
 		ret = -ENOTTY;
 
-	up_read(&container->group_lock);
 	vfio_group_try_dissolve_container(group);
 
 err_pin_pages:
@@ -1998,8 +2023,6 @@ int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage)
 		goto err_unpin_pages;
 
 	container = group->container;
-	down_read(&container->group_lock);
-
 	driver = container->iommu_driver;
 	if (likely(driver && driver->ops->unpin_pages))
 		ret = driver->ops->unpin_pages(container->iommu_data, user_pfn,
@@ -2007,7 +2030,6 @@ int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage)
 	else
 		ret = -ENOTTY;
 
-	up_read(&container->group_lock);
 	vfio_group_try_dissolve_container(group);
 
 err_unpin_pages:
@@ -2029,8 +2051,6 @@ static int vfio_register_iommu_notifier(struct vfio_group *group,
 		return -EINVAL;
 
 	container = group->container;
-	down_read(&container->group_lock);
-
 	driver = container->iommu_driver;
 	if (likely(driver && driver->ops->register_notifier))
 		ret = driver->ops->register_notifier(container->iommu_data,
@@ -2038,7 +2058,6 @@ static int vfio_register_iommu_notifier(struct vfio_group *group,
 	else
 		ret = -ENOTTY;
 
-	up_read(&container->group_lock);
 	vfio_group_try_dissolve_container(group);
 
 	return ret;
@@ -2056,8 +2075,6 @@ static int vfio_unregister_iommu_notifier(struct vfio_group *group,
 		return -EINVAL;
 
 	container = group->container;
-	down_read(&container->group_lock);
-
 	driver = container->iommu_driver;
 	if (likely(driver && driver->ops->unregister_notifier))
 		ret = driver->ops->unregister_notifier(container->iommu_data,
@@ -2065,7 +2082,6 @@ static int vfio_unregister_iommu_notifier(struct vfio_group *group,
 	else
 		ret = -ENOTTY;
 
-	up_read(&container->group_lock);
 	vfio_group_try_dissolve_container(group);
 
 	return ret;
@@ -2083,7 +2099,6 @@ static int vfio_register_group_notifier(struct vfio_group *group,
 					unsigned long *events,
 					struct notifier_block *nb)
 {
-	struct vfio_container *container;
 	int ret;
 	bool set_kvm = false;
 
@@ -2101,9 +2116,6 @@ static int vfio_register_group_notifier(struct vfio_group *group,
 	if (ret)
 		return -EINVAL;
 
-	container = group->container;
-	down_read(&container->group_lock);
-
 	ret = blocking_notifier_chain_register(&group->notifier, nb);
 
 	/*
@@ -2114,7 +2126,6 @@ static int vfio_register_group_notifier(struct vfio_group *group,
 		blocking_notifier_call_chain(&group->notifier,
 					VFIO_GROUP_NOTIFY_SET_KVM, group->kvm);
 
-	up_read(&container->group_lock);
 	vfio_group_try_dissolve_container(group);
 
 	return ret;
@@ -2123,19 +2134,14 @@ static int vfio_register_group_notifier(struct vfio_group *group,
 static int vfio_unregister_group_notifier(struct vfio_group *group,
 					struct notifier_block *nb)
 {
-	struct vfio_container *container;
 	int ret;
 
 	ret = vfio_group_add_container_user(group);
 	if (ret)
 		return -EINVAL;
 
-	container = group->container;
-	down_read(&container->group_lock);
-
 	ret = blocking_notifier_chain_unregister(&group->notifier, nb);
 
-	up_read(&container->group_lock);
-
 	vfio_group_try_dissolve_container(group);
 
 	return ret;
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -97,6 +97,8 @@ extern void vfio_unregister_iommu_driver(
  */
 extern struct vfio_group *vfio_group_get_external_user(struct file *filep);
 extern void vfio_group_put_external_user(struct vfio_group *group);
+extern bool vfio_external_group_match_file(struct vfio_group *group,
+					   struct file *filep);
 extern int vfio_external_user_iommu_id(struct vfio_group *group);
 extern long vfio_external_check_extension(struct vfio_group *group,
 					  unsigned long arg);
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -51,6 +51,22 @@ static struct vfio_group *kvm_vfio_group_get_external_user(struct file *filep)
 	return vfio_group;
 }
 
+static bool kvm_vfio_external_group_match_file(struct vfio_group *group,
+					       struct file *filep)
+{
+	bool ret, (*fn)(struct vfio_group *, struct file *);
+
+	fn = symbol_get(vfio_external_group_match_file);
+	if (!fn)
+		return false;
+
+	ret = fn(group, filep);
+
+	symbol_put(vfio_external_group_match_file);
+
+	return ret;
+}
+
 static void kvm_vfio_group_put_external_user(struct vfio_group *vfio_group)
 {
 	void (*fn)(struct vfio_group *);
@@ -231,37 +247,31 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
 		if (!f.file)
 			return -EBADF;
 
-		vfio_group = kvm_vfio_group_get_external_user(f.file);
-		fdput(f);
-
-		if (IS_ERR(vfio_group))
-			return PTR_ERR(vfio_group);
-
 		ret = -ENOENT;
 
 		mutex_lock(&kv->lock);
 
 		list_for_each_entry(kvg, &kv->group_list, node) {
-			if (kvg->vfio_group != vfio_group)
+			if (!kvm_vfio_external_group_match_file(kvg->vfio_group,
+								f.file))
 				continue;
 
 			list_del(&kvg->node);
+			kvm_arch_end_assignment(dev->kvm);
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+			kvm_spapr_tce_release_vfio_group(dev->kvm,
+							 kvg->vfio_group);
+#endif
+			kvm_vfio_group_set_kvm(kvg->vfio_group, NULL);
 			kvm_vfio_group_put_external_user(kvg->vfio_group);
 			kfree(kvg);
 			ret = 0;
 			break;
 		}
 
-		kvm_arch_end_assignment(dev->kvm);
-
 		mutex_unlock(&kv->lock);
 
-#ifdef CONFIG_SPAPR_TCE_IOMMU
-		kvm_spapr_tce_release_vfio_group(dev->kvm, vfio_group);
-#endif
-		kvm_vfio_group_set_kvm(vfio_group, NULL);
-
-		kvm_vfio_group_put_external_user(vfio_group);
+		fdput(f);
 
 		kvm_vfio_update_coherency(dev);
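
The kvm-vfio hunks above are the consumer side of the new vfio_external_group_match_file() export: on KVM_DEV_VFIO_GROUP_DEL the held group is now compared against the passed file directly, rather than acquiring a second external-user reference just to compare group pointers. Below is a minimal sketch of how an in-kernel user of the export might structure the same lookup; the demo_* names and the unlocked list walk are illustrative simplifications, not kvm-vfio's code (which additionally resolves the symbol through symbol_get(), as shown above).

#include <linux/file.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/vfio.h>

/* Hypothetical consumer bookkeeping: one entry per attached vfio group,
 * each holding the external-user reference taken at attach time. */
struct demo_group_entry {
	struct list_head node;
	struct vfio_group *vfio_group;
};

/* Detach the group identified by a userspace fd, if we track it.
 * Locking of group_list is omitted for brevity. */
static int demo_detach_group(struct list_head *group_list, int fd)
{
	struct demo_group_entry *entry;
	struct fd f = fdget(fd);
	int ret = -ENOENT;

	if (!f.file)
		return -EBADF;

	list_for_each_entry(entry, group_list, node) {
		/* Match against the file itself; no extra group reference
		 * is taken or dropped just to do the comparison. */
		if (!vfio_external_group_match_file(entry->vfio_group, f.file))
			continue;

		list_del(&entry->node);
		vfio_group_put_external_user(entry->vfio_group);
		kfree(entry);
		ret = 0;
		break;
	}

	fdput(f);
	return ret;
}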