Commit ea3fc04d authored by Jason Gunthorpe, committed by Alex Williamson

vfio-pci: Re-indent what was vfio_pci_core_ioctl()

Done mechanically with:

 $ git clang-format-14 -i --lines 675:1210 drivers/vfio/pci/vfio_pci_core.c

And by manually reflowing the multi-line comments clang-format doesn't fix.
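
For context, clang-format can apply the same line-range restriction directly; a minimal sketch using this commit's file path (assuming a clang-format-14 binary is on PATH):

 $ clang-format-14 -i --lines=675:1210 drivers/vfio/pci/vfio_pci_core.c

Here -i rewrites the file in place and --lines=<start>:<end> limits reformatting to that range, with style picked up automatically from the kernel's .clang-format file.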
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/3-v2-0f9e632d54fb+d6-vfio_ioctl_split_jgg@nvidia.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
parent 2ecf3b58
@@ -693,309 +693,300 @@ static int vfio_pci_ioctl_get_info(struct vfio_pci_core_device *vdev,
				   void __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_device_info, num_irqs);
	struct vfio_device_info info;
	struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
	unsigned long capsz;
	int ret;

	/* For backward compatibility, cannot require this */
	capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset);

	if (copy_from_user(&info, (void __user *)arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	if (info.argsz >= capsz) {
		minsz = capsz;
		info.cap_offset = 0;
	}

	info.flags = VFIO_DEVICE_FLAGS_PCI;

	if (vdev->reset_works)
		info.flags |= VFIO_DEVICE_FLAGS_RESET;

	info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
	info.num_irqs = VFIO_PCI_NUM_IRQS;

	ret = vfio_pci_info_zdev_add_caps(vdev, &caps);
	if (ret && ret != -ENODEV) {
		pci_warn(vdev->pdev,
			 "Failed to setup zPCI info capabilities\n");
		return ret;
	}

	if (caps.size) {
		info.flags |= VFIO_DEVICE_FLAGS_CAPS;
		if (info.argsz < sizeof(info) + caps.size) {
			info.argsz = sizeof(info) + caps.size;
		} else {
			vfio_info_cap_shift(&caps, sizeof(info));
			if (copy_to_user((void __user *)arg + sizeof(info),
					 caps.buf, caps.size)) {
				kfree(caps.buf);
				return -EFAULT;
			}
			info.cap_offset = sizeof(info);
		}
		kfree(caps.buf);
	}

	return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
}

static int vfio_pci_ioctl_get_region_info(struct vfio_pci_core_device *vdev,
					  void __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_region_info, offset);
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_region_info info;
	struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
	int i, ret;

	if (copy_from_user(&info, (void __user *)arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	switch (info.index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
		info.size = pdev->cfg_size;
		info.flags = VFIO_REGION_INFO_FLAG_READ |
			     VFIO_REGION_INFO_FLAG_WRITE;
		break;
	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
		info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
		info.size = pci_resource_len(pdev, info.index);
		if (!info.size) {
			info.flags = 0;
			break;
		}

		info.flags = VFIO_REGION_INFO_FLAG_READ |
			     VFIO_REGION_INFO_FLAG_WRITE;
		if (vdev->bar_mmap_supported[info.index]) {
			info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
			if (info.index == vdev->msix_bar) {
				ret = msix_mmappable_cap(vdev, &caps);
				if (ret)
					return ret;
			}
		}

		break;
	case VFIO_PCI_ROM_REGION_INDEX: {
		void __iomem *io;
		size_t size;
		u16 cmd;

		info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
		info.flags = 0;

		/* Report the BAR size, not the ROM size */
		info.size = pci_resource_len(pdev, info.index);
		if (!info.size) {
			/* Shadow ROMs appear as PCI option ROMs */
			if (pdev->resource[PCI_ROM_RESOURCE].flags &
			    IORESOURCE_ROM_SHADOW)
				info.size = 0x20000;
			else
				break;
		}

		/*
		 * Is it really there? Enable memory decode for implicit access
		 * in pci_map_rom().
		 */
		cmd = vfio_pci_memory_lock_and_enable(vdev);
		io = pci_map_rom(pdev, &size);
		if (io) {
			info.flags = VFIO_REGION_INFO_FLAG_READ;
			pci_unmap_rom(pdev, io);
		} else {
			info.size = 0;
		}
		vfio_pci_memory_unlock_and_restore(vdev, cmd);

		break;
	}
	case VFIO_PCI_VGA_REGION_INDEX:
		if (!vdev->has_vga)
			return -EINVAL;

		info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
		info.size = 0xc0000;
		info.flags = VFIO_REGION_INFO_FLAG_READ |
			     VFIO_REGION_INFO_FLAG_WRITE;

		break;
	default: {
		struct vfio_region_info_cap_type cap_type = {
			.header.id = VFIO_REGION_INFO_CAP_TYPE,
			.header.version = 1
		};

		if (info.index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
			return -EINVAL;
		info.index = array_index_nospec(
			info.index, VFIO_PCI_NUM_REGIONS + vdev->num_regions);

		i = info.index - VFIO_PCI_NUM_REGIONS;

		info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
		info.size = vdev->region[i].size;
		info.flags = vdev->region[i].flags;

		cap_type.type = vdev->region[i].type;
		cap_type.subtype = vdev->region[i].subtype;

		ret = vfio_info_add_capability(&caps, &cap_type.header,
					       sizeof(cap_type));
		if (ret)
			return ret;

		if (vdev->region[i].ops->add_capability) {
			ret = vdev->region[i].ops->add_capability(
				vdev, &vdev->region[i], &caps);
			if (ret)
				return ret;
		}
	}
	}

	if (caps.size) {
		info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
		if (info.argsz < sizeof(info) + caps.size) {
			info.argsz = sizeof(info) + caps.size;
			info.cap_offset = 0;
		} else {
			vfio_info_cap_shift(&caps, sizeof(info));
			if (copy_to_user((void __user *)arg + sizeof(info),
					 caps.buf, caps.size)) {
				kfree(caps.buf);
				return -EFAULT;
			}
			info.cap_offset = sizeof(info);
		}
		kfree(caps.buf);
	}

	return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
}

static int vfio_pci_ioctl_get_irq_info(struct vfio_pci_core_device *vdev,
				       void __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_irq_info, count);
	struct vfio_irq_info info;

	if (copy_from_user(&info, (void __user *)arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
		return -EINVAL;

	switch (info.index) {
	case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
	case VFIO_PCI_REQ_IRQ_INDEX:
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		if (pci_is_pcie(vdev->pdev))
			break;
		fallthrough;
	default:
		return -EINVAL;
	}

	info.flags = VFIO_IRQ_INFO_EVENTFD;

	info.count = vfio_pci_get_irq_count(vdev, info.index);

	if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
		info.flags |=
			(VFIO_IRQ_INFO_MASKABLE | VFIO_IRQ_INFO_AUTOMASKED);
	else
		info.flags |= VFIO_IRQ_INFO_NORESIZE;

	return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
}

static int vfio_pci_ioctl_set_irqs(struct vfio_pci_core_device *vdev,
				   void __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_irq_set, count);
	struct vfio_irq_set hdr;
	u8 *data = NULL;
	int max, ret = 0;
	size_t data_size = 0;

	if (copy_from_user(&hdr, (void __user *)arg, minsz))
		return -EFAULT;

	max = vfio_pci_get_irq_count(vdev, hdr.index);

	ret = vfio_set_irqs_validate_and_prepare(&hdr, max, VFIO_PCI_NUM_IRQS,
						 &data_size);
	if (ret)
		return ret;

	if (data_size) {
		data = memdup_user((void __user *)(arg + minsz), data_size);
		if (IS_ERR(data))
			return PTR_ERR(data);
	}

	mutex_lock(&vdev->igate);

	ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index, hdr.start,
				      hdr.count, data);

	mutex_unlock(&vdev->igate);
	kfree(data);

	return ret;
}

static int vfio_pci_ioctl_reset(struct vfio_pci_core_device *vdev,
				void __user *arg)
{
	int ret;

	if (!vdev->reset_works)
		return -EINVAL;

	vfio_pci_zap_and_down_write_memory_lock(vdev);

	/*
	 * This function can be invoked while the power state is non-D0. If
	 * pci_try_reset_function() has been called while the power state is
	 * non-D0, then pci_try_reset_function() will internally set the power
	 * state to D0 without vfio driver involvement. For the devices which
	 * have NoSoftRst-, the reset function can cause the PCI config space
	 * reset without restoring the original state (saved locally in
	 * 'vdev->pm_save').
	 */
	vfio_pci_set_power_state(vdev, PCI_D0);

	ret = pci_try_reset_function(vdev->pdev);
	up_write(&vdev->memory_lock);

	return ret;
}

static int
@@ -1004,196 +995,192 @@ vfio_pci_ioctl_get_pci_hot_reset_info(struct vfio_pci_core_device *vdev,
{
	unsigned long minsz =
		offsetofend(struct vfio_pci_hot_reset_info, count);
	struct vfio_pci_hot_reset_info hdr;
	struct vfio_pci_fill_info fill = { 0 };
	struct vfio_pci_dependent_device *devices = NULL;
	bool slot = false;
	int ret = 0;

	if (copy_from_user(&hdr, (void __user *)arg, minsz))
		return -EFAULT;

	if (hdr.argsz < minsz)
		return -EINVAL;

	hdr.flags = 0;

	/* Can we do a slot or bus reset or neither? */
	if (!pci_probe_reset_slot(vdev->pdev->slot))
		slot = true;
	else if (pci_probe_reset_bus(vdev->pdev->bus))
		return -ENODEV;

	/* How many devices are affected? */
	ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
					    &fill.max, slot);
	if (ret)
		return ret;

	WARN_ON(!fill.max); /* Should always be at least one */

	/*
	 * If there's enough space, fill it now, otherwise return -ENOSPC and
	 * the number of devices affected.
	 */
	if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
		ret = -ENOSPC;
		hdr.count = fill.max;
		goto reset_info_exit;
	}

	devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	fill.devices = devices;

	ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_fill_devs,
					    &fill, slot);

	/*
	 * If a device was removed between counting and filling, we may come up
	 * short of fill.max. If a device was added, we'll have a return of
	 * -EAGAIN above.
	 */
	if (!ret)
		hdr.count = fill.cur;

reset_info_exit:
	if (copy_to_user((void __user *)arg, &hdr, minsz))
		ret = -EFAULT;

	if (!ret) {
		if (copy_to_user((void __user *)(arg + minsz), devices,
				 hdr.count * sizeof(*devices)))
			ret = -EFAULT;
	}

	kfree(devices);
	return ret;
}

static int vfio_pci_ioctl_pci_hot_reset(struct vfio_pci_core_device *vdev,
					void __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_pci_hot_reset, count);
	struct vfio_pci_hot_reset hdr;
	int32_t *group_fds;
	struct file **files;
	struct vfio_pci_group_info info;
	bool slot = false;
	int file_idx, count = 0, ret = 0;

	if (copy_from_user(&hdr, (void __user *)arg, minsz))
		return -EFAULT;

	if (hdr.argsz < minsz || hdr.flags)
		return -EINVAL;

	/* Can we do a slot or bus reset or neither? */
	if (!pci_probe_reset_slot(vdev->pdev->slot))
		slot = true;
	else if (pci_probe_reset_bus(vdev->pdev->bus))
		return -ENODEV;

	/*
	 * We can't let userspace give us an arbitrarily large buffer to copy,
	 * so verify how many we think there could be. Note groups can have
	 * multiple devices so one group per device is the max.
	 */
	ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
					    &count, slot);
	if (ret)
		return ret;

	/* Somewhere between 1 and count is OK */
	if (!hdr.count || hdr.count > count)
		return -EINVAL;

	group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
	files = kcalloc(hdr.count, sizeof(*files), GFP_KERNEL);
	if (!group_fds || !files) {
		kfree(group_fds);
		kfree(files);
		return -ENOMEM;
	}

	if (copy_from_user(group_fds, (void __user *)(arg + minsz),
			   hdr.count * sizeof(*group_fds))) {
		kfree(group_fds);
		kfree(files);
		return -EFAULT;
	}

	/*
	 * For each group_fd, get the group through the vfio external user
	 * interface and store the group and iommu ID. This ensures the group
	 * is held across the reset.
	 */
	for (file_idx = 0; file_idx < hdr.count; file_idx++) {
		struct file *file = fget(group_fds[file_idx]);

		if (!file) {
			ret = -EBADF;
			break;
		}

		/* Ensure the FD is a vfio group FD.*/
		if (!vfio_file_iommu_group(file)) {
			fput(file);
			ret = -EINVAL;
			break;
		}

		files[file_idx] = file;
	}

	kfree(group_fds);

	/* release reference to groups on error */
	if (ret)
		goto hot_reset_release;

	info.count = hdr.count;
	info.files = files;

	ret = vfio_pci_dev_set_hot_reset(vdev->vdev.dev_set, &info);

hot_reset_release:
	for (file_idx--; file_idx >= 0; file_idx--)
		fput(files[file_idx]);

	kfree(files);
	return ret;
}

static int vfio_pci_ioctl_ioeventfd(struct vfio_pci_core_device *vdev,
				    void __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_device_ioeventfd, fd);
	struct vfio_device_ioeventfd ioeventfd;
	int count;

	if (copy_from_user(&ioeventfd, (void __user *)arg, minsz))
		return -EFAULT;

	if (ioeventfd.argsz < minsz)
		return -EINVAL;

	if (ioeventfd.flags & ~VFIO_DEVICE_IOEVENTFD_SIZE_MASK)
		return -EINVAL;

	count = ioeventfd.flags & VFIO_DEVICE_IOEVENTFD_SIZE_MASK;

	if (hweight8(count) != 1 || ioeventfd.fd < -1)
		return -EINVAL;

	return vfio_pci_ioeventfd(vdev, ioeventfd.offset, ioeventfd.data, count,
				  ioeventfd.fd);
}

long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
...