Commit 819da99a authored by Jason Gunthorpe, committed by Alex Williamson

vfio: Hold a reference to the iommu_group in kvm for SPAPR

SPAPR exists completely outside the normal iommu driver framework; the
groups it creates are fake and exist only to enable VFIO's uAPI.

Thus, it does not need to follow the iommu core rule that the iommu_group
will only be touched while a driver is attached.

Carry a group reference into KVM and have KVM directly manage the lifetime
of this object independently of VFIO. This means KVM no longer relies on
the vfio group file being valid to maintain the group reference.
Tested-by: Matthew Rosato <mjrosato@linux.ibm.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/2-v2-15417f29324e+1c-vfio_group_disassociate_jgg@nvidia.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
parent 4b22ef04
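
As context for the diff below, here is a minimal sketch (not part of the commit) of the caller-side contract the message describes: vfio_file_iommu_group() now takes a reference on the iommu_group before returning it, so the caller owns that reference and must drop it with iommu_group_put() when done, independent of the vfio group file's lifetime.

	/*
	 * Sketch only: the caller now owns a group reference, independent of
	 * the vfio group file that produced it.
	 */
	struct iommu_group *grp = vfio_file_iommu_group(file);

	if (grp) {
		/* ... use grp, e.g. keep it around for SPAPR TCE attach/release ... */
		iommu_group_put(grp);	/* drop the reference taken above */
	}
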
@@ -1552,8 +1552,9 @@ static const struct file_operations vfio_device_fops = {
  * vfio_file_iommu_group - Return the struct iommu_group for the vfio group file
  * @file: VFIO group file
  *
- * The returned iommu_group is valid as long as a ref is held on the file.
- * This function is deprecated, only the SPAPR path in kvm should call it.
+ * The returned iommu_group is valid as long as a ref is held on the file. This
+ * returns a reference on the group. This function is deprecated, only the SPAPR
+ * path in kvm should call it.
  */
 struct iommu_group *vfio_file_iommu_group(struct file *file)
 {
@@ -1564,6 +1565,7 @@ struct iommu_group *vfio_file_iommu_group(struct file *file)
 	if (!vfio_file_is_group(file))
 		return NULL;
+	iommu_group_ref_get(group->iommu_group);
 	return group->iommu_group;
 }
 EXPORT_SYMBOL_GPL(vfio_file_iommu_group);
...
@@ -24,6 +24,9 @@
 struct kvm_vfio_group {
 	struct list_head node;
 	struct file *file;
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+	struct iommu_group *iommu_group;
+#endif
 };
 
 struct kvm_vfio {
@@ -97,12 +100,12 @@ static struct iommu_group *kvm_vfio_file_iommu_group(struct file *file)
 static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,
 					     struct kvm_vfio_group *kvg)
 {
-	struct iommu_group *grp = kvm_vfio_file_iommu_group(kvg->file);
-
-	if (WARN_ON_ONCE(!grp))
+	if (WARN_ON_ONCE(!kvg->iommu_group))
 		return;
 
-	kvm_spapr_tce_release_iommu_group(kvm, grp);
+	kvm_spapr_tce_release_iommu_group(kvm, kvg->iommu_group);
+	iommu_group_put(kvg->iommu_group);
+	kvg->iommu_group = NULL;
 }
 #endif
@@ -252,19 +255,19 @@ static int kvm_vfio_group_set_spapr_tce(struct kvm_device *dev,
 	mutex_lock(&kv->lock);
 
 	list_for_each_entry(kvg, &kv->group_list, node) {
-		struct iommu_group *grp;
-
 		if (kvg->file != f.file)
 			continue;
 
-		grp = kvm_vfio_file_iommu_group(kvg->file);
-		if (WARN_ON_ONCE(!grp)) {
-			ret = -EIO;
-			goto err_fdput;
+		if (!kvg->iommu_group) {
+			kvg->iommu_group = kvm_vfio_file_iommu_group(kvg->file);
+			if (WARN_ON_ONCE(!kvg->iommu_group)) {
+				ret = -EIO;
+				goto err_fdput;
+			}
 		}
 
 		ret = kvm_spapr_tce_attach_iommu_group(dev->kvm, param.tablefd,
-						       grp);
+						       kvg->iommu_group);
 		break;
 	}
...