Commit 296e505b authored by Christoph Hellwig, committed by Alex Williamson

vfio/iommu_type1: remove the "external" domain

The external_domain concept is rather misleading and not actually needed.
Replace it with a list of emulated groups in struct vfio_iommu.
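
To illustrate the resulting shape, here is a minimal userspace C sketch of
the group lookup (the reduced structs, the hand-rolled singly linked lists,
and the find_group() helper are illustrative stand-ins for the kernel's
list_head machinery, not the kernel code itself): IOMMU-backed groups still
hang off their domains, while emulated groups now sit on one flat list in
the container, so a lookup walks the domains first and then that list.

  #include <stdio.h>

  struct iommu_group { int id; };

  struct vfio_iommu_group {
      struct iommu_group *iommu_group;
      struct vfio_iommu_group *next;     /* singly linked, for the sketch */
  };

  struct vfio_domain {
      struct vfio_iommu_group *group_list;
      struct vfio_domain *next;
  };

  struct vfio_iommu {
      struct vfio_domain *domain_list;
      /* after this patch: emulated groups live directly on the container */
      struct vfio_iommu_group *emulated_iommu_groups;
  };

  /* Search the IOMMU-backed domains first, then the flat list of
   * emulated groups that replaces the old external_domain. */
  static struct vfio_iommu_group *
  find_group(struct vfio_iommu *iommu, struct iommu_group *ig)
  {
      struct vfio_domain *d;
      struct vfio_iommu_group *g;

      for (d = iommu->domain_list; d; d = d->next)
          for (g = d->group_list; g; g = g->next)
              if (g->iommu_group == ig)
                  return g;
      for (g = iommu->emulated_iommu_groups; g; g = g->next)
          if (g->iommu_group == ig)
              return g;
      return NULL;
  }

  int main(void)
  {
      struct iommu_group ig = { 1 };
      struct vfio_iommu_group emulated = { &ig, NULL };
      struct vfio_iommu iommu = { NULL, &emulated };

      printf("found: %s\n", find_group(&iommu, &ig) ? "yes" : "no");
      return 0;
  }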
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Link: https://lore.kernel.org/r/20210924155705.4258-15-hch@lst.de
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
parent 65cdbf10
@@ -65,7 +65,6 @@ MODULE_PARM_DESC(dma_entry_limit,
 struct vfio_iommu {
 	struct list_head	domain_list;
 	struct list_head	iova_list;
-	struct vfio_domain	*external_domain; /* domain for external user */
 	struct mutex		lock;
 	struct rb_root		dma_list;
 	struct blocking_notifier_head notifier;
@@ -78,6 +77,7 @@ struct vfio_iommu {
 	bool			nesting;
 	bool			dirty_page_tracking;
 	bool			container_open;
+	struct list_head	emulated_iommu_groups;
 };
 
 struct vfio_domain {
@@ -1892,8 +1892,8 @@ static struct vfio_iommu_group*
 vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
 			    struct iommu_group *iommu_group)
 {
+	struct vfio_iommu_group *group;
 	struct vfio_domain *domain;
-	struct vfio_iommu_group *group = NULL;
 
 	list_for_each_entry(domain, &iommu->domain_list, next) {
 		group = find_iommu_group(domain, iommu_group);
@@ -1901,10 +1901,10 @@ vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
 			return group;
 	}
 
-	if (iommu->external_domain)
-		group = find_iommu_group(iommu->external_domain, iommu_group);
-
-	return group;
+	list_for_each_entry(group, &iommu->emulated_iommu_groups, next)
+		if (group->iommu_group == iommu_group)
+			return group;
+	return NULL;
 }
 
 static bool vfio_iommu_has_sw_msi(struct list_head *group_resv_regions,
@@ -2163,61 +2163,52 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 	struct vfio_iommu_group *group;
 	struct vfio_domain *domain, *d;
 	struct bus_type *bus = NULL;
-	int ret;
 	bool resv_msi, msi_remap;
 	phys_addr_t resv_msi_base = 0;
 	struct iommu_domain_geometry *geo;
 	LIST_HEAD(iova_copy);
 	LIST_HEAD(group_resv_regions);
+	int ret = -EINVAL;
 
 	mutex_lock(&iommu->lock);
 
 	/* Check for duplicates */
-	if (vfio_iommu_find_iommu_group(iommu, iommu_group)) {
-		mutex_unlock(&iommu->lock);
-		return -EINVAL;
-	}
+	if (vfio_iommu_find_iommu_group(iommu, iommu_group))
+		goto out_unlock;
 
+	ret = -ENOMEM;
 	group = kzalloc(sizeof(*group), GFP_KERNEL);
-	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
-	if (!group || !domain) {
-		ret = -ENOMEM;
-		goto out_free;
-	}
+	if (!group)
+		goto out_unlock;
 	group->iommu_group = iommu_group;
 
-	/* Determine bus_type in order to allocate a domain */
-	ret = iommu_group_for_each_dev(iommu_group, &bus, vfio_bus_type);
-	if (ret)
-		goto out_free;
-
 	if (type == VFIO_EMULATED_IOMMU) {
-		if (!iommu->external_domain) {
-			INIT_LIST_HEAD(&domain->group_list);
-			iommu->external_domain = domain;
-		} else {
-			kfree(domain);
-		}
-
-		list_add(&group->next, &iommu->external_domain->group_list);
+		list_add(&group->next, &iommu->emulated_iommu_groups);
 		/*
-		 * Non-iommu backed group cannot dirty memory directly, it can
+		 * An emulated IOMMU group cannot dirty memory directly, it can
 		 * only use interfaces that provide dirty tracking.
 		 * The iommu scope can only be promoted with the addition of a
 		 * dirty tracking group.
 		 */
 		group->pinned_page_dirty_scope = true;
-		mutex_unlock(&iommu->lock);
-
-		return 0;
+		ret = 0;
+		goto out_unlock;
 	}
 
+	/* Determine bus_type in order to allocate a domain */
+	ret = iommu_group_for_each_dev(iommu_group, &bus, vfio_bus_type);
+	if (ret)
+		goto out_free_group;
+
+	ret = -ENOMEM;
+	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+	if (!domain)
+		goto out_free_group;
+
+	ret = -EIO;
 	domain->domain = iommu_domain_alloc(bus);
-	if (!domain->domain) {
-		ret = -EIO;
-		goto out_free;
-	}
+	if (!domain->domain)
		goto out_free_domain;
 
 	if (iommu->nesting) {
 		ret = iommu_enable_nesting(domain->domain);
@@ -2344,9 +2335,11 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 	iommu_domain_free(domain->domain);
 	vfio_iommu_iova_free(&iova_copy);
 	vfio_iommu_resv_free(&group_resv_regions);
-out_free:
+out_free_domain:
 	kfree(domain);
+out_free_group:
 	kfree(group);
+out_unlock:
 	mutex_unlock(&iommu->lock);
 	return ret;
 }
@@ -2471,25 +2464,19 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
 	LIST_HEAD(iova_copy);
 
 	mutex_lock(&iommu->lock);
 
-	if (iommu->external_domain) {
-		group = find_iommu_group(iommu->external_domain, iommu_group);
-		if (group) {
-			update_dirty_scope = !group->pinned_page_dirty_scope;
-			list_del(&group->next);
-			kfree(group);
-
-			if (list_empty(&iommu->external_domain->group_list)) {
-				if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)) {
-					WARN_ON(iommu->notifier.head);
-					vfio_iommu_unmap_unpin_all(iommu);
-				}
-				kfree(iommu->external_domain);
-				iommu->external_domain = NULL;
-			}
-			goto detach_group_done;
-		}
-	}
+	list_for_each_entry(group, &iommu->emulated_iommu_groups, next) {
+		if (group->iommu_group != iommu_group)
+			continue;
+		update_dirty_scope = !group->pinned_page_dirty_scope;
+		list_del(&group->next);
+		kfree(group);
+
+		if (list_empty(&iommu->emulated_iommu_groups) &&
+		    !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)) {
+			WARN_ON(iommu->notifier.head);
+			vfio_iommu_unmap_unpin_all(iommu);
+		}
+		goto detach_group_done;
+	}
 
 	/*
@@ -2517,7 +2504,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
 	 */
 	if (list_empty(&domain->group_list)) {
 		if (list_is_singular(&iommu->domain_list)) {
-			if (!iommu->external_domain) {
+			if (list_empty(&iommu->emulated_iommu_groups)) {
 				WARN_ON(iommu->notifier.head);
 				vfio_iommu_unmap_unpin_all(iommu);
 			} else {
@@ -2582,41 +2569,42 @@ static void *vfio_iommu_type1_open(unsigned long arg)
 	BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
 	init_waitqueue_head(&iommu->vaddr_wait);
 	iommu->pgsize_bitmap = PAGE_MASK;
+	INIT_LIST_HEAD(&iommu->emulated_iommu_groups);
 
 	return iommu;
 }
 
-static void vfio_release_domain(struct vfio_domain *domain, bool external)
+static void vfio_release_domain(struct vfio_domain *domain)
 {
 	struct vfio_iommu_group *group, *group_tmp;
 
 	list_for_each_entry_safe(group, group_tmp,
 				 &domain->group_list, next) {
-		if (!external)
-			iommu_detach_group(domain->domain, group->iommu_group);
+		iommu_detach_group(domain->domain, group->iommu_group);
 		list_del(&group->next);
 		kfree(group);
 	}
 
-	if (!external)
-		iommu_domain_free(domain->domain);
+	iommu_domain_free(domain->domain);
 }
 
 static void vfio_iommu_type1_release(void *iommu_data)
 {
 	struct vfio_iommu *iommu = iommu_data;
 	struct vfio_domain *domain, *domain_tmp;
+	struct vfio_iommu_group *group, *next_group;
 
-	if (iommu->external_domain) {
-		vfio_release_domain(iommu->external_domain, true);
-		kfree(iommu->external_domain);
+	list_for_each_entry_safe(group, next_group,
+			&iommu->emulated_iommu_groups, next) {
+		list_del(&group->next);
+		kfree(group);
 	}
 
 	vfio_iommu_unmap_unpin_all(iommu);
 
 	list_for_each_entry_safe(domain, domain_tmp,
 				 &iommu->domain_list, next) {
-		vfio_release_domain(domain, false);
+		vfio_release_domain(domain);
 		list_del(&domain->next);
 		kfree(domain);
 	}
...