Commit 2169037d authored by Kirti Wankhede's avatar Kirti Wankhede Committed by Alex Williamson

vfio iommu: Added pin and unpin callback functions to vfio_iommu_driver_ops

Added APIs for pinning and unpinning a set of pages. These call back into the
backend iommu module to actually pin and unpin pages.
Added two new callback functions to struct vfio_iommu_driver_ops. A backend
IOMMU module that supports pinning and unpinning pages for mdev devices
should provide these functions.

Renamed static functions in vfio_type1_iommu.c to resolve conflicts
Signed-off-by: Kirti Wankhede <kwankhede@nvidia.com>
Signed-off-by: Neo Jia <cjia@nvidia.com>
Reviewed-by: Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
parent 32f55d83
...@@ -1799,6 +1799,108 @@ void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset) ...@@ -1799,6 +1799,108 @@ void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset)
} }
EXPORT_SYMBOL_GPL(vfio_info_cap_shift); EXPORT_SYMBOL_GPL(vfio_info_cap_shift);
/*
 * Pin a set of guest PFNs and return their associated host PFNs for local
 * domain only.
 * @dev [in]     : device
 * @user_pfn [in]: array of user/guest PFNs to be pinned.
 * @npage [in]   : count of elements in user_pfn array.  This count should
 *		   not be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
 * @prot [in]    : protection flags
 * @phys_pfn[out]: array of host PFNs
 * Return error or number of pages pinned.
 */
int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
		   int prot, unsigned long *phys_pfn)
{
	struct vfio_container *container;
	struct vfio_group *group;
	struct vfio_iommu_driver *driver;
	int ret;

	if (!dev || !user_pfn || !phys_pfn || !npage)
		return -EINVAL;

	/* Cap the request so a caller cannot ask for an unbounded pin. */
	if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
		return -E2BIG;

	group = vfio_group_get_from_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	/* Take a container-user reference; fails if the group has no container. */
	ret = vfio_group_add_container_user(group);
	if (ret)
		goto err_pin_pages;

	container = group->container;
	/*
	 * group_lock is read-held across the iommu_driver access and the
	 * callback — presumably to keep the driver from being torn down
	 * while we call into it (NOTE(review): confirm against container
	 * teardown path, not visible here).
	 */
	down_read(&container->group_lock);

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->pin_pages))
		ret = driver->ops->pin_pages(container->iommu_data, user_pfn,
					     npage, prot, phys_pfn);
	else
		/* Backend does not support page pinning (no mdev support). */
		ret = -ENOTTY;

	up_read(&container->group_lock);
	/* Drop the container-user reference taken above. */
	vfio_group_try_dissolve_container(group);

err_pin_pages:
	vfio_group_put(group);
	return ret;
}
EXPORT_SYMBOL(vfio_pin_pages);
/*
 * Unpin a set of host PFNs for local domain only.
 * @dev [in]     : device
 * @user_pfn [in]: array of user/guest PFNs to be unpinned.
 * @npage [in]   : count of elements in user_pfn array.  This count should
 *		   not be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
 * Return error or number of pages unpinned.
 */
int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage)
{
	struct vfio_container *container;
	struct vfio_group *group;
	struct vfio_iommu_driver *driver;
	int ret;

	if (!dev || !user_pfn || !npage)
		return -EINVAL;

	/* Same cap as vfio_pin_pages(): bound the size of a single request. */
	if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
		return -E2BIG;

	group = vfio_group_get_from_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	/* Take a container-user reference; fails if the group has no container. */
	ret = vfio_group_add_container_user(group);
	if (ret)
		goto err_unpin_pages;

	container = group->container;
	/*
	 * Read-hold group_lock while dereferencing iommu_driver and calling
	 * into it — presumably guards against concurrent driver release
	 * (NOTE(review): confirm against container teardown path).
	 */
	down_read(&container->group_lock);

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->unpin_pages))
		ret = driver->ops->unpin_pages(container->iommu_data, user_pfn,
					       npage);
	else
		/* Backend does not implement unpin (no mdev support). */
		ret = -ENOTTY;

	up_read(&container->group_lock);
	/* Drop the container-user reference taken above. */
	vfio_group_try_dissolve_container(group);

err_unpin_pages:
	vfio_group_put(group);
	return ret;
}
EXPORT_SYMBOL(vfio_unpin_pages);
/** /**
* Module/class support * Module/class support
*/ */
......
...@@ -259,7 +259,7 @@ static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn) ...@@ -259,7 +259,7 @@ static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn)
* the iommu can only map chunks of consecutive pfns anyway, so get the * the iommu can only map chunks of consecutive pfns anyway, so get the
* first page and all consecutive pages with the same locking. * first page and all consecutive pages with the same locking.
*/ */
static long vfio_pin_pages(unsigned long vaddr, long npage, static long vfio_pin_pages_remote(unsigned long vaddr, long npage,
int prot, unsigned long *pfn_base) int prot, unsigned long *pfn_base)
{ {
unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
...@@ -318,7 +318,7 @@ static long vfio_pin_pages(unsigned long vaddr, long npage, ...@@ -318,7 +318,7 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
return i; return i;
} }
static long vfio_unpin_pages(unsigned long pfn, long npage, static long vfio_unpin_pages_remote(unsigned long pfn, long npage,
int prot, bool do_accounting) int prot, bool do_accounting)
{ {
unsigned long unlocked = 0; unsigned long unlocked = 0;
...@@ -382,7 +382,7 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma) ...@@ -382,7 +382,7 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
if (WARN_ON(!unmapped)) if (WARN_ON(!unmapped))
break; break;
unlocked += vfio_unpin_pages(phys >> PAGE_SHIFT, unlocked += vfio_unpin_pages_remote(phys >> PAGE_SHIFT,
unmapped >> PAGE_SHIFT, unmapped >> PAGE_SHIFT,
dma->prot, false); dma->prot, false);
iova += unmapped; iova += unmapped;
...@@ -613,7 +613,7 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu, ...@@ -613,7 +613,7 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
while (size) { while (size) {
/* Pin a contiguous chunk of memory */ /* Pin a contiguous chunk of memory */
npage = vfio_pin_pages(vaddr + dma->size, npage = vfio_pin_pages_remote(vaddr + dma->size,
size >> PAGE_SHIFT, prot, &pfn); size >> PAGE_SHIFT, prot, &pfn);
if (npage <= 0) { if (npage <= 0) {
WARN_ON(!npage); WARN_ON(!npage);
...@@ -624,7 +624,7 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu, ...@@ -624,7 +624,7 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
/* Map it! */ /* Map it! */
ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage, prot); ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage, prot);
if (ret) { if (ret) {
vfio_unpin_pages(pfn, npage, prot, true); vfio_unpin_pages_remote(pfn, npage, prot, true);
break; break;
} }
......
...@@ -75,7 +75,11 @@ struct vfio_iommu_driver_ops { ...@@ -75,7 +75,11 @@ struct vfio_iommu_driver_ops {
struct iommu_group *group); struct iommu_group *group);
void (*detach_group)(void *iommu_data, void (*detach_group)(void *iommu_data,
struct iommu_group *group); struct iommu_group *group);
int (*pin_pages)(void *iommu_data, unsigned long *user_pfn,
int npage, int prot,
unsigned long *phys_pfn);
int (*unpin_pages)(void *iommu_data,
unsigned long *user_pfn, int npage);
}; };
extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops); extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
...@@ -92,6 +96,13 @@ extern int vfio_external_user_iommu_id(struct vfio_group *group); ...@@ -92,6 +96,13 @@ extern int vfio_external_user_iommu_id(struct vfio_group *group);
extern long vfio_external_check_extension(struct vfio_group *group, extern long vfio_external_check_extension(struct vfio_group *group,
unsigned long arg); unsigned long arg);
#define VFIO_PIN_PAGES_MAX_ENTRIES (PAGE_SIZE/sizeof(unsigned long))
extern int vfio_pin_pages(struct device *dev, unsigned long *user_pfn,
int npage, int prot, unsigned long *phys_pfn);
extern int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn,
int npage);
/* /*
* Sub-module helpers * Sub-module helpers
*/ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment