Commit c67e6279 authored by Christian König

drm/prime: split array import functions v4

Mapping the imported pages of a DMA-buf into a userspace process
doesn't work as expected.

But we keep getting recurring requests for this approach, so split the
function in two and document that dma_buf_mmap() needs to be used
instead.

v2: split it into two functions
v3: rebased on latest changes
v4: update commit message a bit
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/403838/
parent 18f7608a
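
Since the commit message points at dma_buf_mmap() as the supported way to get
imported pages into userspace, here is a rough sketch of what that looks like
from a driver's GEM mmap path. This is illustrative only and not part of the
patch; my_driver_gem_object_mmap() and my_driver_mmap_native() are hypothetical
names.

#include <drm/drm_gem.h>
#include <linux/dma-buf.h>

static int my_driver_gem_object_mmap(struct drm_gem_object *obj,
                                     struct vm_area_struct *vma)
{
        /*
         * Imported DMA-buf: forward the request to the exporter via
         * dma_buf_mmap() instead of touching the imported page array.
         */
        if (obj->import_attach)
                return dma_buf_mmap(obj->import_attach->dmabuf, vma, 0);

        /* Natively allocated object: map it ourselves as usual. */
        return my_driver_mmap_native(obj, vma);
}
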
@@ -918,8 +918,8 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
 		goto release_sg;
 
 	/* convert SG to linear array of pages and dma addresses */
-	drm_prime_sg_to_page_addr_arrays(ttm->sg, NULL, gtt->ttm.dma_address,
-					 ttm->num_pages);
+	drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
+				       ttm->num_pages);
 
 	return 0;
@@ -1264,9 +1264,8 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
 			ttm->sg = sgt;
 		}
 
-		drm_prime_sg_to_page_addr_arrays(ttm->sg, NULL,
-						 gtt->ttm.dma_address,
-						 ttm->num_pages);
+		drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
+					       ttm->num_pages);
 		return 0;
 	}
......
@@ -978,44 +978,58 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
 EXPORT_SYMBOL(drm_gem_prime_import);
 
 /**
- * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
+ * drm_prime_sg_to_page_array - convert an sg table into a page array
  * @sgt: scatter-gather table to convert
- * @pages: optional array of page pointers to store the page array in
- * @addrs: optional array to store the dma bus address of each page
- * @max_entries: size of both the passed-in arrays
+ * @pages: array of page pointers to store the pages in
+ * @max_entries: size of the passed-in array
  *
- * Exports an sg table into an array of pages and addresses. This is currently
- * required by the TTM driver in order to do correct fault handling.
+ * Exports an sg table into an array of pages.
  *
- * Drivers can use this in their &drm_driver.gem_prime_import_sg_table
- * implementation.
+ * This function is deprecated and its use is strongly discouraged.
+ * The page array is only useful for page faults, and those can corrupt fields
+ * in the struct page if they are not handled by the exporting driver.
  */
-int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
-				     dma_addr_t *addrs, int max_entries)
+int __deprecated drm_prime_sg_to_page_array(struct sg_table *sgt,
+					    struct page **pages,
+					    int max_entries)
 {
-	struct sg_dma_page_iter dma_iter;
 	struct sg_page_iter page_iter;
 	struct page **p = pages;
-	dma_addr_t *a = addrs;
 
-	if (pages) {
-		for_each_sgtable_page(sgt, &page_iter, 0) {
-			if (WARN_ON(p - pages >= max_entries))
-				return -1;
-			*p++ = sg_page_iter_page(&page_iter);
-		}
+	for_each_sgtable_page(sgt, &page_iter, 0) {
+		if (WARN_ON(p - pages >= max_entries))
+			return -1;
+		*p++ = sg_page_iter_page(&page_iter);
 	}
-	if (addrs) {
-		for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
-			if (WARN_ON(a - addrs >= max_entries))
-				return -1;
-			*a++ = sg_page_iter_dma_address(&dma_iter);
-		}
+	return 0;
+}
+EXPORT_SYMBOL(drm_prime_sg_to_page_array);
+
+/**
+ * drm_prime_sg_to_dma_addr_array - convert an sg table into a dma addr array
+ * @sgt: scatter-gather table to convert
+ * @addrs: array to store the dma bus address of each page
+ * @max_entries: size of the passed-in array
+ *
+ * Exports an sg table into an array of addresses.
+ *
+ * Drivers should use this in their &drm_driver.gem_prime_import_sg_table
+ * implementation.
+ */
+int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs,
+				   int max_entries)
+{
+	struct sg_dma_page_iter dma_iter;
+	dma_addr_t *a = addrs;
+
+	for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
+		if (WARN_ON(a - addrs >= max_entries))
+			return -1;
+		*a++ = sg_page_iter_dma_address(&dma_iter);
 	}
-
 	return 0;
 }
-EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
+EXPORT_SYMBOL(drm_prime_sg_to_dma_addr_array);
 
 /**
  * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
......
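
For illustration, a minimal sketch of how an import path might consume the new
helper; the my_bo structure and my_bo_import_dma_addrs() are hypothetical, and
a real driver would call something like this from its
&drm_driver.gem_prime_import_sg_table implementation. The deprecated
drm_prime_sg_to_page_array() keeps the same calling pattern for the remaining
page-array users converted below.

#include <drm/drm_prime.h>
#include <linux/mm.h>

struct my_bo {
        dma_addr_t *dma_addrs;          /* one DMA address per page */
        unsigned int num_pages;
};

static int my_bo_import_dma_addrs(struct my_bo *bo, struct sg_table *sgt)
{
        int ret;

        bo->dma_addrs = kvmalloc_array(bo->num_pages, sizeof(dma_addr_t),
                                       GFP_KERNEL);
        if (!bo->dma_addrs)
                return -ENOMEM;

        /* Flatten the sg table into one DMA address per page. */
        ret = drm_prime_sg_to_dma_addr_array(sgt, bo->dma_addrs,
                                             bo->num_pages);
        if (ret) {
                kvfree(bo->dma_addrs);
                bo->dma_addrs = NULL;
        }
        return ret;
}
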
@@ -135,8 +135,7 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
 		goto fail;
 	}
 
-	ret = drm_prime_sg_to_page_addr_arrays(sgt, etnaviv_obj->pages,
-					       NULL, npages);
+	ret = drm_prime_sg_to_page_array(sgt, etnaviv_obj->pages, npages);
 	if (ret)
 		goto fail;
......
@@ -260,7 +260,7 @@ int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
 		return -ENOMEM;
 	}
 
-	drm_prime_sg_to_page_addr_arrays(sgt, mtk_gem->pages, NULL, npages);
+	drm_prime_sg_to_page_array(sgt, mtk_gem->pages, npages);
 
 	mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
 			       pgprot_writecombine(PAGE_KERNEL));
......
@@ -1180,7 +1180,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 		goto fail;
 	}
 
-	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
+	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
 	if (ret) {
 		mutex_unlock(&msm_obj->lock);
 		goto fail;
......
@@ -1235,9 +1235,8 @@ nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
 		return 0;
 
 	if (slave && ttm->sg) {
-		drm_prime_sg_to_page_addr_arrays(ttm->sg, NULL,
-						 ttm_dma->dma_address,
-						 ttm->num_pages);
+		drm_prime_sg_to_dma_addr_array(ttm->sg, ttm_dma->dma_address,
+					       ttm->num_pages);
 		return 0;
 	}
......
@@ -1324,8 +1324,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
 	}
 
 	omap_obj->pages = pages;
-	ret = drm_prime_sg_to_page_addr_arrays(sgt, pages, NULL,
-					       npages);
+	ret = drm_prime_sg_to_page_array(sgt, pages, npages);
 	if (ret) {
 		omap_gem_free_object(obj);
 		obj = ERR_PTR(-ENOMEM);
......
@@ -395,8 +395,8 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *
 	if (r)
 		goto release_sg;
 
-	drm_prime_sg_to_page_addr_arrays(ttm->sg, NULL, gtt->ttm.dma_address,
-					 ttm->num_pages);
+	drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
+				       ttm->num_pages);
 
 	return 0;
@@ -574,9 +574,8 @@ static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev,
 	}
 
 	if (slave && ttm->sg) {
-		drm_prime_sg_to_page_addr_arrays(ttm->sg, NULL,
-						 gtt->ttm.dma_address,
-						 ttm->num_pages);
+		drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
+					       ttm->num_pages);
 		return 0;
 	}
......
@@ -356,8 +356,7 @@ static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
 	}
 
 	obj->pages_pin_count++; /* perma-pinned */
-	drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL,
-					 npages);
+	drm_prime_sg_to_page_array(obj->table, obj->pages, npages);
 	return &obj->base;
 }
......
@@ -220,8 +220,8 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
 	xen_obj->sgt_imported = sgt;
 
-	ret = drm_prime_sg_to_page_addr_arrays(sgt, xen_obj->pages,
-					       NULL, xen_obj->num_pages);
+	ret = drm_prime_sg_to_page_array(sgt, xen_obj->pages,
+					 xen_obj->num_pages);
 	if (ret < 0)
 		return ERR_PTR(ret);
......
@@ -105,8 +105,9 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
 
 void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
 
-int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
-				     dma_addr_t *addrs, int max_pages);
+int drm_prime_sg_to_page_array(struct sg_table *sgt, struct page **pages,
+			       int max_pages);
+int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs,
+				   int max_pages);
 
 #endif /* __DRM_PRIME_H__ */