Commit 63540f01 authored by Jan Kara, committed by Mauro Carvalho Chehab

[media] drm/exynos: Convert g2d_userptr_get_dma_addr() to use get_vaddr_frames()

Convert g2d_userptr_get_dma_addr() to pin pages using get_vaddr_frames().
This removes the knowledge about vmas and mmap_sem locking from exynos
driver. Also it fixes a problem that the function has been mapping user
provided address without holding mmap_sem.
Acked-by: Inki Dae <inki.dae@samsung.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
parent 6690c8c7
...@@ -77,6 +77,7 @@ config DRM_EXYNOS_VIDI ...@@ -77,6 +77,7 @@ config DRM_EXYNOS_VIDI
config DRM_EXYNOS_G2D config DRM_EXYNOS_G2D
bool "Exynos DRM G2D" bool "Exynos DRM G2D"
depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D
select FRAME_VECTOR
help help
Choose this option if you want to use Exynos G2D for DRM. Choose this option if you want to use Exynos G2D for DRM.
......
...@@ -190,10 +190,8 @@ struct g2d_cmdlist_userptr { ...@@ -190,10 +190,8 @@ struct g2d_cmdlist_userptr {
dma_addr_t dma_addr; dma_addr_t dma_addr;
unsigned long userptr; unsigned long userptr;
unsigned long size; unsigned long size;
struct page **pages; struct frame_vector *vec;
unsigned int npages;
struct sg_table *sgt; struct sg_table *sgt;
struct vm_area_struct *vma;
atomic_t refcount; atomic_t refcount;
bool in_pool; bool in_pool;
bool out_of_list; bool out_of_list;
...@@ -363,6 +361,7 @@ static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev, ...@@ -363,6 +361,7 @@ static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
{ {
struct g2d_cmdlist_userptr *g2d_userptr = struct g2d_cmdlist_userptr *g2d_userptr =
(struct g2d_cmdlist_userptr *)obj; (struct g2d_cmdlist_userptr *)obj;
struct page **pages;
if (!obj) if (!obj)
return; return;
...@@ -382,19 +381,21 @@ static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev, ...@@ -382,19 +381,21 @@ static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt, exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
exynos_gem_put_pages_to_userptr(g2d_userptr->pages, pages = frame_vector_pages(g2d_userptr->vec);
g2d_userptr->npages, if (!IS_ERR(pages)) {
g2d_userptr->vma); int i;
exynos_gem_put_vma(g2d_userptr->vma); for (i = 0; i < frame_vector_count(g2d_userptr->vec); i++)
set_page_dirty_lock(pages[i]);
}
put_vaddr_frames(g2d_userptr->vec);
frame_vector_destroy(g2d_userptr->vec);
if (!g2d_userptr->out_of_list) if (!g2d_userptr->out_of_list)
list_del_init(&g2d_userptr->list); list_del_init(&g2d_userptr->list);
sg_free_table(g2d_userptr->sgt); sg_free_table(g2d_userptr->sgt);
kfree(g2d_userptr->sgt); kfree(g2d_userptr->sgt);
drm_free_large(g2d_userptr->pages);
kfree(g2d_userptr); kfree(g2d_userptr);
} }
...@@ -408,9 +409,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev, ...@@ -408,9 +409,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv; struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
struct g2d_cmdlist_userptr *g2d_userptr; struct g2d_cmdlist_userptr *g2d_userptr;
struct g2d_data *g2d; struct g2d_data *g2d;
struct page **pages;
struct sg_table *sgt; struct sg_table *sgt;
struct vm_area_struct *vma;
unsigned long start, end; unsigned long start, end;
unsigned int npages, offset; unsigned int npages, offset;
int ret; int ret;
...@@ -456,65 +455,40 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev, ...@@ -456,65 +455,40 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
atomic_set(&g2d_userptr->refcount, 1); atomic_set(&g2d_userptr->refcount, 1);
g2d_userptr->size = size;
start = userptr & PAGE_MASK; start = userptr & PAGE_MASK;
offset = userptr & ~PAGE_MASK; offset = userptr & ~PAGE_MASK;
end = PAGE_ALIGN(userptr + size); end = PAGE_ALIGN(userptr + size);
npages = (end - start) >> PAGE_SHIFT; npages = (end - start) >> PAGE_SHIFT;
g2d_userptr->npages = npages; g2d_userptr->vec = frame_vector_create(npages);
if (!g2d_userptr->vec) {
pages = drm_calloc_large(npages, sizeof(struct page *));
if (!pages) {
DRM_ERROR("failed to allocate pages.\n");
ret = -ENOMEM; ret = -ENOMEM;
goto err_free; goto err_free;
} }
down_read(&current->mm->mmap_sem); ret = get_vaddr_frames(start, npages, true, true, g2d_userptr->vec);
vma = find_vma(current->mm, userptr); if (ret != npages) {
if (!vma) { DRM_ERROR("failed to get user pages from userptr.\n");
up_read(&current->mm->mmap_sem); if (ret < 0)
DRM_ERROR("failed to get vm region.\n"); goto err_destroy_framevec;
ret = -EFAULT; ret = -EFAULT;
goto err_free_pages; goto err_put_framevec;
} }
if (frame_vector_to_pages(g2d_userptr->vec) < 0) {
if (vma->vm_end < userptr + size) {
up_read(&current->mm->mmap_sem);
DRM_ERROR("vma is too small.\n");
ret = -EFAULT; ret = -EFAULT;
goto err_free_pages; goto err_put_framevec;
} }
g2d_userptr->vma = exynos_gem_get_vma(vma);
if (!g2d_userptr->vma) {
up_read(&current->mm->mmap_sem);
DRM_ERROR("failed to copy vma.\n");
ret = -ENOMEM;
goto err_free_pages;
}
g2d_userptr->size = size;
ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
npages, pages, vma);
if (ret < 0) {
up_read(&current->mm->mmap_sem);
DRM_ERROR("failed to get user pages from userptr.\n");
goto err_put_vma;
}
up_read(&current->mm->mmap_sem);
g2d_userptr->pages = pages;
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt) { if (!sgt) {
ret = -ENOMEM; ret = -ENOMEM;
goto err_free_userptr; goto err_put_framevec;
} }
ret = sg_alloc_table_from_pages(sgt, pages, npages, offset, ret = sg_alloc_table_from_pages(sgt,
size, GFP_KERNEL); frame_vector_pages(g2d_userptr->vec),
npages, offset, size, GFP_KERNEL);
if (ret < 0) { if (ret < 0) {
DRM_ERROR("failed to get sgt from pages.\n"); DRM_ERROR("failed to get sgt from pages.\n");
goto err_free_sgt; goto err_free_sgt;
...@@ -549,16 +523,11 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev, ...@@ -549,16 +523,11 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
err_free_sgt: err_free_sgt:
kfree(sgt); kfree(sgt);
err_free_userptr: err_put_framevec:
exynos_gem_put_pages_to_userptr(g2d_userptr->pages, put_vaddr_frames(g2d_userptr->vec);
g2d_userptr->npages,
g2d_userptr->vma);
err_put_vma:
exynos_gem_put_vma(g2d_userptr->vma);
err_free_pages: err_destroy_framevec:
drm_free_large(pages); frame_vector_destroy(g2d_userptr->vec);
err_free: err_free:
kfree(g2d_userptr); kfree(g2d_userptr);
......
...@@ -378,103 +378,6 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, ...@@ -378,103 +378,6 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
return 0; return 0;
} }
/*
 * Duplicate a VMA descriptor so it can be kept past the caller's
 * mmap_sem critical section.  The copy takes the references it
 * logically owns (vm_ops->open and the backing file, if any) and is
 * detached from any mm; release it with exynos_gem_put_vma().
 * Returns the copy, or NULL on allocation failure.
 */
struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *copy = kmalloc(sizeof(*copy), GFP_KERNEL);

	if (!copy)
		return NULL;

	/* Pin whatever the original region references before duplicating. */
	if (vma->vm_ops && vma->vm_ops->open)
		vma->vm_ops->open(vma);

	if (vma->vm_file)
		get_file(vma->vm_file);

	memcpy(copy, vma, sizeof(*vma));

	/* The duplicate belongs to no mm and sits on no vma list. */
	copy->vm_mm = NULL;
	copy->vm_next = NULL;
	copy->vm_prev = NULL;

	return copy;
}
void exynos_gem_put_vma(struct vm_area_struct *vma)
{
if (!vma)
return;
if (vma->vm_ops && vma->vm_ops->close)
vma->vm_ops->close(vma);
if (vma->vm_file)
fput(vma->vm_file);
kfree(vma);
}
/*
 * Pin the npages user pages backing [start, start + npages * PAGE_SIZE)
 * into the caller-supplied 'pages' array.
 *
 * For a VM_PFNMAP (I/O) mapping the pages are resolved pfn-by-pfn via
 * follow_pfn() and no page reference is taken; otherwise each page is
 * pinned with get_user_pages() and must later be released through
 * exynos_gem_put_pages_to_userptr().
 *
 * NOTE(review): follow_pfn() and get_user_pages() require the caller to
 * hold current->mm->mmap_sem for read — verify at every call site.
 *
 * Returns 0 on success or a negative errno on failure (no pages remain
 * pinned on failure).
 */
int exynos_gem_get_pages_from_userptr(unsigned long start,
						unsigned int npages,
						struct page **pages,
						struct vm_area_struct *vma)
{
	int get_npages;

	/* the memory region mmaped with VM_PFNMAP. */
	if (vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
			unsigned long pfn;
			int ret = follow_pfn(vma, start, &pfn);

			if (ret)
				return ret;

			pages[i] = pfn_to_page(pfn);
		}

		/*
		 * The loop either translated all npages entries or already
		 * returned the follow_pfn() error, so the old
		 * "if (i != npages)" re-check here was unreachable and has
		 * been removed.
		 */
		return 0;
	}

	get_npages = get_user_pages(current, current->mm, start,
					npages, 1, 1, pages, NULL);
	get_npages = max(get_npages, 0);	/* fold errors into "0 pinned" */
	if (get_npages != npages) {
		DRM_ERROR("failed to get user_pages.\n");
		/* Drop the references taken for the partially pinned pages. */
		while (get_npages)
			put_page(pages[--get_npages]);
		return -EFAULT;
	}

	return 0;
}
/*
 * Release pages pinned by exynos_gem_get_pages_from_userptr(): mark each
 * page dirty (data may have been written through the pin) and drop the
 * reference taken when it was pinned.  Pages from a VM_PFNMAP (I/O)
 * mapping were never referenced, so those are left untouched.
 */
void exynos_gem_put_pages_to_userptr(struct page **pages,
					unsigned int npages,
					struct vm_area_struct *vma)
{
	unsigned int idx;

	/* I/O mappings take no page references; nothing to undo. */
	if (vma_is_io(vma))
		return;

	for (idx = 0; idx < npages; idx++) {
		set_page_dirty_lock(pages[idx]);
		/* Undo the reference we took when populating the table. */
		put_page(pages[idx]);
	}
}
int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev, int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
struct sg_table *sgt, struct sg_table *sgt,
enum dma_data_direction dir) enum dma_data_direction dir)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment