Commit b2295c24 authored by Lucas Stach's avatar Lucas Stach

drm/etnaviv: get rid of userptr worker

All code paths which populate userptr BOs are fine with the get_pages
function taking the mmap_sem lock. This allows to get rid of the pretty
involved architecture with a worker being scheduled if the mmap_sem
needs to be taken, but instead call GUP directly and allow it to take
the lock if necessary.

This simplifies the code a lot and removes the possibility of this
function returning -EAGAIN, which complicates object population
handling at the callers.

A notable change in behavior is that we don't allow a process to populate
objects with user pages from a foreign MM anymore. This would have been an
invalid use before, as it breaks the assumptions made in the etnaviv kernel
driver to enforce cache coherence. We now disallow this by rejecting the
request to populate those objects. Well behaving userspace is unaffected by
this change.
Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
parent 54f09288
...@@ -705,141 +705,41 @@ int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags, ...@@ -705,141 +705,41 @@ int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
return 0; return 0;
} }
/*
 * Deferred-GUP bookkeeping: everything the worker needs to populate a
 * userptr BO's page array outside of the caller's context.  One instance
 * is allocated per queued request and freed by the worker when done.
 */
struct get_pages_work {
	struct work_struct work;		/* queued via etnaviv_queue_work() */
	struct mm_struct *mm;			/* target mm; reference dropped with mmput() in the worker */
	struct task_struct *task;		/* owning task; reference dropped with put_task_struct() */
	struct etnaviv_gem_object *etnaviv_obj;	/* BO being populated; GEM reference held while queued */
};
/*
 * Pin the user pages backing a userptr BO.
 *
 * Allocates a page-pointer array covering the whole object, then pins the
 * pages with get_user_pages_remote() while holding mm->mmap_sem for read.
 * Returns the populated array on success or an ERR_PTR() on failure; on
 * failure any partially pinned pages are released and the array is freed.
 */
static struct page **etnaviv_gem_userptr_do_get_pages(
	struct etnaviv_gem_object *etnaviv_obj, struct mm_struct *mm, struct task_struct *task)
{
	int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	uintptr_t ptr;
	unsigned int flags = 0;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return ERR_PTR(-ENOMEM);

	/* Only request write access when the BO was created writable. */
	if (!etnaviv_obj->userptr.ro)
		flags |= FOLL_WRITE;

	pinned = 0;
	ptr = etnaviv_obj->userptr.ptr;

	down_read(&mm->mmap_sem);
	while (pinned < npages) {
		/*
		 * GUP may pin fewer pages than requested; keep asking for the
		 * remainder.  NOTE(review): a zero return with no error would
		 * spin here — presumably GUP reports failures as -errno; confirm.
		 */
		ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
					    flags, pvec + pinned, NULL, NULL);
		if (ret < 0)
			break;

		ptr += ret * PAGE_SIZE;
		pinned += ret;
	}
	up_read(&mm->mmap_sem);

	if (ret < 0) {
		/* Undo partial progress before reporting the error. */
		release_pages(pvec, pinned);
		kvfree(pvec);
		return ERR_PTR(ret);
	}

	return pvec;
}
/*
 * Workqueue callback: populate a userptr BO's pages from sleepable context.
 *
 * Publishes the pinned page array (or the error) under the object lock —
 * an error is parked in userptr.work as an ERR_PTR for the next
 * get_pages() attempt to pick up — then drops the mm, task and GEM
 * references that were taken when the work item was queued.
 */
static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct etnaviv_gem_object *etnaviv_obj = work->etnaviv_obj;
	struct page **pvec;

	pvec = etnaviv_gem_userptr_do_get_pages(etnaviv_obj, work->mm, work->task);

	mutex_lock(&etnaviv_obj->lock);
	if (IS_ERR(pvec)) {
		/* Leave the ERR_PTR in ->work; the caller extracts it. */
		etnaviv_obj->userptr.work = ERR_CAST(pvec);
	} else {
		etnaviv_obj->userptr.work = NULL;
		etnaviv_obj->pages = pvec;
	}
	mutex_unlock(&etnaviv_obj->lock);

	/* Drop the references pinned when this work item was scheduled. */
	drm_gem_object_put_unlocked(&etnaviv_obj->base);

	mmput(work->mm);
	put_task_struct(work->task);
	kfree(work);
}
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj) static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{ {
struct page **pvec = NULL; struct page **pvec = NULL;
struct get_pages_work *work; struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
struct mm_struct *mm; int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
int ret, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
might_lock_read(&current->mm->mmap_sem); might_lock_read(&current->mm->mmap_sem);
if (etnaviv_obj->userptr.work) { if (userptr->mm != current->mm)
if (IS_ERR(etnaviv_obj->userptr.work)) { return -EPERM;
ret = PTR_ERR(etnaviv_obj->userptr.work);
etnaviv_obj->userptr.work = NULL;
} else {
ret = -EAGAIN;
}
return ret;
}
mm = get_task_mm(etnaviv_obj->userptr.task);
pinned = 0;
if (mm == current->mm) {
pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!pvec) { if (!pvec)
mmput(mm);
return -ENOMEM; return -ENOMEM;
}
pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages, do {
!etnaviv_obj->userptr.ro, pvec); unsigned num_pages = npages - pinned;
if (pinned < 0) { uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
kvfree(pvec); struct page **pages = pvec + pinned;
mmput(mm);
return pinned;
}
if (pinned == npages) {
etnaviv_obj->pages = pvec;
mmput(mm);
return 0;
}
}
ret = get_user_pages_fast(ptr, num_pages,
!userptr->ro ? FOLL_WRITE : 0, pages);
if (ret < 0) {
release_pages(pvec, pinned); release_pages(pvec, pinned);
kvfree(pvec); kvfree(pvec);
return ret;
work = kmalloc(sizeof(*work), GFP_KERNEL);
if (!work) {
mmput(mm);
return -ENOMEM;
} }
get_task_struct(current); pinned += ret;
drm_gem_object_get(&etnaviv_obj->base);
work->mm = mm;
work->task = current;
work->etnaviv_obj = etnaviv_obj;
etnaviv_obj->userptr.work = &work->work; } while (pinned < npages);
INIT_WORK(&work->work, __etnaviv_gem_userptr_get_pages);
etnaviv_queue_work(etnaviv_obj->base.dev, &work->work); etnaviv_obj->pages = pvec;
return -EAGAIN; return 0;
} }
static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj) static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
...@@ -855,7 +755,6 @@ static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj) ...@@ -855,7 +755,6 @@ static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
release_pages(etnaviv_obj->pages, npages); release_pages(etnaviv_obj->pages, npages);
kvfree(etnaviv_obj->pages); kvfree(etnaviv_obj->pages);
} }
put_task_struct(etnaviv_obj->userptr.task);
} }
static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj, static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
...@@ -885,9 +784,8 @@ int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file, ...@@ -885,9 +784,8 @@ int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class); lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);
etnaviv_obj->userptr.ptr = ptr; etnaviv_obj->userptr.ptr = ptr;
etnaviv_obj->userptr.task = current; etnaviv_obj->userptr.mm = current->mm;
etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE); etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
get_task_struct(current);
etnaviv_gem_obj_add(dev, &etnaviv_obj->base); etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
......
...@@ -26,8 +26,7 @@ struct etnaviv_gem_object; ...@@ -26,8 +26,7 @@ struct etnaviv_gem_object;
/* Per-object state for a BO backed by anonymous user memory. */
struct etnaviv_gem_userptr {
	uintptr_t ptr;		/* userspace base address of the backing memory */
	struct mm_struct *mm;	/* creating mm; population from any other mm is rejected */
	bool ro;		/* set when created without ETNA_USERPTR_WRITE */
};
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment