Commit 5ba6c9ff authored by Chris Wilson, committed by Daniel Vetter

drm/vgem: Fix mmaping

The vGEM mmap code has bitrotted slightly and now immediately BUGs.
Since vGEM was last updated, there are new core GEM facilities to
provide more common functions, so let's use those here.

v2: drm_gem_free_mmap_offset() is performed from
drm_gem_object_release() so we can remove the redundant call.

Testcase: igt/vgem_basic/mmap
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=96603
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Sean Paul <seanpaul@chromium.org>
Cc: Zach Reizner <zachr@google.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Tested-by: Humberto Israel Perez Rodriguez <humberto.i.perez.rodriguez@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1466692534-28303-1-git-send-email-chris@chris-wilson.co.uk
parent 2ae99588
...@@ -42,81 +42,38 @@ ...@@ -42,81 +42,38 @@
#define DRIVER_MAJOR 1 #define DRIVER_MAJOR 1
#define DRIVER_MINOR 0 #define DRIVER_MINOR 0
void vgem_gem_put_pages(struct drm_vgem_gem_object *obj)
{
drm_gem_put_pages(&obj->base, obj->pages, false, false);
obj->pages = NULL;
}
static void vgem_gem_free_object(struct drm_gem_object *obj) static void vgem_gem_free_object(struct drm_gem_object *obj)
{ {
struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj); struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
drm_gem_free_mmap_offset(obj);
if (vgem_obj->use_dma_buf && obj->dma_buf) {
dma_buf_put(obj->dma_buf);
obj->dma_buf = NULL;
}
drm_gem_object_release(obj); drm_gem_object_release(obj);
if (vgem_obj->pages)
vgem_gem_put_pages(vgem_obj);
vgem_obj->pages = NULL;
kfree(vgem_obj); kfree(vgem_obj);
} }
int vgem_gem_get_pages(struct drm_vgem_gem_object *obj)
{
struct page **pages;
if (obj->pages || obj->use_dma_buf)
return 0;
pages = drm_gem_get_pages(&obj->base);
if (IS_ERR(pages)) {
return PTR_ERR(pages);
}
obj->pages = pages;
return 0;
}
static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{ {
struct drm_vgem_gem_object *obj = vma->vm_private_data; struct drm_vgem_gem_object *obj = vma->vm_private_data;
loff_t num_pages;
pgoff_t page_offset;
int ret;
/* We don't use vmf->pgoff since that has the fake offset */ /* We don't use vmf->pgoff since that has the fake offset */
page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> unsigned long vaddr = (unsigned long)vmf->virtual_address;
PAGE_SHIFT; struct page *page;
num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE); page = shmem_read_mapping_page(file_inode(obj->base.filp)->i_mapping,
(vaddr - vma->vm_start) >> PAGE_SHIFT);
if (page_offset > num_pages) if (!IS_ERR(page)) {
return VM_FAULT_SIGBUS; vmf->page = page;
return 0;
ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, } else switch (PTR_ERR(page)) {
obj->pages[page_offset]); case -ENOSPC:
switch (ret) { case -ENOMEM:
case 0: return VM_FAULT_OOM;
return VM_FAULT_NOPAGE; case -EBUSY:
case -ENOMEM: return VM_FAULT_RETRY;
return VM_FAULT_OOM; case -EFAULT:
case -EBUSY: case -EINVAL:
return VM_FAULT_RETRY; return VM_FAULT_SIGBUS;
case -EFAULT: default:
case -EINVAL: WARN_ON_ONCE(PTR_ERR(page));
return VM_FAULT_SIGBUS; return VM_FAULT_SIGBUS;
default:
WARN_ON(1);
return VM_FAULT_SIGBUS;
} }
} }
...@@ -134,57 +91,43 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev, ...@@ -134,57 +91,43 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
unsigned long size) unsigned long size)
{ {
struct drm_vgem_gem_object *obj; struct drm_vgem_gem_object *obj;
struct drm_gem_object *gem_object; int ret;
int err;
size = roundup(size, PAGE_SIZE);
obj = kzalloc(sizeof(*obj), GFP_KERNEL); obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj) if (!obj)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
gem_object = &obj->base; ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
if (ret)
err = drm_gem_object_init(dev, gem_object, size); goto err_free;
if (err)
goto out;
err = vgem_gem_get_pages(obj);
if (err)
goto out;
err = drm_gem_handle_create(file, gem_object, handle);
if (err)
goto handle_out;
drm_gem_object_unreference_unlocked(gem_object); ret = drm_gem_handle_create(file, &obj->base, handle);
drm_gem_object_unreference_unlocked(&obj->base);
if (ret)
goto err;
return gem_object; return &obj->base;
handle_out: err_free:
drm_gem_object_release(gem_object);
out:
kfree(obj); kfree(obj);
return ERR_PTR(err); err:
return ERR_PTR(ret);
} }
static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev, static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args) struct drm_mode_create_dumb *args)
{ {
struct drm_gem_object *gem_object; struct drm_gem_object *gem_object;
uint64_t size; u64 pitch, size;
uint64_t pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
size = args->height * pitch; size = args->height * pitch;
if (size == 0) if (size == 0)
return -EINVAL; return -EINVAL;
gem_object = vgem_gem_create(dev, file, &args->handle, size); gem_object = vgem_gem_create(dev, file, &args->handle, size);
if (IS_ERR(gem_object))
if (IS_ERR(gem_object)) {
DRM_DEBUG_DRIVER("object creation failed\n");
return PTR_ERR(gem_object); return PTR_ERR(gem_object);
}
args->size = gem_object->size; args->size = gem_object->size;
args->pitch = pitch; args->pitch = pitch;
...@@ -194,26 +137,26 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev, ...@@ -194,26 +137,26 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
return 0; return 0;
} }
int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev, static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset) uint32_t handle, uint64_t *offset)
{ {
int ret = 0;
struct drm_gem_object *obj; struct drm_gem_object *obj;
int ret;
obj = drm_gem_object_lookup(file, handle); obj = drm_gem_object_lookup(file, handle);
if (!obj) if (!obj)
return -ENOENT; return -ENOENT;
if (!obj->filp) {
ret = -EINVAL;
goto unref;
}
ret = drm_gem_create_mmap_offset(obj); ret = drm_gem_create_mmap_offset(obj);
if (ret) if (ret)
goto unref; goto unref;
BUG_ON(!obj->filp);
obj->filp->private_data = obj;
*offset = drm_vma_node_offset_addr(&obj->vma_node); *offset = drm_vma_node_offset_addr(&obj->vma_node);
unref: unref:
drm_gem_object_unreference_unlocked(obj); drm_gem_object_unreference_unlocked(obj);
...@@ -223,10 +166,26 @@ int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev, ...@@ -223,10 +166,26 @@ int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
static struct drm_ioctl_desc vgem_ioctls[] = { static struct drm_ioctl_desc vgem_ioctls[] = {
}; };
static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
{
unsigned long flags = vma->vm_flags;
int ret;
ret = drm_gem_mmap(filp, vma);
if (ret)
return ret;
/* Keep the WC mmaping set by drm_gem_mmap() but our pages
* are ordinary and not special.
*/
vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
return 0;
}
static const struct file_operations vgem_driver_fops = { static const struct file_operations vgem_driver_fops = {
.owner = THIS_MODULE, .owner = THIS_MODULE,
.open = drm_open, .open = drm_open,
.mmap = drm_gem_mmap, .mmap = vgem_mmap,
.poll = drm_poll, .poll = drm_poll,
.read = drm_read, .read = drm_read,
.unlocked_ioctl = drm_ioctl, .unlocked_ioctl = drm_ioctl,
...@@ -248,7 +207,7 @@ static struct drm_driver vgem_driver = { ...@@ -248,7 +207,7 @@ static struct drm_driver vgem_driver = {
.minor = DRIVER_MINOR, .minor = DRIVER_MINOR,
}; };
struct drm_device *vgem_device; static struct drm_device *vgem_device;
static int __init vgem_init(void) static int __init vgem_init(void)
{ {
...@@ -261,7 +220,6 @@ static int __init vgem_init(void) ...@@ -261,7 +220,6 @@ static int __init vgem_init(void)
} }
ret = drm_dev_register(vgem_device, 0); ret = drm_dev_register(vgem_device, 0);
if (ret) if (ret)
goto out_unref; goto out_unref;
......
...@@ -35,12 +35,6 @@ ...@@ -35,12 +35,6 @@
#define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base) #define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
struct drm_vgem_gem_object { struct drm_vgem_gem_object {
struct drm_gem_object base; struct drm_gem_object base;
struct page **pages;
bool use_dma_buf;
}; };
/* vgem_drv.c */
extern void vgem_gem_put_pages(struct drm_vgem_gem_object *obj);
extern int vgem_gem_get_pages(struct drm_vgem_gem_object *obj);
#endif #endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment