Commit f230ffa1 authored by Dave Airlie

Merge tag 'topic/struct_mutex-2016-04-21' of git://anongit.freedesktop.org/drm-intel into drm-next

struct_mutex cleanups and error-path fixes. Unfortunately I didn't manage
to get acks from everyone, but this stuff has been hanging out for months
now and is imo simple enough to just land the remaining few patches. It's a
separate pull request so that you can take a look yourself.

* tag 'topic/struct_mutex-2016-04-21' of git://anongit.freedesktop.org/drm-intel:
  drm/vma_manage: Drop has_offset
  drm/vgem: Drop dev->struct_mutex
  drm/vgem: Move get_pages to gem_create
  drm/vgem: Simplify dumb_map
  drm/exynos: drop struct_mutex from fbdev setup
  drm/exynos: drop struct_mutex from exynos_drm_gem_get_ioctl
  drm/exynos: drop struct_mutex from exynos_gem_map_sgt_with_dma
  drm/exynos: Drop dev->struct_mutex from mmap offset function
  drm/nouveau: Drop dev->struct_mutex from fbdev init
  drm/qxl: Use unlocked gem unreferencing
  drm/omapdrm: Use unlocked gem unreferencing
  drm/nouveau: Use unlocked gem unreferencing
parents f9fd2ada f74418a4
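
The bulk of this pull is mechanical: wherever a driver only took dev->struct_mutex to protect a final drm_gem_object_unreference(), the call is switched to the unlocked helper and the locking is dropped. As a rough before/after sketch (not code from this diff; the my_cleanup_*() wrappers are hypothetical, only the two DRM helpers are real):

#include <drm/drmP.h>
#include <drm/drm_gem.h>

/* Before: the final unreference had to happen under dev->struct_mutex. */
static void my_cleanup_locked(struct drm_device *dev, struct drm_gem_object *obj)
{
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}

/*
 * After: drm_gem_object_unreference_unlocked() grabs struct_mutex internally
 * only if the last reference is dropped, so the caller needs no locking.
 */
static void my_cleanup(struct drm_gem_object *obj)
{
	drm_gem_object_unreference_unlocked(obj);
}

The remaining hunks drop struct_mutex around operations that never required it in the first place (object lookup, dma_map_sg(), vm_insert_page() and fbdev setup).
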
@@ -422,6 +422,10 @@ EXPORT_SYMBOL(drm_gem_handle_create);
* @obj: obj in question
*
* This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
*
* Note that drm_gem_object_release() already calls this function, so drivers
* don't have to take care of releasing the mmap offset themselves when freeing
* the GEM object.
*/
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
@@ -445,6 +449,9 @@ EXPORT_SYMBOL(drm_gem_free_mmap_offset);
* This routine allocates and attaches a fake offset for @obj, in cases where
* the virtual size differs from the physical size (ie. obj->size). Otherwise
* just use drm_gem_create_mmap_offset().
*
* This function is idempotent and handles an already allocated mmap offset
* transparently. Drivers do not need to check for this case.
*/
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
@@ -466,6 +473,9 @@ EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
* structures.
*
* This routine allocates and attaches a fake offset for @obj.
*
* Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
* the fake offset again.
*/
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
@@ -759,6 +769,13 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
idr_destroy(&file_private->object_idr);
}
/**
* drm_gem_object_release - release GEM buffer object resources
* @obj: GEM buffer object
*
* This releases any structures and resources used by @obj and is the inverse of
* drm_gem_object_init().
*/
void
drm_gem_object_release(struct drm_gem_object *obj)
{
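
The kernel-doc added above spells out that drm_gem_object_release() already frees the fake mmap offset, so a driver's free path does not need its own drm_gem_free_mmap_offset() call. A minimal sketch under that note (struct my_gem_object and my_gem_free_object() are hypothetical):

#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_gem.h>

struct my_gem_object {
	struct drm_gem_object base;	/* hypothetical driver BO wrapper */
};

static void my_gem_free_object(struct drm_gem_object *obj)
{
	struct my_gem_object *bo = container_of(obj, struct my_gem_object, base);

	/*
	 * No explicit drm_gem_free_mmap_offset() here: drm_gem_object_release()
	 * drops the fake offset (if any) along with the other GEM resources.
	 */
	drm_gem_object_release(obj);
	kfree(bo);
}
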
@@ -138,8 +138,6 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
mutex_lock(&dev->struct_mutex);
size = mode_cmd.pitches[0] * mode_cmd.height;
exynos_gem = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
@@ -154,10 +152,8 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
size);
}
if (IS_ERR(exynos_gem)) {
ret = PTR_ERR(exynos_gem);
goto out;
}
if (IS_ERR(exynos_gem))
return PTR_ERR(exynos_gem);
exynos_fbdev->exynos_gem = exynos_gem;
@@ -173,7 +169,6 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
if (ret < 0)
goto err_destroy_framebuffer;
mutex_unlock(&dev->struct_mutex);
return ret;
err_destroy_framebuffer:
@@ -181,13 +176,12 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
err_destroy_gem:
exynos_drm_gem_destroy(exynos_gem);
/*
* if failed, all resources allocated above would be released by
* drm_mode_config_cleanup() when drm_load() had been called prior
* to any specific driver such as fimd or hdmi driver.
*/
out:
mutex_unlock(&dev->struct_mutex);
/*
* if failed, all resources allocated above would be released by
* drm_mode_config_cleanup() when drm_load() had been called prior
* to any specific driver such as fimd or hdmi driver.
*/
return ret;
}
@@ -362,12 +362,9 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
struct drm_exynos_gem_info *args = data;
struct drm_gem_object *obj;
mutex_lock(&dev->struct_mutex);
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
@@ -376,8 +373,7 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
args->flags = exynos_gem->flags;
args->size = exynos_gem->size;
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
drm_gem_object_unreference_unlocked(obj);
return 0;
}
@@ -388,16 +384,12 @@ int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
{
int nents;
mutex_lock(&drm_dev->struct_mutex);
nents = dma_map_sg(to_dma_dev(drm_dev), sgt->sgl, sgt->nents, dir);
if (!nents) {
DRM_ERROR("failed to map sgl with dma.\n");
mutex_unlock(&drm_dev->struct_mutex);
return nents;
}
mutex_unlock(&drm_dev->struct_mutex);
return 0;
}
@@ -458,8 +450,6 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
struct drm_gem_object *obj;
int ret = 0;
mutex_lock(&dev->struct_mutex);
/*
* get offset of memory allocated for drm framebuffer.
* - this callback would be called by user application
@@ -469,16 +459,13 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
obj = drm_gem_object_lookup(dev, file_priv, handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
ret = -EINVAL;
goto unlock;
return -EINVAL;
}
*offset = drm_vma_node_offset_addr(&obj->vma_node);
DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
drm_gem_object_unreference(obj);
unlock:
mutex_unlock(&dev->struct_mutex);
drm_gem_object_unreference_unlocked(obj);
return ret;
}
@@ -2031,9 +2031,6 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int ret;
if (drm_vma_node_has_offset(&obj->base.vma_node))
return 0;
dev_priv->mm.shrinker_no_lock_stealing = true;
ret = drm_gem_create_mmap_offset(&obj->base);
@@ -296,7 +296,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
err:
kfree(nouveau_fb);
err_unref:
drm_gem_object_unreference(gem);
drm_gem_object_unreference_unlocked(gem);
return ERR_PTR(ret);
}
@@ -386,8 +386,6 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
}
}
mutex_lock(&dev->struct_mutex);
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
@@ -426,8 +424,6 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
mutex_unlock(&dev->struct_mutex);
if (chan)
nouveau_fbcon_accel_init(dev);
nouveau_fbcon_zfill(dev, fbcon);
@@ -441,7 +437,6 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
return 0;
out_unlock:
mutex_unlock(&dev->struct_mutex);
if (chan)
nouveau_bo_vma_del(nvbo, &fbcon->nouveau_fb.vma);
nouveau_bo_unmap(nvbo);
@@ -153,7 +153,7 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
/* note: if fb creation failed, we can't rely on fb destroy
* to unref the bo:
*/
drm_gem_object_unreference(fbdev->bo);
drm_gem_object_unreference_unlocked(fbdev->bo);
ret = PTR_ERR(fb);
goto fail;
}
@@ -443,11 +443,11 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
}
}
if (fb && ret) {
drm_gem_object_unreference(gobj);
drm_gem_object_unreference_unlocked(gobj);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
drm_gem_object_unreference(gobj);
drm_gem_object_unreference_unlocked(gobj);
return ret;
}
@@ -89,7 +89,6 @@ int vgem_gem_get_pages(struct drm_vgem_gem_object *obj)
static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_vgem_gem_object *obj = vma->vm_private_data;
struct drm_device *dev = obj->base.dev;
loff_t num_pages;
pgoff_t page_offset;
int ret;
@@ -103,12 +102,8 @@ static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (page_offset > num_pages)
return VM_FAULT_SIGBUS;
mutex_lock(&dev->struct_mutex);
ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
obj->pages[page_offset]);
mutex_unlock(&dev->struct_mutex);
switch (ret) {
case 0:
return VM_FAULT_NOPAGE;
@@ -154,6 +149,10 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
if (err)
goto out;
err = vgem_gem_get_pages(obj);
if (err)
goto out;
err = drm_gem_handle_create(file, gem_object, handle);
if (err)
goto handle_out;
@@ -201,37 +200,23 @@ int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
int ret = 0;
struct drm_gem_object *obj;
mutex_lock(&dev->struct_mutex);
obj = drm_gem_object_lookup(dev, file, handle);
if (!obj) {
ret = -ENOENT;
goto unlock;
}
if (!obj)
return -ENOENT;
if (!drm_vma_node_has_offset(&obj->vma_node)) {
ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto unref;
}
ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto unref;
BUG_ON(!obj->filp);
obj->filp->private_data = obj;
ret = vgem_gem_get_pages(to_vgem_bo(obj));
if (ret)
goto fail_get_pages;
*offset = drm_vma_node_offset_addr(&obj->vma_node);
goto unref;
fail_get_pages:
drm_gem_free_mmap_offset(obj);
unref:
drm_gem_object_unreference(obj);
unlock:
mutex_unlock(&dev->struct_mutex);
drm_gem_object_unreference_unlocked(obj);
return ret;
}
@@ -175,19 +175,6 @@ static inline unsigned long drm_vma_node_size(struct drm_vma_offset_node *node)
return node->vm_node.size;
}
/**
* drm_vma_node_has_offset() - Check whether node is added to offset manager
* @node: Node to be checked
*
* RETURNS:
* true iff the node was previously allocated an offset and added to
* an vma offset manager.
*/
static inline bool drm_vma_node_has_offset(struct drm_vma_offset_node *node)
{
return drm_mm_node_allocated(&node->vm_node);
}
/**
* drm_vma_node_offset_addr() - Return sanitized offset for user-space mmaps
* @node: Linked offset node
@@ -220,7 +207,7 @@ static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
struct address_space *file_mapping)
{
if (drm_vma_node_has_offset(node))
if (drm_mm_node_allocated(&node->vm_node))
unmap_mapping_range(file_mapping,
drm_vma_node_offset_addr(node),
drm_vma_node_size(node) << PAGE_SHIFT, 1);
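
The i915 and vgem hunks above, together with the removal of drm_vma_node_has_offset(), rely on the now-documented fact that drm_gem_create_mmap_offset() is idempotent, so callers no longer pre-check for an existing offset. A sketch of a dumb_map_offset-style callback written against that guarantee (my_dumb_map_offset() is hypothetical; the helper signatures are those of this kernel generation):

#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/drm_vma_manager.h>

static int my_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			      uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(dev, file, handle);
	if (!obj)
		return -ENOENT;

	/* Idempotent: an already allocated fake offset is simply reused. */
	ret = drm_gem_create_mmap_offset(obj);
	if (!ret)
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

Internal code that still needs to test whether an offset exists (like drm_vma_node_unmap() above) now open-codes drm_mm_node_allocated(&node->vm_node) instead.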