Commit 3c0e3ab2 authored by Thomas Hellstrom, committed by Luis Henriques

drm/vmwgfx: Fix up user_dmabuf refcounting

commit 54c12bc3 upstream.

If user space calls unreference on a user_dmabuf, it will typically
kill the struct ttm_base_object member, which is responsible for the
user-space visibility. However, the dmabuf part may still be alive and
refcounted. In some situations, like for shared guest-backed surface
referencing/opening, the driver may try to reference the
struct ttm_base_object member again, causing an immediate kernel warning
and a later kernel NULL pointer dereference.

Fix this by always maintaining a reference on the struct
ttm_base_object member in situations where it might subsequently be
referenced.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
[ luis: backported to 3.16: adjusted context ]
Signed-off-by: Luis Henriques <luis.henriques@canonical.com>
parent c918c41d
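
As a rough illustration of the caller-side pattern this patch introduces (condensed from the synccpu ioctl hunk below, not a literal excerpt; tfile and handle stand in for the caller's ttm_object_file and buffer handle), a lookup now also pins the base object, and the caller drops both references when done:

        struct vmw_dma_buffer *dma_buf;
        struct ttm_base_object *buffer_base;
        int ret;

        /* The lookup now also returns a reference on the base object. */
        ret = vmw_user_dmabuf_lookup(tfile, handle, &dma_buf, &buffer_base);
        if (unlikely(ret != 0))
                return ret;

        /* ... use the buffer; the base object cannot be destroyed here ... */

        /* Drop both references when done. */
        vmw_dmabuf_unreference(&dma_buf);
        ttm_base_object_unref(&buffer_base);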
@@ -610,7 +610,8 @@ extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
                                  uint32_t size,
                                  bool shareable,
                                  uint32_t *handle,
-                                 struct vmw_dma_buffer **p_dma_buf);
+                                 struct vmw_dma_buffer **p_dma_buf,
+                                 struct ttm_base_object **p_base);
 extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
                                      struct vmw_dma_buffer *dma_buf,
                                      uint32_t *handle);
@@ -624,7 +625,8 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
                                          uint32_t cur_validate_node);
 extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
 extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
-                                  uint32_t id, struct vmw_dma_buffer **out);
+                                  uint32_t id, struct vmw_dma_buffer **out,
+                                  struct ttm_base_object **base);
 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
                                   struct drm_file *file_priv);
 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
...
@@ -873,7 +873,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
         struct vmw_relocation *reloc;
         int ret;

-        ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
+        ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
+                                     NULL);
         if (unlikely(ret != 0)) {
                 DRM_ERROR("Could not find or use MOB buffer.\n");
                 return -EINVAL;
@@ -934,7 +935,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
         struct vmw_relocation *reloc;
         int ret;

-        ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
+        ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
+                                     NULL);
         if (unlikely(ret != 0)) {
                 DRM_ERROR("Could not find or use GMR region.\n");
                 return -EINVAL;
...
@@ -484,7 +484,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
                 goto out_unlock;
         }

-        ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf);
+        ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL);
         if (ret)
                 goto out_unlock;
...
@@ -355,7 +355,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
         }

         *out_surf = NULL;
-        ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
+        ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
         return ret;
 }
@@ -482,7 +482,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
                           uint32_t size,
                           bool shareable,
                           uint32_t *handle,
-                          struct vmw_dma_buffer **p_dma_buf)
+                          struct vmw_dma_buffer **p_dma_buf,
+                          struct ttm_base_object **p_base)
 {
         struct vmw_user_dma_buffer *user_bo;
         struct ttm_buffer_object *tmp;
@@ -516,6 +517,10 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
         }

         *p_dma_buf = &user_bo->dma;
+        if (p_base) {
+                *p_base = &user_bo->prime.base;
+                kref_get(&(*p_base)->refcount);
+        }
         *handle = user_bo->prime.base.hash.key;

 out_no_base_object:
@@ -627,6 +632,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
         struct vmw_dma_buffer *dma_buf;
         struct vmw_user_dma_buffer *user_bo;
         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+        struct ttm_base_object *buffer_base;
         int ret;

         if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
@@ -639,7 +645,8 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,

         switch (arg->op) {
         case drm_vmw_synccpu_grab:
-                ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
+                ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
+                                             &buffer_base);
                 if (unlikely(ret != 0))
                         return ret;

@@ -647,6 +654,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
                                        dma);
                 ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
                 vmw_dmabuf_unreference(&dma_buf);
+                ttm_base_object_unref(&buffer_base);
                 if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
                              ret != -EBUSY)) {
                         DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
@@ -688,7 +696,8 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
                 return ret;

         ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
-                                    req->size, false, &handle, &dma_buf);
+                                    req->size, false, &handle, &dma_buf,
+                                    NULL);
         if (unlikely(ret != 0))
                 goto out_no_dmabuf;

@@ -717,7 +726,8 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
 }

 int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
-                           uint32_t handle, struct vmw_dma_buffer **out)
+                           uint32_t handle, struct vmw_dma_buffer **out,
+                           struct ttm_base_object **p_base)
 {
         struct vmw_user_dma_buffer *vmw_user_bo;
         struct ttm_base_object *base;
@@ -739,7 +749,10 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
         vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
                                    prime.base);
         (void)ttm_bo_reference(&vmw_user_bo->dma.base);
-        ttm_base_object_unref(&base);
+        if (p_base)
+                *p_base = base;
+        else
+                ttm_base_object_unref(&base);
         *out = &vmw_user_bo->dma;

         return 0;
@@ -1000,7 +1013,7 @@ int vmw_dumb_create(struct drm_file *file_priv,

         ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                     args->size, false, &args->handle,
-                                    &dma_buf);
+                                    &dma_buf, NULL);
         if (unlikely(ret != 0))
                 goto out_no_dmabuf;

@@ -1028,7 +1041,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
         struct vmw_dma_buffer *out_buf;
         int ret;

-        ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
+        ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
         if (ret != 0)
                 return -EINVAL;
...
@@ -455,7 +455,7 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,

         if (arg->buffer_handle != SVGA3D_INVALID_ID) {
                 ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle,
-                                             &buffer);
+                                             &buffer, NULL);
                 if (unlikely(ret != 0)) {
                         DRM_ERROR("Could not find buffer for shader "
                                   "creation.\n");
...
@@ -43,6 +43,7 @@ struct vmw_user_surface {
         struct vmw_surface srf;
         uint32_t size;
         struct drm_master *master;
+        struct ttm_base_object *backup_base;
 };

 /**
@@ -652,6 +653,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
         struct vmw_resource *res = &user_srf->srf.res;

         *p_base = NULL;
+        ttm_base_object_unref(&user_srf->backup_base);
         vmw_resource_unreference(&res);
 }

@@ -846,7 +848,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
                                             res->backup_size,
                                             true,
                                             &backup_handle,
-                                            &res->backup);
+                                            &res->backup,
+                                            &user_srf->backup_base);
                 if (unlikely(ret != 0)) {
                         vmw_resource_unreference(&res);
                         goto out_unlock;
@@ -1309,7 +1312,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,

         if (req->buffer_handle != SVGA3D_INVALID_ID) {
                 ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
-                                             &res->backup);
+                                             &res->backup,
+                                             &user_srf->backup_base);
         } else if (req->drm_surface_flags &
                    drm_vmw_surface_flag_create_buffer)
                 ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
@@ -1317,7 +1321,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
                                             req->drm_surface_flags &
                                             drm_vmw_surface_flag_shareable,
                                             &backup_handle,
-                                            &res->backup);
+                                            &res->backup,
+                                            &user_srf->backup_base);

         if (unlikely(ret != 0)) {
                 vmw_resource_unreference(&res);
...