Commit 7dd737f3 authored by Chris Wilson

drm/i915/dmabuf: Acquire the backing storage outside of struct_mutex

Use the per-object mm.lock to allocate the backing storage (and hold a
reference to it across the dmabuf access) without resorting to
struct_mutex.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161028125858.23563-15-chris@chris-wilson.co.uk
parent 1233e2db
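The idea in miniature: i915_gem_object_pin_pages() takes only the object's own mm.lock, allocates the backing storage if it is not already present, and raises a pin count that keeps those pages alive until the matching unpin. Below is a toy, self-contained userspace model of that pattern; the toy_* names are invented for illustration and this is not the i915 code:

	/* Toy model of the per-object pin pattern: the object's own lock
	 * guards allocation of its backing storage, and a pin count keeps
	 * that storage alive across the dmabuf access. No global lock. */
	#include <errno.h>
	#include <pthread.h>
	#include <stdlib.h>

	struct toy_object {
		pthread_mutex_t mm_lock;	/* stands in for obj->mm.lock */
		void *pages;			/* stands in for obj->mm.pages */
		unsigned int pin_count;
	};

	static int toy_pin_pages(struct toy_object *obj, size_t size)
	{
		int err = 0;

		pthread_mutex_lock(&obj->mm_lock);
		if (!obj->pages) {
			obj->pages = malloc(size);	/* "allocate backing storage" */
			if (!obj->pages)
				err = -ENOMEM;
		}
		if (!err)
			obj->pin_count++;	/* hold a reference across the access */
		pthread_mutex_unlock(&obj->mm_lock);
		return err;
	}

	static void toy_unpin_pages(struct toy_object *obj)
	{
		pthread_mutex_lock(&obj->mm_lock);
		/* a real implementation could reap the storage once this hits zero */
		obj->pin_count--;
		pthread_mutex_unlock(&obj->mm_lock);
	}

The payoff is that two objects can fault in their backing storage concurrently, and none of it serializes against the device-global struct_mutex.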
@@ -44,19 +44,15 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 	struct scatterlist *src, *dst;
 	int ret, i;
 
-	ret = i915_mutex_lock_interruptible(obj->base.dev);
-	if (ret)
-		goto err;
-
 	ret = i915_gem_object_pin_pages(obj);
 	if (ret)
-		goto err_unlock;
+		goto err;
 
 	/* Copy sg so that we make an independent mapping */
 	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
 	if (st == NULL) {
 		ret = -ENOMEM;
-		goto err_unpin;
+		goto err_unpin_pages;
 	}
 
 	ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
@@ -72,21 +68,18 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 	}
 
 	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
-		ret =-ENOMEM;
+		ret = -ENOMEM;
 		goto err_free_sg;
 	}
 
-	mutex_unlock(&obj->base.dev->struct_mutex);
 	return st;
 
 err_free_sg:
 	sg_free_table(st);
 err_free:
 	kfree(st);
-err_unpin:
+err_unpin_pages:
 	i915_gem_object_unpin_pages(obj);
-err_unlock:
-	mutex_unlock(&obj->base.dev->struct_mutex);
 err:
 	return ERR_PTR(ret);
 }
@@ -101,36 +94,21 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
 	sg_free_table(sg);
 	kfree(sg);
 
-	mutex_lock(&obj->base.dev->struct_mutex);
 	i915_gem_object_unpin_pages(obj);
-	mutex_unlock(&obj->base.dev->struct_mutex);
 }
 
 static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
-	struct drm_device *dev = obj->base.dev;
-	void *addr;
-	int ret;
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return ERR_PTR(ret);
-
-	addr = i915_gem_object_pin_map(obj, I915_MAP_WB);
-	mutex_unlock(&dev->struct_mutex);
-
-	return addr;
+	return i915_gem_object_pin_map(obj, I915_MAP_WB);
 }
 
 static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
-	struct drm_device *dev = obj->base.dev;
 
-	mutex_lock(&dev->struct_mutex);
 	i915_gem_object_unpin_map(obj);
-	mutex_unlock(&dev->struct_mutex);
 }
 
 static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
@@ -177,32 +155,45 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 	struct drm_device *dev = obj->base.dev;
-	int ret;
 	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
+	int err;
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return ret;
+	err = i915_gem_object_pin_pages(obj);
+	if (err)
+		return err;
+
+	err = i915_mutex_lock_interruptible(dev);
+	if (err)
+		goto out;
 
-	ret = i915_gem_object_set_to_cpu_domain(obj, write);
+	err = i915_gem_object_set_to_cpu_domain(obj, write);
 	mutex_unlock(&dev->struct_mutex);
-	return ret;
+
+out:
+	i915_gem_object_unpin_pages(obj);
+	return err;
 }
 
 static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 	struct drm_device *dev = obj->base.dev;
-	int ret;
+	int err;
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return ret;
+	err = i915_gem_object_pin_pages(obj);
+	if (err)
+		return err;
+
+	err = i915_mutex_lock_interruptible(dev);
+	if (err)
+		goto out;
 
-	ret = i915_gem_object_set_to_gtt_domain(obj, false);
+	err = i915_gem_object_set_to_gtt_domain(obj, false);
 	mutex_unlock(&dev->struct_mutex);
-	return ret;
+
+out:
+	i915_gem_object_unpin_pages(obj);
+	return err;
 }
 
 static const struct dma_buf_ops i915_dmabuf_ops = {
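Nothing changes for importers: CPU access to the shared buffer is still bracketed by the begin/end hooks, reached through the standard dma-buf entry points. A minimal sketch of that usage for a kernel importer of roughly this era, with error handling condensed and read_shared_buffer() being a hypothetical caller:

	#include <linux/dma-buf.h>

	/* Read back a buffer exported by i915; the begin/end calls land in
	 * the i915_gem_{begin,end}_cpu_access() hooks patched above. */
	static int read_shared_buffer(struct dma_buf *buf)
	{
		void *vaddr;
		int err;

		err = dma_buf_begin_cpu_access(buf, DMA_FROM_DEVICE);
		if (err)
			return err;

		vaddr = dma_buf_vmap(buf);	/* serviced by i915_gem_dmabuf_vmap() */
		if (vaddr) {
			/* ... inspect the contents ... */
			dma_buf_vunmap(buf, vaddr);
		}

		return dma_buf_end_cpu_access(buf, DMA_FROM_DEVICE);
	}

Because begin/end now pin the pages themselves, the backing storage is guaranteed to survive the bracketed access even though struct_mutex is only held for the short domain flush.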