Commit 18b862dc authored by Chris Wilson, committed by Daniel Vetter

dma-buf, drm, ion: Propagate error code from dma_buf_begin_cpu_access()

Drivers, especially i915.ko, can fail during the initial migration of a
dma-buf for CPU access. However, the error code from the driver was not
being propagated back to the ioctl, so userspace was blissfully ignorant
of the failure. Rendering corruption ensues.
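
For reference, a minimal userspace sketch of the flush ioctl whose
return value is at stake, using DMA_BUF_IOCTL_SYNC from the uapi
<linux/dma-buf.h> added by c11e391d; the helper name and fd handling
are illustrative only, not part of this patch:

    #include <linux/dma-buf.h>
    #include <sys/ioctl.h>
    #include <errno.h>

    /* Begin CPU access on a dma-buf fd. Before this patch the ioctl
     * returned 0 even when the driver's migration failed; with it,
     * the driver's error code reaches userspace. */
    static int dmabuf_sync_start(int fd)
    {
            struct dma_buf_sync sync = {
                    .flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW,
            };

            if (ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync) < 0)
                    return -errno;

            return 0;
    }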

Whilst fixing the ioctl to return the error code from
dma_buf_begin_cpu_access(), also do the same for
dma_buf_end_cpu_access(). For most drivers, dma_buf_end_cpu_access()
cannot fail. i915.ko, however, wants to avoid being uninterruptible, as
most drivers would (being uninterruptible is what would be required to
guarantee that flushing the buffer to the device cannot fail). As
userspace already has to handle errors from the SYNC ioctl, take
advantage of this to allow the syscall to be restarted across signals.
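
Since the flush wait is now interruptible, the ioctl can fail with
EINTR when a signal arrives mid-flush; a sketch of the restart loop
userspace is expected to use (same headers as the sketch above, helper
name again illustrative):

    /* Restart the sync ioctl when interrupted by a signal; only a
     * "real" error is reported to the caller. */
    static int dmabuf_sync(int fd, struct dma_buf_sync *sync)
    {
            int ret;

            do {
                    ret = ioctl(fd, DMA_BUF_IOCTL_SYNC, sync);
            } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

            return ret == -1 ? -errno : 0;
    }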

This fixes a coherency issue for i915.ko as well as reducing the
uninterruptible hold upon its BKL, the struct_mutex.

Fixes commit c11e391d
Author: Daniel Vetter <daniel.vetter@ffwll.ch>
Date:   Thu Feb 11 20:04:51 2016 -0200

    dma-buf: Add ioctls to allow userspace to flush

Testcase: igt/gem_concurrent_blit/*dmabuf*interruptible
Testcase: igt/prime_mmap_coherency/ioctl-errors
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tiago Vignatti <tiago.vignatti@intel.com>
Cc: Stéphane Marchesin <marcheu@chromium.org>
Cc: David Herrmann <dh.herrmann@gmail.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Daniel Vetter <daniel.vetter@intel.com>
CC: linux-media@vger.kernel.org
Cc: dri-devel@lists.freedesktop.org
Cc: linaro-mm-sig@lists.linaro.org
Cc: intel-gfx@lists.freedesktop.org
Cc: devel@driverdev.osuosl.org
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1458331359-2634-1-git-send-email-chris@chris-wilson.co.uk
parent b47ff7e6
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -259,6 +259,7 @@ static long dma_buf_ioctl(struct file *file,
 	struct dma_buf *dmabuf;
 	struct dma_buf_sync sync;
 	enum dma_data_direction direction;
+	int ret;
 
 	dmabuf = file->private_data;
@@ -285,11 +286,11 @@ static long dma_buf_ioctl(struct file *file,
 		}
 
 		if (sync.flags & DMA_BUF_SYNC_END)
-			dma_buf_end_cpu_access(dmabuf, direction);
+			ret = dma_buf_end_cpu_access(dmabuf, direction);
 		else
-			dma_buf_begin_cpu_access(dmabuf, direction);
+			ret = dma_buf_begin_cpu_access(dmabuf, direction);
 
-		return 0;
+		return ret;
 	default:
 		return -ENOTTY;
 	}
@@ -613,13 +614,17 @@ EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
  *
  * This call must always succeed.
  */
-void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
-			    enum dma_data_direction direction)
+int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+			   enum dma_data_direction direction)
 {
+	int ret = 0;
+
 	WARN_ON(!dmabuf);
 
 	if (dmabuf->ops->end_cpu_access)
-		dmabuf->ops->end_cpu_access(dmabuf, direction);
+		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -228,25 +228,20 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire
 	return ret;
 }
 
-static void i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
+static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	bool was_interruptible;
 	int ret;
 
-	mutex_lock(&dev->struct_mutex);
-	was_interruptible = dev_priv->mm.interruptible;
-	dev_priv->mm.interruptible = false;
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
 
 	ret = i915_gem_object_set_to_gtt_domain(obj, false);
-	dev_priv->mm.interruptible = was_interruptible;
 	mutex_unlock(&dev->struct_mutex);
 
-	if (unlikely(ret))
-		DRM_ERROR("unable to flush buffer following CPU access; rendering may be corrupt\n");
+	return ret;
 }
 
 static const struct dma_buf_ops i915_dmabuf_ops = {
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -93,11 +93,12 @@ static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
 	return omap_gem_get_pages(obj, &pages, true);
 }
 
-static void omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
-		enum dma_data_direction dir)
+static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
+		enum dma_data_direction dir)
 {
 	struct drm_gem_object *obj = buffer->priv;
 	omap_gem_put_pages(obj);
+	return 0;
 }
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -423,7 +423,7 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
 	}
 
 	if (ufb->obj->base.import_attach) {
-		dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
-				       DMA_FROM_DEVICE);
+		ret = dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
+					     DMA_FROM_DEVICE);
 	}
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1075,7 +1075,7 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 	return PTR_ERR_OR_ZERO(vaddr);
 }
 
-static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
-				       enum dma_data_direction direction)
+static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+				      enum dma_data_direction direction)
 {
 	struct ion_buffer *buffer = dmabuf->priv;
@@ -1083,6 +1083,8 @@ static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 	mutex_lock(&buffer->lock);
 	ion_buffer_kmap_put(buffer);
 	mutex_unlock(&buffer->lock);
+
+	return 0;
 }
 
 static struct dma_buf_ops dma_buf_ops = {
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -94,7 +94,7 @@ struct dma_buf_ops {
 	void (*release)(struct dma_buf *);
 
 	int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
-	void (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
+	int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
 	void *(*kmap_atomic)(struct dma_buf *, unsigned long);
 	void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
 	void *(*kmap)(struct dma_buf *, unsigned long);
@@ -224,7 +224,7 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
 			      enum dma_data_direction);
 int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
 			     enum dma_data_direction dir);
-void dma_buf_end_cpu_access(struct dma_buf *dma_buf,
-			    enum dma_data_direction dir);
+int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
+			   enum dma_data_direction dir);
 void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
 void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);