Commit cdd1cf79 authored by Chris Wilson, committed by Daniel Vetter

drm: Make drm_read() more robust against multithreaded races

The current implementation of drm_read() faces a number of issues:

1. Upon an error, it consumes the event which may lead to the client
blocking.
2. Upon an error, it forgets about events already copied
3. If it fails to copy a single event with O_NONBLOCK it falls into an
infinite loop of reporting EAGAIN.
4. There is a race between multiple waiters and blocking reads of the
events list.

Here, we inline drm_dequeue_event() into drm_read() so that we can take
the spinlock around the list walking and event copying, and importantly
reorder the error handling to avoid the issues above.

Cc: Takashi Iwai <tiwai@suse.de>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Takashi Iwai <tiwai@suse.de>
Testcase: igt/drm_read
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 01934c2a
...@@ -478,64 +478,59 @@ int drm_release(struct inode *inode, struct file *filp) ...@@ -478,64 +478,59 @@ int drm_release(struct inode *inode, struct file *filp)
} }
EXPORT_SYMBOL(drm_release); EXPORT_SYMBOL(drm_release);
static bool ssize_t drm_read(struct file *filp, char __user *buffer,
drm_dequeue_event(struct drm_file *file_priv, size_t count, loff_t *offset)
size_t total, size_t max, struct drm_pending_event **out)
{ {
struct drm_file *file_priv = filp->private_data;
struct drm_device *dev = file_priv->minor->dev; struct drm_device *dev = file_priv->minor->dev;
struct drm_pending_event *e; ssize_t ret = 0;
unsigned long flags;
bool ret = false;
spin_lock_irqsave(&dev->event_lock, flags);
*out = NULL; if (!access_ok(VERIFY_WRITE, buffer, count))
if (list_empty(&file_priv->event_list)) return -EFAULT;
goto out;
e = list_first_entry(&file_priv->event_list,
struct drm_pending_event, link);
if (e->event->length + total > max)
goto out;
file_priv->event_space += e->event->length; spin_lock_irq(&dev->event_lock);
list_del(&e->link); for (;;) {
*out = e; if (list_empty(&file_priv->event_list)) {
ret = true; if (ret)
break;
out: if (filp->f_flags & O_NONBLOCK) {
spin_unlock_irqrestore(&dev->event_lock, flags); ret = -EAGAIN;
return ret; break;
} }
ssize_t drm_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset)
{
struct drm_file *file_priv = filp->private_data;
struct drm_pending_event *e;
size_t total;
ssize_t ret;
if ((filp->f_flags & O_NONBLOCK) == 0) { spin_unlock_irq(&dev->event_lock);
ret = wait_event_interruptible(file_priv->event_wait, ret = wait_event_interruptible(file_priv->event_wait,
!list_empty(&file_priv->event_list)); !list_empty(&file_priv->event_list));
if (ret < 0) spin_lock_irq(&dev->event_lock);
return ret; if (ret < 0)
} break;
ret = 0;
} else {
struct drm_pending_event *e;
e = list_first_entry(&file_priv->event_list,
struct drm_pending_event, link);
if (e->event->length + ret > count)
break;
if (__copy_to_user_inatomic(buffer + ret,
e->event, e->event->length)) {
if (ret == 0)
ret = -EFAULT;
break;
}
total = 0; file_priv->event_space += e->event->length;
while (drm_dequeue_event(file_priv, total, count, &e)) { ret += e->event->length;
if (copy_to_user(buffer + total, list_del(&e->link);
e->event, e->event->length)) {
total = -EFAULT;
e->destroy(e); e->destroy(e);
break;
} }
total += e->event->length;
e->destroy(e);
} }
spin_unlock_irq(&dev->event_lock);
return total ?: -EAGAIN; return ret;
} }
EXPORT_SYMBOL(drm_read); EXPORT_SYMBOL(drm_read);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment