Commit 3632dee2 authored by Vegard Nossum, committed by Linus Torvalds

inotify: clean up inotify_read and fix locking problems

If userspace supplies an invalid pointer to a read() of an inotify
instance, the inotify device's event list mutex is unlocked twice.
This causes an imbalance which effectively leaves the data structure
unprotected, and we can trigger oopses by accessing the inotify
instance from different tasks concurrently.
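
The failure mode is easy to see in condensed form. The sketch below is
a simplified, hypothetical rendering of the old loop structure (not a
verbatim excerpt of the kernel code): the mutex is dropped around
copy_to_user(), the -EFAULT path then breaks out of the loop without
re-taking it, and the unconditional mutex_unlock() after the loop
releases a mutex that is no longer held.

	mutex_lock(&dev->ev_mutex);
	while (1) {
		if (list_empty(&dev->events))
			break;			/* exits with the mutex held */
		kevent = inotify_dev_get_event(dev);

		mutex_unlock(&dev->ev_mutex);	/* dropped around the user copy */
		if (copy_to_user(buf, &kevent->event, event_size)) {
			ret = -EFAULT;
			break;			/* bug: exits with the mutex NOT held */
		}
		mutex_lock(&dev->ev_mutex);
	}
	mutex_unlock(&dev->ev_mutex);		/* unlocks a second time on -EFAULT */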

The best fix (contributed largely by Linus) is a total rewrite
of the function in question:

On Thu, Jan 22, 2009 at 7:05 AM, Linus Torvalds wrote:
> The thing to notice is that:
>
>  - locking is done in just one place, and there is no question about it
>   not having an unlock.
>
>  - that whole double-while(1)-loop thing is gone.
>
>  - use multiple functions to make nesting and error handling sane
>
>  - do error testing after doing the things you always need to do, ie do
>   this:
>
>        mutex_lock(..)
>        ret = function_call();
>        mutex_unlock(..)
>
>        .. test ret here ..
>
>   instead of doing conditional exits with unlocking or freeing.
>
> So if the code is written in this way, it may still be buggy, but at least
> it's not buggy because of subtle "forgot to unlock" or "forgot to free"
> issues.
>
> This _always_ unlocks if it locked, and it always frees if it got a
> non-error kevent.
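
For reference, the loop body of the rewritten inotify_read() below
reduces to exactly that shape. An annotated copy of it follows (the
comments are added here for illustration and are not part of the patch):

	mutex_lock(&dev->ev_mutex);
	kevent = get_one_event(dev, count);	/* NULL, ERR_PTR(), or a dequeued event */
	mutex_unlock(&dev->ev_mutex);		/* the single unlock for this lock */

	if (kevent) {
		ret = PTR_ERR(kevent);
		if (IS_ERR(kevent))
			break;			/* buffer too small; nothing was dequeued */
		ret = copy_event_to_user(kevent, buf);
		free_kevent(kevent);		/* always freed once we own a real event */
		if (ret < 0)
			break;			/* -EFAULT from the copy */
		buf += ret;
		count -= ret;
		continue;
	}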

Cc: John McCutchan <ttb@tentacle.dhs.org>
Cc: Robert Love <rlove@google.com>
Cc: <stable@kernel.org>
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent aeb565df
@@ -427,10 +427,61 @@ static unsigned int inotify_poll(struct file *file, poll_table *wait)
 	return ret;
 }
 
+/*
+ * Get an inotify_kernel_event if one exists and is small
+ * enough to fit in "count". Return an error pointer if
+ * not large enough.
+ *
+ * Called with the device ev_mutex held.
+ */
+static struct inotify_kernel_event *get_one_event(struct inotify_device *dev,
+						  size_t count)
+{
+	size_t event_size = sizeof(struct inotify_event);
+	struct inotify_kernel_event *kevent;
+
+	if (list_empty(&dev->events))
+		return NULL;
+
+	kevent = inotify_dev_get_event(dev);
+	if (kevent->name)
+		event_size += kevent->event.len;
+
+	if (event_size > count)
+		return ERR_PTR(-EINVAL);
+
+	remove_kevent(dev, kevent);
+	return kevent;
+}
+
+/*
+ * Copy an event to user space, returning how much we copied.
+ *
+ * We already checked that the event size is smaller than the
+ * buffer we had in "get_one_event()" above.
+ */
+static ssize_t copy_event_to_user(struct inotify_kernel_event *kevent,
+				  char __user *buf)
+{
+	size_t event_size = sizeof(struct inotify_event);
+
+	if (copy_to_user(buf, &kevent->event, event_size))
+		return -EFAULT;
+
+	if (kevent->name) {
+		buf += event_size;
+
+		if (copy_to_user(buf, kevent->name, kevent->event.len))
+			return -EFAULT;
+
+		event_size += kevent->event.len;
+	}
+	return event_size;
+}
+
 static ssize_t inotify_read(struct file *file, char __user *buf,
 			    size_t count, loff_t *pos)
 {
-	size_t event_size = sizeof (struct inotify_event);
 	struct inotify_device *dev;
 	char __user *start;
 	int ret;
@@ -440,81 +491,43 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
 	dev = file->private_data;
 
 	while (1) {
+		struct inotify_kernel_event *kevent;
 
 		prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);
 
 		mutex_lock(&dev->ev_mutex);
-		if (!list_empty(&dev->events)) {
-			ret = 0;
-			break;
-		}
+		kevent = get_one_event(dev, count);
 		mutex_unlock(&dev->ev_mutex);
 
-		if (file->f_flags & O_NONBLOCK) {
-			ret = -EAGAIN;
-			break;
-		}
+		if (kevent) {
+			ret = PTR_ERR(kevent);
+			if (IS_ERR(kevent))
+				break;
+			ret = copy_event_to_user(kevent, buf);
+			free_kevent(kevent);
+			if (ret < 0)
+				break;
+			buf += ret;
+			count -= ret;
+			continue;
+		}
 
-		if (signal_pending(current)) {
-			ret = -EINTR;
-			break;
-		}
+		ret = -EAGAIN;
+		if (file->f_flags & O_NONBLOCK)
+			break;
+		ret = -EINTR;
+		if (signal_pending(current))
+			break;
 
-		schedule();
-	}
-
-	finish_wait(&dev->wq, &wait);
-	if (ret)
-		return ret;
-
-	while (1) {
-		struct inotify_kernel_event *kevent;
-
-		ret = buf - start;
-		if (list_empty(&dev->events))
-			break;
-
-		kevent = inotify_dev_get_event(dev);
-		if (event_size + kevent->event.len > count) {
-			if (ret == 0 && count > 0) {
-				/*
-				 * could not get a single event because we
-				 * didn't have enough buffer space.
-				 */
-				ret = -EINVAL;
-			}
-			break;
-		}
-		remove_kevent(dev, kevent);
-
-		/*
-		 * Must perform the copy_to_user outside the mutex in order
-		 * to avoid a lock order reversal with mmap_sem.
-		 */
-		mutex_unlock(&dev->ev_mutex);
-
-		if (copy_to_user(buf, &kevent->event, event_size)) {
-			ret = -EFAULT;
-			break;
-		}
-		buf += event_size;
-		count -= event_size;
-		if (kevent->name) {
-			if (copy_to_user(buf, kevent->name, kevent->event.len)){
-				ret = -EFAULT;
-				break;
-			}
-			buf += kevent->event.len;
-			count -= kevent->event.len;
-		}
-		free_kevent(kevent);
+		if (start != buf)
+			break;
 
-		mutex_lock(&dev->ev_mutex);
+		schedule();
 	}
-	mutex_unlock(&dev->ev_mutex);
 
 	finish_wait(&dev->wq, &wait);
+	if (start != buf && ret != -EFAULT)
+		ret = buf - start;
 
 	return ret;
 }