Commit 9c504cad authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'for-linus' of git://git.infradead.org/users/eparis/notify

* 'for-linus' of git://git.infradead.org/users/eparis/notify:
  inotify: Ensure we always write the terminating NULL.
  inotify: fix locking around inotify watching in the idr
  inotify: do not BUG on idr entries at inotify destruction
  inotify: separate new watch creation updating existing watches
parents 4f8ee2c9 0db501bd
...@@ -105,16 +105,45 @@ static bool inotify_should_send_event(struct fsnotify_group *group, struct inode ...@@ -105,16 +105,45 @@ static bool inotify_should_send_event(struct fsnotify_group *group, struct inode
return send; return send;
} }
/*
* This is NEVER supposed to be called. Inotify marks should either have been
* removed from the idr when the watch was removed or in the
* fsnotify_destroy_mark_by_group() call when the inotify instance was being
* torn down. This is only called if the idr is about to be freed but there
* are still marks in it.
*/
static int idr_callback(int id, void *p, void *data) static int idr_callback(int id, void *p, void *data)
{ {
BUG(); struct fsnotify_mark_entry *entry;
struct inotify_inode_mark_entry *ientry;
static bool warned = false;
if (warned)
return 0;
warned = true;
entry = p;
ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
WARN(1, "inotify closing but id=%d for entry=%p in group=%p still in "
"idr. Probably leaking memory\n", id, p, data);
/*
* I'm taking the liberty of assuming that the mark in question is a
* valid address and I'm dereferencing it. This might help to figure
* out why we got here and the panic is no worse than the original
* BUG() that was here.
*/
if (entry)
printk(KERN_WARNING "entry->group=%p inode=%p wd=%d\n",
entry->group, entry->inode, ientry->wd);
return 0; return 0;
} }
static void inotify_free_group_priv(struct fsnotify_group *group) static void inotify_free_group_priv(struct fsnotify_group *group)
{ {
/* ideally the idr is empty and we won't hit the BUG in the callback */ /* ideally the idr is empty and we won't hit the BUG in the callback */
idr_for_each(&group->inotify_data.idr, idr_callback, NULL); idr_for_each(&group->inotify_data.idr, idr_callback, group);
idr_remove_all(&group->inotify_data.idr); idr_remove_all(&group->inotify_data.idr);
idr_destroy(&group->inotify_data.idr); idr_destroy(&group->inotify_data.idr);
} }
......
...@@ -47,9 +47,6 @@ ...@@ -47,9 +47,6 @@
static struct vfsmount *inotify_mnt __read_mostly; static struct vfsmount *inotify_mnt __read_mostly;
/* this just sits here and wastes global memory. used to just pad userspace messages with zeros */
static struct inotify_event nul_inotify_event;
/* these are configurable via /proc/sys/fs/inotify/ */ /* these are configurable via /proc/sys/fs/inotify/ */
static int inotify_max_user_instances __read_mostly; static int inotify_max_user_instances __read_mostly;
static int inotify_max_queued_events __read_mostly; static int inotify_max_queued_events __read_mostly;
...@@ -199,8 +196,10 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, ...@@ -199,8 +196,10 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
inotify_free_event_priv(fsn_priv); inotify_free_event_priv(fsn_priv);
} }
/* round up event->name_len so it is a multiple of event_size */ /* round up event->name_len so it is a multiple of event_size
name_len = roundup(event->name_len, event_size); * plus an extra byte for the terminating '\0'.
*/
name_len = roundup(event->name_len + 1, event_size);
inotify_event.len = name_len; inotify_event.len = name_len;
inotify_event.mask = inotify_mask_to_arg(event->mask); inotify_event.mask = inotify_mask_to_arg(event->mask);
...@@ -224,8 +223,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, ...@@ -224,8 +223,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
return -EFAULT; return -EFAULT;
buf += event->name_len; buf += event->name_len;
/* fill userspace with 0's from nul_inotify_event */ /* fill userspace with 0's */
if (copy_to_user(buf, &nul_inotify_event, len_to_zero)) if (clear_user(buf, len_to_zero))
return -EFAULT; return -EFAULT;
buf += len_to_zero; buf += len_to_zero;
event_size += name_len; event_size += name_len;
...@@ -364,20 +363,53 @@ static int inotify_find_inode(const char __user *dirname, struct path *path, uns ...@@ -364,20 +363,53 @@ static int inotify_find_inode(const char __user *dirname, struct path *path, uns
return error; return error;
} }
/*
* Remove the mark from the idr (if present) and drop the reference
* on the mark because it was in the idr.
*/
static void inotify_remove_from_idr(struct fsnotify_group *group, static void inotify_remove_from_idr(struct fsnotify_group *group,
struct inotify_inode_mark_entry *ientry) struct inotify_inode_mark_entry *ientry)
{ {
struct idr *idr; struct idr *idr;
struct fsnotify_mark_entry *entry;
struct inotify_inode_mark_entry *found_ientry;
int wd;
spin_lock(&group->inotify_data.idr_lock); spin_lock(&group->inotify_data.idr_lock);
idr = &group->inotify_data.idr; idr = &group->inotify_data.idr;
idr_remove(idr, ientry->wd); wd = ientry->wd;
spin_unlock(&group->inotify_data.idr_lock);
if (wd == -1)
goto out;
entry = idr_find(&group->inotify_data.idr, wd);
if (unlikely(!entry))
goto out;
found_ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
if (unlikely(found_ientry != ientry)) {
/* We found an entry in the idr with the right wd, but it's
* not the entry we were told to remove. eparis seriously
* fucked up somewhere. */
WARN_ON(1);
ientry->wd = -1; ientry->wd = -1;
goto out;
}
/* One ref for being in the idr, one ref held by the caller */
BUG_ON(atomic_read(&entry->refcnt) < 2);
idr_remove(idr, wd);
ientry->wd = -1;
/* removed from the idr, drop that ref */
fsnotify_put_mark(entry);
out:
spin_unlock(&group->inotify_data.idr_lock);
} }
/* /*
* Send IN_IGNORED for this wd, remove this wd from the idr, and drop the * Send IN_IGNORED for this wd, remove this wd from the idr.
* internal reference held on the mark because it is in the idr.
*/ */
void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry, void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
struct fsnotify_group *group) struct fsnotify_group *group)
...@@ -417,9 +449,6 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry, ...@@ -417,9 +449,6 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
/* remove this entry from the idr */ /* remove this entry from the idr */
inotify_remove_from_idr(group, ientry); inotify_remove_from_idr(group, ientry);
/* removed from idr, drop that reference */
fsnotify_put_mark(entry);
atomic_dec(&group->inotify_data.user->inotify_watches); atomic_dec(&group->inotify_data.user->inotify_watches);
} }
...@@ -431,15 +460,76 @@ static void inotify_free_mark(struct fsnotify_mark_entry *entry) ...@@ -431,15 +460,76 @@ static void inotify_free_mark(struct fsnotify_mark_entry *entry)
kmem_cache_free(inotify_inode_mark_cachep, ientry); kmem_cache_free(inotify_inode_mark_cachep, ientry);
} }
static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg) static int inotify_update_existing_watch(struct fsnotify_group *group,
struct inode *inode,
u32 arg)
{ {
struct fsnotify_mark_entry *entry = NULL; struct fsnotify_mark_entry *entry;
struct inotify_inode_mark_entry *ientry; struct inotify_inode_mark_entry *ientry;
struct inotify_inode_mark_entry *tmp_ientry; __u32 old_mask, new_mask;
int ret = 0; __u32 mask;
int add = (arg & IN_MASK_ADD); int add = (arg & IN_MASK_ADD);
int ret;
/* don't allow invalid bits: we don't want flags set */
mask = inotify_arg_to_mask(arg);
if (unlikely(!mask))
return -EINVAL;
spin_lock(&inode->i_lock);
entry = fsnotify_find_mark_entry(group, inode);
spin_unlock(&inode->i_lock);
if (!entry)
return -ENOENT;
ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
spin_lock(&entry->lock);
old_mask = entry->mask;
if (add) {
entry->mask |= mask;
new_mask = entry->mask;
} else {
entry->mask = mask;
new_mask = entry->mask;
}
spin_unlock(&entry->lock);
if (old_mask != new_mask) {
/* more bits in old than in new? */
int dropped = (old_mask & ~new_mask);
/* more bits in this entry than the inode's mask? */
int do_inode = (new_mask & ~inode->i_fsnotify_mask);
/* more bits in this entry than the group? */
int do_group = (new_mask & ~group->mask);
/* update the inode with this new entry */
if (dropped || do_inode)
fsnotify_recalc_inode_mask(inode);
/* update the group mask with the new mask */
if (dropped || do_group)
fsnotify_recalc_group_mask(group);
}
/* return the wd */
ret = ientry->wd;
/* match the get from fsnotify_find_mark_entry() */
fsnotify_put_mark(entry);
return ret;
}
static int inotify_new_watch(struct fsnotify_group *group,
struct inode *inode,
u32 arg)
{
struct inotify_inode_mark_entry *tmp_ientry;
__u32 mask; __u32 mask;
__u32 old_mask, new_mask; int ret;
/* don't allow invalid bits: we don't want flags set */ /* don't allow invalid bits: we don't want flags set */
mask = inotify_arg_to_mask(arg); mask = inotify_arg_to_mask(arg);
...@@ -449,17 +539,11 @@ static int inotify_update_watch(struct fsnotify_group *group, struct inode *inod ...@@ -449,17 +539,11 @@ static int inotify_update_watch(struct fsnotify_group *group, struct inode *inod
tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL); tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
if (unlikely(!tmp_ientry)) if (unlikely(!tmp_ientry))
return -ENOMEM; return -ENOMEM;
/* we set the mask at the end after attaching it */
fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark); fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
tmp_ientry->fsn_entry.mask = mask;
tmp_ientry->wd = -1; tmp_ientry->wd = -1;
find_entry:
spin_lock(&inode->i_lock);
entry = fsnotify_find_mark_entry(group, inode);
spin_unlock(&inode->i_lock);
if (entry) {
ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
} else {
ret = -ENOSPC; ret = -ENOSPC;
if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches) if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
goto out_err; goto out_err;
...@@ -474,80 +558,59 @@ static int inotify_update_watch(struct fsnotify_group *group, struct inode *inod ...@@ -474,80 +558,59 @@ static int inotify_update_watch(struct fsnotify_group *group, struct inode *inod
&tmp_ientry->wd); &tmp_ientry->wd);
spin_unlock(&group->inotify_data.idr_lock); spin_unlock(&group->inotify_data.idr_lock);
if (ret) { if (ret) {
/* idr was out of memory allocate and try again */
if (ret == -EAGAIN) if (ret == -EAGAIN)
goto retry; goto retry;
goto out_err; goto out_err;
} }
/* we put the mark on the idr, take a reference */
fsnotify_get_mark(&tmp_ientry->fsn_entry);
/* we are on the idr, now get on the inode */
ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode); ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
if (ret) { if (ret) {
/* we failed to get on the inode, get off the idr */
inotify_remove_from_idr(group, tmp_ientry); inotify_remove_from_idr(group, tmp_ientry);
if (ret == -EEXIST)
goto find_entry;
goto out_err; goto out_err;
} }
/* tmp_ientry has been added to the inode, so we are all set up. /* update the idr hint, who cares about races, it's just a hint */
* now we just need to make sure tmp_ientry doesn't get freed and group->inotify_data.last_wd = tmp_ientry->wd;
* we need to set up entry and ientry so the generic code can
* do its thing. */
ientry = tmp_ientry;
entry = &ientry->fsn_entry;
tmp_ientry = NULL;
/* increment the number of watches the user has */
atomic_inc(&group->inotify_data.user->inotify_watches); atomic_inc(&group->inotify_data.user->inotify_watches);
/* update the idr hint */ /* return the watch descriptor for this new entry */
group->inotify_data.last_wd = ientry->wd; ret = tmp_ientry->wd;
/* we put the mark on the idr, take a reference */
fsnotify_get_mark(entry);
}
ret = ientry->wd;
spin_lock(&entry->lock);
old_mask = entry->mask;
if (add) {
entry->mask |= mask;
new_mask = entry->mask;
} else {
entry->mask = mask;
new_mask = entry->mask;
}
spin_unlock(&entry->lock);
if (old_mask != new_mask) { /* match the ref from fsnotify_init_markentry() */
/* more bits in old than in new? */ fsnotify_put_mark(&tmp_ientry->fsn_entry);
int dropped = (old_mask & ~new_mask);
/* more bits in this entry than the inode's mask? */
int do_inode = (new_mask & ~inode->i_fsnotify_mask);
/* more bits in this entry than the group? */
int do_group = (new_mask & ~group->mask);
/* update the inode with this new entry */ out_err:
if (dropped || do_inode) if (ret < 0)
fsnotify_recalc_inode_mask(inode); kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry);
/* update the group mask with the new mask */ return ret;
if (dropped || do_group) }
fsnotify_recalc_group_mask(group);
}
/* this either matches fsnotify_find_mark_entry, or init_mark_entry static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
* depending on which path we took... */ {
fsnotify_put_mark(entry); int ret = 0;
out_err: retry:
/* could be an error, could be that we found an existing mark */ /* try to update an existing watch with the new arg */
if (tmp_ientry) { ret = inotify_update_existing_watch(group, inode, arg);
/* on the idr but didn't make it on the inode */ /* no mark present, try to add a new one */
if (tmp_ientry->wd != -1) if (ret == -ENOENT)
inotify_remove_from_idr(group, tmp_ientry); ret = inotify_new_watch(group, inode, arg);
kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry); /*
} * inotify_new_watch could race with another thread which did an
* inotify_new_watch between the update_existing and the add watch
* here, go back and try to update an existing mark again.
*/
if (ret == -EEXIST)
goto retry;
return ret; return ret;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment