Commit 96680d2b authored by Linus Torvalds

Merge branch 'for-next' of git://git.infradead.org/users/eparis/notify

Pull filesystem notification updates from Eric Paris:
 "This pull mostly is about locking changes in the fsnotify system.  By
  switching the group lock from a spin_lock() to a mutex() we can now
  hold the lock across things like iput().  This fixes a problem
  involving unmounting a fs and having inodes be busy, first pointed out
  by FAT, but reproducible with tmpfs.

  This also restores signal driven I/O for inotify, which has been
  broken since about 2.6.32."
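
For context, signal-driven I/O on an inotify descriptor is requested from userspace with fcntl(): F_SETOWN picks the SIGIO recipient and setting O_ASYNC via F_SETFL turns on FASYNC, which is what invokes the file's ->fasync handler. The sketch below is only an illustration of the restored behaviour (not part of the series); the watched path and event mask are arbitrary examples.

/*
 * Illustrative userspace sketch: request SIGIO from an inotify fd via
 * O_ASYNC and read the queued events when the signal arrives.
 */
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <sys/inotify.h>
#include <unistd.h>

static volatile sig_atomic_t got_sigio;

static void on_sigio(int sig)
{
        (void)sig;
        got_sigio = 1;          /* async-signal-safe: only set a flag */
}

int main(void)
{
        char buf[4096];
        int flags, fd = inotify_init1(IN_NONBLOCK);

        if (fd < 0 || inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE) < 0) {
                perror("inotify");
                return 1;
        }

        signal(SIGIO, on_sigio);
        fcntl(fd, F_SETOWN, getpid());          /* deliver SIGIO to this process */
        flags = fcntl(fd, F_GETFL);
        fcntl(fd, F_SETFL, flags | O_ASYNC);    /* sets FASYNC -> ->fasync() hook */

        for (;;) {
                pause();                        /* woken by SIGIO */
                if (!got_sigio)
                        continue;
                got_sigio = 0;
                ssize_t n = read(fd, buf, sizeof(buf));
                if (n > 0)
                        printf("read %zd bytes of inotify events\n", n);
        }
}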

Ugh.  I *hate* the timing of this.  It was rebased after the merge
window opened, and then left to sit with the pull request coming the day
before the merge window closes.  That's just crap.  But apparently the
patches themselves have been around for over a year, just gathering
dust, so now it's suddenly critical.

Fixed up semantic conflict in fs/notify/fdinfo.c as per Stephen
Rothwell's fixes from -next.

* 'for-next' of git://git.infradead.org/users/eparis/notify:
  inotify: automatically restart syscalls
  inotify: dont skip removal of watch descriptor if creation of ignored event failed
  fanotify: dont merge permission events
  fsnotify: make fasync generic for both inotify and fanotify
  fsnotify: change locking order
  fsnotify: dont put marks on temporary list when clearing marks by group
  fsnotify: introduce locked versions of fsnotify_add_mark() and fsnotify_remove_mark()
  fsnotify: pass group to fsnotify_destroy_mark()
  fsnotify: use a mutex instead of a spinlock to protect a groups mark list
  fanotify: add an extra flag to mark_remove_from_mask that indicates wheather a mark should be destroyed
  fsnotify: take groups mark_lock before mark lock
  fsnotify: use reference counting for groups
  fsnotify: introduce fsnotify_get_group()
  inotify, fanotify: replace fsnotify_put_group() with fsnotify_destroy_group()
parents 4c9a44ae 1ca39ab9
@@ -201,7 +201,7 @@ void dnotify_flush(struct file *filp, fl_owner_t id)
 	/* nothing else could have found us thanks to the dnotify_mark_mutex */
 	if (dn_mark->dn == NULL)
-		fsnotify_destroy_mark(fsn_mark);
+		fsnotify_destroy_mark(fsn_mark, dnotify_group);
 	mutex_unlock(&dnotify_mark_mutex);
@@ -385,7 +385,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
 	spin_unlock(&fsn_mark->lock);
 	if (destroy)
-		fsnotify_destroy_mark(fsn_mark);
+		fsnotify_destroy_mark(fsn_mark, dnotify_group);
 	mutex_unlock(&dnotify_mark_mutex);
 	fsnotify_put_mark(fsn_mark);
...
@@ -18,6 +18,12 @@ static bool should_merge(struct fsnotify_event *old, struct fsnotify_event *new)
 	    old->tgid == new->tgid) {
 		switch (old->data_type) {
 		case (FSNOTIFY_EVENT_PATH):
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+			/* dont merge two permission events */
+			if ((old->mask & FAN_ALL_PERM_EVENTS) &&
+			    (new->mask & FAN_ALL_PERM_EVENTS))
+				return false;
+#endif
 			if ((old->path.mnt == new->path.mnt) &&
 			    (old->path.dentry == new->path.dentry))
 				return true;
...
@@ -397,8 +397,12 @@ static int fanotify_release(struct inode *ignored, struct file *file)
 	wake_up(&group->fanotify_data.access_waitq);
 #endif
+	if (file->f_flags & FASYNC)
+		fsnotify_fasync(-1, file, 0);
+
 	/* matches the fanotify_init->fsnotify_alloc_group */
-	fsnotify_put_group(group);
+	fsnotify_destroy_group(group);
 	return 0;
 }
@@ -493,7 +497,8 @@ static int fanotify_find_path(int dfd, const char __user *filename,
 static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
 					    __u32 mask,
-					    unsigned int flags)
+					    unsigned int flags,
+					    int *destroy)
 {
 	__u32 oldmask;
@@ -507,8 +512,7 @@ static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
 	}
 	spin_unlock(&fsn_mark->lock);
-	if (!(oldmask & ~mask))
-		fsnotify_destroy_mark(fsn_mark);
+	*destroy = !(oldmask & ~mask);
 	return mask & oldmask;
 }
@@ -519,12 +523,17 @@ static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
 {
 	struct fsnotify_mark *fsn_mark = NULL;
 	__u32 removed;
+	int destroy_mark;
 	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
 	if (!fsn_mark)
 		return -ENOENT;
-	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
+	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
+						 &destroy_mark);
+	if (destroy_mark)
+		fsnotify_destroy_mark(fsn_mark, group);
+
 	fsnotify_put_mark(fsn_mark);
 	if (removed & real_mount(mnt)->mnt_fsnotify_mask)
 		fsnotify_recalc_vfsmount_mask(mnt);
@@ -538,12 +547,16 @@ static int fanotify_remove_inode_mark(struct fsnotify_group *group,
 {
 	struct fsnotify_mark *fsn_mark = NULL;
 	__u32 removed;
+	int destroy_mark;
 	fsn_mark = fsnotify_find_inode_mark(group, inode);
 	if (!fsn_mark)
 		return -ENOENT;
-	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
+	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
+						 &destroy_mark);
+	if (destroy_mark)
+		fsnotify_destroy_mark(fsn_mark, group);
 	/* matches the fsnotify_find_inode_mark() */
 	fsnotify_put_mark(fsn_mark);
 	if (removed & inode->i_fsnotify_mask)
@@ -710,13 +723,13 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
 		break;
 	default:
 		fd = -EINVAL;
-		goto out_put_group;
+		goto out_destroy_group;
 	}
 	if (flags & FAN_UNLIMITED_QUEUE) {
 		fd = -EPERM;
 		if (!capable(CAP_SYS_ADMIN))
-			goto out_put_group;
+			goto out_destroy_group;
 		group->max_events = UINT_MAX;
 	} else {
 		group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
@@ -725,7 +738,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
 	if (flags & FAN_UNLIMITED_MARKS) {
 		fd = -EPERM;
 		if (!capable(CAP_SYS_ADMIN))
-			goto out_put_group;
+			goto out_destroy_group;
 		group->fanotify_data.max_marks = UINT_MAX;
 	} else {
 		group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
@@ -733,12 +746,12 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
 	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
 	if (fd < 0)
-		goto out_put_group;
+		goto out_destroy_group;
 	return fd;
-out_put_group:
-	fsnotify_put_group(group);
+out_destroy_group:
+	fsnotify_destroy_group(group);
 	return fd;
 }
...
@@ -27,13 +27,13 @@ static int show_fdinfo(struct seq_file *m, struct file *f,
 	struct fsnotify_mark *mark;
 	int ret = 0;
-	spin_lock(&group->mark_lock);
+	mutex_lock(&group->mark_mutex);
 	list_for_each_entry(mark, &group->marks_list, g_list) {
 		ret = show(m, mark);
 		if (ret)
 			break;
 	}
-	spin_unlock(&group->mark_lock);
+	mutex_unlock(&group->mark_mutex);
 	return ret;
 }
...
@@ -33,9 +33,6 @@
  */
 void fsnotify_final_destroy_group(struct fsnotify_group *group)
 {
-	/* clear the notification queue of all events */
-	fsnotify_flush_notify(group);
-
 	if (group->ops->free_group_priv)
 		group->ops->free_group_priv(group);
@@ -43,23 +40,30 @@ void fsnotify_final_destroy_group(struct fsnotify_group *group)
 }
 /*
- * Trying to get rid of a group. We need to first get rid of any outstanding
- * allocations and then free the group. Remember that fsnotify_clear_marks_by_group
- * could miss marks that are being freed by inode and those marks could still
- * hold a reference to this group (via group->num_marks) If we get into that
- * situtation, the fsnotify_final_destroy_group will get called when that final
- * mark is freed.
+ * Trying to get rid of a group. Remove all marks, flush all events and release
+ * the group reference.
+ * Note that another thread calling fsnotify_clear_marks_by_group() may still
+ * hold a ref to the group.
  */
-static void fsnotify_destroy_group(struct fsnotify_group *group)
+void fsnotify_destroy_group(struct fsnotify_group *group)
 {
 	/* clear all inode marks for this group */
 	fsnotify_clear_marks_by_group(group);
 	synchronize_srcu(&fsnotify_mark_srcu);
-	/* past the point of no return, matches the initial value of 1 */
-	if (atomic_dec_and_test(&group->num_marks))
-		fsnotify_final_destroy_group(group);
+	/* clear the notification queue of all events */
+	fsnotify_flush_notify(group);
+
+	fsnotify_put_group(group);
+}
+
+/*
+ * Get reference to a group.
+ */
+void fsnotify_get_group(struct fsnotify_group *group)
+{
+	atomic_inc(&group->refcnt);
 }
 /*
@@ -68,7 +72,7 @@ static void fsnotify_destroy_group(struct fsnotify_group *group)
 void fsnotify_put_group(struct fsnotify_group *group)
 {
 	if (atomic_dec_and_test(&group->refcnt))
-		fsnotify_destroy_group(group);
+		fsnotify_final_destroy_group(group);
 }
 /*
@@ -84,21 +88,24 @@ struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
 	/* set to 0 when there a no external references to this group */
 	atomic_set(&group->refcnt, 1);
-	/*
-	 * hits 0 when there are no external references AND no marks for
-	 * this group
-	 */
-	atomic_set(&group->num_marks, 1);
+	atomic_set(&group->num_marks, 0);
 	mutex_init(&group->notification_mutex);
 	INIT_LIST_HEAD(&group->notification_list);
 	init_waitqueue_head(&group->notification_waitq);
 	group->max_events = UINT_MAX;
-	spin_lock_init(&group->mark_lock);
+	mutex_init(&group->mark_mutex);
 	INIT_LIST_HEAD(&group->marks_list);
 	group->ops = ops;
 	return group;
 }
+
+int fsnotify_fasync(int fd, struct file *file, int on)
+{
+	struct fsnotify_group *group = file->private_data;
+
+	return fasync_helper(fd, file, on, &group->fsn_fa) >= 0 ? 0 : -EIO;
+}
@@ -63,8 +63,8 @@ void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark)
 {
 	struct inode *inode = mark->i.inode;
+	BUG_ON(!mutex_is_locked(&mark->group->mark_mutex));
 	assert_spin_locked(&mark->lock);
-	assert_spin_locked(&mark->group->mark_lock);
 	spin_lock(&inode->i_lock);
@@ -99,8 +99,16 @@ void fsnotify_clear_marks_by_inode(struct inode *inode)
 	spin_unlock(&inode->i_lock);
 	list_for_each_entry_safe(mark, lmark, &free_list, i.free_i_list) {
-		fsnotify_destroy_mark(mark);
+		struct fsnotify_group *group;
+
+		spin_lock(&mark->lock);
+		fsnotify_get_group(mark->group);
+		group = mark->group;
+		spin_unlock(&mark->lock);
+
+		fsnotify_destroy_mark(mark, group);
 		fsnotify_put_mark(mark);
+		fsnotify_put_group(group);
 	}
 }
@@ -192,8 +200,8 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
 	mark->flags |= FSNOTIFY_MARK_FLAG_INODE;
+	BUG_ON(!mutex_is_locked(&group->mark_mutex));
 	assert_spin_locked(&mark->lock);
-	assert_spin_locked(&group->mark_lock);
 	spin_lock(&inode->i_lock);
...
@@ -118,6 +118,7 @@ static int inotify_handle_event(struct fsnotify_group *group,
 	fsn_event_priv = &event_priv->fsnotify_event_priv_data;
+	fsnotify_get_group(group);
 	fsn_event_priv->group = group;
 	event_priv->wd = wd;
@@ -131,7 +132,7 @@ static int inotify_handle_event(struct fsnotify_group *group,
 	}
 	if (inode_mark->mask & IN_ONESHOT)
-		fsnotify_destroy_mark(inode_mark);
+		fsnotify_destroy_mark(inode_mark, group);
 	return ret;
 }
@@ -210,6 +211,7 @@ void inotify_free_event_priv(struct fsnotify_event_private_data *fsn_event_priv)
 	event_priv = container_of(fsn_event_priv, struct inotify_event_private_data,
 				  fsnotify_event_priv_data);
+	fsnotify_put_group(fsn_event_priv->group);
 	kmem_cache_free(event_priv_cachep, event_priv);
 }
...
@@ -265,7 +265,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
 		ret = -EAGAIN;
 		if (file->f_flags & O_NONBLOCK)
 			break;
-		ret = -EINTR;
+		ret = -ERESTARTSYS;
 		if (signal_pending(current))
 			break;
@@ -281,23 +281,17 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
 	return ret;
 }
-static int inotify_fasync(int fd, struct file *file, int on)
-{
-	struct fsnotify_group *group = file->private_data;
-
-	return fasync_helper(fd, file, on, &group->inotify_data.fa) >= 0 ? 0 : -EIO;
-}
-
 static int inotify_release(struct inode *ignored, struct file *file)
 {
 	struct fsnotify_group *group = file->private_data;
 	pr_debug("%s: group=%p\n", __func__, group);
-	fsnotify_clear_marks_by_group(group);
+	if (file->f_flags & FASYNC)
+		fsnotify_fasync(-1, file, 0);
 	/* free this group, matching get was inotify_init->fsnotify_obtain_group */
-	fsnotify_put_group(group);
+	fsnotify_destroy_group(group);
 	return 0;
 }
@@ -339,7 +333,7 @@ static const struct file_operations inotify_fops = {
 	.show_fdinfo = inotify_show_fdinfo,
 	.poll = inotify_poll,
 	.read = inotify_read,
-	.fasync = inotify_fasync,
+	.fasync = fsnotify_fasync,
 	.release = inotify_release,
 	.unlocked_ioctl = inotify_ioctl,
 	.compat_ioctl = inotify_ioctl,
@@ -521,13 +515,13 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
 	struct fsnotify_event_private_data *fsn_event_priv;
 	int ret;
+	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
+
 	ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
 					      FSNOTIFY_EVENT_NONE, NULL, 0,
 					      GFP_NOFS);
 	if (!ignored_event)
-		return;
-
-	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
+		goto skip_send_ignore;
 	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
 	if (unlikely(!event_priv))
@@ -535,6 +529,7 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
 	fsn_event_priv = &event_priv->fsnotify_event_priv_data;
+	fsnotify_get_group(group);
 	fsn_event_priv->group = group;
 	event_priv->wd = i_mark->wd;
@@ -548,8 +543,8 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
 	}
 skip_send_ignore:
 	/* matches the reference taken when the event was created */
-	if (ignored_event)
-		fsnotify_put_event(ignored_event);
+	fsnotify_put_event(ignored_event);
 	/* remove this mark from the idr */
@@ -709,12 +704,11 @@ static struct fsnotify_group *inotify_new_group(unsigned int max_events)
 	spin_lock_init(&group->inotify_data.idr_lock);
 	idr_init(&group->inotify_data.idr);
 	group->inotify_data.last_wd = 0;
-	group->inotify_data.fa = NULL;
 	group->inotify_data.user = get_current_user();
 	if (atomic_inc_return(&group->inotify_data.user->inotify_devs) >
 	    inotify_max_user_instances) {
-		fsnotify_put_group(group);
+		fsnotify_destroy_group(group);
 		return ERR_PTR(-EMFILE);
 	}
@@ -743,7 +737,7 @@ SYSCALL_DEFINE1(inotify_init1, int, flags)
 	ret = anon_inode_getfd("inotify", &inotify_fops, group,
 			       O_RDONLY | flags);
 	if (ret < 0)
-		fsnotify_put_group(group);
+		fsnotify_destroy_group(group);
 	return ret;
 }
@@ -819,7 +813,7 @@ SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
 	ret = 0;
-	fsnotify_destroy_mark(&i_mark->fsn_mark);
+	fsnotify_destroy_mark(&i_mark->fsn_mark, group);
 	/* match ref taken by inotify_idr_find */
 	fsnotify_put_mark(&i_mark->fsn_mark);
...
@@ -109,8 +109,11 @@ void fsnotify_get_mark(struct fsnotify_mark *mark)
 void fsnotify_put_mark(struct fsnotify_mark *mark)
 {
-	if (atomic_dec_and_test(&mark->refcnt))
+	if (atomic_dec_and_test(&mark->refcnt)) {
+		if (mark->group)
+			fsnotify_put_group(mark->group);
 		mark->free_mark(mark);
+	}
 }
 /*
@@ -118,14 +121,14 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
  * The caller had better be holding a reference to this mark so we don't actually
  * do the final put under the mark->lock
  */
-void fsnotify_destroy_mark(struct fsnotify_mark *mark)
+void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
+				  struct fsnotify_group *group)
 {
-	struct fsnotify_group *group;
 	struct inode *inode = NULL;
-	spin_lock(&mark->lock);
+	BUG_ON(!mutex_is_locked(&group->mark_mutex));
-	group = mark->group;
+	spin_lock(&mark->lock);
 	/* something else already called this function on this mark */
 	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) {
@@ -135,8 +138,6 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
 	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
-	spin_lock(&group->mark_lock);
-
 	if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) {
 		inode = mark->i.inode;
 		fsnotify_destroy_inode_mark(mark);
@@ -147,13 +148,22 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
 	list_del_init(&mark->g_list);
-	spin_unlock(&group->mark_lock);
 	spin_unlock(&mark->lock);
+	if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED))
+		iput(inode);
+	/* release lock temporarily */
+	mutex_unlock(&group->mark_mutex);
+
 	spin_lock(&destroy_lock);
 	list_add(&mark->destroy_list, &destroy_list);
 	spin_unlock(&destroy_lock);
 	wake_up(&destroy_waitq);
+	/*
+	 * We don't necessarily have a ref on mark from caller so the above destroy
+	 * may have actually freed it, unless this group provides a 'freeing_mark'
+	 * function which must be holding a reference.
+	 */
 	/*
 	 * Some groups like to know that marks are being freed.  This is a
@@ -175,21 +185,17 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
 	 * is just a lazy update (and could be a perf win...)
 	 */
-	if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED))
-		iput(inode);
+	atomic_dec(&group->num_marks);
-	/*
-	 * We don't necessarily have a ref on mark from caller so the above iput
-	 * may have already destroyed it. Don't touch from now on.
-	 */
-
-	/*
-	 * it's possible that this group tried to destroy itself, but this
-	 * this mark was simultaneously being freed by inode. If that's the
-	 * case, we finish freeing the group here.
-	 */
-	if (unlikely(atomic_dec_and_test(&group->num_marks)))
-		fsnotify_final_destroy_group(group);
+	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
+}
+
+void fsnotify_destroy_mark(struct fsnotify_mark *mark,
+			   struct fsnotify_group *group)
+{
+	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
+	fsnotify_destroy_mark_locked(mark, group);
+	mutex_unlock(&group->mark_mutex);
 }
 void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask)
@@ -214,7 +220,7 @@ void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mas
  * These marks may be used for the fsnotify backend to determine which
  * event types should be delivered to which group.
  */
-int fsnotify_add_mark(struct fsnotify_mark *mark,
+int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
 		      struct fsnotify_group *group, struct inode *inode,
 		      struct vfsmount *mnt, int allow_dups)
 {
@@ -222,18 +228,18 @@ int fsnotify_add_mark(struct fsnotify_mark *mark,
 	BUG_ON(inode && mnt);
 	BUG_ON(!inode && !mnt);
+	BUG_ON(!mutex_is_locked(&group->mark_mutex));
 	/*
 	 * LOCKING ORDER!!!!
+	 * group->mark_mutex
 	 * mark->lock
-	 * group->mark_lock
 	 * inode->i_lock
 	 */
 	spin_lock(&mark->lock);
-	spin_lock(&group->mark_lock);
-
 	mark->flags |= FSNOTIFY_MARK_FLAG_ALIVE;
+	fsnotify_get_group(group);
 	mark->group = group;
 	list_add(&mark->g_list, &group->marks_list);
 	atomic_inc(&group->num_marks);
@@ -251,11 +257,8 @@ int fsnotify_add_mark(struct fsnotify_mark *mark,
 		BUG();
 	}
-	spin_unlock(&group->mark_lock);
-
 	/* this will pin the object if appropriate */
 	fsnotify_set_mark_mask_locked(mark, mark->mask);
 	spin_unlock(&mark->lock);
 	if (inode)
@@ -265,10 +268,10 @@ int fsnotify_add_mark(struct fsnotify_mark *mark,
 err:
 	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
 	list_del_init(&mark->g_list);
+	fsnotify_put_group(group);
 	mark->group = NULL;
 	atomic_dec(&group->num_marks);
-	spin_unlock(&group->mark_lock);
 	spin_unlock(&mark->lock);
 	spin_lock(&destroy_lock);
@@ -279,6 +282,16 @@ int fsnotify_add_mark(struct fsnotify_mark *mark,
 	return ret;
 }
+int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group,
+		      struct inode *inode, struct vfsmount *mnt, int allow_dups)
+{
+	int ret;
+	mutex_lock(&group->mark_mutex);
+	ret = fsnotify_add_mark_locked(mark, group, inode, mnt, allow_dups);
+	mutex_unlock(&group->mark_mutex);
+	return ret;
+}
+
 /*
  * clear any marks in a group in which mark->flags & flags is true
  */
@@ -286,22 +299,16 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
 					 unsigned int flags)
 {
 	struct fsnotify_mark *lmark, *mark;
-	LIST_HEAD(free_list);
-	spin_lock(&group->mark_lock);
+	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
 	list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
 		if (mark->flags & flags) {
-			list_add(&mark->free_g_list, &free_list);
-			list_del_init(&mark->g_list);
 			fsnotify_get_mark(mark);
-		}
-	}
-	spin_unlock(&group->mark_lock);
-
-	list_for_each_entry_safe(mark, lmark, &free_list, free_g_list) {
-		fsnotify_destroy_mark(mark);
+			fsnotify_destroy_mark_locked(mark, group);
 			fsnotify_put_mark(mark);
+		}
 	}
+	mutex_unlock(&group->mark_mutex);
 }
 /*
@@ -317,6 +324,8 @@ void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *ol
 	assert_spin_locked(&old->lock);
 	new->i.inode = old->i.inode;
 	new->m.mnt = old->m.mnt;
+	if (old->group)
+		fsnotify_get_group(old->group);
 	new->group = old->group;
 	new->mask = old->mask;
 	new->free_mark = old->free_mark;
...
@@ -225,6 +225,7 @@ struct fsnotify_event *fsnotify_add_notify_event(struct fsnotify_group *group, s
 	mutex_unlock(&group->notification_mutex);
 	wake_up(&group->notification_waitq);
+	kill_fasync(&group->fsn_fa, SIGIO, POLL_IN);
 	return return_event;
 }
...
@@ -46,8 +46,16 @@ void fsnotify_clear_marks_by_mount(struct vfsmount *mnt)
 	spin_unlock(&mnt->mnt_root->d_lock);
 	list_for_each_entry_safe(mark, lmark, &free_list, m.free_m_list) {
-		fsnotify_destroy_mark(mark);
+		struct fsnotify_group *group;
+
+		spin_lock(&mark->lock);
+		fsnotify_get_group(mark->group);
+		group = mark->group;
+		spin_unlock(&mark->lock);
+
+		fsnotify_destroy_mark(mark, group);
 		fsnotify_put_mark(mark);
+		fsnotify_put_group(group);
 	}
 }
@@ -88,8 +96,8 @@ void fsnotify_destroy_vfsmount_mark(struct fsnotify_mark *mark)
 {
 	struct vfsmount *mnt = mark->m.mnt;
+	BUG_ON(!mutex_is_locked(&mark->group->mark_mutex));
 	assert_spin_locked(&mark->lock);
-	assert_spin_locked(&mark->group->mark_lock);
 	spin_lock(&mnt->mnt_root->d_lock);
@@ -151,8 +159,8 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
 	mark->flags |= FSNOTIFY_MARK_FLAG_VFSMOUNT;
+	BUG_ON(!mutex_is_locked(&group->mark_mutex));
 	assert_spin_locked(&mark->lock);
-	assert_spin_locked(&group->mark_lock);
 	spin_lock(&mnt->mnt_root->d_lock);
...
@@ -88,9 +88,10 @@ struct fsnotify_event_private_data;
  * if the group is interested in this event.
  * handle_event - main call for a group to handle an fs event
  * free_group_priv - called when a group refcnt hits 0 to clean up the private union
- * freeing-mark - this means that a mark has been flagged to die when everything
- *		finishes using it. The function is supplied with what must be a
- *		valid group and inode to use to clean up.
+ * freeing_mark - called when a mark is being destroyed for some reason. The group
+ *		MUST be holding a reference on each mark and that reference must be
+ *		dropped in this function. inotify uses this function to send
+ *		userspace messages that marks have been removed.
  */
 struct fsnotify_ops {
 	bool (*should_send_event)(struct fsnotify_group *group, struct inode *inode,
@@ -141,12 +142,14 @@ struct fsnotify_group {
 	unsigned int priority;
 	/* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
-	spinlock_t mark_lock;		/* protect marks_list */
+	struct mutex mark_mutex;	/* protect marks_list */
 	atomic_t num_marks;		/* 1 for each mark and 1 for not being
 					 * past the point of no return when freeing
 					 * a group */
 	struct list_head marks_list;	/* all inode marks for this group */
+	struct fasync_struct *fsn_fa;	/* async notification */
 	/* groups can define private fields here or use the void *private */
 	union {
 		void *private;
@@ -155,7 +158,6 @@ struct fsnotify_group {
 		spinlock_t idr_lock;
 		struct idr idr;
 		u32 last_wd;
-		struct fasync_struct *fa;	/* async notification */
 		struct user_struct *user;
 	} inotify_data;
 #endif
@@ -287,7 +289,6 @@ struct fsnotify_mark {
 		struct fsnotify_inode_mark i;
 		struct fsnotify_vfsmount_mark m;
 	};
-	struct list_head free_g_list;	/* tmp list used when freeing this mark */
 	__u32 ignored_mask;		/* events types to ignore */
 #define FSNOTIFY_MARK_FLAG_INODE	0x01
 #define FSNOTIFY_MARK_FLAG_VFSMOUNT	0x02
@@ -360,11 +361,16 @@ static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode
 /* called from fsnotify listeners, such as fanotify or dnotify */
-/* get a reference to an existing or create a new group */
+/* create a new group */
 extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops);
+/* get reference to a group */
+extern void fsnotify_get_group(struct fsnotify_group *group);
 /* drop reference on a group from fsnotify_alloc_group */
 extern void fsnotify_put_group(struct fsnotify_group *group);
+/* destroy group */
+extern void fsnotify_destroy_group(struct fsnotify_group *group);
+/* fasync handler function */
+extern int fsnotify_fasync(int fd, struct file *file, int on);
 /* take a reference to an event */
 extern void fsnotify_get_event(struct fsnotify_event *event);
 extern void fsnotify_put_event(struct fsnotify_event *event);
@@ -405,8 +411,13 @@ extern void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask
 /* attach the mark to both the group and the inode */
 extern int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group,
			     struct inode *inode, struct vfsmount *mnt, int allow_dups);
-/* given a mark, flag it to be freed when all references are dropped */
-extern void fsnotify_destroy_mark(struct fsnotify_mark *mark);
+extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark, struct fsnotify_group *group,
+				    struct inode *inode, struct vfsmount *mnt, int allow_dups);
+/* given a group and a mark, flag mark to be freed when all references are dropped */
+extern void fsnotify_destroy_mark(struct fsnotify_mark *mark,
+				  struct fsnotify_group *group);
+extern void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
+					 struct fsnotify_group *group);
 /* run all the marks in a group, and clear all of the vfsmount marks */
 extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group);
 /* run all the marks in a group, and clear all of the inode marks */
...
@@ -249,7 +249,7 @@ static void untag_chunk(struct node *p)
 		list_del_rcu(&chunk->hash);
 		spin_unlock(&hash_lock);
 		spin_unlock(&entry->lock);
-		fsnotify_destroy_mark(entry);
+		fsnotify_destroy_mark(entry, audit_tree_group);
 		goto out;
 	}
@@ -291,7 +291,7 @@ static void untag_chunk(struct node *p)
 	owner->root = new;
 	spin_unlock(&hash_lock);
 	spin_unlock(&entry->lock);
-	fsnotify_destroy_mark(entry);
+	fsnotify_destroy_mark(entry, audit_tree_group);
 	fsnotify_put_mark(&new->mark);	/* drop initial reference */
 	goto out;
@@ -331,7 +331,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
 		spin_unlock(&hash_lock);
 		chunk->dead = 1;
 		spin_unlock(&entry->lock);
-		fsnotify_destroy_mark(entry);
+		fsnotify_destroy_mark(entry, audit_tree_group);
 		fsnotify_put_mark(entry);
 		return 0;
 	}
@@ -412,7 +412,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 		spin_unlock(&chunk_entry->lock);
 		spin_unlock(&old_entry->lock);
-		fsnotify_destroy_mark(chunk_entry);
+		fsnotify_destroy_mark(chunk_entry, audit_tree_group);
 		fsnotify_put_mark(chunk_entry);
 		fsnotify_put_mark(old_entry);
@@ -443,7 +443,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 	spin_unlock(&hash_lock);
 	spin_unlock(&chunk_entry->lock);
 	spin_unlock(&old_entry->lock);
-	fsnotify_destroy_mark(old_entry);
+	fsnotify_destroy_mark(old_entry, audit_tree_group);
 	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
 	fsnotify_put_mark(old_entry);	/* pair to fsnotify_find mark_entry */
 	return 0;
...
@@ -350,7 +350,7 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
 	}
 	mutex_unlock(&audit_filter_mutex);
-	fsnotify_destroy_mark(&parent->mark);
+	fsnotify_destroy_mark(&parent->mark, audit_watch_group);
 }
 /* Get path information necessary for adding watches. */
@@ -457,7 +457,7 @@ void audit_remove_watch_rule(struct audit_krule *krule)
 	if (list_empty(&parent->watches)) {
 		audit_get_parent(parent);
-		fsnotify_destroy_mark(&parent->mark);
+		fsnotify_destroy_mark(&parent->mark, audit_watch_group);
 		audit_put_parent(parent);
 	}
 }