Commit a0a92d26 authored by Amir Goldstein, committed by Jan Kara

fsnotify: move mask out of struct fsnotify_event

Common fsnotify_event helpers have no need for the mask field.
It is only used by backend code, so move the field out of the
abstract fsnotify_event struct and into the concrete backend
event structs.

This change packs struct inotify_event_info better on 64bit
machine and will allow us to cram some more fields into
struct fanotify_event_info.
Signed-off-by: Amir Goldstein <amir73il@gmail.com>
Signed-off-by: Jan Kara <jack@suse.cz>
parent 45a9fb37
...@@ -36,20 +36,22 @@ static bool should_merge(struct fsnotify_event *old_fsn, ...@@ -36,20 +36,22 @@ static bool should_merge(struct fsnotify_event *old_fsn,
static int fanotify_merge(struct list_head *list, struct fsnotify_event *event) static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
{ {
struct fsnotify_event *test_event; struct fsnotify_event *test_event;
struct fanotify_event_info *new;
pr_debug("%s: list=%p event=%p\n", __func__, list, event); pr_debug("%s: list=%p event=%p\n", __func__, list, event);
new = FANOTIFY_E(event);
/* /*
* Don't merge a permission event with any other event so that we know * Don't merge a permission event with any other event so that we know
* the event structure we have created in fanotify_handle_event() is the * the event structure we have created in fanotify_handle_event() is the
* one we should check for permission response. * one we should check for permission response.
*/ */
if (fanotify_is_perm_event(event->mask)) if (fanotify_is_perm_event(new->mask))
return 0; return 0;
list_for_each_entry_reverse(test_event, list, list) { list_for_each_entry_reverse(test_event, list, list) {
if (should_merge(test_event, event)) { if (should_merge(test_event, event)) {
test_event->mask |= event->mask; FANOTIFY_E(test_event)->mask |= new->mask;
return 1; return 1;
} }
} }
...@@ -173,7 +175,8 @@ struct fanotify_event_info *fanotify_alloc_event(struct fsnotify_group *group, ...@@ -173,7 +175,8 @@ struct fanotify_event_info *fanotify_alloc_event(struct fsnotify_group *group,
if (!event) if (!event)
goto out; goto out;
init: __maybe_unused init: __maybe_unused
fsnotify_init_event(&event->fse, inode, mask); fsnotify_init_event(&event->fse, inode);
event->mask = mask;
if (FAN_GROUP_FLAG(group, FAN_REPORT_TID)) if (FAN_GROUP_FLAG(group, FAN_REPORT_TID))
event->pid = get_pid(task_pid(current)); event->pid = get_pid(task_pid(current));
else else
...@@ -280,7 +283,7 @@ static void fanotify_free_event(struct fsnotify_event *fsn_event) ...@@ -280,7 +283,7 @@ static void fanotify_free_event(struct fsnotify_event *fsn_event)
event = FANOTIFY_E(fsn_event); event = FANOTIFY_E(fsn_event);
path_put(&event->path); path_put(&event->path);
put_pid(event->pid); put_pid(event->pid);
if (fanotify_is_perm_event(fsn_event->mask)) { if (fanotify_is_perm_event(event->mask)) {
kmem_cache_free(fanotify_perm_event_cachep, kmem_cache_free(fanotify_perm_event_cachep,
FANOTIFY_PE(fsn_event)); FANOTIFY_PE(fsn_event));
return; return;
......
...@@ -14,6 +14,7 @@ extern struct kmem_cache *fanotify_perm_event_cachep; ...@@ -14,6 +14,7 @@ extern struct kmem_cache *fanotify_perm_event_cachep;
*/ */
struct fanotify_event_info { struct fanotify_event_info {
struct fsnotify_event fse; struct fsnotify_event fse;
u32 mask;
/* /*
* We hold ref to this path so it may be dereferenced at any point * We hold ref to this path so it may be dereferenced at any point
* during this object's lifetime * during this object's lifetime
......
...@@ -131,9 +131,9 @@ static int fill_event_metadata(struct fsnotify_group *group, ...@@ -131,9 +131,9 @@ static int fill_event_metadata(struct fsnotify_group *group,
metadata->metadata_len = FAN_EVENT_METADATA_LEN; metadata->metadata_len = FAN_EVENT_METADATA_LEN;
metadata->vers = FANOTIFY_METADATA_VERSION; metadata->vers = FANOTIFY_METADATA_VERSION;
metadata->reserved = 0; metadata->reserved = 0;
metadata->mask = fsn_event->mask & FANOTIFY_OUTGOING_EVENTS; metadata->mask = event->mask & FANOTIFY_OUTGOING_EVENTS;
metadata->pid = pid_vnr(event->pid); metadata->pid = pid_vnr(event->pid);
if (unlikely(fsn_event->mask & FAN_Q_OVERFLOW)) if (unlikely(event->mask & FAN_Q_OVERFLOW))
metadata->fd = FAN_NOFD; metadata->fd = FAN_NOFD;
else { else {
metadata->fd = create_fd(group, event, file); metadata->fd = create_fd(group, event, file);
...@@ -230,7 +230,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, ...@@ -230,7 +230,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
fanotify_event_metadata.event_len)) fanotify_event_metadata.event_len))
goto out_close_fd; goto out_close_fd;
if (fanotify_is_perm_event(event->mask)) if (fanotify_is_perm_event(FANOTIFY_E(event)->mask))
FANOTIFY_PE(event)->fd = fd; FANOTIFY_PE(event)->fd = fd;
if (fd != FAN_NOFD) if (fd != FAN_NOFD)
...@@ -316,7 +316,7 @@ static ssize_t fanotify_read(struct file *file, char __user *buf, ...@@ -316,7 +316,7 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
* Permission events get queued to wait for response. Other * Permission events get queued to wait for response. Other
* events can be destroyed now. * events can be destroyed now.
*/ */
if (!fanotify_is_perm_event(kevent->mask)) { if (!fanotify_is_perm_event(FANOTIFY_E(kevent)->mask)) {
fsnotify_destroy_event(group, kevent); fsnotify_destroy_event(group, kevent);
} else { } else {
if (ret <= 0) { if (ret <= 0) {
...@@ -401,7 +401,7 @@ static int fanotify_release(struct inode *ignored, struct file *file) ...@@ -401,7 +401,7 @@ static int fanotify_release(struct inode *ignored, struct file *file)
*/ */
while (!fsnotify_notify_queue_is_empty(group)) { while (!fsnotify_notify_queue_is_empty(group)) {
fsn_event = fsnotify_remove_first_event(group); fsn_event = fsnotify_remove_first_event(group);
if (!(fsn_event->mask & FANOTIFY_PERM_EVENTS)) { if (!(FANOTIFY_E(fsn_event)->mask & FANOTIFY_PERM_EVENTS)) {
spin_unlock(&group->notification_lock); spin_unlock(&group->notification_lock);
fsnotify_destroy_event(group, fsn_event); fsnotify_destroy_event(group, fsn_event);
spin_lock(&group->notification_lock); spin_lock(&group->notification_lock);
......
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
struct inotify_event_info { struct inotify_event_info {
struct fsnotify_event fse; struct fsnotify_event fse;
u32 mask;
int wd; int wd;
u32 sync_cookie; u32 sync_cookie;
int name_len; int name_len;
......
...@@ -43,11 +43,11 @@ static bool event_compare(struct fsnotify_event *old_fsn, ...@@ -43,11 +43,11 @@ static bool event_compare(struct fsnotify_event *old_fsn,
{ {
struct inotify_event_info *old, *new; struct inotify_event_info *old, *new;
if (old_fsn->mask & FS_IN_IGNORED)
return false;
old = INOTIFY_E(old_fsn); old = INOTIFY_E(old_fsn);
new = INOTIFY_E(new_fsn); new = INOTIFY_E(new_fsn);
if ((old_fsn->mask == new_fsn->mask) && if (old->mask & FS_IN_IGNORED)
return false;
if ((old->mask == new->mask) &&
(old_fsn->inode == new_fsn->inode) && (old_fsn->inode == new_fsn->inode) &&
(old->name_len == new->name_len) && (old->name_len == new->name_len) &&
(!old->name_len || !strcmp(old->name, new->name))) (!old->name_len || !strcmp(old->name, new->name)))
...@@ -114,7 +114,8 @@ int inotify_handle_event(struct fsnotify_group *group, ...@@ -114,7 +114,8 @@ int inotify_handle_event(struct fsnotify_group *group,
} }
fsn_event = &event->fse; fsn_event = &event->fse;
fsnotify_init_event(fsn_event, inode, mask); fsnotify_init_event(fsn_event, inode);
event->mask = mask;
event->wd = i_mark->wd; event->wd = i_mark->wd;
event->sync_cookie = cookie; event->sync_cookie = cookie;
event->name_len = len; event->name_len = len;
......
...@@ -189,7 +189,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, ...@@ -189,7 +189,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
*/ */
pad_name_len = round_event_name_len(fsn_event); pad_name_len = round_event_name_len(fsn_event);
inotify_event.len = pad_name_len; inotify_event.len = pad_name_len;
inotify_event.mask = inotify_mask_to_arg(fsn_event->mask); inotify_event.mask = inotify_mask_to_arg(event->mask);
inotify_event.wd = event->wd; inotify_event.wd = event->wd;
inotify_event.cookie = event->sync_cookie; inotify_event.cookie = event->sync_cookie;
...@@ -634,7 +634,8 @@ static struct fsnotify_group *inotify_new_group(unsigned int max_events) ...@@ -634,7 +634,8 @@ static struct fsnotify_group *inotify_new_group(unsigned int max_events)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
group->overflow_event = &oevent->fse; group->overflow_event = &oevent->fse;
fsnotify_init_event(group->overflow_event, NULL, FS_Q_OVERFLOW); fsnotify_init_event(group->overflow_event, NULL);
oevent->mask = FS_Q_OVERFLOW;
oevent->wd = -1; oevent->wd = -1;
oevent->sync_cookie = 0; oevent->sync_cookie = 0;
oevent->name_len = 0; oevent->name_len = 0;
......
...@@ -71,7 +71,7 @@ void fsnotify_destroy_event(struct fsnotify_group *group, ...@@ -71,7 +71,7 @@ void fsnotify_destroy_event(struct fsnotify_group *group,
struct fsnotify_event *event) struct fsnotify_event *event)
{ {
/* Overflow events are per-group and we don't want to free them */ /* Overflow events are per-group and we don't want to free them */
if (!event || event->mask == FS_Q_OVERFLOW) if (!event || event == group->overflow_event)
return; return;
/* /*
* If the event is still queued, we have a problem... Do an unreliable * If the event is still queued, we have a problem... Do an unreliable
...@@ -194,23 +194,3 @@ void fsnotify_flush_notify(struct fsnotify_group *group) ...@@ -194,23 +194,3 @@ void fsnotify_flush_notify(struct fsnotify_group *group)
} }
spin_unlock(&group->notification_lock); spin_unlock(&group->notification_lock);
} }
/*
* fsnotify_create_event - Allocate a new event which will be sent to each
* group's handle_event function if the group was interested in this
* particular event.
*
* @inode the inode which is supposed to receive the event (sometimes a
* parent of the inode to which the event happened.
* @mask what actually happened.
* @data pointer to the object which was actually affected
* @data_type flag indication if the data is a file, path, inode, nothing...
* @name the filename, if available
*/
void fsnotify_init_event(struct fsnotify_event *event, struct inode *inode,
u32 mask)
{
INIT_LIST_HEAD(&event->list);
event->inode = inode;
event->mask = mask;
}
...@@ -135,7 +135,6 @@ struct fsnotify_event { ...@@ -135,7 +135,6 @@ struct fsnotify_event {
struct list_head list; struct list_head list;
/* inode may ONLY be dereferenced during handle_event(). */ /* inode may ONLY be dereferenced during handle_event(). */
struct inode *inode; /* either the inode the event happened to or its parent */ struct inode *inode; /* either the inode the event happened to or its parent */
u32 mask; /* the type of access, bitwise OR for FS_* event types */
}; };
/* /*
...@@ -485,9 +484,12 @@ extern void fsnotify_put_mark(struct fsnotify_mark *mark); ...@@ -485,9 +484,12 @@ extern void fsnotify_put_mark(struct fsnotify_mark *mark);
extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info); extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info);
extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info); extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info);
/* put here because inotify does some weird stuff when destroying watches */ static inline void fsnotify_init_event(struct fsnotify_event *event,
extern void fsnotify_init_event(struct fsnotify_event *event, struct inode *inode)
struct inode *to_tell, u32 mask); {
INIT_LIST_HEAD(&event->list);
event->inode = inode;
}
#else #else
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment