Commit 94e00d28 authored by Amir Goldstein, committed by Jan Kara

fsnotify: use hash table for faster events merge

To improve event merge performance, hash events into a 128-bucket hash
table keyed by the event merge key.

The fanotify_event struct grows by two pointers, but its size was just
reduced by removing the objectid member, so overall it grows by only one
pointer.

Permission events and the overflow event are never merged, so they are
not hashed either.

Link: https://lore.kernel.org/r/20210304104826.3993892-5-amir73il@gmail.com
Signed-off-by: Amir Goldstein <amir73il@gmail.com>
Signed-off-by: Jan Kara <jack@suse.cz>
parent 7e3e5c69
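
Before the diff, for illustration only: a minimal standalone C sketch of the approach described above. The names (demo_event, demo_merge, demo_insert, object_id) are invented for this sketch; the kernel code uses hlist_head/hlist_node, the precomputed merge-key hash stored in the event, and the real fanotify merge criteria.

/*
 * Sketch: events carry a precomputed hash of their merge key; merge
 * candidates are found by walking one small bucket chain instead of
 * scanning the whole notification list.
 */
#include <stdio.h>

#define DEMO_HTABLE_BITS 7			/* 128 buckets, like FANOTIFY_HTABLE_BITS */
#define DEMO_HTABLE_SIZE (1 << DEMO_HTABLE_BITS)
#define DEMO_HTABLE_MASK (DEMO_HTABLE_SIZE - 1)

struct demo_event {
	struct demo_event *next;	/* bucket chain (stand-in for hlist_node) */
	unsigned int hash;		/* precomputed hash of the merge key */
	unsigned long object_id;	/* stand-in merge key: same object => mergeable */
	unsigned int mask;		/* event mask; merging ORs masks together */
};

static struct demo_event *merge_hash[DEMO_HTABLE_SIZE];

static unsigned int demo_bucket(const struct demo_event *event)
{
	return event->hash & DEMO_HTABLE_MASK;
}

/* Return 1 if @new was merged into an already queued event, 0 if it must be queued. */
static int demo_merge(struct demo_event *new)
{
	struct demo_event *old;

	for (old = merge_hash[demo_bucket(new)]; old; old = old->next) {
		if (old->object_id == new->object_id) {
			old->mask |= new->mask;
			return 1;
		}
	}
	return 0;
}

/* Insert a queued event into its bucket so later events can merge with it. */
static void demo_insert(struct demo_event *event)
{
	unsigned int bucket = demo_bucket(event);

	event->next = merge_hash[bucket];
	merge_hash[bucket] = event;
}

int main(void)
{
	struct demo_event a = { .hash = 42, .object_id = 1, .mask = 0x1 };
	struct demo_event b = { .hash = 42, .object_id = 1, .mask = 0x2 };

	if (!demo_merge(&a))
		demo_insert(&a);
	if (!demo_merge(&b))		/* merges into a, so a.mask becomes 0x3 */
		demo_insert(&b);

	printf("a.mask = 0x%x\n", a.mask);
	return 0;
}
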
@@ -149,12 +149,15 @@ static bool fanotify_should_merge(struct fanotify_event *old,
 }
 
 /* and the list better be locked by something too! */
-static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
+static int fanotify_merge(struct fsnotify_group *group,
+			  struct fsnotify_event *event)
 {
-	struct fsnotify_event *test_event;
 	struct fanotify_event *old, *new = FANOTIFY_E(event);
+	unsigned int bucket = fanotify_event_hash_bucket(group, new);
+	struct hlist_head *hlist = &group->fanotify_data.merge_hash[bucket];
 
-	pr_debug("%s: list=%p event=%p\n", __func__, list, event);
+	pr_debug("%s: group=%p event=%p bucket=%u\n", __func__,
+		 group, event, bucket);
 
 	/*
 	 * Don't merge a permission event with any other event so that we know
@@ -164,8 +167,7 @@ static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
 	if (fanotify_is_perm_event(new->mask))
 		return 0;
 
-	list_for_each_entry_reverse(test_event, list, list) {
-		old = FANOTIFY_E(test_event);
+	hlist_for_each_entry(old, hlist, merge_list) {
 		if (fanotify_should_merge(old, new)) {
 			old->mask |= new->mask;
 			return 1;
@@ -203,8 +205,11 @@ static int fanotify_get_response(struct fsnotify_group *group,
 		return ret;
 	}
 	/* Event not yet reported? Just remove it. */
-	if (event->state == FAN_EVENT_INIT)
+	if (event->state == FAN_EVENT_INIT) {
 		fsnotify_remove_queued_event(group, &event->fae.fse);
+		/* Permission events are not supposed to be hashed */
+		WARN_ON_ONCE(!hlist_unhashed(&event->fae.merge_list));
+	}
 	/*
 	 * Event may be also answered in case signal delivery raced
 	 * with wakeup. In that case we have nothing to do besides
@@ -679,6 +684,24 @@ static __kernel_fsid_t fanotify_get_fsid(struct fsnotify_iter_info *iter_info)
 	return fsid;
 }
 
+/*
+ * Add an event to hash table for faster merge.
+ */
+static void fanotify_insert_event(struct fsnotify_group *group,
+				  struct fsnotify_event *fsn_event)
+{
+	struct fanotify_event *event = FANOTIFY_E(fsn_event);
+	unsigned int bucket = fanotify_event_hash_bucket(group, event);
+	struct hlist_head *hlist = &group->fanotify_data.merge_hash[bucket];
+
+	assert_spin_locked(&group->notification_lock);
+
+	pr_debug("%s: group=%p event=%p bucket=%u\n", __func__,
+		 group, event, bucket);
+
+	hlist_add_head(&event->merge_list, hlist);
+}
+
 static int fanotify_handle_event(struct fsnotify_group *group, u32 mask,
 				 const void *data, int data_type,
 				 struct inode *dir,
@@ -749,7 +772,9 @@ static int fanotify_handle_event(struct fsnotify_group *group, u32 mask,
 	}
 
 	fsn_event = &event->fse;
-	ret = fsnotify_add_event(group, fsn_event, fanotify_merge);
+	ret = fsnotify_add_event(group, fsn_event, fanotify_merge,
+				 fanotify_is_hashed_event(mask) ?
+				 fanotify_insert_event : NULL);
 	if (ret) {
 		/* Permission events shouldn't be merged */
 		BUG_ON(ret == 1 && mask & FANOTIFY_PERM_EVENTS);
@@ -772,6 +797,7 @@ static void fanotify_free_group_priv(struct fsnotify_group *group)
 {
 	struct user_struct *user;
 
+	kfree(group->fanotify_data.merge_hash);
 	user = group->fanotify_data.user;
 	atomic_dec(&user->fanotify_listeners);
 	free_uid(user);
...
@@ -3,6 +3,7 @@
 #include <linux/path.h>
 #include <linux/slab.h>
 #include <linux/exportfs.h>
+#include <linux/hashtable.h>
 
 extern struct kmem_cache *fanotify_mark_cache;
 extern struct kmem_cache *fanotify_fid_event_cachep;
@@ -150,6 +151,7 @@ enum fanotify_event_type {
 
 struct fanotify_event {
 	struct fsnotify_event fse;
+	struct hlist_node merge_list;	/* List for hashed merge */
 	u32 mask;
 	struct {
 		unsigned int type : FANOTIFY_EVENT_TYPE_BITS;
@@ -162,6 +164,7 @@ static inline void fanotify_init_event(struct fanotify_event *event,
 					unsigned int hash, u32 mask)
 {
 	fsnotify_init_event(&event->fse);
+	INIT_HLIST_NODE(&event->merge_list);
 	event->hash = hash;
 	event->mask = mask;
 	event->pid = NULL;
@@ -299,3 +302,25 @@ static inline struct path *fanotify_event_path(struct fanotify_event *event)
 	else
 		return NULL;
 }
+
+/*
+ * Use 128 size hash table to speed up events merge.
+ */
+#define FANOTIFY_HTABLE_BITS	(7)
+#define FANOTIFY_HTABLE_SIZE	(1 << FANOTIFY_HTABLE_BITS)
+#define FANOTIFY_HTABLE_MASK	(FANOTIFY_HTABLE_SIZE - 1)
+
+/*
+ * Permission events and overflow event do not get merged - don't hash them.
+ */
+static inline bool fanotify_is_hashed_event(u32 mask)
+{
+	return !fanotify_is_perm_event(mask) && !(mask & FS_Q_OVERFLOW);
+}
+
+static inline unsigned int fanotify_event_hash_bucket(
+						struct fsnotify_group *group,
+						struct fanotify_event *event)
+{
+	return event->hash & FANOTIFY_HTABLE_MASK;
+}
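
Since FANOTIFY_HTABLE_SIZE is a power of two, the fanotify_event_hash_bucket() helper above reduces the event's precomputed merge-key hash modulo the table size with a single mask. A tiny standalone check (the hash value is invented for illustration):

#include <assert.h>

#define HTABLE_BITS 7
#define HTABLE_SIZE (1 << HTABLE_BITS)	/* 128 */
#define HTABLE_MASK (HTABLE_SIZE - 1)	/* 0x7f */

int main(void)
{
	unsigned int hash = 0xdeadbeef;	/* made-up 32-bit merge-key hash */

	/* For a power-of-two table, hash & MASK == hash % SIZE. */
	assert((hash & HTABLE_MASK) == (hash % HTABLE_SIZE));
	return 0;
}
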
@@ -89,6 +89,23 @@ static int fanotify_event_info_len(unsigned int fid_mode,
 	return info_len;
 }
 
+/*
+ * Remove an hashed event from merge hash table.
+ */
+static void fanotify_unhash_event(struct fsnotify_group *group,
+				  struct fanotify_event *event)
+{
+	assert_spin_locked(&group->notification_lock);
+
+	pr_debug("%s: group=%p event=%p bucket=%u\n", __func__,
+		 group, event, fanotify_event_hash_bucket(group, event));
+
+	if (WARN_ON_ONCE(hlist_unhashed(&event->merge_list)))
+		return;
+
+	hlist_del_init(&event->merge_list);
+}
+
 /*
  * Get an fanotify notification event if one exists and is small
  * enough to fit in "count". Return an error pointer if the count
@@ -126,6 +143,8 @@ static struct fanotify_event *get_one_event(struct fsnotify_group *group,
 	fsnotify_remove_first_event(group);
 	if (fanotify_is_perm_event(event->mask))
 		FANOTIFY_PERM(event)->state = FAN_EVENT_REPORTED;
+	if (fanotify_is_hashed_event(event->mask))
+		fanotify_unhash_event(group, event);
 out:
 	spin_unlock(&group->notification_lock);
 	return event;
@@ -925,6 +944,20 @@ static struct fsnotify_event *fanotify_alloc_overflow_event(void)
 	return &oevent->fse;
 }
 
+static struct hlist_head *fanotify_alloc_merge_hash(void)
+{
+	struct hlist_head *hash;
+
+	hash = kmalloc(sizeof(struct hlist_head) << FANOTIFY_HTABLE_BITS,
+		       GFP_KERNEL_ACCOUNT);
+	if (!hash)
+		return NULL;
+
+	__hash_init(hash, FANOTIFY_HTABLE_SIZE);
+
+	return hash;
+}
+
 /* fanotify syscalls */
 SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
 {
@@ -993,6 +1026,12 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
 	atomic_inc(&user->fanotify_listeners);
 	group->memcg = get_mem_cgroup_from_mm(current->mm);
 
+	group->fanotify_data.merge_hash = fanotify_alloc_merge_hash();
+	if (!group->fanotify_data.merge_hash) {
+		fd = -ENOMEM;
+		goto out_destroy_group;
+	}
+
 	group->overflow_event = fanotify_alloc_overflow_event();
 	if (unlikely(!group->overflow_event)) {
 		fd = -ENOMEM;
...
@@ -46,9 +46,10 @@ static bool event_compare(struct fsnotify_event *old_fsn,
 	return false;
 }
 
-static int inotify_merge(struct list_head *list,
+static int inotify_merge(struct fsnotify_group *group,
 			 struct fsnotify_event *event)
 {
+	struct list_head *list = &group->notification_list;
 	struct fsnotify_event *last_event;
 
 	last_event = list_entry(list->prev, struct fsnotify_event, list);
@@ -115,7 +116,7 @@ int inotify_handle_inode_event(struct fsnotify_mark *inode_mark, u32 mask,
 	if (len)
 		strcpy(event->name, name->name);
 
-	ret = fsnotify_add_event(group, fsn_event, inotify_merge);
+	ret = fsnotify_add_event(group, fsn_event, inotify_merge, NULL);
 	if (ret) {
 		/* Our event wasn't used in the end. Free it. */
 		fsnotify_destroy_event(group, fsn_event);
...
@@ -68,16 +68,22 @@ void fsnotify_destroy_event(struct fsnotify_group *group,
 }
 
 /*
- * Add an event to the group notification queue. The group can later pull this
- * event off the queue to deal with. The function returns 0 if the event was
- * added to the queue, 1 if the event was merged with some other queued event,
+ * Try to add an event to the notification queue.
+ * The group can later pull this event off the queue to deal with.
+ * The group can use the @merge hook to merge the event with a queued event.
+ * The group can use the @insert hook to insert the event into hash table.
+ * The function returns:
+ * 0 if the event was added to a queue
+ * 1 if the event was merged with some other queued event
  * 2 if the event was not queued - either the queue of events has overflown
  *   or the group is shutting down.
  */
 int fsnotify_add_event(struct fsnotify_group *group,
 		       struct fsnotify_event *event,
-		       int (*merge)(struct list_head *,
-				    struct fsnotify_event *))
+		       int (*merge)(struct fsnotify_group *,
+				    struct fsnotify_event *),
+		       void (*insert)(struct fsnotify_group *,
+				      struct fsnotify_event *))
 {
 	int ret = 0;
 	struct list_head *list = &group->notification_list;
@@ -104,7 +110,7 @@ int fsnotify_add_event(struct fsnotify_group *group,
 	}
 
 	if (!list_empty(list) && merge) {
-		ret = merge(list, event);
+		ret = merge(group, event);
 		if (ret) {
 			spin_unlock(&group->notification_lock);
 			return ret;
@@ -114,6 +120,8 @@ int fsnotify_add_event(struct fsnotify_group *group,
 queue:
 	group->q_len++;
 	list_add_tail(&event->list, list);
+	if (insert)
+		insert(group, event);
 	spin_unlock(&group->notification_lock);
 
 	wake_up(&group->notification_waitq);
...
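
To illustrate the contract spelled out in the updated fsnotify_add_event() comment above (try the @merge hook first; only events that are actually queued reach the @insert hook, and the return value distinguishes queued from merged), here is a compact userspace-style sketch. All names are invented for this sketch; it is not the kernel implementation.

#include <stdio.h>
#include <stddef.h>

struct demo_event { struct demo_event *next; int key; unsigned int mask; };
struct demo_queue { struct demo_event *head, *tail; };

/* Merge hook: OR the mask into a queued event with the same key, if any. */
static int demo_merge(struct demo_queue *q, struct demo_event *event)
{
	struct demo_event *old;

	for (old = q->head; old; old = old->next) {
		if (old->key == event->key) {
			old->mask |= event->mask;
			return 1;
		}
	}
	return 0;
}

/* Insert hook: would index the queued event for faster lookup (no-op here). */
static void demo_insert(struct demo_queue *q, struct demo_event *event)
{
	(void)q; (void)event;
}

/* Mirrors the contract: 0 = queued (insert hook ran), 1 = merged. */
static int demo_add_event(struct demo_queue *q, struct demo_event *event,
			  int (*merge)(struct demo_queue *, struct demo_event *),
			  void (*insert)(struct demo_queue *, struct demo_event *))
{
	if (q->head && merge && merge(q, event))
		return 1;

	event->next = NULL;
	if (q->tail)
		q->tail->next = event;
	else
		q->head = event;
	q->tail = event;

	if (insert)
		insert(q, event);
	return 0;
}

int main(void)
{
	struct demo_queue q = { NULL, NULL };
	struct demo_event a = { NULL, 1, 0x1 }, b = { NULL, 1, 0x2 };

	printf("a: %d\n", demo_add_event(&q, &a, demo_merge, demo_insert));	/* 0: queued */
	printf("b: %d\n", demo_add_event(&q, &b, demo_merge, demo_insert));	/* 1: merged */
	return 0;
}
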
@@ -233,6 +233,8 @@ struct fsnotify_group {
 #endif
 #ifdef CONFIG_FANOTIFY
 	struct fanotify_group_private_data {
+		/* Hash table of events for merge */
+		struct hlist_head *merge_hash;
 		/* allows a group to block waiting for a userspace response */
 		struct list_head access_list;
 		wait_queue_head_t access_waitq;
@@ -486,12 +488,14 @@ extern void fsnotify_destroy_event(struct fsnotify_group *group,
 /* attach the event to the group notification queue */
 extern int fsnotify_add_event(struct fsnotify_group *group,
 			      struct fsnotify_event *event,
-			      int (*merge)(struct list_head *,
-					   struct fsnotify_event *));
+			      int (*merge)(struct fsnotify_group *,
+					   struct fsnotify_event *),
+			      void (*insert)(struct fsnotify_group *,
+					     struct fsnotify_event *));
 /* Queue overflow event to a notification group */
 static inline void fsnotify_queue_overflow(struct fsnotify_group *group)
 {
-	fsnotify_add_event(group, group->overflow_event, NULL);
+	fsnotify_add_event(group, group->overflow_event, NULL, NULL);
 }
 
 static inline bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
...