/* fanotify.c */
#include <linux/fanotify.h>
#include <linux/fdtable.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/kernel.h> /* UINT_MAX */
#include <linux/mount.h>
#include <linux/types.h>

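/*
 * Two events can be coalesced only when they report on the same inode,
 * carry the same data type and come from the same thread group; path
 * events must also reference the same vfsmount and dentry.
 */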
static bool should_merge(struct fsnotify_event *old, struct fsnotify_event *new)
{
	pr_debug("%s: old=%p new=%p\n", __func__, old, new);

	if (old->to_tell == new->to_tell &&
	    old->data_type == new->data_type &&
	    old->tgid == new->tgid) {
		switch (old->data_type) {
		case (FSNOTIFY_EVENT_PATH):
			if ((old->path.mnt == new->path.mnt) &&
			    (old->path.dentry == new->path.dentry))
				return true;
			break;
		case (FSNOTIFY_EVENT_NONE):
			return true;
		default:
			BUG();
		}
	}
	return false;
}

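/*
 * Walk the notification list newest-first looking for an event the new one
 * can be folded into.  Returns -EEXIST when it was absorbed by an existing
 * entry and 0 when the caller should queue it as usual.
 */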
static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
{
	struct fsnotify_event_holder *test_holder;
	struct fsnotify_event *test_event;
	struct fsnotify_event *new_event;
	int ret = 0;

	pr_debug("%s: list=%p event=%p\n", __func__, list, event);

	/* and the list better be locked by something too! */

	list_for_each_entry_reverse(test_holder, list, event_list) {
		test_event = test_holder->event;
		if (should_merge(test_event, event)) {
			ret = -EEXIST;

			/* if they are exactly the same we are done */
			if (test_event->mask == event->mask)
				goto out;

			/*
			 * if the refcnt == 1 this is the only queue
			 * for this event and so we can update the mask
			 * in place.
			 */
			if (atomic_read(&test_event->refcnt) == 1) {
				test_event->mask |= event->mask;
				goto out;
			}

			/* can't allocate memory, merge was not possible */
			new_event = fsnotify_clone_event(test_event);
			if (unlikely(!new_event)) {
				ret = 0;
				goto out;
			}

			/* build new event and replace it on the list */
			new_event->mask = (test_event->mask | event->mask);
			fsnotify_replace_event(test_holder, new_event);
			/* match ref from fsnotify_clone_event() */
			fsnotify_put_event(new_event);

			break;
		}
	}
out:
	return ret;
}

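/*
 * Queue the event on the group's notification list, letting
 * fanotify_merge() coalesce it with an already-queued event if possible.
 */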
static int fanotify_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
{
	int ret;

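	/* fanotify event bits are defined to mirror the fsnotify bits */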
	BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(FAN_OPEN != FS_OPEN);
	BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD);
	BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW);

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	ret = fsnotify_add_notify_event(group, event, NULL, fanotify_merge);
	/* -EEXIST means this event was merged with another, not that it was an error */
	if (ret == -EEXIST)
		ret = 0;
	return ret;
}

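/*
 * A vfsmount event is delivered only if the mount mark's mask allows it
 * and neither the mount mark nor the inode mark lists it as ignored.
 */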
static bool should_send_vfsmount_event(struct fsnotify_group *group, struct vfsmount *mnt,
				       struct inode *inode, __u32 mask)
{
	struct fsnotify_mark *mnt_mark;
	struct fsnotify_mark *inode_mark;

	pr_debug("%s: group=%p vfsmount=%p mask=%x\n",
		 __func__, group, mnt, mask);

	mnt_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!mnt_mark)
		return false;

	mask &= mnt_mark->mask;
	mask &= ~mnt_mark->ignored_mask;

	if (mask) {
		inode_mark = fsnotify_find_inode_mark(group, inode);
		if (inode_mark) {
			mask &= ~inode_mark->ignored_mask;
			fsnotify_put_mark(inode_mark);
		}
	}

	/* find took a reference */
	fsnotify_put_mark(mnt_mark);

	return mask;
}

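/*
 * An inode event is delivered only if the inode's mark asks for it; events
 * generated on behalf of a child are dropped unless the mark carries
 * FS_EVENT_ON_CHILD.
 */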
static bool should_send_inode_event(struct fsnotify_group *group, struct inode *inode,
				    __u32 mask)
{
	struct fsnotify_mark *fsn_mark;

	pr_debug("%s: group=%p inode=%p mask=%x\n",
		 __func__, group, inode, mask);

	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark)
		return false;

	/* if the event is for a child and this inode doesn't care about
	 * events on the child, don't send it! */
	if ((mask & FS_EVENT_ON_CHILD) &&
	    !(fsn_mark->mask & FS_EVENT_ON_CHILD)) {
		mask = 0;
	} else {
		/*
		 * We care about children, but do we care about this particular
		 * type of event?
		 */
		mask &= ~FS_EVENT_ON_CHILD;
		mask &= fsn_mark->mask;
		mask &= ~fsn_mark->ignored_mask;
	}

	/* find took a reference */
	fsnotify_put_mark(fsn_mark);

	return mask;
}

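/*
 * Top-level filter: fanotify only reports path events on regular files
 * and directories, deferring to the mount or inode marks for the rest.
 */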
static bool fanotify_should_send_event(struct fsnotify_group *group, struct inode *to_tell,
				       struct vfsmount *mnt, __u32 mask, void *data,
				       int data_type)
{
	pr_debug("%s: group=%p to_tell=%p mnt=%p mask=%x data=%p data_type=%d\n",
		 __func__, group, to_tell, mnt, mask, data, data_type);

	/* sorry, fanotify only gives a damn about files and dirs */
	if (!S_ISREG(to_tell->i_mode) &&
	    !S_ISDIR(to_tell->i_mode))
		return false;

	/* if we don't have enough info to send an event to userspace say no */
	if (data_type != FSNOTIFY_EVENT_PATH)
		return false;

	if (mnt)
		return should_send_vfsmount_event(group, mnt, to_tell, mask);
	else
		return should_send_inode_event(group, to_tell, mask);
}

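/*
 * fanotify keeps no group or event private data and needs no callback when
 * a mark is freed, so only the handling and filtering hooks are populated.
 */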
const struct fsnotify_ops fanotify_fsnotify_ops = {
	.handle_event = fanotify_handle_event,
	.should_send_event = fanotify_should_send_event,
	.free_group_priv = NULL,
	.free_event_priv = NULL,
	.freeing_mark = NULL,
};