// SPDX-License-Identifier: GPL-2.0
#include <linux/fanotify.h>
#include <linux/fdtable.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h> /* UINT_MAX */
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/audit.h>

#include "fanotify.h"

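/*
 * Two queued events can be coalesced only if they were generated by the
 * same thread group and refer to the same object: same inode, same
 * mount, same dentry.
 */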
static bool should_merge(struct fsnotify_event *old_fsn,
			 struct fsnotify_event *new_fsn)
{
	struct fanotify_event_info *old, *new;

	pr_debug("%s: old=%p new=%p\n", __func__, old_fsn, new_fsn);
	old = FANOTIFY_E(old_fsn);
	new = FANOTIFY_E(new_fsn);

	if (old_fsn->inode == new_fsn->inode && old->tgid == new->tgid &&
	    old->path.mnt == new->path.mnt &&
	    old->path.dentry == new->path.dentry)
		return true;
	return false;
}

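/*
 * Merge callback for fsnotify_add_event().  Returns 1 if @event was
 * absorbed by an already queued event (the caller then frees it) and
 * 0 if it still needs to be queued.
 */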
/* and the list better be locked by something too! */
static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
{
	struct fsnotify_event *test_event;

	pr_debug("%s: list=%p event=%p\n", __func__, list, event);

	/*
	 * Don't merge a permission event with any other event so that we know
	 * the event structure we have created in fanotify_handle_event() is the
	 * one we should check for permission response.
	 */
	if (fanotify_is_perm_event(event->mask))
		return 0;

	list_for_each_entry_reverse(test_event, list, list) {
		if (should_merge(test_event, event)) {
			test_event->mask |= event->mask;
			return 1;
		}
	}

	return 0;
}

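/*
 * Sleep until userspace writes a struct fanotify_response for this
 * permission event, then translate the verdict into an error code for
 * the file-system operation that is blocked on it.
 */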
static int fanotify_get_response(struct fsnotify_group *group,
				 struct fanotify_perm_event_info *event,
				 struct fsnotify_iter_info *iter_info)
{
	int ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	wait_event(group->fanotify_data.access_waitq, event->response);

	/* userspace responded, convert to something usable */
	switch (event->response & ~FAN_AUDIT) {
	case FAN_ALLOW:
		ret = 0;
		break;
	case FAN_DENY:
	default:
		ret = -EPERM;
	}

	/* Check if the response should be audited */
	if (event->response & FAN_AUDIT)
		audit_fanotify(event->response & ~FAN_AUDIT);

	event->response = 0;

	pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__,
		 group, event, ret);

	return ret;
}

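/*
 * For context, the userspace half of the handshake above looks roughly
 * like this (an illustrative sketch, not part of this file; "fan_fd" is
 * the fd returned by fanotify_init()):
 *
 *	struct fanotify_response resp;
 *
 *	resp.fd = metadata->fd;		// from the event just read
 *	resp.response = FAN_ALLOW;	// or FAN_DENY, optionally | FAN_AUDIT
 *	write(fan_fd, &resp, sizeof(resp));
 */

/*
 * Decide whether this group wants the event at all, based on the masks
 * and ignore masks of the inode and/or vfsmount marks that matched.
 */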
static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
				       struct fsnotify_mark *vfsmnt_mark,
				       u32 event_mask,
				       const void *data, int data_type)
{
	__u32 marks_mask, marks_ignored_mask;
	const struct path *path = data;

	pr_debug("%s: inode_mark=%p vfsmnt_mark=%p mask=%x data=%p"
		 " data_type=%d\n", __func__, inode_mark, vfsmnt_mark,
		 event_mask, data, data_type);

	/* if we don't have enough info to send an event to userspace say no */
	if (data_type != FSNOTIFY_EVENT_PATH)
		return false;

	/* sorry, fanotify only gives a damn about files and dirs */
	if (!d_is_reg(path->dentry) &&
	    !d_can_lookup(path->dentry))
		return false;

	if (inode_mark && vfsmnt_mark) {
		marks_mask = (vfsmnt_mark->mask | inode_mark->mask);
		marks_ignored_mask = (vfsmnt_mark->ignored_mask | inode_mark->ignored_mask);
	} else if (inode_mark) {
		/*
		 * if the event is for a child and this inode doesn't care about
		 * events on the child, don't send it!
		 */
		if ((event_mask & FS_EVENT_ON_CHILD) &&
		    !(inode_mark->mask & FS_EVENT_ON_CHILD))
			return false;
		marks_mask = inode_mark->mask;
		marks_ignored_mask = inode_mark->ignored_mask;
	} else if (vfsmnt_mark) {
		marks_mask = vfsmnt_mark->mask;
		marks_ignored_mask = vfsmnt_mark->ignored_mask;
	} else {
		BUG();
	}

	if (d_is_dir(path->dentry) &&
	    !(marks_mask & FS_ISDIR & ~marks_ignored_mask))
		return false;

	if (event_mask & FAN_ALL_OUTGOING_EVENTS & marks_mask &
				 ~marks_ignored_mask)
		return true;

	return false;
}

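/*
 * Allocate and initialize an event.  Permission events use the larger
 * fanotify_perm_event_info so the userspace response can be stored in
 * it later.  For unlimited queues __GFP_NOFAIL means this cannot fail;
 * otherwise a NULL return makes the caller queue an overflow event.
 */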
struct fanotify_event_info *fanotify_alloc_event(struct fsnotify_group *group,
						 struct inode *inode, u32 mask,
						 const struct path *path)
{
	struct fanotify_event_info *event;
	gfp_t gfp = GFP_KERNEL;

	/*
	 * For queues with unlimited length lost events are not expected and
	 * can possibly have security implications. Avoid losing events when
	 * memory is short.
	 */
	if (group->max_events == UINT_MAX)
		gfp |= __GFP_NOFAIL;

	if (fanotify_is_perm_event(mask)) {
		struct fanotify_perm_event_info *pevent;

		pevent = kmem_cache_alloc(fanotify_perm_event_cachep, gfp);
		if (!pevent)
			return NULL;
		event = &pevent->fae;
		pevent->response = 0;
		goto init;
	}
	event = kmem_cache_alloc(fanotify_event_cachep, gfp);
	if (!event)
		return NULL;
init: __maybe_unused
	fsnotify_init_event(&event->fse, inode, mask);
	event->tgid = get_pid(task_tgid(current));
	if (path) {
		event->path = *path;
		path_get(&event->path);
	} else {
		event->path.mnt = NULL;
		event->path.dentry = NULL;
	}
	return event;
}

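/*
 * Main fsnotify callback.  Queues the event for the group; for
 * permission events it then blocks until userspace delivers a verdict
 * and returns 0 (allow) or -EPERM (deny) to the caller.
 */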
static int fanotify_handle_event(struct fsnotify_group *group,
				 struct inode *inode,
				 struct fsnotify_mark *inode_mark,
				 struct fsnotify_mark *fanotify_mark,
				 u32 mask, const void *data, int data_type,
				 const unsigned char *file_name, u32 cookie,
				 struct fsnotify_iter_info *iter_info)
{
	int ret = 0;
	struct fanotify_event_info *event;
	struct fsnotify_event *fsn_event;

	BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(FAN_OPEN != FS_OPEN);
	BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD);
	BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM);
	BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM);
	BUILD_BUG_ON(FAN_ONDIR != FS_ISDIR);

	if (!fanotify_should_send_event(inode_mark, fanotify_mark, mask, data,
					data_type))
		return 0;

	pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode,
		 mask);

	if (fanotify_is_perm_event(mask)) {
		/*
		 * fsnotify_prepare_user_wait() fails if we race with mark
		 * deletion. Just let the operation pass in that case.
		 */
		if (!fsnotify_prepare_user_wait(iter_info))
			return 0;
	}

	event = fanotify_alloc_event(group, inode, mask, data);
	ret = -ENOMEM;
	if (unlikely(!event)) {
		/*
		 * We don't queue overflow events for permission events as
		 * there the access is denied and so no event is in fact lost.
		 */
		if (!fanotify_is_perm_event(mask))
			fsnotify_queue_overflow(group);
		goto finish;
	}

	fsn_event = &event->fse;
	ret = fsnotify_add_event(group, fsn_event, fanotify_merge);
	if (ret) {
		/* Permission events shouldn't be merged */
		BUG_ON(ret == 1 && mask & FAN_ALL_PERM_EVENTS);
		/* Our event wasn't used in the end. Free it. */
		fsnotify_destroy_event(group, fsn_event);

		ret = 0;
	} else if (fanotify_is_perm_event(mask)) {
		ret = fanotify_get_response(group, FANOTIFY_PE(fsn_event),
					    iter_info);
		fsnotify_destroy_event(group, fsn_event);
	}
finish:
	if (fanotify_is_perm_event(mask))
		fsnotify_finish_user_wait(iter_info);

	return ret;
}

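/* Undo the per-user listener accounting done at fanotify_init() time. */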
static void fanotify_free_group_priv(struct fsnotify_group *group)
{
	struct user_struct *user;

	user = group->fanotify_data.user;
	atomic_dec(&user->fanotify_listeners);
	free_uid(user);
}

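/*
 * Drop the path and pid references taken in fanotify_alloc_event() and
 * return the event to the cache it was allocated from.
 */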
static void fanotify_free_event(struct fsnotify_event *fsn_event)
{
	struct fanotify_event_info *event;

	event = FANOTIFY_E(fsn_event);
	path_put(&event->path);
	put_pid(event->tgid);
	if (fanotify_is_perm_event(fsn_event->mask)) {
		kmem_cache_free(fanotify_perm_event_cachep,
				FANOTIFY_PE(fsn_event));
		return;
	}
	kmem_cache_free(fanotify_event_cachep, event);
}

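/* Marks come from fanotify_mark_cache, allocated in fanotify_user.c. */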
static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}

const struct fsnotify_ops fanotify_fsnotify_ops = {
	.handle_event = fanotify_handle_event,
	.free_group_priv = fanotify_free_group_priv,
	.free_event = fanotify_free_event,
	.free_mark = fanotify_free_mark,
};