// SPDX-License-Identifier: GPL-2.0
#include <linux/fanotify.h>
#include <linux/fdtable.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h> /* UINT_MAX */
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/audit.h>
#include <linux/sched/mm.h>
#include <linux/statfs.h>

#include "fanotify.h"

static bool fanotify_path_equal(struct path *p1, struct path *p2)
{
	return p1->mnt == p2->mnt && p1->dentry == p2->dentry;
}

static inline bool fanotify_fsid_equal(__kernel_fsid_t *fsid1,
				       __kernel_fsid_t *fsid2)
{
	return fsid1->val[0] == fsid2->val[0] && fsid1->val[1] == fsid2->val[1];
}

static bool fanotify_fh_equal(struct fanotify_fh *fh1,
			      struct fanotify_fh *fh2)
{
	if (fh1->type != fh2->type || fh1->len != fh2->len)
		return false;

	/* Do not merge events if we failed to encode fh */
	if (fh1->type == FILEID_INVALID)
		return false;

	return !fh1->len ||
		!memcmp(fanotify_fh_buf(fh1), fanotify_fh_buf(fh2), fh1->len);
}

static bool fanotify_fid_event_equal(struct fanotify_fid_event *ffe1,
				     struct fanotify_fid_event *ffe2)
{
	/* Do not merge fid events without object fh */
	if (!ffe1->object_fh.len)
		return false;

	return fanotify_fsid_equal(&ffe1->fsid, &ffe2->fsid) &&
		fanotify_fh_equal(&ffe1->object_fh, &ffe2->object_fh);
}

static bool should_merge(struct fsnotify_event *old_fsn,
			 struct fsnotify_event *new_fsn)
{
	struct fanotify_event *old, *new;

	pr_debug("%s: old=%p new=%p\n", __func__, old_fsn, new_fsn);
	old = FANOTIFY_E(old_fsn);
	new = FANOTIFY_E(new_fsn);

	if (old_fsn->objectid != new_fsn->objectid ||
	    old->type != new->type || old->pid != new->pid)
		return false;

	switch (old->type) {
	case FANOTIFY_EVENT_TYPE_PATH:
		return fanotify_path_equal(fanotify_event_path(old),
					   fanotify_event_path(new));
	case FANOTIFY_EVENT_TYPE_FID:
		/*
		 * We want to merge many dirent events in the same dir (i.e.
		 * creates/unlinks/renames), but we do not want to merge dirent
		 * events referring to subdirs with dirent events referring to
		 * non subdirs, otherwise the user won't be able to tell from a
		 * mask of FAN_CREATE|FAN_DELETE|FAN_ONDIR whether it describes
		 * a mkdir+unlink pair or an rmdir+create pair of events.
		 */
		if ((old->mask & FS_ISDIR) != (new->mask & FS_ISDIR))
			return false;

		return fanotify_fid_event_equal(FANOTIFY_FE(old),
						FANOTIFY_FE(new));
	default:
		WARN_ON_ONCE(1);
	}

	return false;
}

/* and the list better be locked by something too! */
static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
{
	struct fsnotify_event *test_event;
	struct fanotify_event *new;

	pr_debug("%s: list=%p event=%p\n", __func__, list, event);
	new = FANOTIFY_E(event);

	/*
	 * Don't merge a permission event with any other event so that we know
	 * the event structure we have created in fanotify_handle_event() is
	 * the one we should check for permission response.
	 */
	if (fanotify_is_perm_event(new->mask))
		return 0;

	list_for_each_entry_reverse(test_event, list, list) {
		if (should_merge(test_event, event)) {
			FANOTIFY_E(test_event)->mask |= new->mask;
			return 1;
		}
	}

	return 0;
}

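/*
 * Illustration (not part of the original file): two successive write()s to
 * the same file by the same process queue FAN_MODIFY twice; the second event
 * finds the first via should_merge() (same path, event type and pid), its
 * mask is ORed into the queued event, and userspace reads back a single
 * FAN_MODIFY event instead of two.
 */
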
/*
 * Wait for response to permission event. The function also takes care of
 * freeing the permission event (or offloads that in case the wait is canceled
 * by a signal). The function returns 0 in case access got allowed by
 * userspace, -EPERM in case userspace disallowed the access, and -ERESTARTSYS
 * in case the wait got interrupted by a signal.
 */
static int fanotify_get_response(struct fsnotify_group *group,
				 struct fanotify_perm_event *event,
				 struct fsnotify_iter_info *iter_info)
{
	int ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	ret = wait_event_killable(group->fanotify_data.access_waitq,
				  event->state == FAN_EVENT_ANSWERED);
	/* Signal pending? */
	if (ret < 0) {
		spin_lock(&group->notification_lock);
		/* Event reported to userspace and no answer yet? */
		if (event->state == FAN_EVENT_REPORTED) {
			/* Event will get freed once userspace answers to it */
			event->state = FAN_EVENT_CANCELED;
			spin_unlock(&group->notification_lock);
			goto out;
		}
		/* Event not yet reported? Just remove it. */
		if (event->state == FAN_EVENT_INIT)
			fsnotify_remove_queued_event(group, &event->fae.fse);
		/*
		 * Event may be also answered in case signal delivery raced
		 * with wakeup. In that case we have nothing to do besides
		 * freeing the event and reporting error.
		 */
		spin_unlock(&group->notification_lock);
		fsnotify_destroy_event(group, &event->fae.fse);
		goto out;
	}

	/* userspace responded, convert to something usable */
	switch (event->response & ~FAN_AUDIT) {
	case FAN_ALLOW:
		ret = 0;
		break;
	case FAN_DENY:
	default:
		ret = -EPERM;
	}

	/* Check if the response should be audited */
	if (event->response & FAN_AUDIT)
		audit_fanotify(event->response & ~FAN_AUDIT);

	pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__,
		 group, event, ret);

	fsnotify_destroy_event(group, &event->fae.fse);
out:
	return ret;
}

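/*
 * For reference (illustrative, not part of the original file): userspace
 * answers a permission event by writing a struct fanotify_response to the
 * fanotify fd; process_access_response() in fanotify_user.c then sets
 * event->state to FAN_EVENT_ANSWERED and wakes the waiter above:
 *
 *	struct fanotify_response resp;
 *
 *	resp.fd = metadata->fd;
 *	resp.response = FAN_ALLOW;	(or FAN_DENY, optionally | FAN_AUDIT)
 *	write(fanotify_fd, &resp, sizeof(resp));
 */
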
/*
 * This function returns a mask for an event that only contains the flags
 * that have been specifically requested by the user. Flags that may have
 * been included within the event mask, but have not been explicitly
 * requested by the user, will not be present in the returned mask.
 */
static u32 fanotify_group_event_mask(struct fsnotify_group *group,
				     struct fsnotify_iter_info *iter_info,
				     u32 event_mask, const void *data,
				     int data_type)
{
	__u32 marks_mask = 0, marks_ignored_mask = 0;
	__u32 test_mask, user_mask = FANOTIFY_OUTGOING_EVENTS;
	const struct path *path = fsnotify_data_path(data, data_type);
	struct fsnotify_mark *mark;
	int type;

	pr_debug("%s: report_mask=%x mask=%x data=%p data_type=%d\n",
		 __func__, iter_info->report_mask, event_mask, data, data_type);

	if (!FAN_GROUP_FLAG(group, FAN_REPORT_FID)) {
		/* Do we have path to open a file descriptor? */
		if (!path)
			return 0;
		/* Path type events are only relevant for files and dirs */
		if (!d_is_reg(path->dentry) && !d_can_lookup(path->dentry))
			return 0;
	}

	fsnotify_foreach_obj_type(type) {
		if (!fsnotify_iter_should_report_type(iter_info, type))
			continue;
		mark = iter_info->marks[type];
		/*
		 * If the event is on dir and this mark doesn't care about
		 * events on dir, don't send it!
		 */
		if (event_mask & FS_ISDIR && !(mark->mask & FS_ISDIR))
			continue;

		/*
		 * If the event is for a child and this mark doesn't care about
		 * events on a child, don't send it!
		 */
		if (event_mask & FS_EVENT_ON_CHILD &&
		    (type != FSNOTIFY_OBJ_TYPE_INODE ||
		     !(mark->mask & FS_EVENT_ON_CHILD)))
			continue;

		marks_mask |= mark->mask;
		marks_ignored_mask |= mark->ignored_mask;
	}

	test_mask = event_mask & marks_mask & ~marks_ignored_mask;

	/*
	 * For dirent modification events (create/delete/move) that do not
	 * carry the child entry name information, we report FAN_ONDIR for
	 * mkdir/rmdir so the user can differentiate them from creat/unlink.
	 *
	 * For backward compatibility and consistency, do not report FAN_ONDIR
	 * to user in legacy fanotify mode (reporting fd) and report FAN_ONDIR
	 * to user in FAN_REPORT_FID mode for all event types.
	 */
	if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) {
		/* Do not report FAN_ONDIR without any event */
		if (!(test_mask & ~FAN_ONDIR))
			return 0;
	} else {
		user_mask &= ~FAN_ONDIR;
	}

	return test_mask & user_mask;
}

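/*
 * Worked example (illustrative): a group has a mount mark with mask
 * FAN_OPEN | FAN_MODIFY and an inode mark on one file whose ignored_mask
 * is FAN_OPEN (set via FAN_MARK_IGNORED_MASK). An open of that file yields
 * test_mask = FAN_OPEN & (FAN_OPEN | FAN_MODIFY) & ~FAN_OPEN = 0, so the
 * event is dropped, while a modify of the same file still passes the filter.
 */
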
static void fanotify_encode_fh(struct fanotify_fh *fh, struct inode *inode,
			       gfp_t gfp)
{
	int dwords, type, bytes = 0;
	char *ext_buf = NULL;
	void *buf = fh->buf;
	int err;

	dwords = 0;
	err = -ENOENT;
	/* First pass: ask the filesystem how many dwords the handle needs */
	type = exportfs_encode_inode_fh(inode, NULL, &dwords, NULL);
	if (!dwords)
		goto out_err;

	bytes = dwords << 2;
	if (bytes > FANOTIFY_INLINE_FH_LEN) {
		/* Treat failure to allocate fh as failure to allocate event */
		err = -ENOMEM;
		ext_buf = kmalloc(bytes, gfp);
		if (!ext_buf)
			goto out_err;

		*fanotify_fh_ext_buf_ptr(fh) = ext_buf;
		buf = ext_buf;
	}

	/* Second pass: encode the file handle into the chosen buffer */
	type = exportfs_encode_inode_fh(inode, buf, &dwords, NULL);
	err = -EINVAL;
	if (!type || type == FILEID_INVALID || bytes != dwords << 2)
		goto out_err;

	fh->type = type;
	fh->len = bytes;

	return;

out_err:
	pr_warn_ratelimited("fanotify: failed to encode fid (type=%d, len=%d, err=%i)\n",
			    type, bytes, err);
	kfree(ext_buf);
	*fanotify_fh_ext_buf_ptr(fh) = NULL;
	/* Report the event without a file identifier on encode error */
	fh->type = FILEID_INVALID;
	fh->len = 0;
}

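/*
 * The handle encoded above reaches userspace as a struct
 * fanotify_event_info_fid record that follows the event metadata. A sketch
 * of how a FAN_REPORT_FID listener may map it back to an open file
 * (illustrative; mount_fd is a hypothetical fd on the watched filesystem):
 *
 *	struct fanotify_event_info_fid *fid = (void *)(metadata + 1);
 *	struct file_handle *handle = (struct file_handle *)fid->handle;
 *	int fd = open_by_handle_at(mount_fd, handle, O_RDONLY);
 */
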
/*
 * The inode to use as identifier when reporting fid depends on the event.
 * Report the modified directory inode on dirent modification events.
 * Report the "victim" inode otherwise.
 * For example:
 * FS_ATTRIB reports the child inode even if reported on a watched parent.
 * FS_CREATE reports the modified dir inode and not the created inode.
 */
static struct inode *fanotify_fid_inode(struct inode *to_tell, u32 event_mask,
					const void *data, int data_type)
{
	if (event_mask & ALL_FSNOTIFY_DIRENT_EVENTS)
		return to_tell;

	return (struct inode *)fsnotify_data_inode(data, data_type);
}

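/*
 * Concretely (illustrative): unlink("/dir/file") raises FS_DELETE, a dirent
 * event, so the reported fid identifies /dir; chmod("/dir/file") raises
 * FS_ATTRIB, so the reported fid identifies the file itself.
 */
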
struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group,
					    struct inode *inode, u32 mask,
					    const void *data, int data_type,
					    __kernel_fsid_t *fsid)
{
	struct fanotify_event *event = NULL;
	struct fanotify_fid_event *ffe = NULL;
	gfp_t gfp = GFP_KERNEL_ACCOUNT;
	struct inode *id = fanotify_fid_inode(inode, mask, data, data_type);
	const struct path *path = fsnotify_data_path(data, data_type);

	/*
	 * For queues with unlimited length lost events are not expected and
	 * can possibly have security implications. Avoid losing events when
	 * memory is short. For the limited size queues, avoid OOM killer in
	 * the target monitoring memcg as it may have security repercussion.
	 */
	if (group->max_events == UINT_MAX)
		gfp |= __GFP_NOFAIL;
	else
		gfp |= __GFP_RETRY_MAYFAIL;

	/* Whoever is interested in the event, pays for the allocation. */
	memalloc_use_memcg(group->memcg);

	if (fanotify_is_perm_event(mask)) {
		struct fanotify_perm_event *pevent;

		pevent = kmem_cache_alloc(fanotify_perm_event_cachep, gfp);
		if (!pevent)
			goto out;

		event = &pevent->fae;
		event->type = FANOTIFY_EVENT_TYPE_PATH_PERM;
		pevent->response = 0;
		pevent->state = FAN_EVENT_INIT;
		goto init;
	}

	if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) {
		ffe = kmem_cache_alloc(fanotify_fid_event_cachep, gfp);
		if (!ffe)
			goto out;

		event = &ffe->fae;
		event->type = FANOTIFY_EVENT_TYPE_FID;
	} else {
		struct fanotify_path_event *pevent;

		pevent = kmem_cache_alloc(fanotify_path_event_cachep, gfp);
		if (!pevent)
			goto out;

		event = &pevent->fae;
		event->type = FANOTIFY_EVENT_TYPE_PATH;
	}

init: __maybe_unused
	/*
	 * Use the victim inode instead of the watching inode as the id for
	 * event queue, so event reported on parent is merged with event
	 * reported on child when both directory and child watches exist.
	 */
	fsnotify_init_event(&event->fse, (unsigned long)id);
	event->mask = mask;
	if (FAN_GROUP_FLAG(group, FAN_REPORT_TID))
		event->pid = get_pid(task_pid(current));
	else
		event->pid = get_pid(task_tgid(current));

	if (fanotify_event_object_fh(event)) {
		ffe->object_fh.len = 0;
		if (fsid)
			ffe->fsid = *fsid;
		if (id)
			fanotify_encode_fh(&ffe->object_fh, id, gfp);
	} else if (fanotify_event_has_path(event)) {
		struct path *p = fanotify_event_path(event);

		if (path) {
			*p = *path;
			path_get(path);
		} else {
			p->mnt = NULL;
			p->dentry = NULL;
		}
	}
out:
	memalloc_unuse_memcg();
	return event;
}

/*
 * Get cached fsid of the filesystem containing the object from any connector.
 * All connectors are supposed to have the same fsid, but we do not verify
 * that here.
 */
static __kernel_fsid_t fanotify_get_fsid(struct fsnotify_iter_info *iter_info)
{
	int type;
	__kernel_fsid_t fsid = {};

	fsnotify_foreach_obj_type(type) {
		struct fsnotify_mark_connector *conn;

		if (!fsnotify_iter_should_report_type(iter_info, type))
			continue;

		conn = READ_ONCE(iter_info->marks[type]->connector);
		/* Mark is just getting destroyed or created? */
		if (!conn)
			continue;
		if (!(conn->flags & FSNOTIFY_CONN_FLAG_HAS_FSID))
			continue;
		/* Pairs with smp_wmb() in fsnotify_add_mark_list() */
		smp_rmb();
		fsid = conn->fsid;
		if (WARN_ON_ONCE(!fsid.val[0] && !fsid.val[1]))
			continue;
		return fsid;
	}

	return fsid;
}

static int fanotify_handle_event(struct fsnotify_group *group,
				 struct inode *inode,
				 u32 mask, const void *data, int data_type,
				 const struct qstr *file_name, u32 cookie,
				 struct fsnotify_iter_info *iter_info)
{
	int ret = 0;
	struct fanotify_event *event;
	struct fsnotify_event *fsn_event;
	__kernel_fsid_t fsid = {};

	BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(FAN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(FAN_OPEN != FS_OPEN);
	BUILD_BUG_ON(FAN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(FAN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(FAN_CREATE != FS_CREATE);
	BUILD_BUG_ON(FAN_DELETE != FS_DELETE);
	BUILD_BUG_ON(FAN_DIR_MODIFY != FS_DIR_MODIFY);
	BUILD_BUG_ON(FAN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(FAN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD);
	BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM);
	BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM);
	BUILD_BUG_ON(FAN_ONDIR != FS_ISDIR);
	BUILD_BUG_ON(FAN_OPEN_EXEC != FS_OPEN_EXEC);
	BUILD_BUG_ON(FAN_OPEN_EXEC_PERM != FS_OPEN_EXEC_PERM);

	BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 19);

	mask = fanotify_group_event_mask(group, iter_info, mask, data,
					 data_type);
	if (!mask)
		return 0;

	pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode,
		 mask);

	if (fanotify_is_perm_event(mask)) {
		/*
		 * fsnotify_prepare_user_wait() fails if we race with mark
		 * deletion. Just let the operation pass in that case.
		 */
		if (!fsnotify_prepare_user_wait(iter_info))
			return 0;
	}

	if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) {
		fsid = fanotify_get_fsid(iter_info);
		/* Racing with mark destruction or creation? */
		if (!fsid.val[0] && !fsid.val[1])
			return 0;
	}

	event = fanotify_alloc_event(group, inode, mask, data, data_type,
				     &fsid);
	ret = -ENOMEM;
	if (unlikely(!event)) {
		/*
		 * We don't queue overflow events for permission events as
		 * there the access is denied and so no event is in fact lost.
		 */
		if (!fanotify_is_perm_event(mask))
			fsnotify_queue_overflow(group);
		goto finish;
	}

	fsn_event = &event->fse;
	ret = fsnotify_add_event(group, fsn_event, fanotify_merge);
	if (ret) {
		/* Permission events shouldn't be merged */
		BUG_ON(ret == 1 && mask & FANOTIFY_PERM_EVENTS);
		/* Our event wasn't used in the end. Free it. */
		fsnotify_destroy_event(group, fsn_event);

		ret = 0;
	} else if (fanotify_is_perm_event(mask)) {
		ret = fanotify_get_response(group, FANOTIFY_PERM(event),
					    iter_info);
	}
finish:
	if (fanotify_is_perm_event(mask))
		fsnotify_finish_user_wait(iter_info);

	return ret;
}

static void fanotify_free_group_priv(struct fsnotify_group *group)
{
	struct user_struct *user;

	user = group->fanotify_data.user;
	atomic_dec(&user->fanotify_listeners);
	free_uid(user);
}

static void fanotify_free_path_event(struct fanotify_event *event)
{
	path_put(fanotify_event_path(event));
	kmem_cache_free(fanotify_path_event_cachep, FANOTIFY_PE(event));
}

static void fanotify_free_perm_event(struct fanotify_event *event)
{
	path_put(fanotify_event_path(event));
	kmem_cache_free(fanotify_perm_event_cachep, FANOTIFY_PERM(event));
}

static void fanotify_free_fid_event(struct fanotify_event *event)
{
	struct fanotify_fid_event *ffe = FANOTIFY_FE(event);

	if (fanotify_fh_has_ext_buf(&ffe->object_fh))
		kfree(fanotify_fh_ext_buf(&ffe->object_fh));
	kmem_cache_free(fanotify_fid_event_cachep, ffe);
}

static void fanotify_free_event(struct fsnotify_event *fsn_event)
{
	struct fanotify_event *event;

	event = FANOTIFY_E(fsn_event);
	put_pid(event->pid);
	switch (event->type) {
	case FANOTIFY_EVENT_TYPE_PATH:
		fanotify_free_path_event(event);
		break;
	case FANOTIFY_EVENT_TYPE_PATH_PERM:
		fanotify_free_perm_event(event);
		break;
	case FANOTIFY_EVENT_TYPE_FID:
		fanotify_free_fid_event(event);
		break;
	default:
		WARN_ON_ONCE(1);
	}
}

static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}

const struct fsnotify_ops fanotify_fsnotify_ops = {
	.handle_event = fanotify_handle_event,
	.free_group_priv = fanotify_free_group_priv,
	.free_event = fanotify_free_event,
	.free_mark = fanotify_free_mark,
};
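
/*
 * Usage sketch (userspace, illustrative only): a minimal FAN_REPORT_FID
 * listener that exercises this backend. Error handling is omitted and the
 * watched path "/mnt" is a placeholder:
 *
 *	int fd = fanotify_init(FAN_CLASS_NOTIF | FAN_REPORT_FID, 0);
 *
 *	fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_FILESYSTEM,
 *		      FAN_CREATE | FAN_DELETE | FAN_ONDIR, AT_FDCWD, "/mnt");
 *
 *	char buf[4096];
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *
 * buf then holds struct fanotify_event_metadata records, each followed by a
 * struct fanotify_event_info_fid carrying the fsid and file handle encoded
 * by fanotify_encode_fh() above.
 */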