// SPDX-License-Identifier: GPL-2.0
#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	refcount_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	unsigned long key;
	struct fsnotify_mark *mark;
	struct list_head trees;	/* with root here */
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;	/* index; upper bit indicates 'will prune' */
	} owners[];
};

struct audit_tree_mark {
	struct fsnotify_mark mark;
	struct audit_chunk *chunk;
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;

/*
 * One struct chunk is attached to each inode of interest through
 * audit_tree_mark (fsnotify mark). We replace struct chunk on tagging /
 * untagging; the mark is stable as long as there is a chunk attached. The
 * association between mark and chunk is protected by hash_lock and
 * audit_tree_group->mark_mutex. Thus as long as we hold
 * audit_tree_group->mark_mutex and check that the mark is alive by the
 * FSNOTIFY_MARK_FLAG_ATTACHED flag check, we are sure the mark points to
 * the current chunk.
 *
 * Rules have a pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list		hash_lock
 * tree.rules anchors rule.rlist			audit_filter_mutex
 * chunk.trees anchors tree.same_root			hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with a pointer to it.
 *
 * chunk is refcounted by embedded .refs. The mark associated with the chunk
 * holds one chunk reference. This reference is dropped either when the mark
 * is going to be freed (corresponding inode goes away) or when the chunk
 * attached to the mark gets replaced. This reference must be dropped using
 * audit_mark_put_chunk() to make sure the reference is dropped only after the
 * RCU grace period, as it protects RCU readers of the hash table.
 *
 * node.index lets us get from node.list to the containing chunk.
 * The MSB of that field is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic and
 * that makes a difference. Some.
 */

static struct fsnotify_group *audit_tree_group;
static struct kmem_cache *audit_tree_mark_cachep __read_mostly;
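
/*
 * Allocate a new tree with the watched path copied in; the tree starts out
 * with a single reference, held by the rule that is about to use it.
 */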
static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		refcount_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	refcount_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (refcount_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}
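
/*
 * Free a chunk once its last reference is gone: drop the tree reference
 * held by each still-populated owner slot, then free the chunk itself.
 */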
static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

/*
 * Drop reference to the chunk that was held by the mark. This is the reference
 * that gets dropped after we've removed the chunk from the hash table and we
 * use it to make sure the chunk cannot be freed before the RCU grace period
 * expires.
 */
static void audit_mark_put_chunk(struct audit_chunk *chunk)
{
	call_rcu(&chunk->head, __put_chunk);
}
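
/* helpers to go from a generic fsnotify mark to our wrapper and its chunk */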
static inline struct audit_tree_mark *audit_mark(struct fsnotify_mark *mark)
{
	return container_of(mark, struct audit_tree_mark, mark);
}

static struct audit_chunk *mark_chunk(struct fsnotify_mark *mark)
{
	return audit_mark(mark)->chunk;
}

static void audit_tree_destroy_watch(struct fsnotify_mark *mark)
{
	kmem_cache_free(audit_tree_mark_cachep, audit_mark(mark));
}
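
/*
 * Allocate a mark for audit_tree_group. The group's event handler
 * (audit_tree_handle_event() below) is a no-op; what we actually care about
 * is the ->freeing_mark callback when the watched inode goes away.
 */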
static struct fsnotify_mark *alloc_mark(void)
{
	struct audit_tree_mark *amark;

	amark = kmem_cache_zalloc(audit_tree_mark_cachep, GFP_KERNEL);
	if (!amark)
		return NULL;
	fsnotify_init_mark(&amark->mark, audit_tree_group);
	amark->mark.mask = FS_IN_IGNORED;
	return &amark->mark;
}
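
/*
 * Allocate a chunk with @count owner slots; each slot's index is primed
 * with its array position (callers may set the "will prune" MSB later).
 */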
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	int i;

	chunk = kzalloc(struct_size(chunk, owners, count), GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

/* Function to return search key in our hash from inode. */
static unsigned long inode_to_key(const struct inode *inode)
{
	/* Use address pointed to by connector->obj as the key */
	return (unsigned long)&inode->i_fsnotify_marks;
}

static inline struct list_head *chunk_hash(unsigned long key)
{
	unsigned long n = key / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock & mark->group->mark_mutex is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct list_head *list;

	/*
	 * Make sure chunk is fully initialized before making it visible in the
	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
	 * audit_tree_lookup().
	 */
	smp_wmb();
	WARN_ON_ONCE(!chunk->key);
	list = chunk_hash(chunk->key);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	unsigned long key = inode_to_key(inode);
	struct list_head *list = chunk_hash(key);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		/*
		 * We use a data dependency barrier in READ_ONCE() to make sure
		 * the chunk we see is fully initialized.
		 */
		if (READ_ONCE(p->key) == key) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}
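
/* true if some owner slot of @chunk points at @tree */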
bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;

	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return true;
	return false;
}

/* tagging and untagging inodes with trees */
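
/*
 * The low bits of node->index store the slot's position in chunk->owners[],
 * which lets us step back from an owner slot to the containing chunk.
 */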
static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);

	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}
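
/*
 * Point @mark at @chunk, updating the back pointers of both the old and the
 * new chunk. Called under hash_lock.
 */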
static void replace_mark_chunk(struct fsnotify_mark *mark,
			       struct audit_chunk *chunk)
{
	struct audit_chunk *old;

	assert_spin_locked(&hash_lock);
	old = mark_chunk(mark);
	audit_mark(mark)->chunk = chunk;
	if (chunk)
		chunk->mark = mark;
	if (old)
		old->mark = NULL;
}
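
/*
 * Build @new as a replacement for @old: move the tree attachments across,
 * copy the surviving owner slots (compacting holes left by removed trees),
 * repoint the mark, and finally swap the chunks in the hash under RCU.
 * Called with hash_lock and audit_tree_group->mark_mutex held.
 */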
static void replace_chunk(struct audit_chunk *new, struct audit_chunk *old)
{
	struct audit_tree *owner;
	int i, j;

	new->key = old->key;
	list_splice_init(&old->trees, &new->trees);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	for (i = j = 0; j < old->count; i++, j++) {
		if (!old->owners[j].owner) {
			i--;
			continue;
		}
		owner = old->owners[j].owner;
		new->owners[i].owner = owner;
		new->owners[i].index = old->owners[j].index - j + i;
		if (!owner) /* result of earlier fallback */
			continue;
		get_tree(owner);
		list_replace_init(&old->owners[j].list, &new->owners[i].list);
	}
	replace_mark_chunk(old->mark, new);
	/*
	 * Make sure chunk is fully initialized before making it visible in the
	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
	 * audit_tree_lookup().
	 */
	smp_wmb();
	list_replace_rcu(&old->hash, &new->hash);
}
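
/*
 * Detach one owner slot from its tree: clear tree->root if it pointed at
 * this chunk and drop the tree reference the slot was holding.
 */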
static void remove_chunk_node(struct audit_chunk *chunk, struct node *p)
{
	struct audit_tree *owner = p->owner;

	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
}
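
/* number of owner slots still in use */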
static int chunk_count_trees(struct audit_chunk *chunk)
{
	int i;
	int ret = 0;

	for (i = 0; i < chunk->count; i++)
		if (chunk->owners[i].owner)
			ret++;
	return ret;
}
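
/*
 * Remove @chunk on behalf of a tree being pruned. If other trees still use
 * the chunk, it is replaced by a copy sized for the remaining owners;
 * otherwise the chunk and its mark are torn down completely. Bails out
 * quietly if the mark no longer points at @chunk or if allocating the
 * replacement fails - in that case the old chunk simply stays in place.
 */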
static void untag_chunk(struct audit_chunk *chunk, struct fsnotify_mark *mark)
{
	struct audit_chunk *new;
	int size;

	mutex_lock(&audit_tree_group->mark_mutex);
	/*
	 * mark_mutex stabilizes chunk attached to the mark so we can check
	 * whether it didn't change while we've dropped hash_lock.
	 */
	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) ||
	    mark_chunk(mark) != chunk)
		goto out_mutex;

	size = chunk_count_trees(chunk);
	if (!size) {
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		list_del_rcu(&chunk->hash);
		replace_mark_chunk(mark, NULL);
		spin_unlock(&hash_lock);
		fsnotify_detach_mark(mark);
		mutex_unlock(&audit_tree_group->mark_mutex);
		audit_mark_put_chunk(chunk);
		fsnotify_free_mark(mark);
		return;
	}

	new = alloc_chunk(size);
	if (!new)
		goto out_mutex;

	spin_lock(&hash_lock);
	/*
	 * This has to go last when updating chunk as once replace_chunk() is
	 * called, new RCU readers can see the new chunk.
	 */
	replace_chunk(new, chunk);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_tree_group->mark_mutex);
	audit_mark_put_chunk(chunk);
	return;

out_mutex:
	mutex_unlock(&audit_tree_group->mark_mutex);
}

/* Call with group->mark_mutex held, releases it */
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *mark;
	struct audit_chunk *chunk = alloc_chunk(1);

	if (!chunk) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		return -ENOMEM;
	}

	mark = alloc_mark();
	if (!mark) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		kfree(chunk);
		return -ENOMEM;
	}

	if (fsnotify_add_inode_mark_locked(mark, inode, 0)) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_put_mark(mark);
		kfree(chunk);
		return -ENOSPC;
	}

	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		fsnotify_detach_mark(mark);
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_free_mark(mark);
		fsnotify_put_mark(mark);
		kfree(chunk);
		return 0;
	}
	replace_mark_chunk(mark, chunk);
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	chunk->key = inode_to_key(inode);
	/*
	 * Inserting into the hash table has to go last as once we do that RCU
	 * readers can see the chunk.
	 */
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_tree_group->mark_mutex);
	/*
	 * Drop our initial reference. When the mark we point to is getting
	 * freed, we get notification through the ->freeing_mark callback and
	 * clean up the chunk pointing to this mark.
	 */
	fsnotify_put_mark(mark);
	return 0;
}

/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *mark;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	mutex_lock(&audit_tree_group->mark_mutex);
	mark = fsnotify_find_mark(&inode->i_fsnotify_marks, audit_tree_group);
	if (!mark)
		return create_chunk(inode, tree);

	/*
	 * Found mark is guaranteed to be attached and mark_mutex protects mark
	 * from getting detached and thus it makes sure there is chunk attached
	 * to the mark.
	 */
	/* are we already there? */
	spin_lock(&hash_lock);
	old = mark_chunk(mark);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			mutex_unlock(&audit_tree_group->mark_mutex);
			fsnotify_put_mark(mark);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_put_mark(mark);
		return -ENOMEM;
	}

	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_put_mark(mark);
		kfree(chunk);
		return 0;
	}
	p = &chunk->owners[chunk->count - 1];
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	/*
	 * This has to go last when updating chunk as once replace_chunk() is
	 * called, new RCU readers can see the new chunk.
	 */
	replace_chunk(chunk, old);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_tree_group->mark_mutex);
	fsnotify_put_mark(mark); /* pair to fsnotify_find_mark */
	audit_mark_put_chunk(old);

	return 0;
}
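
/* Emit an AUDIT_CONFIG_CHANGE record for a rule removed with its tree. */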
static void audit_tree_log_remove_rule(struct audit_context *context,
				       struct audit_krule *rule)
{
	struct audit_buffer *ab;

	if (!audit_enabled)
		return;
	ab = audit_log_start(context, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=remove_rule dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}
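
/*
 * Detach every rule hanging off @tree and free it (RCU-delayed), logging a
 * config change for each rule that was fully initialized.
 */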
static void kill_rules(struct audit_context *context, struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_tree_log_remove_rule(context, rule);
			if (entry->rule.exe)
				audit_remove_mark(entry->rule.exe);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * Remove tree from chunks. If 'tagged' is set, remove tree only from tagged
 * chunks. The function expects tagged chunks to be all at the beginning of
 * the chunks list.
 */
static void prune_tree_chunks(struct audit_tree *victim, bool tagged)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;
		struct audit_chunk *chunk;
		struct fsnotify_mark *mark;

		p = list_first_entry(&victim->chunks, struct node, list);
		/* have we run out of marked? */
		if (tagged && !(p->index & (1U<<31)))
			break;
		chunk = find_chunk(p);
		mark = chunk->mark;
		remove_chunk_node(chunk, p);
		/* Racing with audit_tree_freeing_mark()? */
		if (!mark)
			continue;
		fsnotify_get_mark(mark);
		spin_unlock(&hash_lock);

		untag_chunk(chunk, mark);
		fsnotify_put_mark(mark);

		spin_lock(&hash_lock);
	}
	spin_unlock(&hash_lock);
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	prune_tree_chunks(victim, false);
	put_tree(victim);
}

/* trim the uncommitted chunks from tree */
static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;

	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder: move the still-marked chunks to the front of the list */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}
	spin_unlock(&hash_lock);

	prune_tree_chunks(tree, true);

	spin_lock(&hash_lock);
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(audit_context(), tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;

	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}
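
/* iterate_mounts() callback: does this mount's root inode hash to @arg? */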
static int compare_root(struct vfsmount *mnt, void *arg)
{
	return inode_to_key(d_backing_inode(mnt->mnt_root)) ==
	       (unsigned long)arg;
}
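
/*
 * Re-check every tree against the mounts currently under its path and
 * prune taggings that no longer match any of them.
 */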
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (IS_ERR(root_mnt))
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying elsewhere... */
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root,
					   (void *)(chunk->key),
					   root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}
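
/*
 * Validate the shape of a new "dir=" rule and attach a freshly allocated
 * tree to it; the tree is actually populated later, by audit_add_tree_rule().
 */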
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	for (;;) {
		if (list_empty(&prune_list)) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}

		audit_ctl_lock();
		mutex_lock(&audit_filter_mutex);

		while (!list_empty(&prune_list)) {
			struct audit_tree *victim;

			victim = list_entry(prune_list.next,
					struct audit_tree, list);
			list_del_init(&victim->list);

			mutex_unlock(&audit_filter_mutex);

			prune_one(victim);

			mutex_lock(&audit_filter_mutex);
		}

		mutex_unlock(&audit_filter_mutex);
		audit_ctl_unlock();
	}
	return 0;
}

static int audit_launch_prune(void)
{
	if (prune_thread)
		return 0;
	prune_thread = kthread_run(prune_tree_thread, NULL,
				"audit_prune_tree");
	if (IS_ERR(prune_thread)) {
		pr_err("cannot start thread audit_prune_tree");
		prune_thread = NULL;
		return -ENOMEM;
	}
	return 0;
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	rule->tree = NULL;
	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	if (unlikely(!prune_thread)) {
		err = audit_launch_prune();
		if (err)
			goto Err;
	}

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}
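
/*
 * Tag every tree whose path lies under @old with the mounts collected at
 * @new; used to make two directory subtrees equivalent for audit purposes
 * (AUDIT_MAKE_EQUIV).
 */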
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (IS_ERR(tagged))
		return PTR_ERR(tagged);

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;

			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}

	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}
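
/* wake the prune thread to process whatever is on prune_list */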
static void audit_schedule_prune(void)
{
	wake_up_process(prune_thread);
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall. Runs synchronously.
 */
void audit_kill_trees(struct audit_context *context)
{
	struct list_head *list = &context->killed_trees;

	audit_ctl_lock();
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(context, victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	audit_ctl_unlock();
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */
static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(audit_context(), owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
	if (need_prune)
		audit_schedule_prune();
}
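
/*
 * We never care about the events themselves; the mark is only there so
 * that ->freeing_mark runs when the watched inode is evicted.
 */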
static int audit_tree_handle_event(struct fsnotify_mark *mark, u32 mask,
				   struct inode *inode, struct inode *dir,
				   const struct qstr *file_name)
{
	return 0;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *mark,
				    struct fsnotify_group *group)
{
	struct audit_chunk *chunk;

	mutex_lock(&mark->group->mark_mutex);
	spin_lock(&hash_lock);
	chunk = mark_chunk(mark);
	replace_mark_chunk(mark, NULL);
	spin_unlock(&hash_lock);
	mutex_unlock(&mark->group->mark_mutex);
	if (chunk) {
		evict_chunk(chunk);
		audit_mark_put_chunk(chunk);
	}

	/*
	 * We are guaranteed to have at least one reference to the mark from
	 * either the inode or the caller of fsnotify_destroy_mark().
	 */
	BUG_ON(refcount_read(&mark->refcnt) < 1);
}

static const struct fsnotify_ops audit_tree_ops = {
	.handle_inode_event = audit_tree_handle_event,
	.freeing_mark = audit_tree_freeing_mark,
	.free_mark = audit_tree_destroy_watch,
};

static int __init audit_tree_init(void)
{
	int i;

	audit_tree_mark_cachep = KMEM_CACHE(audit_tree_mark, SLAB_PANIC);

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);