// SPDX-License-Identifier: GPL-2.0
#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	refcount_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

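/*
 * One chunk hangs off each tagged inode via the embedded fsnotify mark;
 * owners[] records which trees claim this inode.
 */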
struct audit_chunk {
	struct list_head hash;
	unsigned long key;
	struct fsnotify_mark mark;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list		hash_lock
 * tree.rules anchors rule.rlist			audit_filter_mutex
 * chunk.trees anchors tree.same_root			hash_lock
 * chunk.hash is a hash with middle bits of chunk->key as
 * a hash function.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows to get from node.list to containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logics and
 * that makes a difference.  Some.
 */

static struct fsnotify_group *audit_tree_group;

static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		refcount_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

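/*
 * Tree refcounting: one reference is held for "some rules refer to this
 * tree", plus one per chunk that points back at it; freeing is RCU
 * deferred because lookups walk these structures under rcu_read_lock.
 */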
static inline void get_tree(struct audit_tree *tree)
{
	refcount_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (refcount_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
	call_rcu(&chunk->head, __put_chunk);
}

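/*
 * A chunk is a single allocation: the header plus a flexible array of
 * owner slots, sized with offsetof() below.  Each node's index records
 * its slot so find_chunk() can recover the containing chunk later.
 */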
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	fsnotify_init_mark(&chunk->mark, audit_tree_group);
	chunk->mark.mask = FS_IN_IGNORED;
	return chunk;
}

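/*
 * Chunk hash table.  The key is the address of the inode's fsnotify
 * mark list (see inode_to_key() below), which is stable for the inode's
 * lifetime and available on both insertion and lookup paths.
 */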
enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

/* Function to return search key in our hash from inode. */
static unsigned long inode_to_key(const struct inode *inode)
{
	/* Use address pointed to by connector->obj as the key */
	return (unsigned long)&inode->i_fsnotify_marks;
}

static inline struct list_head *chunk_hash(unsigned long key)
{
	unsigned long n = key / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock & entry->lock is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct list_head *list;

	if (!(chunk->mark.flags & FSNOTIFY_MARK_FLAG_ATTACHED))
		return;
	WARN_ON_ONCE(!chunk->key);
	list = chunk_hash(chunk->key);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	unsigned long key = inode_to_key(inode);
	struct list_head *list = chunk_hash(key);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		if (p->key == key) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}

bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return true;
	return false;
}

/* tagging and untagging inodes with trees */

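/*
 * Map a node back to its chunk: mask off the 'will prune' MSB to get
 * the slot index, step back to owners[0] and use container_of() on the
 * flexible array member.
 */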
static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}

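/*
 * Drop node *p's tree from its chunk.  Chunks are never resized in
 * place: the chunk is replaced by a copy with one owner slot fewer, or
 * destroyed outright if this was the last owner.  Called with hash_lock
 * held; drops and retakes it around the blocking work.
 */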
static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct fsnotify_mark *entry = &chunk->mark;
	struct audit_chunk *new = NULL;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	fsnotify_get_mark(entry);

	spin_unlock(&hash_lock);

	if (size)
		new = alloc_chunk(size);

	mutex_lock(&entry->group->mark_mutex);
	spin_lock(&entry->lock);
	/*
	 * mark_mutex protects mark from getting detached and thus also from
	 * mark->connector->obj getting NULL.
	 */
	if (chunk->dead || !(entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
		spin_unlock(&entry->lock);
		mutex_unlock(&entry->group->mark_mutex);
		if (new)
			fsnotify_put_mark(&new->mark);
		goto out;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		spin_unlock(&entry->lock);
		mutex_unlock(&entry->group->mark_mutex);
		fsnotify_destroy_mark(entry, audit_tree_group);
		goto out;
	}

	if (!new)
		goto Fallback;

	if (fsnotify_add_mark_locked(&new->mark, entry->connector->obj,
				     FSNOTIFY_OBJ_TYPE_INODE, 1)) {
		fsnotify_put_mark(&new->mark);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	new->key = chunk->key;
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	for (i = j = 0; j <= size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	mutex_unlock(&entry->group->mark_mutex);
	fsnotify_destroy_mark(entry, audit_tree_group);
	fsnotify_put_mark(&new->mark);	/* drop initial reference */
	goto out;

Fallback:
	// do the best we can
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	mutex_unlock(&entry->group->mark_mutex);
out:
	fsnotify_put_mark(entry);
	spin_lock(&hash_lock);
}

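/*
 * Attach a fresh single-owner chunk to an inode that carries no audit
 * mark yet.  The first chunk attached for a tree also becomes
 * tree->root.
 */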
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *entry;
	struct audit_chunk *chunk = alloc_chunk(1);

	if (!chunk)
		return -ENOMEM;
	entry = &chunk->mark;
	if (fsnotify_add_inode_mark(entry, inode, 0)) {
		fsnotify_put_mark(entry);
		return -ENOSPC;
	}

	spin_lock(&entry->lock);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		fsnotify_put_mark(entry);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	chunk->key = inode_to_key(inode);
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_put_mark(entry);	/* drop initial reference */
	return 0;
}

/* the first tagged inode becomes root of tree */
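/*
 * Add @tree as an owner of the chunk on @inode, growing the chunk by
 * one slot through the same copy-and-replace scheme untag_chunk() uses
 * for shrinking.
 */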
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *old_entry, *chunk_entry;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	old_entry = fsnotify_find_mark(&inode->i_fsnotify_marks,
				       audit_tree_group);
	if (!old_entry)
		return create_chunk(inode, tree);

	old = container_of(old_entry, struct audit_chunk, mark);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			fsnotify_put_mark(old_entry);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		fsnotify_put_mark(old_entry);
		return -ENOMEM;
	}

	chunk_entry = &chunk->mark;

	mutex_lock(&old_entry->group->mark_mutex);
	spin_lock(&old_entry->lock);
	/*
	 * mark_mutex protects mark from getting detached and thus also from
	 * mark->connector->obj getting NULL.
	 */
	if (!(old_entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
		/* old_entry is being shot, lets just lie */
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);
		fsnotify_put_mark(old_entry);
		fsnotify_put_mark(&chunk->mark);
		return -ENOENT;
	}

	if (fsnotify_add_mark_locked(chunk_entry, old_entry->connector->obj,
				     FSNOTIFY_OBJ_TYPE_INODE, 1)) {
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);
		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return -ENOSPC;
	}

	/* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */
	spin_lock(&chunk_entry->lock);
	spin_lock(&hash_lock);

	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&chunk_entry->lock);
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);

		fsnotify_destroy_mark(chunk_entry, audit_tree_group);

		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return 0;
	}
	chunk->key = old->key;
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	spin_unlock(&chunk_entry->lock);
	spin_unlock(&old_entry->lock);
	mutex_unlock(&old_entry->group->mark_mutex);
	fsnotify_destroy_mark(old_entry, audit_tree_group);
	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
	fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
	return 0;
}

static void audit_tree_log_remove_rule(struct audit_krule *rule)
{
	struct audit_buffer *ab;

	if (!audit_enabled)
		return;
	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=remove_rule");
	audit_log_format(ab, " dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}

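/*
 * Detach every rule on @tree, logging a CONFIG_CHANGE record for each
 * fully initialized one; runs under audit_filter_mutex.
 */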
static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_tree_log_remove_rule(rule);
			if (entry->rule.exe)
				audit_remove_mark(entry->rule.exe);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}

/* trim the uncommitted chunks from tree */

static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder: put the marked chunks first */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

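/* iterate_mounts() callback: does this mount's root match the chunk key? */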
static int compare_root(struct vfsmount *mnt, void *arg)
{
	return inode_to_key(d_backing_inode(mnt->mnt_root)) ==
	       (unsigned long)arg;
}

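/*
 * Re-resolve each tree's pathname and untag chunks that are no longer
 * under any of its mounts.  A cursor node in tree_list lets us drop
 * audit_filter_mutex while working on one tree.
 */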
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (IS_ERR(root_mnt))
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying else where... */
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root,
					   (void *)(chunk->key),
					   root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

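/*
 * Validate an incoming tree rule (absolute path, exit filter list,
 * equality comparator, no competing watch) and attach a freshly
 * allocated tree to it.
 */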
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	for (;;) {
		if (list_empty(&prune_list)) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}

		audit_ctl_lock();
		mutex_lock(&audit_filter_mutex);

		while (!list_empty(&prune_list)) {
			struct audit_tree *victim;

			victim = list_entry(prune_list.next,
					struct audit_tree, list);
			list_del_init(&victim->list);

			mutex_unlock(&audit_filter_mutex);

			prune_one(victim);

			mutex_lock(&audit_filter_mutex);
		}

		mutex_unlock(&audit_filter_mutex);
		audit_ctl_unlock();
	}
	return 0;
}

static int audit_launch_prune(void)
{
	if (prune_thread)
		return 0;
	prune_thread = kthread_run(prune_tree_thread, NULL,
				"audit_prune_tree");
	if (IS_ERR(prune_thread)) {
		pr_err("cannot start thread audit_prune_tree");
		prune_thread = NULL;
		return -ENOMEM;
	}
	return 0;
}

/* called with audit_filter_mutex */
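/*
 * The mutex is dropped while mounts are collected and tagged, so the
 * rule can be yanked out from under us; rlist emptiness is re-checked
 * once the mutex is retaken, before rule->tree is finally set.
 */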
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	rule->tree = NULL;
	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	if (unlikely(!prune_thread)) {
		err = audit_launch_prune();
		if (err)
			goto Err;
	}

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}

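/*
 * Apply tags for the mounts collected at @new to every tree whose path
 * lies under @old.  The cursor and barrier list heads let the filter
 * mutex be dropped per tree; trees already processed sit behind the
 * barrier and get their 'will prune' bits committed or rolled back in
 * the second loop.
 */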
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (IS_ERR(tagged))
		return PTR_ERR(tagged);

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}

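/* kick the prune thread; callers have already queued work on prune_list */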
static void audit_schedule_prune(void)
{
	wake_up_process(prune_thread);
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall. Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
	audit_ctl_lock();
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	audit_ctl_unlock();
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */
static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
	if (need_prune)
		audit_schedule_prune();
}

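/*
 * The events themselves are ignored; the group exists so that
 * freeing_mark fires (and evict_chunk() runs) when a watched inode
 * goes away.
 */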
static int audit_tree_handle_event(struct fsnotify_group *group,
				   struct inode *to_tell,
				   u32 mask, const void *data, int data_type,
				   const unsigned char *file_name, u32 cookie,
				   struct fsnotify_iter_info *iter_info)
{
	return 0;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

	evict_chunk(chunk);

	/*
	 * We are guaranteed to have at least one reference to the mark from
	 * either the inode or the caller of fsnotify_destroy_mark().
	 */
	BUG_ON(refcount_read(&entry->refcnt) < 1);
}

static const struct fsnotify_ops audit_tree_ops = {
	.handle_event = audit_tree_handle_event,
	.freeing_mark = audit_tree_freeing_mark,
	.free_mark = audit_tree_destroy_watch,
};

static int __init audit_tree_init(void)
{
	int i;

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);