#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/compat.h>

#include <asm/ioctls.h>

#include "../../mount.h"
#include "../fdinfo.h"
#include "fanotify.h"
#define FANOTIFY_DEFAULT_MAX_EVENTS	16384
#define FANOTIFY_DEFAULT_MAX_MARKS	8192
#define FANOTIFY_DEFAULT_MAX_LISTENERS	128
/*
 * All flags that may be specified in parameter event_f_flags of fanotify_init.
 *
 * Internal and external open flags are stored together in field f_flags of
 * struct file. Only external open flags shall be allowed in event_f_flags.
 * Internal flags like FMODE_NONOTIFY, FMODE_EXEC, FMODE_NOCMTIME shall be
 * excluded.
 */
#define FANOTIFY_INIT_ALL_EVENT_F_BITS	( \
		O_ACCMODE | O_APPEND | O_NONBLOCK | \
		__O_SYNC | O_DSYNC | O_CLOEXEC | \
		O_LARGEFILE | O_NOATIME )
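
/*
 * For illustration only, a minimal userspace sketch (not part of this file):
 * only the external open flags above are accepted in event_f_flags, anything
 * else is rejected with -EINVAL by fanotify_init().
 *
 *	int fd;
 *
 *	fd = fanotify_init(FAN_CLASS_NOTIF, O_RDONLY | O_LARGEFILE);	// accepted
 *	fd = fanotify_init(FAN_CLASS_NOTIF, O_CREAT);			// -EINVAL
 */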
extern const struct fsnotify_ops fanotify_fsnotify_ops;

static struct kmem_cache *fanotify_mark_cache __read_mostly;
struct kmem_cache *fanotify_event_cachep __read_mostly;
struct kmem_cache *fanotify_perm_event_cachep __read_mostly;
/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_lock held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	assert_spin_locked(&group->notification_lock);

	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	if (FAN_EVENT_METADATA_LEN > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_lock the whole time, so this is the
	 * same event we peeked above */
	return fsnotify_remove_first_event(group);
}
static int create_fd(struct fsnotify_group *group,
		     struct fanotify_event_info *event,
		     struct file **file)
{
	int client_fd;
	struct file *new_file;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	client_fd = get_unused_fd_flags(group->fanotify_data.f_flags);
	if (client_fd < 0)
		return client_fd;

	/*
	 * We need a new file handle for the userspace program so it can read
	 * even if the file was originally opened O_WRONLY.
	 */
	/* it's possible this event was an overflow event. in that case dentry and mnt
	 * are NULL; that's fine, just don't call dentry_open */
	if (event->path.dentry && event->path.mnt)
		new_file = dentry_open(&event->path,
				       group->fanotify_data.f_flags | FMODE_NONOTIFY,
				       current_cred());
	else
		new_file = ERR_PTR(-EOVERFLOW);
	if (IS_ERR(new_file)) {
		/*
		 * we still send an event even if we can't open the file. this
		 * can happen when say tasks are gone and we try to open their
		 * /proc files or we try to open a WRONLY file like in sysfs.
		 * we just send the errno to userspace since there isn't much
		 * else we can do.
		 */
		put_unused_fd(client_fd);
		client_fd = PTR_ERR(new_file);
	} else {
		*file = new_file;
	}

	return client_fd;
}
static int fill_event_metadata(struct fsnotify_group *group,
			       struct fanotify_event_metadata *metadata,
			       struct fsnotify_event *fsn_event,
			       struct file **file)
{
	int ret = 0;
	struct fanotify_event_info *event;

	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
		 group, metadata, fsn_event);

	*file = NULL;
	event = container_of(fsn_event, struct fanotify_event_info, fse);
	metadata->event_len = FAN_EVENT_METADATA_LEN;
	metadata->metadata_len = FAN_EVENT_METADATA_LEN;
	metadata->vers = FANOTIFY_METADATA_VERSION;
	metadata->reserved = 0;
	metadata->mask = fsn_event->mask & FAN_ALL_OUTGOING_EVENTS;
	metadata->pid = pid_vnr(event->tgid);
	if (unlikely(fsn_event->mask & FAN_Q_OVERFLOW))
		metadata->fd = FAN_NOFD;
	else {
		metadata->fd = create_fd(group, event, file);
		if (metadata->fd < 0)
			ret = metadata->fd;
	}

	return ret;
}
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
static struct fanotify_perm_event_info *dequeue_event(
				struct fsnotify_group *group, int fd)
{
	struct fanotify_perm_event_info *event, *return_e = NULL;

	spin_lock(&group->notification_lock);
	list_for_each_entry(event, &group->fanotify_data.access_list,
			    fae.fse.list) {
		if (event->fd != fd)
			continue;
		list_del_init(&event->fae.fse.list);
		return_e = event;
		break;
	}
	spin_unlock(&group->notification_lock);

	pr_debug("%s: found return_e=%p\n", __func__, return_e);
	return return_e;
}
static int process_access_response(struct fsnotify_group *group,
				   struct fanotify_response *response_struct)
{
	struct fanotify_perm_event_info *event;
	int fd = response_struct->fd;
	int response = response_struct->response;

	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
		 fd, response);
	/*
	 * make sure the response is valid, if invalid we do nothing and either
	 * userspace can send a valid response or we will clean it up after the
	 * timeout
	 */
	if (response != FAN_ALLOW && response != FAN_DENY)
		return -EINVAL;

	if (fd < 0)
		return -EINVAL;

	event = dequeue_event(group, fd);
	if (!event)
		return -ENOENT;

	event->response = response;
	wake_up(&group->fanotify_data.access_waitq);

	return 0;
}
#endif
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct fanotify_event_metadata fanotify_event_metadata;
	struct file *f;
	int fd, ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f);
	if (ret < 0)
		return ret;

	fd = fanotify_event_metadata.fd;
	ret = -EFAULT;
	if (copy_to_user(buf, &fanotify_event_metadata,
			 fanotify_event_metadata.event_len))
		goto out_close_fd;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (event->mask & FAN_ALL_PERM_EVENTS)
		FANOTIFY_PE(event)->fd = fd;
#endif

	if (fd != FAN_NOFD)
		fd_install(fd, f);
	return fanotify_event_metadata.event_len;

out_close_fd:
	if (fd != FAN_NOFD) {
		put_unused_fd(fd);
		fput(f);
	}
	return ret;
}
/* fanotify userspace file descriptor functions */
static unsigned int fanotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	spin_lock(&group->notification_lock);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	spin_unlock(&group->notification_lock);

	return ret;
}
static ssize_t fanotify_read(struct file *file, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	start = buf;
	group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	add_wait_queue(&group->notification_waitq, &wait);
	while (1) {
		spin_lock(&group->notification_lock);
		kevent = get_one_event(group, count);
		spin_unlock(&group->notification_lock);

		if (IS_ERR(kevent)) {
			ret = PTR_ERR(kevent);
			break;
		}

		if (!kevent) {
			ret = -EAGAIN;
			if (file->f_flags & O_NONBLOCK)
				break;

			ret = -ERESTARTSYS;
			if (signal_pending(current))
				break;

			if (start != buf)
				break;

			wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		ret = copy_event_to_user(group, kevent, buf);
		if (unlikely(ret == -EOPENSTALE)) {
			/*
			 * We cannot report events with stale fd so drop it.
			 * Setting ret to 0 will continue the event loop and
			 * do the right thing if there are no more events to
			 * read (i.e. return bytes read, -EAGAIN or wait).
			 */
			ret = 0;
		}

		/*
		 * Permission events get queued to wait for response. Other
		 * events can be destroyed now.
		 */
		if (!(kevent->mask & FAN_ALL_PERM_EVENTS)) {
			fsnotify_destroy_event(group, kevent);
			if (ret < 0)
				break;
		} else {
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
			if (ret < 0) {
				FANOTIFY_PE(kevent)->response = FAN_DENY;
				wake_up(&group->fanotify_data.access_waitq);
				break;
			}
			spin_lock(&group->notification_lock);
			list_add_tail(&kevent->list,
				      &group->fanotify_data.access_list);
			spin_unlock(&group->notification_lock);
#endif
		}
		buf += ret;
		count -= ret;
	}
	remove_wait_queue(&group->notification_waitq, &wait);

	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
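
/*
 * For illustration only, a minimal userspace consumer sketch (not part of
 * this file): events arrive as packed struct fanotify_event_metadata records
 * and are walked with the FAN_EVENT_OK()/FAN_EVENT_NEXT() helpers from
 * <sys/fanotify.h>. The buffer size is an arbitrary choice.
 *
 *	char buf[4096];
 *	ssize_t len = read(fanotify_fd, buf, sizeof(buf));
 *	struct fanotify_event_metadata *md = (void *)buf;
 *
 *	while (FAN_EVENT_OK(md, len)) {
 *		if (md->vers != FANOTIFY_METADATA_VERSION)
 *			break;
 *		if (md->mask & FAN_OPEN)
 *			printf("pid %d opened fd %d\n", md->pid, md->fd);
 *		if (md->fd != FAN_NOFD)
 *			close(md->fd);
 *		md = FAN_EVENT_NEXT(md, len);
 *	}
 */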
static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response response = { .fd = -1, .response = -1 };
	struct fsnotify_group *group;
	ssize_t ret;

	group = file->private_data;

	if (count > sizeof(response))
		count = sizeof(response);

	pr_debug("%s: group=%p count=%zu\n", __func__, group, count);

	if (copy_from_user(&response, buf, count))
		return -EFAULT;

	ret = process_access_response(group, &response);
	if (ret < 0)
		count = ret;

	return count;
#else
	return -EINVAL;
#endif
}
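
/*
 * For illustration only, a userspace sketch of answering a permission event
 * (not part of this file): after reading a FAN_OPEN_PERM or FAN_ACCESS_PERM
 * event (md as in the read sketch above), the listener writes back the
 * event's fd together with FAN_ALLOW or FAN_DENY.
 *
 *	struct fanotify_response resp;
 *
 *	resp.fd = md->fd;
 *	resp.response = FAN_ALLOW;	// or FAN_DENY
 *	write(fanotify_fd, &resp, sizeof(resp));
 */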
static int fanotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_perm_event_info *event, *next;
	struct fsnotify_event *fsn_event;

	/*
	 * Stop new events from arriving in the notification queue. Since
	 * userspace cannot use the fanotify fd anymore, no event can enter or
	 * leave access_list by now either.
	 */
	fsnotify_group_stop_queueing(group);

	/*
	 * Process all permission events on access_list and notification queue
	 * and simulate reply from userspace.
	 */
	spin_lock(&group->notification_lock);
	list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
				 fae.fse.list) {
		pr_debug("%s: found group=%p event=%p\n", __func__, group,
			 event);

		list_del_init(&event->fae.fse.list);
		event->response = FAN_ALLOW;
	}

	/*
	 * Destroy all non-permission events. For permission events just
	 * dequeue them and set the response. They will be freed once the
	 * response is consumed and fanotify_get_response() returns.
	 */
	while (!fsnotify_notify_queue_is_empty(group)) {
		fsn_event = fsnotify_remove_first_event(group);
		if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS)) {
			spin_unlock(&group->notification_lock);
			fsnotify_destroy_event(group, fsn_event);
			spin_lock(&group->notification_lock);
		} else
			FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
	}
	spin_unlock(&group->notification_lock);

	/* Response for all permission events is set, wake up waiters */
	wake_up(&group->fanotify_data.access_waitq);
#endif

	/* matches the fanotify_init->fsnotify_alloc_group */
	fsnotify_destroy_group(group);

	return 0;
}
static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;

	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		spin_lock(&group->notification_lock);
		list_for_each_entry(fsn_event, &group->notification_list, list)
			send_len += FAN_EVENT_METADATA_LEN;
		spin_unlock(&group->notification_lock);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}
static const struct file_operations fanotify_fops = {
	.show_fdinfo	= fanotify_show_fdinfo,
	.poll		= fanotify_poll,
	.read		= fanotify_read,
	.write		= fanotify_write,
	.fasync		= NULL,
	.release	= fanotify_release,
	.unlocked_ioctl	= fanotify_ioctl,
	.compat_ioctl	= fanotify_ioctl,
	.llseek		= noop_llseek,
};
static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}
static int fanotify_find_path(int dfd, const char __user *filename,
			      struct path *path, unsigned int flags)
{
	int ret;

	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
		 dfd, filename, flags);

	if (filename == NULL) {
		struct fd f = fdget(dfd);

		ret = -EBADF;
		if (!f.file)
			goto out;

		ret = -ENOTDIR;
		if ((flags & FAN_MARK_ONLYDIR) &&
		    !(S_ISDIR(file_inode(f.file)->i_mode))) {
			fdput(f);
			goto out;
		}

		*path = f.file->f_path;
		path_get(path);
		fdput(f);
	} else {
		unsigned int lookup_flags = 0;

		if (!(flags & FAN_MARK_DONT_FOLLOW))
			lookup_flags |= LOOKUP_FOLLOW;
		if (flags & FAN_MARK_ONLYDIR)
			lookup_flags |= LOOKUP_DIRECTORY;

		ret = user_path_at(dfd, filename, lookup_flags, path);
		if (ret)
			goto out;
	}

	/* you can only watch an inode if you have read permissions on it */
	ret = inode_permission(path->dentry->d_inode, MAY_READ);
	if (ret)
		path_put(path);
out:
	return ret;
}
static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
					    __u32 mask, unsigned int flags,
					    int *destroy)
{
	__u32 oldmask = 0;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		__u32 tmask = fsn_mark->mask & ~mask;

		if (flags & FAN_MARK_ONDIR)
			tmask &= ~FAN_ONDIR;

		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, tmask);
	} else {
		__u32 tmask = fsn_mark->ignored_mask & ~mask;
		if (flags & FAN_MARK_ONDIR)
			tmask &= ~FAN_ONDIR;

		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
	}
	*destroy = !(fsn_mark->mask | fsn_mark->ignored_mask);
	spin_unlock(&fsn_mark->lock);

	return mask & oldmask;
}
static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
					 struct vfsmount *mnt, __u32 mask,
					 unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	int destroy_mark;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		mutex_unlock(&group->mark_mutex);
		return -ENOENT;
	}

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
						 &destroy_mark);
	if (destroy_mark)
		fsnotify_detach_mark(fsn_mark);
	mutex_unlock(&group->mark_mutex);
	if (destroy_mark)
		fsnotify_free_mark(fsn_mark);

	fsnotify_put_mark(fsn_mark);
	if (removed & real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	return 0;
}
static int fanotify_remove_inode_mark(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	int destroy_mark;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		mutex_unlock(&group->mark_mutex);
		return -ENOENT;
	}

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
						 &destroy_mark);
	if (destroy_mark)
		fsnotify_detach_mark(fsn_mark);
	mutex_unlock(&group->mark_mutex);
	if (destroy_mark)
		fsnotify_free_mark(fsn_mark);

	/* matches the fsnotify_find_inode_mark() */
	fsnotify_put_mark(fsn_mark);
	if (removed & inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);

	return 0;
}
static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
				       __u32 mask, unsigned int flags)
{
	__u32 oldmask = -1;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		__u32 tmask = fsn_mark->mask | mask;

		if (flags & FAN_MARK_ONDIR)
			tmask |= FAN_ONDIR;

		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, tmask);
	} else {
		__u32 tmask = fsn_mark->ignored_mask | mask;
		if (flags & FAN_MARK_ONDIR)
			tmask |= FAN_ONDIR;

		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
		if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
			fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
	}
	spin_unlock(&fsn_mark->lock);

	return mask & ~oldmask;
}
static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
						   struct inode *inode,
						   struct vfsmount *mnt)
{
	struct fsnotify_mark *mark;
	int ret;

	if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
		return ERR_PTR(-ENOSPC);

	mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
	if (!mark)
		return ERR_PTR(-ENOMEM);

	fsnotify_init_mark(mark, fanotify_free_mark);
	ret = fsnotify_add_mark_locked(mark, group, inode, mnt, 0);
	if (ret) {
		fsnotify_put_mark(mark);
		return ERR_PTR(ret);
	}

	return mark;
}
static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
				      struct vfsmount *mnt, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		fsn_mark = fanotify_add_new_mark(group, NULL, mnt);
		if (IS_ERR(fsn_mark)) {
			mutex_unlock(&group->mark_mutex);
			return PTR_ERR(fsn_mark);
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	mutex_unlock(&group->mark_mutex);

	if (added & ~real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	fsnotify_put_mark(fsn_mark);
	return 0;
}
static int fanotify_add_inode_mark(struct fsnotify_group *group,
				   struct inode *inode, __u32 mask,
				   unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

	/*
	 * If some other task has this inode open for write we should not add
	 * an ignored mark, unless that ignored mark is supposed to survive
	 * modification changes anyway.
	 */
	if ((flags & FAN_MARK_IGNORED_MASK) &&
	    !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
	    (atomic_read(&inode->i_writecount) > 0))
		return 0;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		fsn_mark = fanotify_add_new_mark(group, inode, NULL);
		if (IS_ERR(fsn_mark)) {
			mutex_unlock(&group->mark_mutex);
			return PTR_ERR(fsn_mark);
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	mutex_unlock(&group->mark_mutex);

	if (added & ~inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);

	fsnotify_put_mark(fsn_mark);
	return 0;
}
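
/*
 * For illustration only, a userspace sketch of the ignore-mask case checked
 * above (not part of this file, and the path is just an example): marking a
 * log file so its events stay suppressed even while other tasks keep it open
 * for writing requires FAN_MARK_IGNORED_SURV_MODIFY.
 *
 *	fanotify_mark(fanotify_fd,
 *		      FAN_MARK_ADD | FAN_MARK_IGNORED_MASK |
 *		      FAN_MARK_IGNORED_SURV_MODIFY,
 *		      FAN_MODIFY | FAN_CLOSE_WRITE, AT_FDCWD,
 *		      "/var/log/syslog");
 */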
/* fanotify syscalls */
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
	struct fsnotify_group *group;
	int f_flags, fd;
	struct user_struct *user;
	struct fanotify_event_info *oevent;

	pr_debug("%s: flags=%d event_f_flags=%d\n",
		 __func__, flags, event_f_flags);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (flags & ~FAN_ALL_INIT_FLAGS)
		return -EINVAL;

	if (event_f_flags & ~FANOTIFY_INIT_ALL_EVENT_F_BITS)
		return -EINVAL;

	switch (event_f_flags & O_ACCMODE) {
	case O_RDONLY:
	case O_RDWR:
	case O_WRONLY:
		break;
	default:
		return -EINVAL;
	}

	user = get_current_user();
	if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
		free_uid(user);
		return -EMFILE;
	}

	f_flags = O_RDWR | FMODE_NONOTIFY;
	if (flags & FAN_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (flags & FAN_NONBLOCK)
		f_flags |= O_NONBLOCK;

	/* fsnotify_alloc_group takes a ref. Dropped in fanotify_release */
	group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
	if (IS_ERR(group)) {
		free_uid(user);
		return PTR_ERR(group);
	}

	group->fanotify_data.user = user;
	atomic_inc(&user->fanotify_listeners);

	oevent = fanotify_alloc_event(NULL, FS_Q_OVERFLOW, NULL);
	if (unlikely(!oevent)) {
		fd = -ENOMEM;
		goto out_destroy_group;
	}
	group->overflow_event = &oevent->fse;

	if (force_o_largefile())
		event_f_flags |= O_LARGEFILE;
	group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	init_waitqueue_head(&group->fanotify_data.access_waitq);
	INIT_LIST_HEAD(&group->fanotify_data.access_list);
#endif
	switch (flags & FAN_ALL_CLASS_BITS) {
	case FAN_CLASS_NOTIF:
		group->priority = FS_PRIO_0;
		break;
	case FAN_CLASS_CONTENT:
		group->priority = FS_PRIO_1;
		break;
	case FAN_CLASS_PRE_CONTENT:
		group->priority = FS_PRIO_2;
		break;
	default:
		fd = -EINVAL;
		goto out_destroy_group;
	}

	if (flags & FAN_UNLIMITED_QUEUE) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_destroy_group;
		group->max_events = UINT_MAX;
	} else {
		group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
	}

	if (flags & FAN_UNLIMITED_MARKS) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_destroy_group;
		group->fanotify_data.max_marks = UINT_MAX;
	} else {
		group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
	}

	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
	if (fd < 0)
		goto out_destroy_group;

	return fd;

out_destroy_group:
	fsnotify_destroy_group(group);
	return fd;
}
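
/*
 * For illustration only, a userspace sketch of creating a listener (not part
 * of this file): a privileged (CAP_SYS_ADMIN) process picks a notification
 * class and the open flags used for the per-event fds handed back in events.
 *
 *	int fanotify_fd;
 *
 *	fanotify_fd = fanotify_init(FAN_CLASS_CONTENT | FAN_CLOEXEC,
 *				    O_RDONLY | O_LARGEFILE);
 *	if (fanotify_fd < 0)
 *		perror("fanotify_init");
 */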
SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
			      __u64, mask, int, dfd,
			      const char  __user *, pathname)
{
	struct inode *inode = NULL;
	struct vfsmount *mnt = NULL;
	struct fsnotify_group *group;
	struct fd f;
	struct path path;
	int ret;

	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
		 __func__, fanotify_fd, flags, dfd, pathname, mask);

	/* we only use the lower 32 bits as of right now. */
	if (mask & ((__u64)0xffffffff << 32))
		return -EINVAL;

	if (flags & ~FAN_ALL_MARK_FLAGS)
		return -EINVAL;
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:		/* fallthrough */
	case FAN_MARK_REMOVE:
		if (!mask)
			return -EINVAL;
		break;
	case FAN_MARK_FLUSH:
		if (flags & ~(FAN_MARK_MOUNT | FAN_MARK_FLUSH))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (mask & FAN_ONDIR) {
		flags |= FAN_MARK_ONDIR;
		mask &= ~FAN_ONDIR;
	}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
#else
	if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
#endif
		return -EINVAL;

	f = fdget(fanotify_fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an fanotify instance */
	ret = -EINVAL;
	if (unlikely(f.file->f_op != &fanotify_fops))
		goto fput_and_out;
	group = f.file->private_data;

	/*
	 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF. These are not
	 * allowed to set permissions events.
	 */
	ret = -EINVAL;
	if (mask & FAN_ALL_PERM_EVENTS &&
	    group->priority == FS_PRIO_0)
		goto fput_and_out;

	if (flags & FAN_MARK_FLUSH) {
		ret = 0;
		if (flags & FAN_MARK_MOUNT)
			fsnotify_clear_vfsmount_marks_by_group(group);
		else
			fsnotify_clear_inode_marks_by_group(group);
		goto fput_and_out;
	}

	ret = fanotify_find_path(dfd, pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	if (!(flags & FAN_MARK_MOUNT))
		inode = path.dentry->d_inode;
	else
		mnt = path.mnt;

	/* create/update an inode mark */
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE)) {
	case FAN_MARK_ADD:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_add_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_REMOVE:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
		break;
	default:
		ret = -EINVAL;
	}

	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}
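
/*
 * For illustration only, a userspace sketch of adding marks (not part of this
 * file; the paths are arbitrary examples): a mount mark for open/close events
 * under /home, plus a permission mark on a single file, which requires a
 * FAN_CLASS_CONTENT or FAN_CLASS_PRE_CONTENT group.
 *
 *	fanotify_mark(fanotify_fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
 *		      FAN_OPEN | FAN_CLOSE_WRITE, AT_FDCWD, "/home");
 *	fanotify_mark(fanotify_fd, FAN_MARK_ADD,
 *		      FAN_OPEN_PERM, AT_FDCWD, "/etc/passwd");
 */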
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE6(fanotify_mark,
				int, fanotify_fd, unsigned int, flags,
				__u32, mask0, __u32, mask1, int, dfd,
				const char  __user *, pathname)
{
	return sys_fanotify_mark(fanotify_fd, flags,
#ifdef __BIG_ENDIAN
				((__u64)mask0 << 32) | mask1,
#else
				((__u64)mask1 << 32) | mask0,
#endif
				 dfd, pathname);
}
#endif
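
/*
 * For illustration, assuming a little-endian machine: a compat (32-bit)
 * caller passing mask = FAN_OPEN | FAN_CLOSE_WRITE (0x28) hands the kernel
 * mask0 = 0x00000028 (low word) and mask1 = 0x00000000 (high word), and the
 * expression above reassembles the original 64-bit mask.
 */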
/*
 * fanotify_user_setup - Our initialization function. Note that we cannot return
 * error because we have compiled-in VFS hooks. So an (unlikely) failure here
 * must result in panic().
 */
static int __init fanotify_user_setup(void)
{
	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
	fanotify_event_cachep = KMEM_CACHE(fanotify_event_info, SLAB_PANIC);
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	fanotify_perm_event_cachep = KMEM_CACHE(fanotify_perm_event_info,
						SLAB_PANIC);
#endif

	return 0;
}
device_initcall(fanotify_user_setup);