// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 */

#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* fs_initcall */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/sched/signal.h>
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/memcontrol.h>
#include <linux/security.h>

#include "inotify.h"
#include "../fdinfo.h"

#include <asm/ioctls.h>

/*
 * An inotify watch requires allocating an inotify_inode_mark structure as
 * well as pinning the watched inode. Doubling the size of a VFS inode
 * should be more than enough to cover the additional filesystem inode
 * size increase.
 */
#define INOTIFY_WATCH_COST	(sizeof(struct inotify_inode_mark) + \
				 2 * sizeof(struct inode))

/* configurable via /proc/sys/fs/inotify/ */
static int inotify_max_queued_events __read_mostly;

struct kmem_cache *inotify_inode_mark_cachep __read_mostly;

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static long it_zero = 0;
static long it_int_max = INT_MAX;

struct ctl_table inotify_table[] = {
	{
		.procname	= "max_user_instances",
		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES],
		.maxlen		= sizeof(long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra1		= &it_zero,
		.extra2		= &it_int_max,
	},
	{
		.procname	= "max_user_watches",
		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES],
		.maxlen		= sizeof(long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra1		= &it_zero,
		.extra2		= &it_int_max,
	},
	{
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO
	},
	{ }
};
#endif /* CONFIG_SYSCTL */

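/*
 * These limits are runtime-tunable. A minimal sketch of how an administrator
 * might inspect and raise them from userspace (the values shown are
 * illustrative, not the defaults set in inotify_user_setup() below):
 *
 *	cat /proc/sys/fs/inotify/max_user_watches
 *	echo 524288 > /proc/sys/fs/inotify/max_user_watches
 *	sysctl fs.inotify.max_user_instances=256
 */
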
static inline __u32 inotify_arg_to_mask(struct inode *inode, u32 arg)
{
	__u32 mask;

	/*
	 * Everything should accept their own ignored and should receive events
	 * when the inode is unmounted. All directories care about children.
	 */
	mask = (FS_IN_IGNORED | FS_UNMOUNT);
	if (S_ISDIR(inode->i_mode))
		mask |= FS_EVENT_ON_CHILD;

	/* mask off the flags used to open the fd */
	mask |= (arg & INOTIFY_USER_MASK);

	return mask;
}

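/*
 * For example, given the IN_* to FS_* bit identities asserted in
 * inotify_user_setup() below: arg == IN_MODIFY on a directory yields
 *	FS_MODIFY | FS_EVENT_ON_CHILD | FS_IN_IGNORED | FS_UNMOUNT
 * while the same arg on a regular file yields only
 *	FS_MODIFY | FS_IN_IGNORED | FS_UNMOUNT
 */
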
static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}

/* inotify userspace file descriptor functions */
static __poll_t inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	__poll_t ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	spin_lock(&group->notification_lock);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&group->notification_lock);

	return ret;
}

static int round_event_name_len(struct fsnotify_event *fsn_event)
{
	struct inotify_event_info *event;

	event = INOTIFY_E(fsn_event);
	if (!event->name_len)
		return 0;
	return roundup(event->name_len + 1, sizeof(struct inotify_event));
}

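/*
 * Worked example: sizeof(struct inotify_event) is 16 bytes (wd, mask,
 * cookie, len), so a 5-byte name rounds as roundup(5 + 1, 16) == 16 and a
 * 20-byte name as roundup(20 + 1, 16) == 32. The "+ 1" guarantees room for
 * the NUL terminator before padding.
 */
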
/*
 * Get an inotify_kernel_event if one exists and is small
 * enough to fit in "count". Return an error pointer if
 * not large enough.
 *
 * Called with the group->notification_lock held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	event = fsnotify_peek_first_event(group);
	if (!event)
		return NULL;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	event_size += round_event_name_len(event);
	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/*
	 * Held the notification_lock the whole time, so this is the
	 * same event we peeked above.
	 */
	fsnotify_remove_first_event(group);

	return event;
}

/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *fsn_event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct inotify_event_info *event;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len;
	size_t pad_name_len;

	pr_debug("%s: group=%p event=%p\n", __func__, group, fsn_event);

	event = INOTIFY_E(fsn_event);
	name_len = event->name_len;
	/*
	 * round up name length so it is a multiple of event_size
	 * plus an extra byte for the terminating '\0'.
	 */
	pad_name_len = round_event_name_len(fsn_event);
	inotify_event.len = pad_name_len;
	inotify_event.mask = inotify_mask_to_arg(event->mask);
	inotify_event.wd = event->wd;
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the pathname, so here we have to send the pathname
	 * and then pad that pathname out to a multiple of sizeof(inotify_event)
	 * with zeros.
	 */
	if (pad_name_len) {
		/* copy the path name */
		if (copy_to_user(buf, event->name, name_len))
			return -EFAULT;
		buf += name_len;

		/* fill userspace with 0's */
		if (clear_user(buf, pad_name_len - name_len))
			return -EFAULT;
		event_size += pad_name_len;
	}

	return event_size;
}

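/*
 * The resulting record layout in the user buffer, e.g. for a watch hit on
 * the name "a.txt" (name_len 5, padded to 16):
 *
 *	bytes  0..15: struct inotify_event { wd, mask, cookie, len = 16 }
 *	bytes 16..20: "a.txt"
 *	bytes 21..31: '\0' padding
 */
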
static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	start = buf;
	group = file->private_data;

	add_wait_queue(&group->notification_waitq, &wait);
	while (1) {
		spin_lock(&group->notification_lock);
		kevent = get_one_event(group, count);
		spin_unlock(&group->notification_lock);

		pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_destroy_event(group, kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&group->notification_waitq, &wait);

	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}

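/*
 * A minimal userspace consumer of this format might look like the sketch
 * below (illustrative only; buffer size and error handling are the
 * caller's choice):
 *
 *	char buf[4096];
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *	for (char *p = buf; p < buf + len; ) {
 *		struct inotify_event *ev = (struct inotify_event *)p;
 *		printf("wd=%d mask=%x name=%s\n", ev->wd, ev->mask,
 *		       ev->len ? ev->name : "");
 *		p += sizeof(*ev) + ev->len;
 *	}
 */
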
static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	/* free this group, matching get was inotify_init->fsnotify_obtain_group */
	fsnotify_destroy_group(group);

	return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);

	switch (cmd) {
	case FIONREAD:
		spin_lock(&group->notification_lock);
		list_for_each_entry(fsn_event, &group->notification_list,
				    list) {
			send_len += sizeof(struct inotify_event);
			send_len += round_event_name_len(fsn_event);
		}
		spin_unlock(&group->notification_lock);
		ret = put_user(send_len, (int __user *) p);
		break;
#ifdef CONFIG_CHECKPOINT_RESTORE
	case INOTIFY_IOC_SETNEXTWD:
		ret = -EINVAL;
		if (arg >= 1 && arg <= INT_MAX) {
			struct inotify_group_private_data *data;

			data = &group->inotify_data;
			spin_lock(&data->idr_lock);
			idr_set_cursor(&data->idr, (unsigned int)arg);
			spin_unlock(&data->idr_lock);
			ret = 0;
		}
		break;
#endif /* CONFIG_CHECKPOINT_RESTORE */
	}

	return ret;
}

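/*
 * FIONREAD lets a reader size its buffer before calling read(). A hedged
 * userspace sketch:
 *
 *	int pending;
 *	if (ioctl(fd, FIONREAD, &pending) == 0 && pending > 0)
 *		... call read() with a buffer of at least "pending" bytes ...
 */
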
static const struct file_operations inotify_fops = {
	.show_fdinfo	= inotify_show_fdinfo,
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= fsnotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
	.llseek		= noop_llseek,
};

/*
 * inotify_find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path,
			      unsigned int flags, __u64 mask)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = path_permission(path, MAY_READ);
	if (error) {
		path_put(path);
		return error;
	}
	error = security_path_notify(path, mask,
				     FSNOTIFY_OBJ_TYPE_INODE);
	if (error)
		path_put(path);

	return error;
}

static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
			      struct inotify_inode_mark *i_mark)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(idr_lock);

	ret = idr_alloc_cyclic(idr, i_mark, 1, 0, GFP_NOWAIT);
	if (ret >= 0) {
		/* we added the mark to the idr, take a reference */
		i_mark->wd = ret;
		fsnotify_get_mark(&i_mark->fsn_mark);
	}

	spin_unlock(idr_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
							  int wd)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *i_mark;

	assert_spin_locked(idr_lock);

	i_mark = idr_find(idr, wd);
	if (i_mark) {
		struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;

		fsnotify_get_mark(fsn_mark);
		/* One ref for being in the idr, one ref we just took */
		BUG_ON(refcount_read(&fsn_mark->refcnt) < 2);
	}

	return i_mark;
}

static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
						   int wd)
{
	struct inotify_inode_mark *i_mark;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	spin_lock(idr_lock);
	i_mark = inotify_idr_find_locked(group, wd);
	spin_unlock(idr_lock);

	return i_mark;
}

/*
 * Remove the mark from the idr (if present) and drop the reference
 * on the mark because it was in the idr.
 */
static void inotify_remove_from_idr(struct fsnotify_group *group,
				    struct inotify_inode_mark *i_mark)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *found_i_mark = NULL;
	int wd;

	spin_lock(idr_lock);
	wd = i_mark->wd;

	/*
	 * does this i_mark think it is in the idr? we shouldn't get called
	 * if it wasn't....
	 */
	if (wd == -1) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/* Let's look in the idr to see if we find it */
	found_i_mark = inotify_idr_find_locked(group, wd);
	if (unlikely(!found_i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * We found a mark in the idr at the right wd, but it's
	 * not the mark we were told to remove. eparis seriously
	 * fucked up somewhere.
	 */
	if (unlikely(found_i_mark != i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
			"found_i_mark=%p found_i_mark->wd=%d "
			"found_i_mark->group=%p\n", __func__, i_mark,
			i_mark->wd, i_mark->fsn_mark.group, found_i_mark,
			found_i_mark->wd, found_i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * One ref for being in the idr,
	 * one ref grabbed by inotify_idr_find.
	 */
	if (unlikely(refcount_read(&i_mark->fsn_mark.refcnt) < 2)) {
		printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		/* we can't really recover with bad ref counting.. */
		BUG();
	}

	idr_remove(idr, wd);
	/* Removed from the idr, drop that ref. */
	fsnotify_put_mark(&i_mark->fsn_mark);
out:
	i_mark->wd = -1;
	spin_unlock(idr_lock);
	/* match the ref taken by inotify_idr_find_locked() */
	if (found_i_mark)
		fsnotify_put_mark(&found_i_mark->fsn_mark);
}

/*
 * Send IN_IGNORED for this wd, remove this wd from the idr.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
				    struct fsnotify_group *group)
{
	struct inotify_inode_mark *i_mark;

	/* Queue ignore event for the watch */
	inotify_handle_inode_event(fsn_mark, FS_IN_IGNORED, NULL, NULL, NULL,
				   0);

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
	/* remove this mark from the idr */
	inotify_remove_from_idr(group, i_mark);

	dec_inotify_watches(group->inotify_data.ucounts);
}

static int inotify_update_existing_watch(struct fsnotify_group *group,
					 struct inode *inode,
					 u32 arg)
{
	struct fsnotify_mark *fsn_mark;
	struct inotify_inode_mark *i_mark;
	__u32 old_mask, new_mask;
	__u32 mask;
	int add = (arg & IN_MASK_ADD);
	int create = (arg & IN_MASK_CREATE);
	int ret;

	mask = inotify_arg_to_mask(inode, arg);

	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
	if (!fsn_mark)
		return -ENOENT;
	else if (create) {
		ret = -EEXIST;
		goto out;
	}

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	spin_lock(&fsn_mark->lock);
	old_mask = fsn_mark->mask;
	if (add)
		fsn_mark->mask |= mask;
	else
		fsn_mark->mask = mask;
	new_mask = fsn_mark->mask;
	spin_unlock(&fsn_mark->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this fsn_mark than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);

		/* update the inode with this new fsn_mark */
		if (dropped || do_inode)
			fsnotify_recalc_mask(inode->i_fsnotify_marks);
	}

	/* return the wd */
	ret = i_mark->wd;

out:
	/* match the get from fsnotify_find_mark() */
	fsnotify_put_mark(fsn_mark);

	return ret;
}

static int inotify_new_watch(struct fsnotify_group *group,
			     struct inode *inode,
			     u32 arg)
{
	struct inotify_inode_mark *tmp_i_mark;
	__u32 mask;
	int ret;
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	mask = inotify_arg_to_mask(inode, arg);

	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!tmp_i_mark))
		return -ENOMEM;

	fsnotify_init_mark(&tmp_i_mark->fsn_mark, group);
	tmp_i_mark->fsn_mark.mask = mask;
	tmp_i_mark->wd = -1;

	ret = inotify_add_to_idr(idr, idr_lock, tmp_i_mark);
	if (ret)
		goto out_err;

	/* increment the number of watches the user has */
	if (!inc_inotify_watches(group->inotify_data.ucounts)) {
		inotify_remove_from_idr(group, tmp_i_mark);
		ret = -ENOSPC;
		goto out_err;
	}

	/* we are on the idr, now get on the inode */
	ret = fsnotify_add_inode_mark_locked(&tmp_i_mark->fsn_mark, inode, 0);
	if (ret) {
		/* we failed to get on the inode, get off the idr */
		inotify_remove_from_idr(group, tmp_i_mark);
		goto out_err;
	}

	/* return the watch descriptor for this new mark */
	ret = tmp_i_mark->wd;

out_err:
	/* match the ref from fsnotify_init_mark() */
	fsnotify_put_mark(&tmp_i_mark->fsn_mark);

	return ret;
}

static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	int ret = 0;

	mutex_lock(&group->mark_mutex);
	/* try to update an existing watch with the new arg */
	ret = inotify_update_existing_watch(group, inode, arg);
	/* no mark present, try to add a new one */
	if (ret == -ENOENT)
		ret = inotify_new_watch(group, inode, arg);
	mutex_unlock(&group->mark_mutex);

	return ret;
}

static struct fsnotify_group *inotify_new_group(unsigned int max_events)
{
	struct fsnotify_group *group;
	struct inotify_event_info *oevent;

	group = fsnotify_alloc_user_group(&inotify_fsnotify_ops);
	if (IS_ERR(group))
		return group;

	oevent = kmalloc(sizeof(struct inotify_event_info), GFP_KERNEL_ACCOUNT);
	if (unlikely(!oevent)) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-ENOMEM);
	}
	group->overflow_event = &oevent->fse;
	fsnotify_init_event(group->overflow_event);
	oevent->mask = FS_Q_OVERFLOW;
	oevent->wd = 0;
	oevent->sync_cookie = 0;
	oevent->name_len = 0;

	group->max_events = max_events;
	group->memcg = get_mem_cgroup_from_mm(current->mm);

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.ucounts = inc_ucount(current_user_ns(),
						 current_euid(),
						 UCOUNT_INOTIFY_INSTANCES);

	if (!group->inotify_data.ucounts) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-EMFILE);
	}

	return group;
}

/* inotify syscalls */
static int do_inotify_init(int flags)
{
	struct fsnotify_group *group;
	int ret;

	/* Check the IN_* constants for consistency. */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
	group = inotify_new_group(inotify_max_queued_events);
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = anon_inode_getfd("inotify", &inotify_fops, group,
			       O_RDONLY | flags);
	if (ret < 0)
		fsnotify_destroy_group(group);

	return ret;
}

SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	return do_inotify_init(flags);
}

SYSCALL_DEFINE0(inotify_init)
{
	return do_inotify_init(0);
}

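/*
 * Typical userspace lifecycle of the fd created here (a hedged sketch;
 * "/tmp" and the event mask are arbitrary illustration):
 *
 *	int fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
 *	int wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
 *	... read(fd, ...) as in the sketch above inotify_release() ...
 *	inotify_rm_watch(fd, wd);
 *	close(fd);
 */
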
SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct fd f;
	int ret;
	unsigned flags = 0;

	/*
	 * We share a lot of code with fs/dnotify. We also share
	 * the bit layout between inotify's IN_* and the fsnotify
	 * FS_*. This check ensures that only the inotify IN_*
	 * bits get passed in and set in watches/events.
	 */
	if (unlikely(mask & ~ALL_INOTIFY_BITS))
		return -EINVAL;
	/*
	 * Require at least one valid bit set in the mask.
	 * Without _something_ set, we would have no events to
	 * watch for.
	 */
	if (unlikely(!(mask & ALL_INOTIFY_BITS)))
		return -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */
	if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	/* verify that this is indeed an inotify instance */
	if (unlikely(f.file->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags,
				 (mask & IN_ALL_EVENTS));
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	inode = path.dentry->d_inode;
	group = f.file->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}

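/*
 * Examples of the mask flags handled above (illustrative userspace calls):
 *
 *	inotify_add_watch(fd, "/etc", IN_MODIFY | IN_ONLYDIR);
 *		fails with ENOTDIR if /etc is not a directory
 *	inotify_add_watch(fd, "/etc/passwd", IN_MODIFY | IN_MASK_CREATE);
 *		fails with EEXIST if a watch already exists on that inode
 */
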
SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct inotify_inode_mark *i_mark;
	struct fd f;
	int ret = -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(f.file->f_op != &inotify_fops))
		goto out;

	group = f.file->private_data;

	i_mark = inotify_idr_find(group, wd);
	if (unlikely(!i_mark))
		goto out;

	ret = 0;

	fsnotify_destroy_mark(&i_mark->fsn_mark, group);

	/* match ref taken by inotify_idr_find */
	fsnotify_put_mark(&i_mark->fsn_mark);

out:
	fdput(f);
	return ret;
}

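/*
 * Note for userspace consumers: a successful inotify_rm_watch() queues a
 * final IN_IGNORED event for the wd (via inotify_ignored_and_remove_idr()
 * above), so a reader should expect one trailing record such as:
 *
 *	wd=1 mask=IN_IGNORED len=0
 */
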
/*
 * inotify_user_setup - Our initialization function. Note that we cannot return
 * error because we have compiled-in VFS hooks. So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	unsigned long watches_max;
	struct sysinfo si;

	si_meminfo(&si);
	/*
	 * Allow up to 1% of addressable memory to be allocated for inotify
	 * watches (per user) limited to the range [8192, 1048576].
	 */
	watches_max = (((si.totalram - si.totalhigh) / 100) << PAGE_SHIFT) /
			INOTIFY_WATCH_COST;
	watches_max = clamp(watches_max, 8192UL, 1048576UL);

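	/*
	 * Worked example (assumed sizes, purely illustrative): with 8 GiB of
	 * lowmem and an INOTIFY_WATCH_COST of roughly 1 KiB, 1% is about
	 * 80 MiB, giving ~80000 watches, inside the [8192, 1048576] clamp.
	 */
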
	BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(IN_OPEN != FS_OPEN);
	BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(IN_CREATE != FS_CREATE);
	BUILD_BUG_ON(IN_DELETE != FS_DELETE);
	BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
	BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
	BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
	BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
	BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);

	BUILD_BUG_ON(HWEIGHT32(ALL_INOTIFY_BITS) != 22);

	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark,
					       SLAB_PANIC|SLAB_ACCOUNT);

	inotify_max_queued_events = 16384;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES] = 128;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES] = watches_max;

	return 0;
}
fs_initcall(inotify_user_setup);