/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
*/
11 #include <linux/pagemap.h>
12 #include <linux/file.h>
13 #include <linux/fs_context.h>
14 #include <linux/sched.h>
15 #include <linux/namei.h>
16 #include <linux/slab.h>
17 #include <linux/xattr.h>
18 #include <linux/iversion.h>
19 #include <linux/posix_acl.h>
/*
 * Hint that the next readdir on @dir should use READDIRPLUS: set the
 * FUSE_I_ADVISE_RDPLUS bit in the directory's fuse_inode state bitmap.
 * NOTE(review): source appears elided (braces/blank lines missing throughout
 * this file); code lines are preserved byte-for-byte.
 */
21 static void fuse_advise_use_readdirplus(struct inode *dir)
23 struct fuse_inode *fi = get_fuse_inode(dir);
25 set_bit(FUSE_I_ADVISE_RDPLUS, &fi->state);
/*
 * Dentry timeout storage.  On 64-bit builds the 64-bit jiffies deadline is
 * packed directly into the pointer-sized dentry->d_fsdata; on 32-bit builds
 * d_fsdata instead points to a separately allocated union fuse_dentry
 * (see fuse_dentry_init) that holds the 64-bit time.
 * NOTE(review): the #else/#endif lines are not visible in this view.
 */
28 #if BITS_PER_LONG >= 64
29 static inline void __fuse_dentry_settime(struct dentry *entry, u64 time)
/* store the timeout directly in the pointer field */
31 entry->d_fsdata = (void *) time;
34 static inline u64 fuse_dentry_time(const struct dentry *entry)
36 return (u64)entry->d_fsdata;
/* 32-bit variants: go through the heap-allocated union fuse_dentry */
45 static inline void __fuse_dentry_settime(struct dentry *dentry, u64 time)
47 ((union fuse_dentry *) dentry->d_fsdata)->time = time;
50 static inline u64 fuse_dentry_time(const struct dentry *entry)
52 return ((union fuse_dentry *) entry->d_fsdata)->time;
/*
 * Set the dentry validity deadline and, as an optimization, toggle
 * DCACHE_OP_DELETE under d_lock: only stale (time == 0) dentries on
 * connections with delete_stale set need the ->d_delete callback, and
 * dput() is faster without it.  Races are tolerated (pure optimization).
 */
56 static void fuse_dentry_settime(struct dentry *dentry, u64 time)
58 struct fuse_conn *fc = get_fuse_conn_super(dentry->d_sb);
59 bool delete = !time && fc->delete_stale;
61 * Mess with DCACHE_OP_DELETE because dput() will be faster without it.
62 * Don't care about races, either way it's just an optimization
/* flip the flag only when its current state disagrees with @delete */
64 if ((!delete && (dentry->d_flags & DCACHE_OP_DELETE)) ||
65 (delete && !(dentry->d_flags & DCACHE_OP_DELETE))) {
66 spin_lock(&dentry->d_lock);
68 dentry->d_flags &= ~DCACHE_OP_DELETE;
70 dentry->d_flags |= DCACHE_OP_DELETE;
71 spin_unlock(&dentry->d_lock);
74 __fuse_dentry_settime(dentry, time);
78 * FUSE caches dentries and attributes with separate timeout. The
79 * time in jiffies until the dentry/attributes are valid is stored in
80 * dentry->d_fsdata and fuse_inode->i_time respectively.
84 * Calculate the time in jiffies until a dentry/attributes are valid
/*
 * Convert a relative (sec, nsec) validity interval into an absolute
 * jiffies deadline.  nsec is clamped to a legal timespec range.
 * NOTE(review): the .tv_sec initializer line is elided in this view.
 */
86 static u64 time_to_jiffies(u64 sec, u32 nsec)
89 struct timespec64 ts = {
91 min_t(u32, nsec, NSEC_PER_SEC - 1)
94 return get_jiffies_64() + timespec64_to_jiffies(&ts);
100 * Set dentry and possibly attribute timeouts from the lookup/mk*
/* Set the dentry validity deadline from a server fuse_entry_out reply. */
103 void fuse_change_entry_timeout(struct dentry *entry, struct fuse_entry_out *o)
105 fuse_dentry_settime(entry,
106 time_to_jiffies(o->entry_valid, o->entry_valid_nsec));
/* Attribute validity deadline from a GETATTR/SETATTR style reply. */
109 static u64 attr_timeout(struct fuse_attr_out *o)
111 return time_to_jiffies(o->attr_valid, o->attr_valid_nsec);
/* Attribute validity deadline carried inside a lookup-style reply. */
114 u64 entry_attr_timeout(struct fuse_entry_out *o)
116 return time_to_jiffies(o->attr_valid, o->attr_valid_nsec);
/* Mark the attribute fields named by @mask (STATX_* bits) as stale. */
119 static void fuse_invalidate_attr_mask(struct inode *inode, u32 mask)
121 set_mask_bits(&get_fuse_inode(inode)->inval_mask, 0, mask);
125 * Mark the attributes as stale, so that at the next call to
126 * ->getattr() they will be fetched from userspace
128 void fuse_invalidate_attr(struct inode *inode)
130 fuse_invalidate_attr_mask(inode, STATX_BASIC_STATS);
/* Directory contents changed: drop cached attrs and bump i_version. */
133 static void fuse_dir_changed(struct inode *dir)
135 fuse_invalidate_attr(dir);
136 inode_maybe_inc_iversion(dir, false);
140 * Mark the attributes as stale due to an atime change. Avoid the invalidate if
/* On read-only mounts atime cannot change, so skip the invalidate. */
143 void fuse_invalidate_atime(struct inode *inode)
145 if (!IS_RDONLY(inode))
146 fuse_invalidate_attr_mask(inode, STATX_ATIME);
150 * Just mark the entry as stale, so that a next attempt to look it up
151 * will result in a new lookup call to userspace
153 * This is called when a dentry is about to become negative and the
154 * timeout is unknown (unlink, rmdir, rename and in some cases
/* Timeout 0 == already expired: forces a fresh LOOKUP next time. */
157 void fuse_invalidate_entry_cache(struct dentry *entry)
159 fuse_dentry_settime(entry, 0);
163 * Same as fuse_invalidate_entry_cache(), but also try to remove the
164 * dentry from the hash
/* NOTE(review): the unhash/d_invalidate call is elided in this view. */
166 static void fuse_invalidate_entry(struct dentry *entry)
169 fuse_invalidate_entry_cache(entry);
/*
 * Fill @args/@outarg for a FUSE_LOOKUP request of @name under @nodeid.
 * The input is the NUL-terminated name; the output is a fuse_entry_out.
 */
172 static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args,
173 u64 nodeid, const struct qstr *name,
174 struct fuse_entry_out *outarg)
176 memset(outarg, 0, sizeof(struct fuse_entry_out));
177 args->opcode = FUSE_LOOKUP;
178 args->nodeid = nodeid;
179 args->in_numargs = 1;
/* +1: include the terminating NUL in the transmitted name */
180 args->in_args[0].size = name->len + 1;
181 args->in_args[0].value = name->name;
182 args->out_numargs = 1;
183 args->out_args[0].size = sizeof(struct fuse_entry_out);
184 args->out_args[0].value = outarg;
188 * Check whether the dentry is still valid
190 * If the entry validity timeout has expired and the dentry is
191 * positive, try to redo the lookup. If the lookup results in a
192 * different inode, then let the VFS invalidate the dentry and redo
193 * the lookup once more. If the lookup results in the same inode,
194 * then refresh the attributes, timeouts and mark the dentry valid.
/*
 * NOTE(review): several statements (return values, goto labels, early-out
 * paths) are elided in this view; visible code preserved byte-for-byte.
 */
196 static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
199 struct dentry *parent;
200 struct fuse_mount *fm;
201 struct fuse_inode *fi;
204 inode = d_inode_rcu(entry);
/* A known-bad inode invalidates the dentry immediately. */
205 if (inode && fuse_is_bad(inode))
/* Expired timeout, or lookup flags that demand revalidation. */
207 else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
208 (flags & (LOOKUP_EXCL | LOOKUP_REVAL | LOOKUP_RENAME_TARGET))) {
209 struct fuse_entry_out outarg;
211 struct fuse_forget_link *forget;
214 /* For negative dentries, always do a fresh lookup */
/* Cannot issue a request under RCU-walk; bail to ref-walk. */
219 if (flags & LOOKUP_RCU)
222 fm = get_fuse_mount(inode);
224 forget = fuse_alloc_forget();
229 attr_version = fuse_get_attr_version(fm->fc);
231 parent = dget_parent(entry);
232 fuse_lookup_init(fm->fc, &args, get_node_id(d_inode(parent)),
233 &entry->d_name, &outarg);
234 ret = fuse_simple_request(fm, &args);
236 /* Zero nodeid is same as -ENOENT */
237 if (!ret && !outarg.nodeid)
240 fi = get_fuse_inode(inode);
/* Different nodeid or submount-ness changed: dentry is stale. */
241 if (outarg.nodeid != get_node_id(inode) ||
242 (bool) IS_AUTOMOUNT(inode) != (bool) (outarg.attr.flags & FUSE_ATTR_SUBMOUNT)) {
243 fuse_queue_forget(fm->fc, forget,
247 spin_lock(&fi->lock);
249 spin_unlock(&fi->lock);
/* Transient errors: keep the dentry rather than invalidating. */
252 if (ret == -ENOMEM || ret == -EINTR)
254 if (ret || fuse_invalid_attr(&outarg.attr) ||
255 fuse_stale_inode(inode, outarg.generation, &outarg.attr))
/* Same inode confirmed: refresh attrs, ACLs and both timeouts. */
258 forget_all_cached_acls(inode);
259 fuse_change_attributes(inode, &outarg.attr,
260 entry_attr_timeout(&outarg),
262 fuse_change_entry_timeout(entry, &outarg);
264 fi = get_fuse_inode(inode);
265 if (flags & LOOKUP_RCU) {
266 if (test_bit(FUSE_I_INIT_RDPLUS, &fi->state))
268 } else if (test_and_clear_bit(FUSE_I_INIT_RDPLUS, &fi->state)) {
269 parent = dget_parent(entry);
270 fuse_advise_use_readdirplus(d_inode(parent));
/*
 * 32-bit only: allocate/free the union fuse_dentry that backs d_fsdata
 * (see the dentry-time helpers above).  fuse_dentry_delete tells dcache
 * to drop dentries whose validity deadline has already passed.
 * NOTE(review): the kfree in fuse_dentry_release is elided in this view.
 */
283 #if BITS_PER_LONG < 64
284 static int fuse_dentry_init(struct dentry *dentry)
286 dentry->d_fsdata = kzalloc(sizeof(union fuse_dentry),
287 GFP_KERNEL_ACCOUNT | __GFP_RECLAIMABLE);
289 return dentry->d_fsdata ? 0 : -ENOMEM;
291 static void fuse_dentry_release(struct dentry *dentry)
293 union fuse_dentry *fd = dentry->d_fsdata;
/* ->d_delete: nonzero means "drop this dentry on last dput" */
299 static int fuse_dentry_delete(const struct dentry *dentry)
301 return time_before64(fuse_dentry_time(dentry), get_jiffies_64());
305 * Create a fuse_mount object with a new superblock (with path->dentry
306 * as the root), and return that mount so it can be auto-mounted on
/*
 * ->d_automount for FUSE submounts: builds an fs_context, a fuse_mount
 * sharing the parent's fuse_conn, fills a fresh superblock whose root is
 * the mountpoint inode, registers it on fc->mounts, then creates the
 * vfsmount.  NOTE(review): error-handling paths and returns are elided
 * in this view; visible code preserved byte-for-byte.
 */
309 static struct vfsmount *fuse_dentry_automount(struct path *path)
311 struct fs_context *fsc;
312 struct fuse_mount *parent_fm = get_fuse_mount_super(path->mnt->mnt_sb);
313 struct fuse_conn *fc = parent_fm->fc;
314 struct fuse_mount *fm;
315 struct vfsmount *mnt;
316 struct fuse_inode *mp_fi = get_fuse_inode(d_inode(path->dentry));
317 struct super_block *sb;
320 fsc = fs_context_for_submount(path->mnt->mnt_sb->s_type, path->dentry);
327 fm = kzalloc(sizeof(struct fuse_mount), GFP_KERNEL);
331 refcount_set(&fm->count, 1);
/* always get a fresh anonymous superblock for the submount */
333 sb = sget_fc(fsc, NULL, set_anon_super_fc);
339 fm->fc = fuse_conn_get(fc);
341 /* Initialize superblock, making @mp_fi its root */
342 err = fuse_fill_super_submount(sb, mp_fi);
346 sb->s_fs_info = NULL;
/* publish the new mount on the connection's mount list */
350 down_write(&fc->killsb);
351 list_add_tail(&fm->fc_entry, &fc->mounts);
352 up_write(&fc->killsb);
354 sb->s_flags |= SB_ACTIVE;
355 fsc->root = dget(sb->s_root);
358 * FIXME: setting SB_BORN requires a write barrier for
359 * super_cache_count(). We should actually come
360 * up with a proper ->get_tree() implementation
361 * for submounts and call vfs_get_tree() to take
362 * care of the write barrier.
365 sb->s_flags |= SB_BORN;
367 /* We are done configuring the superblock, so unlock it */
368 up_write(&sb->s_umount);
370 /* Create the submount */
371 mnt = vfs_create_mount(fsc);
382 * Only jump here when fsc->root is NULL and sb is still locked
383 * (otherwise put_fs_context() will put the superblock)
385 deactivate_locked_super(sb);
/*
 * dcache callbacks for regular FUSE dentries; the 32-bit-only d_init /
 * d_release pair manages the heap-backed timeout storage.  The root
 * dentry gets a reduced table with no revalidate/delete/automount.
 * NOTE(review): #endif and closing "};" lines are elided in this view.
 */
392 const struct dentry_operations fuse_dentry_operations = {
393 .d_revalidate = fuse_dentry_revalidate,
394 .d_delete = fuse_dentry_delete,
395 #if BITS_PER_LONG < 64
396 .d_init = fuse_dentry_init,
397 .d_release = fuse_dentry_release,
399 .d_automount = fuse_dentry_automount,
402 const struct dentry_operations fuse_root_dentry_operations = {
403 #if BITS_PER_LONG < 64
404 .d_init = fuse_dentry_init,
405 .d_release = fuse_dentry_release,
409 int fuse_valid_type(int m)
411 return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) ||
412 S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m);
415 bool fuse_invalid_attr(struct fuse_attr *attr)
417 return !fuse_valid_type(attr->mode) ||
418 attr->size > LLONG_MAX;
/*
 * Perform a FUSE_LOOKUP of @name under @nodeid and instantiate the
 * resulting inode via fuse_iget().  Zero nodeid in the reply is treated
 * as -ENOENT but the returned entry timeout is still valid (negative
 * caching).  A pre-allocated forget link is queued if iget fails.
 * NOTE(review): error returns and goto labels are elided in this view.
 */
421 int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name,
422 struct fuse_entry_out *outarg, struct inode **inode)
424 struct fuse_mount *fm = get_fuse_mount_super(sb);
426 struct fuse_forget_link *forget;
/* refuse names longer than the protocol maximum */
432 if (name->len > FUSE_NAME_MAX)
436 forget = fuse_alloc_forget();
/* snapshot attr_version before the request to order attr updates */
441 attr_version = fuse_get_attr_version(fm->fc);
443 fuse_lookup_init(fm->fc, &args, nodeid, name, outarg);
444 err = fuse_simple_request(fm, &args);
445 /* Zero nodeid is same as -ENOENT, but with valid timeout */
446 if (err || !outarg->nodeid)
452 if (fuse_invalid_attr(&outarg->attr))
/* the root nodeid must always have generation zero */
454 if (outarg->nodeid == FUSE_ROOT_ID && outarg->generation != 0) {
455 pr_warn_once("root generation should be zero\n");
456 outarg->generation = 0;
459 *inode = fuse_iget(sb, outarg->nodeid, outarg->generation,
460 &outarg->attr, entry_attr_timeout(outarg),
/* iget failed: tell the server to drop its reference on the nodeid */
464 fuse_queue_forget(fm->fc, forget, outarg->nodeid, 1);
/*
 * ->lookup: resolve @entry in @dir via fuse_lookup_name(), splice the
 * resulting inode into the dcache, and set the entry timeout (or mark
 * the entry stale when the lookup was negative without a timeout).
 * NOTE(review): error paths and the return are elided in this view.
 */
475 static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
479 struct fuse_entry_out outarg;
481 struct dentry *newent;
482 bool outarg_valid = true;
485 if (fuse_is_bad(dir))
486 return ERR_PTR(-EIO);
/* serialize lookups per-directory when the connection asks for it */
488 locked = fuse_lock_inode(dir);
489 err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
491 fuse_unlock_inode(dir, locked);
492 if (err == -ENOENT) {
493 outarg_valid = false;
/* the root inode must never be reached through lookup */
500 if (inode && get_node_id(inode) == FUSE_ROOT_ID)
503 newent = d_splice_alias(inode, entry);
504 err = PTR_ERR(newent);
508 entry = newent ? newent : entry;
510 fuse_change_entry_timeout(entry, &outarg);
512 fuse_invalidate_entry_cache(entry);
515 fuse_advise_use_readdirplus(dir);
525 * Atomic create+open operation
527 * If the filesystem doesn't support this, then fall back to separate
528 * 'mknod' + 'open' requests.
/*
 * Sends FUSE_CREATE, instantiates the new inode and finishes the open in
 * one round trip.  On failure after the server-side open succeeded, the
 * file is released and a forget is queued so the server reference count
 * stays balanced.  NOTE(review): error labels/returns elided in this view.
 */
530 static int fuse_create_open(struct inode *dir, struct dentry *entry,
531 struct file *file, unsigned flags,
536 struct fuse_mount *fm = get_fuse_mount(dir);
538 struct fuse_forget_link *forget;
539 struct fuse_create_in inarg;
540 struct fuse_open_out outopen;
541 struct fuse_entry_out outentry;
542 struct fuse_inode *fi;
543 struct fuse_file *ff;
544 bool trunc = flags & O_TRUNC;
546 /* Userspace expects S_IFREG in create mode */
547 BUG_ON((mode & S_IFMT) != S_IFREG);
549 forget = fuse_alloc_forget();
555 ff = fuse_file_alloc(fm);
557 goto out_put_forget_req;
/* apply the umask locally unless the server handles it itself */
559 if (!fm->fc->dont_mask)
560 mode &= ~current_umask();
563 memset(&inarg, 0, sizeof(inarg));
564 memset(&outentry, 0, sizeof(outentry));
567 inarg.umask = current_umask();
568 args.opcode = FUSE_CREATE;
569 args.nodeid = get_node_id(dir);
571 args.in_args[0].size = sizeof(inarg);
572 args.in_args[0].value = &inarg;
573 args.in_args[1].size = entry->d_name.len + 1;
574 args.in_args[1].value = entry->d_name.name;
575 args.out_numargs = 2;
576 args.out_args[0].size = sizeof(outentry);
577 args.out_args[0].value = &outentry;
578 args.out_args[1].size = sizeof(outopen);
579 args.out_args[1].value = &outopen;
580 err = fuse_simple_request(fm, &args);
/* reply must describe a regular file with a sane nodeid and attrs */
585 if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid) ||
586 fuse_invalid_attr(&outentry.attr))
590 ff->nodeid = outentry.nodeid;
591 ff->open_flags = outopen.open_flags;
592 inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation,
593 &outentry.attr, entry_attr_timeout(&outentry), 0);
/* iget failed: release server-side open state and queue a forget */
595 flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
596 fuse_sync_release(NULL, ff, flags);
597 fuse_queue_forget(fm->fc, forget, outentry.nodeid, 1);
602 d_instantiate(entry, inode);
603 fuse_change_entry_timeout(entry, &outentry);
604 fuse_dir_changed(dir);
605 err = finish_open(file, entry, generic_file_open);
607 fi = get_fuse_inode(inode);
608 fuse_sync_release(fi, ff, flags);
610 file->private_data = ff;
611 fuse_finish_open(inode, file);
/* O_TRUNC with atomic truncation: page cache is now invalid */
612 if (fm->fc->atomic_o_trunc && trunc)
613 truncate_pagecache(inode, 0);
614 else if (!(ff->open_flags & FOPEN_KEEP_CACHE))
615 invalidate_inode_pages2(inode->i_mapping);
/* Forward declaration: the ENOSYS fallback below needs fuse_mknod(). */
627 static int fuse_mknod(struct inode *, struct dentry *, umode_t, dev_t);
/*
 * ->atomic_open: look the entry up if the VFS hasn't, then either fall
 * through to a plain open, or attempt FUSE_CREATE; if the server lacks
 * CREATE (-ENOSYS) fall back to mknod + open.
 * NOTE(review): gotos, returns and the no_open path are elided here.
 */
628 static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
629 struct file *file, unsigned flags,
633 struct fuse_conn *fc = get_fuse_conn(dir);
634 struct dentry *res = NULL;
636 if (fuse_is_bad(dir))
639 if (d_in_lookup(entry)) {
640 res = fuse_lookup(dir, entry, 0);
/* no O_CREAT or entry already exists: plain open path */
648 if (!(flags & O_CREAT) || d_really_is_positive(entry))
652 file->f_mode |= FMODE_CREATED;
657 err = fuse_create_open(dir, entry, file, flags, mode);
658 if (err == -ENOSYS) {
/* server has no CREATE: emulate with mknod followed by open */
667 err = fuse_mknod(dir, entry, mode, 0);
671 return finish_no_open(file, res);
675 * Code shared between mknod, mkdir, symlink and link
/*
 * Send the prepared request in @args, validate the returned entry
 * (nodeid, attrs, and that the file type matches @mode), instantiate the
 * inode and dentry, and set timeouts.  A forget is queued if iget fails.
 * NOTE(review): error returns and some branches are elided in this view.
 */
677 static int create_new_entry(struct fuse_mount *fm, struct fuse_args *args,
678 struct inode *dir, struct dentry *entry,
681 struct fuse_entry_out outarg;
685 struct fuse_forget_link *forget;
687 if (fuse_is_bad(dir))
690 forget = fuse_alloc_forget();
694 memset(&outarg, 0, sizeof(outarg));
695 args->nodeid = get_node_id(dir);
696 args->out_numargs = 1;
697 args->out_args[0].size = sizeof(outarg);
698 args->out_args[0].value = &outarg;
699 err = fuse_simple_request(fm, args);
701 goto out_put_forget_req;
704 if (invalid_nodeid(outarg.nodeid) || fuse_invalid_attr(&outarg.attr))
705 goto out_put_forget_req;
/* server must return the same file type that was requested */
707 if ((outarg.attr.mode ^ mode) & S_IFMT)
708 goto out_put_forget_req;
710 inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation,
711 &outarg.attr, entry_attr_timeout(&outarg), 0);
713 fuse_queue_forget(fm->fc, forget, outarg.nodeid, 1);
/* an existing alias may be returned; set the timeout on whichever wins */
719 d = d_splice_alias(inode, entry);
724 fuse_change_entry_timeout(d, &outarg);
727 fuse_change_entry_timeout(entry, &outarg);
729 fuse_dir_changed(dir);
/*
 * ->mknod: build a FUSE_MKNOD request (mode, encoded rdev, umask) and
 * hand it to create_new_entry() for the common instantiate path.
 */
737 static int fuse_mknod(struct inode *dir, struct dentry *entry, umode_t mode,
740 struct fuse_mknod_in inarg;
741 struct fuse_mount *fm = get_fuse_mount(dir);
/* apply the umask locally unless the server handles it itself */
744 if (!fm->fc->dont_mask)
745 mode &= ~current_umask();
747 memset(&inarg, 0, sizeof(inarg));
749 inarg.rdev = new_encode_dev(rdev);
750 inarg.umask = current_umask();
751 args.opcode = FUSE_MKNOD;
753 args.in_args[0].size = sizeof(inarg);
754 args.in_args[0].value = &inarg;
755 args.in_args[1].size = entry->d_name.len + 1;
756 args.in_args[1].value = entry->d_name.name;
757 return create_new_entry(fm, &args, dir, entry, mode);
/* ->create: a regular-file create is just mknod with rdev == 0. */
760 static int fuse_create(struct inode *dir, struct dentry *entry, umode_t mode,
763 return fuse_mknod(dir, entry, mode, 0);
/*
 * ->mkdir: build a FUSE_MKDIR request (mode, umask) and hand it to
 * create_new_entry(), which verifies the result is S_IFDIR.
 */
766 static int fuse_mkdir(struct inode *dir, struct dentry *entry, umode_t mode)
768 struct fuse_mkdir_in inarg;
769 struct fuse_mount *fm = get_fuse_mount(dir);
772 if (!fm->fc->dont_mask)
773 mode &= ~current_umask();
775 memset(&inarg, 0, sizeof(inarg));
777 inarg.umask = current_umask();
778 args.opcode = FUSE_MKDIR;
780 args.in_args[0].size = sizeof(inarg);
781 args.in_args[0].value = &inarg;
782 args.in_args[1].size = entry->d_name.len + 1;
783 args.in_args[1].value = entry->d_name.name;
784 return create_new_entry(fm, &args, dir, entry, S_IFDIR);
/*
 * ->symlink: FUSE_SYMLINK carries two strings — the new name and the
 * link target (both NUL-terminated) — then goes through create_new_entry().
 */
787 static int fuse_symlink(struct inode *dir, struct dentry *entry,
790 struct fuse_mount *fm = get_fuse_mount(dir);
791 unsigned len = strlen(link) + 1;
794 args.opcode = FUSE_SYMLINK;
796 args.in_args[0].size = entry->d_name.len + 1;
797 args.in_args[0].value = entry->d_name.name;
798 args.in_args[1].size = len;
799 args.in_args[1].value = link;
800 return create_new_entry(fm, &args, dir, entry, S_IFLNK);
/*
 * Push dirty inode metadata (timestamps) to the server synchronously,
 * recording any error on the mapping for later fsync reporting.
 */
803 void fuse_flush_time_update(struct inode *inode)
805 int err = sync_inode_metadata(inode, 1);
807 mapping_set_error(inode->i_mapping, err);
/* Update ctime locally (unless NOCMTIME) and flush it to the server. */
810 void fuse_update_ctime(struct inode *inode)
812 if (!IS_NOCMTIME(inode)) {
813 inode->i_ctime = current_time(inode);
814 mark_inode_dirty_sync(inode);
815 fuse_flush_time_update(inode);
/*
 * ->unlink: send FUSE_UNLINK; on success drop the victim's link count,
 * bump attr_version, and invalidate cached attrs/entry.  On -EINTR the
 * operation may or may not have happened, so the entry is invalidated.
 * NOTE(review): the return and some statements are elided in this view.
 */
819 static int fuse_unlink(struct inode *dir, struct dentry *entry)
822 struct fuse_mount *fm = get_fuse_mount(dir);
825 if (fuse_is_bad(dir))
828 args.opcode = FUSE_UNLINK;
829 args.nodeid = get_node_id(dir);
831 args.in_args[0].size = entry->d_name.len + 1;
832 args.in_args[0].value = entry->d_name.name;
833 err = fuse_simple_request(fm, &args);
835 struct inode *inode = d_inode(entry);
836 struct fuse_inode *fi = get_fuse_inode(inode);
838 spin_lock(&fi->lock);
839 fi->attr_version = atomic64_inc_return(&fm->fc->attr_version);
841 * If i_nlink == 0 then unlink doesn't make sense, yet this can
842 * happen if userspace filesystem is careless. It would be
843 * difficult to enforce correct nlink usage so just ignore this
846 if (inode->i_nlink > 0)
848 spin_unlock(&fi->lock);
849 fuse_invalidate_attr(inode);
850 fuse_dir_changed(dir);
851 fuse_invalidate_entry_cache(entry);
852 fuse_update_ctime(inode);
853 } else if (err == -EINTR)
854 fuse_invalidate_entry(entry);
/*
 * ->rmdir: send FUSE_RMDIR; on success zero the directory's link count
 * and invalidate caches.  -EINTR leaves the outcome unknown, so the
 * entry is invalidated.  NOTE(review): the return is elided in this view.
 */
858 static int fuse_rmdir(struct inode *dir, struct dentry *entry)
861 struct fuse_mount *fm = get_fuse_mount(dir);
864 if (fuse_is_bad(dir))
867 args.opcode = FUSE_RMDIR;
868 args.nodeid = get_node_id(dir);
870 args.in_args[0].size = entry->d_name.len + 1;
871 args.in_args[0].value = entry->d_name.name;
872 err = fuse_simple_request(fm, &args);
874 clear_nlink(d_inode(entry));
875 fuse_dir_changed(dir);
876 fuse_invalidate_entry_cache(entry);
877 } else if (err == -EINTR)
878 fuse_invalidate_entry(entry);
/*
 * Shared implementation for FUSE_RENAME and FUSE_RENAME2: @opcode and
 * @argsize select which wire struct is sent (fuse_rename_in is a prefix
 * of fuse_rename2_in, so one local covers both).  On success all four
 * involved objects get their caches invalidated appropriately.
 * NOTE(review): the return and flags assignment are elided in this view.
 */
882 static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
883 struct inode *newdir, struct dentry *newent,
884 unsigned int flags, int opcode, size_t argsize)
887 struct fuse_rename2_in inarg;
888 struct fuse_mount *fm = get_fuse_mount(olddir);
/* only the first @argsize bytes are sent; zero just that prefix */
891 memset(&inarg, 0, argsize);
892 inarg.newdir = get_node_id(newdir);
894 args.opcode = opcode;
895 args.nodeid = get_node_id(olddir);
897 args.in_args[0].size = argsize;
898 args.in_args[0].value = &inarg;
899 args.in_args[1].size = oldent->d_name.len + 1;
900 args.in_args[1].value = oldent->d_name.name;
901 args.in_args[2].size = newent->d_name.len + 1;
902 args.in_args[2].value = newent->d_name.name;
903 err = fuse_simple_request(fm, &args);
/* ctime of the moved object changed on the server */
906 fuse_invalidate_attr(d_inode(oldent));
907 fuse_update_ctime(d_inode(oldent));
909 if (flags & RENAME_EXCHANGE) {
910 fuse_invalidate_attr(d_inode(newent));
911 fuse_update_ctime(d_inode(newent));
914 fuse_dir_changed(olddir);
915 if (olddir != newdir)
916 fuse_dir_changed(newdir);
918 /* newent will end up negative */
919 if (!(flags & RENAME_EXCHANGE) && d_really_is_positive(newent)) {
920 fuse_invalidate_attr(d_inode(newent));
921 fuse_invalidate_entry_cache(newent);
922 fuse_update_ctime(d_inode(newent));
924 } else if (err == -EINTR) {
925 /* If request was interrupted, DEITY only knows if the
926 rename actually took place. If the invalidation
927 fails (e.g. some process has CWD under the renamed
928 directory), then there can be inconsistency between
929 the dcache and the real filesystem. Tough luck. */
930 fuse_invalidate_entry(oldent);
931 if (d_really_is_positive(newent))
932 fuse_invalidate_entry(newent);
/*
 * ->rename2: prefer FUSE_RENAME2 when the server may support it
 * (protocol >= 7.23 and not previously refused); fall back to plain
 * FUSE_RENAME (flags must then be empty) when RENAME2 returns -ENOSYS.
 * NOTE(review): flag checks and returns are partially elided here.
 */
938 static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
939 struct inode *newdir, struct dentry *newent,
942 struct fuse_conn *fc = get_fuse_conn(olddir);
945 if (fuse_is_bad(olddir))
/* reject flags the protocol cannot express */
948 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
952 if (fc->no_rename2 || fc->minor < 23)
955 err = fuse_rename_common(olddir, oldent, newdir, newent, flags,
957 sizeof(struct fuse_rename2_in));
958 if (err == -ENOSYS) {
/* remember the refusal and retry with the legacy opcode */
963 err = fuse_rename_common(olddir, oldent, newdir, newent, 0,
965 sizeof(struct fuse_rename_in));
/*
 * ->link: send FUSE_LINK via create_new_entry(); on success bump the
 * local link count (guarding against overflow) and refresh attrs, since
 * the server-side inode's nlink/ctime changed.  -EINTR invalidates attrs
 * because the outcome is unknown.
 * NOTE(review): inc_nlink and the return are elided in this view.
 */
971 static int fuse_link(struct dentry *entry, struct inode *newdir,
972 struct dentry *newent)
975 struct fuse_link_in inarg;
976 struct inode *inode = d_inode(entry);
977 struct fuse_mount *fm = get_fuse_mount(inode);
980 memset(&inarg, 0, sizeof(inarg));
981 inarg.oldnodeid = get_node_id(inode);
982 args.opcode = FUSE_LINK;
984 args.in_args[0].size = sizeof(inarg);
985 args.in_args[0].value = &inarg;
986 args.in_args[1].size = newent->d_name.len + 1;
987 args.in_args[1].value = newent->d_name.name;
988 err = create_new_entry(fm, &args, newdir, newent, inode->i_mode);
989 /* Contrary to "normal" filesystems it can happen that link
990 makes two "logical" inodes point to the same "physical"
991 inode. We invalidate the attributes of the old one, so it
992 will reflect changes in the backing inode (link count,
996 struct fuse_inode *fi = get_fuse_inode(inode);
998 spin_lock(&fi->lock);
999 fi->attr_version = atomic64_inc_return(&fm->fc->attr_version);
1000 if (likely(inode->i_nlink < UINT_MAX))
1002 spin_unlock(&fi->lock);
1003 fuse_invalidate_attr(inode);
1004 fuse_update_ctime(inode);
1005 } else if (err == -EINTR) {
1006 fuse_invalidate_attr(inode);
/*
 * Translate server-supplied fuse_attr into a kstat.  With writeback
 * caching on regular files the kernel's size/mtime/ctime are
 * authoritative, so @attr is first overwritten from the inode before
 * being copied out.  uid/gid are mapped through the connection's userns.
 */
1011 static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr,
1014 unsigned int blkbits;
1015 struct fuse_conn *fc = get_fuse_conn(inode);
1017 /* see the comment in fuse_change_attributes() */
1018 if (fc->writeback_cache && S_ISREG(inode->i_mode)) {
1019 attr->size = i_size_read(inode);
1020 attr->mtime = inode->i_mtime.tv_sec;
1021 attr->mtimensec = inode->i_mtime.tv_nsec;
1022 attr->ctime = inode->i_ctime.tv_sec;
1023 attr->ctimensec = inode->i_ctime.tv_nsec;
1026 stat->dev = inode->i_sb->s_dev;
1027 stat->ino = attr->ino;
/* keep the kernel's file type bits, take permission bits from server */
1028 stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
1029 stat->nlink = attr->nlink;
1030 stat->uid = make_kuid(fc->user_ns, attr->uid);
1031 stat->gid = make_kgid(fc->user_ns, attr->gid);
1032 stat->rdev = inode->i_rdev;
1033 stat->atime.tv_sec = attr->atime;
1034 stat->atime.tv_nsec = attr->atimensec;
1035 stat->mtime.tv_sec = attr->mtime;
1036 stat->mtime.tv_nsec = attr->mtimensec;
1037 stat->ctime.tv_sec = attr->ctime;
1038 stat->ctime.tv_nsec = attr->ctimensec;
1039 stat->size = attr->size;
1040 stat->blocks = attr->blocks;
/* fall back to the superblock block size if the server sent none */
1042 if (attr->blksize != 0)
1043 blkbits = ilog2(attr->blksize);
1045 blkbits = inode->i_sb->s_blocksize_bits;
1047 stat->blksize = 1 << blkbits;
/*
 * Issue a FUSE_GETATTR for @inode (optionally against an open file's
 * handle), validate the reply, update cached attributes, and fill @stat
 * if requested.  A type change marks the inode bad.
 * NOTE(review): fh assignment and returns are elided in this view.
 */
1050 static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
1054 struct fuse_getattr_in inarg;
1055 struct fuse_attr_out outarg;
1056 struct fuse_mount *fm = get_fuse_mount(inode);
/* snapshot attr_version to order this update against writes */
1060 attr_version = fuse_get_attr_version(fm->fc);
1062 memset(&inarg, 0, sizeof(inarg));
1063 memset(&outarg, 0, sizeof(outarg));
1064 /* Directories have separate file-handle space */
1065 if (file && S_ISREG(inode->i_mode)) {
1066 struct fuse_file *ff = file->private_data;
1068 inarg.getattr_flags |= FUSE_GETATTR_FH;
1071 args.opcode = FUSE_GETATTR;
1072 args.nodeid = get_node_id(inode);
1073 args.in_numargs = 1;
1074 args.in_args[0].size = sizeof(inarg);
1075 args.in_args[0].value = &inarg;
1076 args.out_numargs = 1;
1077 args.out_args[0].size = sizeof(outarg);
1078 args.out_args[0].value = &outarg;
1079 err = fuse_simple_request(fm, &args);
/* bad attrs or changed file type: poison the inode */
1081 if (fuse_invalid_attr(&outarg.attr) ||
1082 inode_wrong_type(inode, outarg.attr.mode)) {
1083 fuse_make_bad(inode);
1086 fuse_change_attributes(inode, &outarg.attr,
1087 attr_timeout(&outarg),
1090 fuse_fillattr(inode, &outarg.attr, stat);
/*
 * Decide whether cached attributes may satisfy this getattr: forced
 * sync, explicit no-sync, per-field invalidation mask, or timeout
 * expiry.  Either refresh via fuse_do_getattr() or fill from the inode
 * (restoring the pre-mangled mode and original inode number).
 * NOTE(review): sync= assignments and the return are elided in this view.
 */
1096 static int fuse_update_get_attr(struct inode *inode, struct file *file,
1097 struct kstat *stat, u32 request_mask,
1100 struct fuse_inode *fi = get_fuse_inode(inode);
1104 if (flags & AT_STATX_FORCE_SYNC)
1106 else if (flags & AT_STATX_DONT_SYNC)
/* any requested field marked stale forces a round trip */
1108 else if (request_mask & READ_ONCE(fi->inval_mask))
1111 sync = time_before64(fi->i_time, get_jiffies_64());
1114 forget_all_cached_acls(inode);
1115 err = fuse_do_getattr(inode, stat, file);
1117 generic_fillattr(inode, stat);
1118 stat->mode = fi->orig_i_mode;
1119 stat->ino = fi->orig_ino;
/* Internal-attribute refresh helper: everything except atime. */
1125 int fuse_update_attributes(struct inode *inode, struct file *file)
1127 /* Do *not* need to get atime for internal purposes */
1128 return fuse_update_get_attr(inode, file, NULL,
1129 STATX_BASIC_STATS & ~STATX_ATIME, 0);
/*
 * Server-initiated entry invalidation: find @name under @parent_nodeid
 * and drop it from the dcache.  When @child_nodeid is given, also
 * verify it matches and (for directories) that the subtree is unmounted
 * and empty before killing the child inode's link count.
 * NOTE(review): error assignments, dputs and returns are elided here.
 */
1132 int fuse_reverse_inval_entry(struct fuse_conn *fc, u64 parent_nodeid,
1133 u64 child_nodeid, struct qstr *name)
1136 struct inode *parent;
1138 struct dentry *entry;
1140 parent = fuse_ilookup(fc, parent_nodeid, NULL);
1144 inode_lock_nested(parent, I_MUTEX_PARENT);
1145 if (!S_ISDIR(parent->i_mode))
1149 dir = d_find_alias(parent);
/* the qstr from the server has no hash yet; compute it for d_lookup */
1153 name->hash = full_name_hash(dir, name->name, name->len);
1154 entry = d_lookup(dir, name);
1159 fuse_dir_changed(parent);
1160 fuse_invalidate_entry(entry);
1162 if (child_nodeid != 0 && d_really_is_positive(entry)) {
1163 inode_lock(d_inode(entry));
1164 if (get_node_id(d_inode(entry)) != child_nodeid) {
/* refuse to invalidate a mountpoint */
1168 if (d_mountpoint(entry)) {
1172 if (d_is_dir(entry)) {
1173 shrink_dcache_parent(entry);
1174 if (!simple_empty(entry)) {
/* mirror rmdir semantics on the dying directory inode */
1178 d_inode(entry)->i_flags |= S_DEAD;
1181 clear_nlink(d_inode(entry));
1184 inode_unlock(d_inode(entry));
1193 inode_unlock(parent);
1199 * Calling into a user-controlled filesystem gives the filesystem
1200 * daemon ptrace-like capabilities over the current process. This
1201 * means, that the filesystem daemon is able to record the exact
1202 * filesystem operations performed, and can also control the behavior
1203 * of the requester process in otherwise impossible ways. For example
1204 * it can delay the operation for arbitrary length of time allowing
1205 * DoS against the requester.
1207 * For this reason only those processes can call into the filesystem,
1208 * for which the owner of the mount has ptrace privilege. This
1209 * excludes processes started by other users, suid or sgid processes.
/*
 * Returns nonzero when current may use this connection: either
 * allow_other is set (then only the userns must match), or all six
 * uid/gid credentials equal the mount owner's.
 * NOTE(review): the success/failure returns are elided in this view.
 */
1211 int fuse_allow_current_process(struct fuse_conn *fc)
1213 const struct cred *cred;
1215 if (fc->allow_other)
1216 return current_in_userns(fc->user_ns);
1218 cred = current_cred();
/* real, effective and saved ids must all match — blocks suid games */
1219 if (uid_eq(cred->euid, fc->user_id) &&
1220 uid_eq(cred->suid, fc->user_id) &&
1221 uid_eq(cred->uid, fc->user_id) &&
1222 gid_eq(cred->egid, fc->group_id) &&
1223 gid_eq(cred->sgid, fc->group_id) &&
1224 gid_eq(cred->gid, fc->group_id))
/*
 * Send FUSE_ACCESS for the sys_access()/chdir path.  A server that
 * answers -ENOSYS is remembered (fc->no_access) so subsequent calls
 * short-circuit.  NOTE(review): the returns are elided in this view.
 */
1230 static int fuse_access(struct inode *inode, int mask)
1232 struct fuse_mount *fm = get_fuse_mount(inode);
1234 struct fuse_access_in inarg;
/* this path must be allowed to block */
1237 BUG_ON(mask & MAY_NOT_BLOCK);
1239 if (fm->fc->no_access)
1242 memset(&inarg, 0, sizeof(inarg));
1243 inarg.mask = mask & (MAY_READ | MAY_WRITE | MAY_EXEC);
1244 args.opcode = FUSE_ACCESS;
1245 args.nodeid = get_node_id(inode);
1246 args.in_numargs = 1;
1247 args.in_args[0].size = sizeof(inarg);
1248 args.in_args[0].value = &inarg;
1249 err = fuse_simple_request(fm, &args);
1250 if (err == -ENOSYS) {
1251 fm->fc->no_access = 1;
/*
 * Refresh attributes for a permission check; cannot be done in
 * RCU-walk context, so MAY_NOT_BLOCK bails out (to -ECHILD, elided).
 */
1258 static int fuse_perm_getattr(struct inode *inode, int mask)
1259 if (mask & MAY_NOT_BLOCK)
1262 forget_all_cached_acls(inode);
1263 return fuse_do_getattr(inode, NULL, NULL);
1267 * Check permission. The two basic access models of FUSE are:
1269 * 1) Local access checking ('default_permissions' mount option) based
1270 * on file mode. This is the plain old disk filesystem permission
1273 * 2) "Remote" access checking, where server is responsible for
1274 * checking permission in each inode operation. An exception to this
1275 * is if ->permission() was invoked from sys_access() in which case an
1276 * access request is sent. Execute permission is still checked
1277 * locally based on file mode.
/* NOTE(review): returns and some error assignments are elided here. */
1279 static int fuse_permission(struct inode *inode, int mask)
1281 struct fuse_conn *fc = get_fuse_conn(inode);
1282 bool refreshed = false;
1285 if (fuse_is_bad(inode))
1288 if (!fuse_allow_current_process(fc))
1292 * If attributes are needed, refresh them before proceeding
1294 if (fc->default_permissions ||
1295 ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
1296 struct fuse_inode *fi = get_fuse_inode(inode);
1297 u32 perm_mask = STATX_MODE | STATX_UID | STATX_GID;
/* refresh only if mode/uid/gid are stale or the timeout expired */
1299 if (perm_mask & READ_ONCE(fi->inval_mask) ||
1300 time_before64(fi->i_time, get_jiffies_64())) {
1303 err = fuse_perm_getattr(inode, mask);
1309 if (fc->default_permissions) {
1310 err = generic_permission(inode, mask);
1312 /* If permission is denied, try to refresh file
1313 attributes. This is also needed, because the root
1314 node will at first have no permissions */
1315 if (err == -EACCES && !refreshed) {
1316 err = fuse_perm_getattr(inode, mask);
1318 err = generic_permission(inode, mask);
1321 /* Note: the opposite of the above test does not
1322 exist. So if permissions are revoked this won't be
1323 noticed immediately, only after the attribute
1324 timeout has expired */
1325 } else if (mask & (MAY_ACCESS | MAY_CHDIR)) {
1326 err = fuse_access(inode, mask);
1327 } else if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) {
/* no exec bit cached: refresh once and re-check */
1328 if (!(inode->i_mode & S_IXUGO)) {
1332 err = fuse_perm_getattr(inode, mask);
1333 if (!err && !(inode->i_mode & S_IXUGO))
/*
 * Read a symlink target into @page via FUSE_READLINK.  The reply is
 * variable-length (out_argvar) and capped at PAGE_SIZE-1 so a NUL
 * terminator always fits.  NOTE(review): the NUL-termination and return
 * statements are elided in this view.
 */
1340 static int fuse_readlink_page(struct inode *inode, struct page *page)
1342 struct fuse_mount *fm = get_fuse_mount(inode);
1343 struct fuse_page_desc desc = { .length = PAGE_SIZE - 1 };
1344 struct fuse_args_pages ap = {
1352 ap.args.opcode = FUSE_READLINK;
1353 ap.args.nodeid = get_node_id(inode);
1354 ap.args.out_pages = true;
1355 ap.args.out_argvar = true;
1356 ap.args.page_zeroing = true;
1357 ap.args.out_numargs = 1;
1358 ap.args.out_args[0].size = desc.length;
1359 res = fuse_simple_request(fm, &ap.args);
/* reading the link counts as an atime update on the server */
1361 fuse_invalidate_atime(inode);
1366 if (WARN_ON(res >= PAGE_SIZE))
1369 link = page_address(page);
/*
 * ->get_link: when the connection caches symlinks, defer to the page
 * cache via page_get_link(); otherwise allocate a transient page, fill
 * it with fuse_readlink_page(), and schedule its release through the
 * delayed_call.  RCU-walk handling and error returns are elided here.
 */
1375 static const char *fuse_get_link(struct dentry *dentry, struct inode *inode,
1376 struct delayed_call *callback)
1378 struct fuse_conn *fc = get_fuse_conn(inode);
1383 if (fuse_is_bad(inode))
1386 if (fc->cache_symlinks)
1387 return page_get_link(dentry, inode, callback);
1393 page = alloc_page(GFP_KERNEL);
1398 err = fuse_readlink_page(inode, page);
/* hand the page to the VFS; put_link will free it after use */
1404 set_delayed_call(callback, page_put_link, page);
1406 return page_address(page);
1409 return ERR_PTR(err);
/* ->open for directories: shared open path with isdir == true. */
1412 static int fuse_dir_open(struct inode *inode, struct file *file)
1414 return fuse_open_common(inode, file, true);
/* ->release for directories: shared release path with isdir == true. */
1417 static int fuse_dir_release(struct inode *inode, struct file *file)
1419 fuse_release_common(file, true);
/*
 * ->fsync for directories: FUSE_FSYNCDIR through the common fsync path.
 * A server answering -ENOSYS sets fc->no_fsyncdir so future calls are
 * skipped.  NOTE(review): inode_lock and returns are elided in this view.
 */
1424 static int fuse_dir_fsync(struct file *file, loff_t start, loff_t end,
1427 struct inode *inode = file->f_mapping->host;
1428 struct fuse_conn *fc = get_fuse_conn(inode);
1431 if (fuse_is_bad(inode))
1434 if (fc->no_fsyncdir)
1438 err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNCDIR);
1439 if (err == -ENOSYS) {
1440 fc->no_fsyncdir = 1;
1443 inode_unlock(inode);
/*
 * Directory ioctl entry points: forward to the common ioctl handler
 * with FUSE_IOCTL_DIR (plus FUSE_IOCTL_COMPAT for 32-bit callers).
 * The protocol-version gate (>= 7.18) check body is elided in this view.
 */
1448 static long fuse_dir_ioctl(struct file *file, unsigned int cmd,
1451 struct fuse_conn *fc = get_fuse_conn(file->f_mapping->host);
1453 /* FUSE_IOCTL_DIR only supported for API version >= 7.18 */
1457 return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_DIR);
1460 static long fuse_dir_compat_ioctl(struct file *file, unsigned int cmd,
1463 struct fuse_conn *fc = get_fuse_conn(file->f_mapping->host);
1468 return fuse_ioctl_common(file, cmd, arg,
1469 FUSE_IOCTL_COMPAT | FUSE_IOCTL_DIR);
/*
 * Decide whether a setattr should forward mtime to the server:
 * yes when explicitly set or when the kernel's mtime is authoritative;
 * no for the implicit mtime bump of O_TRUNC/ftruncate; yes otherwise.
 * NOTE(review): the return statements are elided in this view.
 */
1472 static bool update_mtime(unsigned ivalid, bool trust_local_mtime)
1474 /* Always update if mtime is explicitly set */
1475 if (ivalid & ATTR_MTIME_SET)
1478 /* Or if kernel i_mtime is the official one */
1479 if (trust_local_mtime)
1482 /* If it's an open(O_TRUNC) or an ftruncate(), don't update */
1483 if ((ivalid & ATTR_SIZE) && (ivalid & (ATTR_OPEN | ATTR_FILE)))
1486 /* In all other cases update */
/*
 * Translate a VFS struct iattr into the fuse_setattr_in wire format,
 * setting the corresponding FATTR_* bits in arg->valid.  uid/gid are
 * mapped through the connection's user namespace.  mtime is only sent
 * when update_mtime() approves; FATTR_*_NOW tells the server to stamp
 * the current time itself.
 *
 * NOTE(review): the closing braces of the atime/mtime/ctime branches
 * are elided in this extract.
 */
1490 static void iattr_to_fattr(struct fuse_conn *fc, struct iattr *iattr,
1491 			   struct fuse_setattr_in *arg, bool trust_local_cmtime)
1493 	unsigned ivalid = iattr->ia_valid;
1495 	if (ivalid & ATTR_MODE)
1496 		arg->valid |= FATTR_MODE,   arg->mode = iattr->ia_mode;
1497 	if (ivalid & ATTR_UID)
1498 		arg->valid |= FATTR_UID,    arg->uid = from_kuid(fc->user_ns, iattr->ia_uid);
1499 	if (ivalid & ATTR_GID)
1500 		arg->valid |= FATTR_GID,    arg->gid = from_kgid(fc->user_ns, iattr->ia_gid);
1501 	if (ivalid & ATTR_SIZE)
1502 		arg->valid |= FATTR_SIZE,   arg->size = iattr->ia_size;
1503 	if (ivalid & ATTR_ATIME) {
1504 		arg->valid |= FATTR_ATIME;
1505 		arg->atime = iattr->ia_atime.tv_sec;
1506 		arg->atimensec = iattr->ia_atime.tv_nsec;
		/* "touch to now" rather than an explicit timestamp */
1507 		if (!(ivalid & ATTR_ATIME_SET))
1508 			arg->valid |= FATTR_ATIME_NOW;
1510 	if ((ivalid & ATTR_MTIME) && update_mtime(ivalid, trust_local_cmtime)) {
1511 		arg->valid |= FATTR_MTIME;
1512 		arg->mtime = iattr->ia_mtime.tv_sec;
1513 		arg->mtimensec = iattr->ia_mtime.tv_nsec;
		/* server stamps "now" unless the kernel owns mtime locally */
1514 		if (!(ivalid & ATTR_MTIME_SET) && !trust_local_cmtime)
1515 			arg->valid |= FATTR_MTIME_NOW;
1517 	if ((ivalid & ATTR_CTIME) && trust_local_cmtime) {
1518 		arg->valid |= FATTR_CTIME;
1519 		arg->ctime = iattr->ia_ctime.tv_sec;
1520 		arg->ctimensec = iattr->ia_ctime.tv_nsec;
1525 * Prevent concurrent writepages on inode
1527 * This is done by adding a negative bias to the inode write counter
1528 * and waiting for all pending writes to finish.
/*
 * Block writepages on the inode by adding the FUSE_NOWRITE bias to the
 * write counter, then wait until every in-flight write has finished
 * (counter drained down to exactly FUSE_NOWRITE).  Caller must hold the
 * inode lock.  Paired with fuse_release_nowrite().
 */
1530 void fuse_set_nowrite(struct inode *inode)
1532 	struct fuse_inode *fi = get_fuse_inode(inode);
1534 	BUG_ON(!inode_is_locked(inode));
1536 	spin_lock(&fi->lock);
	/* A negative counter would mean nowrite is already in force */
1537 	BUG_ON(fi->writectr < 0);
1538 	fi->writectr += FUSE_NOWRITE;
1539 	spin_unlock(&fi->lock);
	/* Wait for pending writes: counter returns to the bare bias */
1540 	wait_event(fi->page_waitq, fi->writectr == FUSE_NOWRITE);
1544  * Allow writepages on inode
1546  * Remove the bias from the write counter and send any queued writepages.
/*
 * Remove the FUSE_NOWRITE bias and flush any writepages that queued up
 * while writes were blocked.  Called with fi->lock held (see
 * fuse_release_nowrite() below).
 *
 * NOTE(review): the line resetting fi->writectr appears elided in this
 * extract.
 */
1549 static void __fuse_release_nowrite(struct inode *inode)
1551 	struct fuse_inode *fi = get_fuse_inode(inode);
	/* Must only be called while the nowrite bias is in force */
1553 	BUG_ON(fi->writectr != FUSE_NOWRITE);
1555 	fuse_flush_writepages(inode);
/*
 * Locked wrapper around __fuse_release_nowrite(): re-allow writepages
 * on the inode.  Counterpart of fuse_set_nowrite().
 */
1558 void fuse_release_nowrite(struct inode *inode)
1560 	struct fuse_inode *fi = get_fuse_inode(inode);
1562 	spin_lock(&fi->lock);
1563 	__fuse_release_nowrite(inode);
1564 	spin_unlock(&fi->lock);
1567 static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_args *args,
1568 struct inode *inode,
1569 struct fuse_setattr_in *inarg_p,
1570 struct fuse_attr_out *outarg_p)
1572 args->opcode = FUSE_SETATTR;
1573 args->nodeid = get_node_id(inode);
1574 args->in_numargs = 1;
1575 args->in_args[0].size = sizeof(*inarg_p);
1576 args->in_args[0].value = inarg_p;
1577 args->out_numargs = 1;
1578 args->out_args[0].size = sizeof(*outarg_p);
1579 args->out_args[0].value = outarg_p;
1583 * Flush inode->i_mtime to the server
/*
 * Push the locally maintained inode->i_mtime (and, for protocol minor
 * version >= 23, i_ctime) to the server via FUSE_SETATTR.  When a file
 * handle is supplied, FATTR_FH routes the setattr through it.
 *
 * NOTE(review): FUSE_ARGS declaration and the conditional guarding the
 * FATTR_FH branch on ff appear elided in this extract — confirm the fh
 * is only sent when ff != NULL.
 */
1585 int fuse_flush_times(struct inode *inode, struct fuse_file *ff)
1587 	struct fuse_mount *fm = get_fuse_mount(inode);
1589 	struct fuse_setattr_in inarg;
1590 	struct fuse_attr_out outarg;
1592 	memset(&inarg, 0, sizeof(inarg));
1593 	memset(&outarg, 0, sizeof(outarg));
1595 	inarg.valid = FATTR_MTIME;
1596 	inarg.mtime = inode->i_mtime.tv_sec;
1597 	inarg.mtimensec = inode->i_mtime.tv_nsec;
	/* ctime flushing only exists since protocol 7.23 */
1598 	if (fm->fc->minor >= 23) {
1599 		inarg.valid |= FATTR_CTIME;
1600 		inarg.ctime = inode->i_ctime.tv_sec;
1601 		inarg.ctimensec = inode->i_ctime.tv_nsec;
1604 		inarg.valid |= FATTR_FH;
1607 	fuse_setattr_fill(fm->fc, &args, inode, &inarg, &outarg);
1609 	return fuse_simple_request(fm, &args);
1613 * Set attributes, and at the same time refresh them.
1615 * Truncation is slightly complicated, because the 'truncate' request
1616 * may fail, in which case we don't want to touch the mapping.
1617 * vmtruncate() doesn't allow for this case, so do the rlimit checking
1618 * and the actual truncation by hand.
/*
 * Send a SETATTR request and refresh the cached attributes from the
 * reply.
 *
 * Truncation is handled carefully: writepages are suspended
 * (fuse_set_nowrite) around the request, the page cache is only
 * truncated after the server reported success, and with DAX the mmap
 * layouts are broken first.  With writeback cache on regular files
 * ("trust_local_cmtime"), the kernel owns m/ctime and pushes its local
 * values instead of letting the server stamp them.
 *
 * NOTE(review): many lines (error returns, gotos, labels, closing
 * braces) are elided in this extract; comments describe only what is
 * visible.
 */
1620 int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
1623 	struct inode *inode = d_inode(dentry);
1624 	struct fuse_mount *fm = get_fuse_mount(inode);
1625 	struct fuse_conn *fc = fm->fc;
1626 	struct fuse_inode *fi = get_fuse_inode(inode);
1628 	struct fuse_setattr_in inarg;
1629 	struct fuse_attr_out outarg;
1630 	bool is_truncate = false;
1631 	bool is_wb = fc->writeback_cache;
	/* kernel is authoritative for m/ctime only on wb-cached regular files */
1634 	bool trust_local_cmtime = is_wb && S_ISREG(inode->i_mode);
1635 	bool fault_blocked = false;
	/* without default_permissions the server does its own permission checks */
1637 	if (!fc->default_permissions)
1638 		attr->ia_valid |= ATTR_FORCE;
1640 	err = setattr_prepare(dentry, attr);
	/* size changes are only meaningful on regular files */
1644 	if (attr->ia_valid & ATTR_SIZE) {
1645 		if (WARN_ON(!S_ISREG(inode->i_mode)))
	/* DAX truncate: block page faults and break mmap layouts first */
1650 	if (FUSE_IS_DAX(inode) && is_truncate) {
1651 		down_write(&fi->i_mmap_sem);
1652 		fault_blocked = true;
1653 		err = fuse_dax_break_layouts(inode, 0, 0);
1655 			up_write(&fi->i_mmap_sem);
1660 	if (attr->ia_valid & ATTR_OPEN) {
1661 		/* This is coming from open(..., ... | O_TRUNC); */
1662 		WARN_ON(!(attr->ia_valid & ATTR_SIZE));
1663 		WARN_ON(attr->ia_size != 0);
1664 		if (fc->atomic_o_trunc) {
			/*
1666 			 * No need to send request to userspace, since actual
1667 			 * truncation has already been done by OPEN.  But still
1668 			 * need to truncate page cache.
			 */
1670 			i_size_write(inode, 0);
1671 			truncate_pagecache(inode, 0);
1677 	/* Flush dirty data/metadata before non-truncate SETATTR */
1678 	if (is_wb && S_ISREG(inode->i_mode) &&
1680 	    (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_MTIME_SET |
1682 		err = write_inode_now(inode, true);
	/* truncate path: stop writepages while size is in flux */
1686 		fuse_set_nowrite(inode);
1687 		fuse_release_nowrite(inode);
1691 		fuse_set_nowrite(inode);
1692 		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
		/* a real size change implies new local m/ctime as well */
1693 		if (trust_local_cmtime && attr->ia_size != inode->i_size)
1694 			attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
1697 	memset(&inarg, 0, sizeof(inarg));
1698 	memset(&outarg, 0, sizeof(outarg));
1699 	iattr_to_fattr(fc, attr, &inarg, trust_local_cmtime);
1701 		struct fuse_file *ff = file->private_data;
1702 		inarg.valid |= FATTR_FH;
1705 	if (attr->ia_valid & ATTR_SIZE) {
1706 		/* For mandatory locking in truncate */
1707 		inarg.valid |= FATTR_LOCKOWNER;
1708 		inarg.lock_owner = fuse_lock_owner_id(fc, current->files);
1710 	fuse_setattr_fill(fc, &args, inode, &inarg, &outarg);
1711 	err = fuse_simple_request(fm, &args);
	/* error path: cached attributes may be stale now */
1714 		fuse_invalidate_attr(inode);
	/* server returned nonsense: poison the inode */
1718 	if (fuse_invalid_attr(&outarg.attr) ||
1719 	    inode_wrong_type(inode, outarg.attr.mode)) {
1720 		fuse_make_bad(inode);
1725 	spin_lock(&fi->lock);
1726 	/* the kernel maintains i_mtime locally */
1727 	if (trust_local_cmtime) {
1728 		if (attr->ia_valid & ATTR_MTIME)
1729 			inode->i_mtime = attr->ia_mtime;
1730 		if (attr->ia_valid & ATTR_CTIME)
1731 			inode->i_ctime = attr->ia_ctime;
1732 		/* FIXME: clear I_DIRTY_SYNC? */
1735 	fuse_change_attributes_common(inode, &outarg.attr,
1736 				      attr_timeout(&outarg));
1737 	oldsize = inode->i_size;
1738 	/* see the comment in fuse_change_attributes() */
1739 	if (!is_wb || is_truncate || !S_ISREG(inode->i_mode))
1740 		i_size_write(inode, outarg.attr.size);
1743 		/* NOTE: this may release/reacquire fi->lock */
1744 		__fuse_release_nowrite(inode);
1746 	spin_unlock(&fi->lock);
	/*
1749 	 * Only call invalidate_inode_pages2() after removing
1750 	 * FUSE_NOWRITE, otherwise fuse_launder_page() would deadlock.
	 */
1752 	if ((is_truncate || !is_wb) &&
1753 	    S_ISREG(inode->i_mode) && oldsize != outarg.attr.size) {
1754 		truncate_pagecache(inode, outarg.attr.size);
1755 		invalidate_inode_pages2(inode->i_mapping);
1758 		clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
1761 		up_write(&fi->i_mmap_sem);
	/* error unwind: undo nowrite / size-unstable / fault blocking */
1767 		fuse_release_nowrite(inode);
1769 	clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
1772 		up_write(&fi->i_mmap_sem);
/*
 * ->setattr(): permission / suid-sgid handling wrapper around
 * fuse_do_setattr().
 *
 * If the server does not handle killpriv itself, emulate
 * suid/sgid-stripping here by refreshing i_mode and folding an explicit
 * ATTR_MODE change into the request.  ACL caches and directory entry
 * caches are invalidated as needed afterwards.
 *
 * NOTE(review): several early returns and closing braces are elided in
 * this extract.
 */
1776 static int fuse_setattr(struct dentry *entry, struct iattr *attr)
1778 	struct inode *inode = d_inode(entry);
1779 	struct fuse_conn *fc = get_fuse_conn(inode);
1780 	struct file *file = (attr->ia_valid & ATTR_FILE) ? attr->ia_file : NULL;
1783 	if (fuse_is_bad(inode))
1786 	if (!fuse_allow_current_process(get_fuse_conn(inode)))
1789 	if (attr->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID)) {
1790 		attr->ia_valid &= ~(ATTR_KILL_SUID | ATTR_KILL_SGID |
		/*
1794 		 * The only sane way to reliably kill suid/sgid is to do it in
1795 		 * the userspace filesystem
		 *
1797 		 * This should be done on write(), truncate() and chown().
		 */
1799 		if (!fc->handle_killpriv) {
			/*
1801 			 * ia_mode calculation may have used stale i_mode.
1802 			 * Refresh and recalculate.
			 */
1804 			ret = fuse_do_getattr(inode, NULL, file);
1808 			attr->ia_mode = inode->i_mode;
			/* strip setuid */
1809 			if (inode->i_mode & S_ISUID) {
1810 				attr->ia_valid |= ATTR_MODE;
1811 				attr->ia_mode &= ~S_ISUID;
			/* strip setgid only when group-exec is set */
1813 			if ((inode->i_mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
1814 				attr->ia_valid |= ATTR_MODE;
1815 				attr->ia_mode &= ~S_ISGID;
	/* nothing left to change */
1819 	if (!attr->ia_valid)
1822 	ret = fuse_do_setattr(entry, attr, file);
	/*
1825 	 * If filesystem supports acls it may have updated acl xattrs in
1826 	 * the filesystem, so forget cached acls for the inode.
	 */
1829 		forget_all_cached_acls(inode);
1831 	/* Directory mode changed, may need to revalidate access */
1832 	if (d_is_dir(entry) && (attr->ia_valid & ATTR_MODE))
1833 		fuse_invalidate_entry_cache(entry);
/*
 * ->getattr(): refresh attributes from the server if needed and fill in
 * *stat.  A process not allowed to access this connection still gets
 * st_dev when it explicitly requested no fields (request_mask == 0);
 * otherwise it is rejected (error return elided in this extract).
 */
1838 static int fuse_getattr(const struct path *path, struct kstat *stat,
1839 			u32 request_mask, unsigned int flags)
1841 	struct inode *inode = d_inode(path->dentry);
1842 	struct fuse_conn *fc = get_fuse_conn(inode);
	/* bad inode: error out (return elided) */
1844 	if (fuse_is_bad(inode))
1847 	if (!fuse_allow_current_process(fc)) {
1848 		if (!request_mask) {
			/*
1850 			 * If user explicitly requested *nothing* then don't
1851 			 * error out, but return st_dev only.
			 */
1853 			stat->result_mask = 0;
1854 			stat->dev = inode->i_sb->s_dev;
1860 	return fuse_update_get_attr(inode, NULL, stat, request_mask, flags);
/* Inode operations for FUSE directories. */
1863 static const struct inode_operations fuse_dir_inode_operations = {
1864 	.lookup		= fuse_lookup,
1865 	.mkdir		= fuse_mkdir,
1866 	.symlink	= fuse_symlink,
1867 	.unlink		= fuse_unlink,
1868 	.rmdir		= fuse_rmdir,
1869 	.rename		= fuse_rename2,
1871 	.setattr	= fuse_setattr,
1872 	.create		= fuse_create,
1873 	.atomic_open	= fuse_atomic_open,
1874 	.mknod		= fuse_mknod,
1875 	.permission	= fuse_permission,
1876 	.getattr	= fuse_getattr,
1877 	.listxattr	= fuse_listxattr,
1878 	.get_acl	= fuse_get_acl,
1879 	.set_acl	= fuse_set_acl,
/* File operations for open FUSE directories (readdir, fsync, ioctl). */
1882 static const struct file_operations fuse_dir_operations = {
1883 	.llseek		= generic_file_llseek,
1884 	.read		= generic_read_dir,
1885 	.iterate_shared	= fuse_readdir,
1886 	.open		= fuse_dir_open,
1887 	.release	= fuse_dir_release,
1888 	.fsync		= fuse_dir_fsync,
1889 	.unlocked_ioctl	= fuse_dir_ioctl,
1890 	.compat_ioctl	= fuse_dir_compat_ioctl,
/* Inode operations shared by regular files and special inodes. */
1893 static const struct inode_operations fuse_common_inode_operations = {
1894 	.setattr	= fuse_setattr,
1895 	.permission	= fuse_permission,
1896 	.getattr	= fuse_getattr,
1897 	.listxattr	= fuse_listxattr,
1898 	.get_acl	= fuse_get_acl,
1899 	.set_acl	= fuse_set_acl,
/* Inode operations for FUSE symlinks. */
1902 static const struct inode_operations fuse_symlink_inode_operations = {
1903 	.setattr	= fuse_setattr,
1904 	.get_link	= fuse_get_link,
1905 	.getattr	= fuse_getattr,
1906 	.listxattr	= fuse_listxattr,
1909 void fuse_init_common(struct inode *inode)
1911 inode->i_op = &fuse_common_inode_operations;
/*
 * Set up a directory inode: install dir inode/file ops and initialize
 * the readdir cache state.
 *
 * NOTE(review): additional rdc field initializers (e.g. size/pos)
 * appear elided in this extract.
 */
1914 void fuse_init_dir(struct inode *inode)
1916 	struct fuse_inode *fi = get_fuse_inode(inode);
1918 	inode->i_op = &fuse_dir_inode_operations;
1919 	inode->i_fop = &fuse_dir_operations;
	/* readdir cache starts out empty and unversioned */
1921 	spin_lock_init(&fi->rdc.lock);
1922 	fi->rdc.cached = false;
1925 	fi->rdc.version = 0;
/*
 * ->readpage() for the symlink address space: fetch the link target
 * into the page and mark it uptodate on success.
 *
 * NOTE(review): the success check guarding SetPageUptodate and the
 * unlock_page()/return lines appear elided in this extract.
 */
1928 static int fuse_symlink_readpage(struct file *null, struct page *page)
1930 	int err = fuse_readlink_page(page->mapping->host, page);
1933 		SetPageUptodate(page);
/* Address space ops for symlink inodes: only page-cache readlink. */
1940 static const struct address_space_operations fuse_symlink_aops = {
1941 	.readpage	= fuse_symlink_readpage,
1944 void fuse_init_symlink(struct inode *inode)
1946 inode->i_op = &fuse_symlink_inode_operations;
1947 inode->i_data.a_ops = &fuse_symlink_aops;
1948 inode_nohighmem(inode);