/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/
11 #include <linux/pagemap.h>
12 #include <linux/slab.h>
13 #include <linux/file.h>
14 #include <linux/seq_file.h>
15 #include <linux/init.h>
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
18 #include <linux/parser.h>
19 #include <linux/statfs.h>
20 #include <linux/random.h>
21 #include <linux/sched.h>
22 #include <linux/exportfs.h>
23 #include <linux/posix_acl.h>
MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
MODULE_DESCRIPTION("Filesystem in Userspace");
MODULE_LICENSE("GPL");

/* Slab cache from which struct fuse_inode objects are allocated. */
static struct kmem_cache *fuse_inode_cachep;
/* List of all live fuse connections; protected by fuse_mutex. */
struct list_head fuse_conn_list;
DEFINE_MUTEX(fuse_mutex);

static int set_global_limit(const char *val, struct kernel_param *kp);

/*
 * Writable module parameter: upper bound on max_background that an
 * unprivileged user may request.  Writes go through set_global_limit()
 * so the value is re-sanitized against available RAM.
 */
unsigned max_user_bgreq;
module_param_call(max_user_bgreq, set_global_limit, param_get_uint,
		  &max_user_bgreq, 0644);
__MODULE_PARM_TYPE(max_user_bgreq, "uint");
MODULE_PARM_DESC(max_user_bgreq,
 "Global limit for the maximum number of backgrounded requests an "
 "unprivileged user can set");

/* Same scheme as max_user_bgreq, for the congestion threshold. */
unsigned max_user_congthresh;
module_param_call(max_user_congthresh, set_global_limit, param_get_uint,
		  &max_user_congthresh, 0644);
__MODULE_PARM_TYPE(max_user_congthresh, "uint");
MODULE_PARM_DESC(max_user_congthresh,
 "Global limit for the maximum congestion threshold an "
 "unprivileged user can set");

/* "FUSe" little-endian; reported via statfs and sb->s_magic. */
#define FUSE_SUPER_MAGIC 0x65735546

#define FUSE_DEFAULT_BLKSIZE 512

/** Maximum number of outstanding background requests */
#define FUSE_DEFAULT_MAX_BACKGROUND 12

/** Congestion starts at 75% of maximum */
#define FUSE_DEFAULT_CONGESTION_THRESHOLD (FUSE_DEFAULT_MAX_BACKGROUND * 3 / 4)
/*
 * Mount options parsed out of the option string.  The *_present bits
 * record which of the mandatory options were actually supplied; they are
 * checked at the end of parse_fuse_opt().
 */
struct fuse_mount_data {
	unsigned fd_present:1;		/* "fd=" was given (mandatory) */
	unsigned rootmode_present:1;	/* "rootmode=" was given (mandatory) */
	unsigned user_id_present:1;	/* "user_id=" was given (mandatory) */
	unsigned group_id_present:1;	/* "group_id=" was given (mandatory) */
	unsigned default_permissions:1;	/* let VFS do permission checking */
	unsigned allow_other:1;		/* allow access by other users */
/*
 * Allocate a zeroed forget-link, used later to queue a FUSE_FORGET
 * request for an inode.  Returns NULL on allocation failure.
 */
struct fuse_forget_link *fuse_alloc_forget(void)
	return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL);

/*
 * Super op: allocate a fuse_inode from the dedicated slab and initialize
 * the per-inode writeback lists, waitqueue and mutex.  A forget-link is
 * pre-allocated here so eviction can always queue a FORGET.
 */
static struct inode *fuse_alloc_inode(struct super_block *sb)
	struct fuse_inode *fi;

	inode = kmem_cache_alloc(fuse_inode_cachep, GFP_KERNEL);

	fi = get_fuse_inode(inode);
	INIT_LIST_HEAD(&fi->write_files);
	INIT_LIST_HEAD(&fi->queued_writes);
	INIT_LIST_HEAD(&fi->writepages);
	init_waitqueue_head(&fi->page_waitq);
	mutex_init(&fi->mutex);
	fi->forget = fuse_alloc_forget();
	/* error path: undo the slab allocation if fi->forget failed */
	kmem_cache_free(fuse_inode_cachep, inode);
/* RCU callback: free the inode once readers have drained. */
static void fuse_i_callback(struct rcu_head *head)
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(fuse_inode_cachep, inode);

/*
 * Super op: final teardown of a fuse inode.  The write lists must be
 * empty by now — anything else is a refcounting bug, hence BUG_ON.
 */
static void fuse_destroy_inode(struct inode *inode)
	struct fuse_inode *fi = get_fuse_inode(inode);
	BUG_ON(!list_empty(&fi->write_files));
	BUG_ON(!list_empty(&fi->queued_writes));
	mutex_destroy(&fi->mutex);
	call_rcu(&inode->i_rcu, fuse_i_callback);

/*
 * Super op: evict an inode — drop its page cache and, if the superblock
 * is still active (not being torn down), queue a FORGET so the userspace
 * server can drop its lookup count for the node.
 */
static void fuse_evict_inode(struct inode *inode)
	truncate_inode_pages_final(&inode->i_data);
	if (inode->i_sb->s_flags & MS_ACTIVE) {
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_inode *fi = get_fuse_inode(inode);
		fuse_queue_forget(fc, fi->forget, fi->nodeid, fi->nlookup);
/* Super op: remount — fuse does not support mandatory locking. */
static int fuse_remount_fs(struct super_block *sb, int *flags, char *data)
	if (*flags & MS_MANDLOCK)
150 * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down
151 * so that it will fit.
/* XOR-fold the high half of a 64-bit ino into the low half when needed. */
static ino_t fuse_squash_ino(u64 ino64)
{
	u64 folded = ino64;

	if (sizeof(ino_t) < sizeof(u64))
		folded ^= ino64 >> (sizeof(u64) - sizeof(ino_t)) * 8;
	return (ino_t) folded;
}
/*
 * Copy attributes received from the userspace server into the inode.
 * Caller holds fc->lock (called from fuse_change_attributes()).  Bumps
 * the connection-wide attr_version so racing replies can be ordered.
 */
void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	fi->attr_version = ++fc->attr_version;
	fi->i_time = attr_valid;

	inode->i_ino = fuse_squash_ino(attr->ino);
	/* keep the file-type bits; only permission bits come from the server */
	inode->i_mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
	set_nlink(inode, attr->nlink);
	inode->i_uid = make_kuid(&init_user_ns, attr->uid);
	inode->i_gid = make_kgid(&init_user_ns, attr->gid);
	inode->i_blocks = attr->blocks;

	/* clamp nanosecond fields from the (untrusted) server to a valid range */
	attr->atimensec = min_t(u32, attr->atimensec, NSEC_PER_SEC - 1);
	attr->mtimensec = min_t(u32, attr->mtimensec, NSEC_PER_SEC - 1);
	attr->ctimensec = min_t(u32, attr->ctimensec, NSEC_PER_SEC - 1);

	inode->i_atime.tv_sec = attr->atime;
	inode->i_atime.tv_nsec = attr->atimensec;
	/* mtime from server may be stale due to local buffered write */
	if (!fc->writeback_cache || !S_ISREG(inode->i_mode)) {
		inode->i_mtime.tv_sec = attr->mtime;
		inode->i_mtime.tv_nsec = attr->mtimensec;
		inode->i_ctime.tv_sec = attr->ctime;
		inode->i_ctime.tv_nsec = attr->ctimensec;

	if (attr->blksize != 0)
		inode->i_blkbits = ilog2(attr->blksize);
		inode->i_blkbits = inode->i_sb->s_blocksize_bits;

	/*
	 * Don't set the sticky bit in i_mode, unless we want the VFS
	 * to check permissions. This prevents failures due to the
	 * check in may_delete().
	 */
	fi->orig_i_mode = inode->i_mode;
	if (!fc->default_permissions)
		inode->i_mode &= ~S_ISVTX;

	fi->orig_ino = attr->ino;
/*
 * Apply attributes from a server reply under fc->lock.  Stale updates
 * (older attr_version, or inode in the middle of a size change) are
 * discarded.  Afterwards the page cache is truncated/invalidated when
 * the size or mtime changed behind our back.
 */
void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
			    u64 attr_valid, u64 attr_version)
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool is_wb = fc->writeback_cache;
	struct timespec old_mtime;

	spin_lock(&fc->lock);
	/* drop the update if a newer one was already applied */
	if ((attr_version != 0 && fi->attr_version > attr_version) ||
	    test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
		spin_unlock(&fc->lock);

	old_mtime = inode->i_mtime;
	fuse_change_attributes_common(inode, attr, attr_valid);

	oldsize = inode->i_size;
	/*
	 * In case of writeback_cache enabled, the cached writes beyond EOF
	 * extend local i_size without keeping userspace server in sync. So,
	 * attr->size coming from server can be stale. We cannot trust it.
	 */
	if (!is_wb || !S_ISREG(inode->i_mode))
		i_size_write(inode, attr->size);
	spin_unlock(&fc->lock);

	if (!is_wb && S_ISREG(inode->i_mode)) {
		if (oldsize != attr->size) {
			truncate_pagecache(inode, attr->size);
		} else if (fc->auto_inval_data) {
			struct timespec new_mtime = {
				.tv_sec = attr->mtime,
				.tv_nsec = attr->mtimensec,
			 * Auto inval mode also checks and invalidates if mtime
			if (!timespec_equal(&old_mtime, &new_mtime))
			invalidate_inode_pages2(inode->i_mapping);
/*
 * First-time setup of a freshly looked-up inode: copy basic attributes
 * and install the type-specific operations (regular/dir/symlink/special).
 */
static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
	inode->i_mode = attr->mode & S_IFMT;
	inode->i_size = attr->size;
	inode->i_mtime.tv_sec = attr->mtime;
	inode->i_mtime.tv_nsec = attr->mtimensec;
	inode->i_ctime.tv_sec = attr->ctime;
	inode->i_ctime.tv_nsec = attr->ctimensec;
	if (S_ISREG(inode->i_mode)) {
		fuse_init_common(inode);
		fuse_init_file_inode(inode);
	} else if (S_ISDIR(inode->i_mode))
		fuse_init_dir(inode);
	else if (S_ISLNK(inode->i_mode))
		fuse_init_symlink(inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
		 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		fuse_init_common(inode);
		init_special_inode(inode, inode->i_mode,
				   new_decode_dev(attr->rdev));
/* iget5 comparison callback: match an inode by its FUSE node id. */
int fuse_inode_eq(struct inode *inode, void *_nodeidp)
	u64 nodeid = *(u64 *) _nodeidp;
	if (get_node_id(inode) == nodeid)

/* iget5 init callback: stamp the node id onto a newly created inode. */
static int fuse_inode_set(struct inode *inode, void *_nodeidp)
	u64 nodeid = *(u64 *) _nodeidp;
	get_fuse_inode(inode)->nodeid = nodeid;
/*
 * Look up (or create) the in-core inode for a FUSE node id and apply the
 * attributes from the server.  A cached inode whose file type changed on
 * the server is marked bad so pending I/O on the stale inode fails.
 */
struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
			int generation, struct fuse_attr *attr,
			u64 attr_valid, u64 attr_version)
	struct fuse_inode *fi;
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	inode = iget5_locked(sb, nodeid, fuse_inode_eq, fuse_inode_set, &nodeid);

	if ((inode->i_state & I_NEW)) {
		inode->i_flags |= S_NOATIME;
		/* c/mtime are maintained locally only with writeback cache */
		if (!fc->writeback_cache || !S_ISREG(attr->mode))
			inode->i_flags |= S_NOCMTIME;
		inode->i_generation = generation;
		fuse_init_inode(inode, attr);
		unlock_new_inode(inode);
	} else if ((inode->i_mode ^ attr->mode) & S_IFMT) {
		/* Inode has changed type, any I/O on the old should fail */
		fuse_make_bad(inode);

	fi = get_fuse_inode(inode);
	spin_lock(&fc->lock);
	spin_unlock(&fc->lock);
	fuse_change_attributes(inode, attr, attr_valid, attr_version);
/*
 * Server-initiated invalidation: drop cached attributes/ACLs for the
 * given node and invalidate the page range [offset, offset+len).
 */
int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
			     loff_t offset, loff_t len)
	inode = ilookup5(sb, nodeid, fuse_inode_eq, &nodeid);
	fuse_invalidate_attr(inode);
	forget_all_cached_acls(inode);
	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len - 1) >> PAGE_SHIFT;
	invalidate_inode_pages2_range(inode->i_mapping,
/*
 * Serialize directory operations unless the server negotiated
 * FUSE_PARALLEL_DIROPS; returns whether the mutex was taken so the
 * caller can pass it to fuse_unlock_inode().
 */
bool fuse_lock_inode(struct inode *inode)
	if (!get_fuse_conn(inode)->parallel_dirops) {
		mutex_lock(&get_fuse_inode(inode)->mutex);

/* Release the per-inode mutex if fuse_lock_inode() actually took it. */
void fuse_unlock_inode(struct inode *inode, bool locked)
		mutex_unlock(&get_fuse_inode(inode)->mutex);

/* Super op: forced unmount — abort the connection, failing all requests. */
static void fuse_umount_begin(struct super_block *sb)
	fuse_abort_conn(get_fuse_conn_super(sb));
/*
 * Send the (pre-allocated) FUSE_DESTROY request synchronously at unmount,
 * but only if INIT completed.  FR_FORCE makes it go out even on an
 * aborted connection; it must not be treated as a background request.
 */
static void fuse_send_destroy(struct fuse_conn *fc)
	struct fuse_req *req = fc->destroy_req;
	if (req && fc->conn_init) {
		fc->destroy_req = NULL;
		req->in.h.opcode = FUSE_DESTROY;
		__set_bit(FR_FORCE, &req->flags);
		__clear_bit(FR_BACKGROUND, &req->flags);
		fuse_request_send(fc, req);
		fuse_put_request(fc, req);

/* Tear down the backing-dev-info, but only if fuse_bdi_init() ran. */
static void fuse_bdi_destroy(struct fuse_conn *fc)
	if (fc->bdi_initialized)
		bdi_destroy(&fc->bdi);
/*
 * Super op: detach the connection from the global list and the control
 * filesystem, then release its bdi.  fuse_mutex orders this against
 * concurrent mounts walking fuse_conn_list.
 */
static void fuse_put_super(struct super_block *sb)
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	mutex_lock(&fuse_mutex);
	list_del(&fc->entry);
	fuse_ctl_remove_conn(fc);
	mutex_unlock(&fuse_mutex);
	fuse_bdi_destroy(fc);
/* Translate a FUSE_STATFS reply into the kernel's kstatfs layout. */
static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr)
	stbuf->f_type = FUSE_SUPER_MAGIC;
	stbuf->f_bsize = attr->bsize;
	stbuf->f_frsize = attr->frsize;
	stbuf->f_blocks = attr->blocks;
	stbuf->f_bfree = attr->bfree;
	stbuf->f_bavail = attr->bavail;
	stbuf->f_files = attr->files;
	stbuf->f_ffree = attr->ffree;
	stbuf->f_namelen = attr->namelen;
	/* fsid is left zero */
/*
 * Super op: statfs.  Processes not allowed to access the mount get a
 * minimal fake answer (just the magic) instead of talking to the server.
 */
static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
	struct super_block *sb = dentry->d_sb;
	struct fuse_conn *fc = get_fuse_conn_super(sb);
	struct fuse_statfs_out outarg;

	if (!fuse_allow_current_process(fc)) {
		buf->f_type = FUSE_SUPER_MAGIC;

	memset(&outarg, 0, sizeof(outarg));
	args.in.h.opcode = FUSE_STATFS;
	args.in.h.nodeid = get_node_id(d_inode(dentry));
	args.out.numargs = 1;
	args.out.args[0].size = sizeof(outarg);
	args.out.args[0].value = &outarg;
	err = fuse_simple_request(fc, &args);
		convert_fuse_statfs(buf, &outarg.st);
	OPT_DEFAULT_PERMISSIONS,

/* Mount-option keywords recognized by parse_fuse_opt(). */
static const match_table_t tokens = {
	{OPT_ROOTMODE, "rootmode=%o"},
	{OPT_USER_ID, "user_id=%u"},
	{OPT_GROUP_ID, "group_id=%u"},
	{OPT_DEFAULT_PERMISSIONS, "default_permissions"},
	{OPT_ALLOW_OTHER, "allow_other"},
	{OPT_MAX_READ, "max_read=%u"},
	{OPT_BLKSIZE, "blksize=%u"},

/*
 * Parse an unsigned decimal option value.  match_int() can't be used
 * here because it would sign-overflow on values > INT_MAX.
 * NOTE(review): match_strdup() may return NULL — presumably handled by a
 * surrounding check; confirm against the full source.
 */
static int fuse_match_uint(substring_t *s, unsigned int *res)
	char *buf = match_strdup(s);
	err = kstrtouint(buf, 10, res);
/*
 * Parse the comma-separated mount option string into *d.  Returns
 * success only when all mandatory options (fd, rootmode, user_id,
 * group_id) were present.  blksize is accepted only for fuseblk mounts.
 */
static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
	memset(d, 0, sizeof(struct fuse_mount_data));
	d->blksize = FUSE_DEFAULT_BLKSIZE;

	while ((p = strsep(&opt, ",")) != NULL) {
		substring_t args[MAX_OPT_ARGS];

		token = match_token(p, tokens, args);
			if (match_int(&args[0], &value))
			/* rootmode is octal and must encode a valid file type */
			if (match_octal(&args[0], &value))
			if (!fuse_valid_type(value))
			d->rootmode_present = 1;
			if (fuse_match_uint(&args[0], &uv))
			/* map ids into the mounting user namespace */
			d->user_id = make_kuid(current_user_ns(), uv);
			if (!uid_valid(d->user_id))
			d->user_id_present = 1;
			if (fuse_match_uint(&args[0], &uv))
			d->group_id = make_kgid(current_user_ns(), uv);
			if (!gid_valid(d->group_id))
			d->group_id_present = 1;
		case OPT_DEFAULT_PERMISSIONS:
			d->default_permissions = 1;
		case OPT_ALLOW_OTHER:
			if (match_int(&args[0], &value))
			/* blksize only makes sense for block-device mounts */
			if (!is_bdev || match_int(&args[0], &value))
	if (!d->fd_present || !d->rootmode_present ||
	    !d->user_id_present || !d->group_id_present)
/* Super op: emit the active mount options for /proc/mounts. */
static int fuse_show_options(struct seq_file *m, struct dentry *root)
	struct super_block *sb = root->d_sb;
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	seq_printf(m, ",user_id=%u", from_kuid_munged(&init_user_ns, fc->user_id));
	seq_printf(m, ",group_id=%u", from_kgid_munged(&init_user_ns, fc->group_id));
	if (fc->default_permissions)
		seq_puts(m, ",default_permissions");
		seq_puts(m, ",allow_other");
	/* ~0 is the "unlimited" default, so only show explicit limits */
	if (fc->max_read != ~0)
		seq_printf(m, ",max_read=%u", fc->max_read);
	if (sb->s_bdev && sb->s_blocksize != FUSE_DEFAULT_BLKSIZE)
		seq_printf(m, ",blksize=%lu", sb->s_blocksize);
/* Initialize the input queue (requests waiting to be read by the server). */
static void fuse_iqueue_init(struct fuse_iqueue *fiq)
	memset(fiq, 0, sizeof(struct fuse_iqueue));
	init_waitqueue_head(&fiq->waitq);
	INIT_LIST_HEAD(&fiq->pending);
	INIT_LIST_HEAD(&fiq->interrupts);
	/* empty singly-linked forget list: tail points at the head */
	fiq->forget_list_tail = &fiq->forget_list_head;

/* Initialize a per-device processing queue (in-flight requests). */
static void fuse_pqueue_init(struct fuse_pqueue *fpq)
	memset(fpq, 0, sizeof(struct fuse_pqueue));
	spin_lock_init(&fpq->lock);
	INIT_LIST_HEAD(&fpq->processing);
	INIT_LIST_HEAD(&fpq->io);
/*
 * Initialize a connection object to its defaults.  Refcount starts at 1
 * (owned by the mount); limits may later be raised by the INIT reply.
 */
void fuse_conn_init(struct fuse_conn *fc)
	memset(fc, 0, sizeof(*fc));
	spin_lock_init(&fc->lock);
	init_rwsem(&fc->killsb);
	atomic_set(&fc->count, 1);
	atomic_set(&fc->dev_count, 1);
	init_waitqueue_head(&fc->blocked_waitq);
	init_waitqueue_head(&fc->reserved_req_waitq);
	fuse_iqueue_init(&fc->iq);
	INIT_LIST_HEAD(&fc->bg_queue);
	INIT_LIST_HEAD(&fc->entry);
	INIT_LIST_HEAD(&fc->devices);
	atomic_set(&fc->num_waiting, 0);
	fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND;
	fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD;
	fc->polled_files = RB_ROOT;
	/* start at 1 so 0 can mean "no version" in attr updates */
	fc->attr_version = 1;
	get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
EXPORT_SYMBOL_GPL(fuse_conn_init);

/* Drop a reference; on the last put, free the pending destroy request. */
void fuse_conn_put(struct fuse_conn *fc)
	if (atomic_dec_and_test(&fc->count)) {
		fuse_request_free(fc->destroy_req);
EXPORT_SYMBOL_GPL(fuse_conn_put);

/* Take an extra reference on the connection. */
struct fuse_conn *fuse_conn_get(struct fuse_conn *fc)
	atomic_inc(&fc->count);
EXPORT_SYMBOL_GPL(fuse_conn_get);
/*
 * Build the root inode from the mount-supplied rootmode.  Node id 1 is
 * FUSE_ROOT_ID by protocol definition.
 */
static struct inode *fuse_get_root_inode(struct super_block *sb, unsigned mode)
	struct fuse_attr attr;
	memset(&attr, 0, sizeof(attr));
	attr.ino = FUSE_ROOT_ID;
	return fuse_iget(sb, 1, 0, &attr, 0, 0);
/* Decoded NFS file handle: node id + generation. */
struct fuse_inode_handle {

/*
 * Turn a decoded file handle into a dentry.  If the inode isn't cached,
 * ask the server to look up "." under the node id — this only works when
 * the server advertised export_support.  Stale handles (wrong id or
 * generation) are rejected.
 */
static struct dentry *fuse_get_dentry(struct super_block *sb,
				      struct fuse_inode_handle *handle)
	struct fuse_conn *fc = get_fuse_conn_super(sb);
	struct dentry *entry;

	if (handle->nodeid == 0)

	inode = ilookup5(sb, handle->nodeid, fuse_inode_eq, &handle->nodeid);
		struct fuse_entry_out outarg;
		const struct qstr name = QSTR_INIT(".", 1);

		if (!fc->export_support)
		err = fuse_lookup_name(sb, handle->nodeid, &name, &outarg,
		if (err && err != -ENOENT)
	if (get_node_id(inode) != handle->nodeid)
	if (inode->i_generation != handle->generation)
	entry = d_obtain_alias(inode);
	/* non-root aliases must be revalidated on next lookup */
	if (!IS_ERR(entry) && get_node_id(inode) != FUSE_ROOT_ID)
		fuse_invalidate_entry_cache(entry);
/*
 * Export op: encode an NFS file handle.  Layout is three u32s per inode
 * (nodeid hi, nodeid lo, generation); types 0x81/0x82 mark handles
 * without/with parent information.
 */
static int fuse_encode_fh(struct inode *inode, u32 *fh, int *max_len,
			  struct inode *parent)
	int len = parent ? 6 : 3;

	if (*max_len < len) {
		return FILEID_INVALID;

	nodeid = get_fuse_inode(inode)->nodeid;
	generation = inode->i_generation;
	fh[0] = (u32)(nodeid >> 32);
	fh[1] = (u32)(nodeid & 0xffffffff);

		nodeid = get_fuse_inode(parent)->nodeid;
		generation = parent->i_generation;
		fh[3] = (u32)(nodeid >> 32);
		fh[4] = (u32)(nodeid & 0xffffffff);

	return parent ? 0x82 : 0x81;
/* Export op: decode the child triple from a handle (see fuse_encode_fh). */
static struct dentry *fuse_fh_to_dentry(struct super_block *sb,
					struct fid *fid, int fh_len, int fh_type)
	struct fuse_inode_handle handle;

	if ((fh_type != 0x81 && fh_type != 0x82) || fh_len < 3)

	handle.nodeid = (u64) fid->raw[0] << 32;
	handle.nodeid |= (u64) fid->raw[1];
	handle.generation = fid->raw[2];
	return fuse_get_dentry(sb, &handle);

/* Export op: decode the parent triple; only present in 0x82 handles. */
static struct dentry *fuse_fh_to_parent(struct super_block *sb,
					struct fid *fid, int fh_len, int fh_type)
	struct fuse_inode_handle parent;

	if (fh_type != 0x82 || fh_len < 6)

	parent.nodeid = (u64) fid->raw[3] << 32;
	parent.nodeid |= (u64) fid->raw[4];
	parent.generation = fid->raw[5];
	return fuse_get_dentry(sb, &parent);
/*
 * Export op: find a child's parent by asking the server to look up ".."
 * — requires export_support from the server; otherwise the handle is
 * treated as stale.
 */
static struct dentry *fuse_get_parent(struct dentry *child)
	struct inode *child_inode = d_inode(child);
	struct fuse_conn *fc = get_fuse_conn(child_inode);
	struct dentry *parent;
	struct fuse_entry_out outarg;
	const struct qstr name = QSTR_INIT("..", 2);

	if (!fc->export_support)
		return ERR_PTR(-ESTALE);

	err = fuse_lookup_name(child_inode->i_sb, get_node_id(child_inode),
			       &name, &outarg, &inode);
		return ERR_PTR(-ESTALE);

	parent = d_obtain_alias(inode);
	/* non-root aliases must be revalidated on next lookup */
	if (!IS_ERR(parent) && get_node_id(inode) != FUSE_ROOT_ID)
		fuse_invalidate_entry_cache(parent);
/* NFS export hooks (handles encode/decode, parent lookup). */
static const struct export_operations fuse_export_operations = {
	.fh_to_dentry = fuse_fh_to_dentry,
	.fh_to_parent = fuse_fh_to_parent,
	.encode_fh = fuse_encode_fh,
	.get_parent = fuse_get_parent,

/* Superblock operations table installed in fuse_fill_super(). */
static const struct super_operations fuse_super_operations = {
	.alloc_inode = fuse_alloc_inode,
	.destroy_inode = fuse_destroy_inode,
	.evict_inode = fuse_evict_inode,
	.write_inode = fuse_write_inode,
	.drop_inode = generic_delete_inode,
	.remount_fs = fuse_remount_fs,
	.put_super = fuse_put_super,
	.umount_begin = fuse_umount_begin,
	.statfs = fuse_statfs,
	.show_options = fuse_show_options,
/*
 * Derive a sane default limit from total RAM (roughly 1/2^13 of memory,
 * in units of request structs) and cap it below 2^16.
 */
static void sanitize_global_limit(unsigned *limit)
	*limit = ((totalram_pages << PAGE_SHIFT) >> 13) /
		 sizeof(struct fuse_req);

	if (*limit >= 1 << 16)
		*limit = (1 << 16) - 1;

/* module_param_call setter: parse the value, then re-sanitize it. */
static int set_global_limit(const char *val, struct kernel_param *kp)
	rv = param_set_uint(val, kp);
	sanitize_global_limit((unsigned *)kp->arg);
/*
 * Apply the background-request limits requested by the server in the
 * INIT reply; unprivileged servers are clamped to the global maxima.
 */
static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg)
	int cap_sys_admin = capable(CAP_SYS_ADMIN);

	sanitize_global_limit(&max_user_bgreq);
	sanitize_global_limit(&max_user_congthresh);

	if (arg->max_background) {
		fc->max_background = arg->max_background;
		if (!cap_sys_admin && fc->max_background > max_user_bgreq)
			fc->max_background = max_user_bgreq;
	if (arg->congestion_threshold) {
		fc->congestion_threshold = arg->congestion_threshold;
		if (!cap_sys_admin &&
		    fc->congestion_threshold > max_user_congthresh)
			fc->congestion_threshold = max_user_congthresh;
/*
 * Request-end callback for FUSE_INIT: negotiate the protocol with the
 * server's reply.  Feature flags are honored only for minor >= 6; older
 * servers get conservative defaults.  Finally the connection is marked
 * initialized and waiters are released.
 */
static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
	struct fuse_init_out *arg = &req->misc.init_out;

	/* a version mismatch or error leaves the connection unusable */
	if (req->out.h.error || arg->major != FUSE_KERNEL_VERSION)
		unsigned long ra_pages;

		process_init_limits(fc, arg);

		if (arg->minor >= 6) {
			ra_pages = arg->max_readahead / PAGE_SIZE;
			if (arg->flags & FUSE_ASYNC_READ)
			if (!(arg->flags & FUSE_POSIX_LOCKS))
			if (arg->minor >= 17) {
				if (!(arg->flags & FUSE_FLOCK_LOCKS))
				if (!(arg->flags & FUSE_POSIX_LOCKS))
			if (arg->flags & FUSE_ATOMIC_O_TRUNC)
				fc->atomic_o_trunc = 1;
			if (arg->minor >= 9) {
				/* LOOKUP has dependency on proto version */
				if (arg->flags & FUSE_EXPORT_SUPPORT)
					fc->export_support = 1;
			if (arg->flags & FUSE_BIG_WRITES)
			if (arg->flags & FUSE_DONT_MASK)
			if (arg->flags & FUSE_AUTO_INVAL_DATA)
				fc->auto_inval_data = 1;
			if (arg->flags & FUSE_DO_READDIRPLUS) {
				fc->do_readdirplus = 1;
				if (arg->flags & FUSE_READDIRPLUS_AUTO)
					fc->readdirplus_auto = 1;
			if (arg->flags & FUSE_ASYNC_DIO)
			if (arg->flags & FUSE_WRITEBACK_CACHE)
				fc->writeback_cache = 1;
			if (arg->flags & FUSE_PARALLEL_DIROPS)
				fc->parallel_dirops = 1;
			if (arg->flags & FUSE_HANDLE_KILLPRIV)
				fc->handle_killpriv = 1;
			if (arg->time_gran && arg->time_gran <= 1000000000)
				fc->sb->s_time_gran = arg->time_gran;
			if ((arg->flags & FUSE_POSIX_ACL)) {
				/* POSIX ACLs imply in-kernel permission checks */
				fc->default_permissions = 1;
				fc->sb->s_xattr = fuse_acl_xattr_handlers;
		ra_pages = fc->max_read / PAGE_SIZE;
	fc->bdi.ra_pages = min(fc->bdi.ra_pages, ra_pages);
	fc->minor = arg->minor;
	/* pre-7.5 servers have a fixed 4k max_write; enforce 4k minimum */
	fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
	fc->max_write = max_t(unsigned, 4096, fc->max_write);
	fuse_set_initialized(fc);
	wake_up_all(&fc->blocked_waitq);
/*
 * Send FUSE_INIT as a background request, advertising every feature this
 * kernel supports; process_init_reply() records what the server accepts.
 */
static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
	struct fuse_init_in *arg = &req->misc.init_in;

	arg->major = FUSE_KERNEL_VERSION;
	arg->minor = FUSE_KERNEL_MINOR_VERSION;
	arg->max_readahead = fc->bdi.ra_pages * PAGE_SIZE;
	arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
		FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
		FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
		FUSE_FLOCK_LOCKS | FUSE_HAS_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
		FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
		FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT |
		FUSE_PARALLEL_DIROPS | FUSE_HANDLE_KILLPRIV | FUSE_POSIX_ACL;
	req->in.h.opcode = FUSE_INIT;
	req->in.args[0].size = sizeof(*arg);
	req->in.args[0].value = arg;
	req->out.numargs = 1;
	/* Variable length argument used for backward compatibility
	   with interface version < 7.5. Rest of init_out is zeroed
	   by do_get_request(), so a short reply is not a problem */
	req->out.args[0].size = sizeof(struct fuse_init_out);
	req->out.args[0].value = &req->misc.init_out;
	req->end = process_init_reply;
	fuse_request_send_background(fc, req);
/* fc->release callback for plain kmalloc'ed connections. */
static void fuse_free_conn(struct fuse_conn *fc)
	WARN_ON(!list_empty(&fc->devices));

/*
 * Set up the backing-dev-info for this connection: default readahead,
 * writeback handled by fuse itself, and a strict 1% dirty-memory cap so
 * one (possibly unresponsive) fuse fs cannot pin all dirty pages.
 */
static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
	fc->bdi.name = "fuse";
	fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
	/* fuse does it's own writeback accounting */
	fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB | BDI_CAP_STRICTLIMIT;

	err = bdi_init(&fc->bdi);
	fc->bdi_initialized = 1;
	/* fuseblk registers by device numbers; plain fuse by its char dev */
	err = bdi_register(&fc->bdi, NULL, "%u:%u-fuseblk",
			   MAJOR(fc->dev), MINOR(fc->dev));
	err = bdi_register_dev(&fc->bdi, fc->dev);

	/*
	 * For a single fuse filesystem use max 1% of dirty +
	 * writeback threshold.
	 * This gives about 1M of write buffer for memory maps on a
	 * machine with 1G and 10% dirty_ratio, which should be more
	 * Privileged users can raise it by writing to
	 * /sys/class/bdi/<bdi>/max_ratio
	 */
	bdi_set_max_ratio(&fc->bdi, 1);
/*
 * Allocate a per-device-clone structure, take a connection reference and
 * link it onto fc->devices under fc->lock.
 */
struct fuse_dev *fuse_dev_alloc(struct fuse_conn *fc)
	struct fuse_dev *fud;

	fud = kzalloc(sizeof(struct fuse_dev), GFP_KERNEL);
		fud->fc = fuse_conn_get(fc);
		fuse_pqueue_init(&fud->pq);
		spin_lock(&fc->lock);
		list_add_tail(&fud->entry, &fc->devices);
		spin_unlock(&fc->lock);
EXPORT_SYMBOL_GPL(fuse_dev_alloc);

/* Unlink a device clone from the connection and free it. */
void fuse_dev_free(struct fuse_dev *fud)
	struct fuse_conn *fc = fud->fc;

	spin_lock(&fc->lock);
	list_del(&fud->entry);
	spin_unlock(&fc->lock);
EXPORT_SYMBOL_GPL(fuse_dev_free);
/*
 * Fill a fuse superblock at mount time: parse options, validate the
 * /dev/fuse file, create the connection, bdi, root inode and the INIT /
 * DESTROY requests, then publish everything (under fuse_mutex) and kick
 * off FUSE_INIT.  The error paths at the bottom unwind in reverse order.
 */
static int fuse_fill_super(struct super_block *sb, void *data, int silent)
	struct fuse_dev *fud;
	struct fuse_conn *fc;
	struct fuse_mount_data d;
	struct dentry *root_dentry;
	struct fuse_req *init_req;
	int is_bdev = sb->s_bdev != NULL;

	/* mandatory locking is unsupported */
	if (sb->s_flags & MS_MANDLOCK)
	sb->s_flags &= ~(MS_NOSEC | MS_I_VERSION);
	if (!parse_fuse_opt(data, &d, is_bdev))
	if (!sb_set_blocksize(sb, d.blksize))
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = FUSE_SUPER_MAGIC;
	sb->s_op = &fuse_super_operations;
	sb->s_xattr = fuse_xattr_handlers;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_time_gran = 1;
	sb->s_export_op = &fuse_export_operations;
	/* the fd must really be /dev/fuse, opened in the initial userns */
	if ((file->f_op != &fuse_dev_operations) ||
	    (file->f_cred->user_ns != &init_user_ns))
	fc = kmalloc(sizeof(*fc), GFP_KERNEL);
	fc->release = fuse_free_conn;
	fud = fuse_dev_alloc(fc);
	fc->dev = sb->s_dev;
	err = fuse_bdi_init(fc, sb);
	sb->s_bdi = &fc->bdi;

	/* Handle umasking inside the fuse code */
	if (sb->s_flags & MS_POSIXACL)
	sb->s_flags |= MS_POSIXACL;
	fc->default_permissions = d.default_permissions;
	fc->allow_other = d.allow_other;
	fc->user_id = d.user_id;
	fc->group_id = d.group_id;
	fc->max_read = max_t(unsigned, 4096, d.max_read);

	/* Used by get_root_inode() */
	root = fuse_get_root_inode(sb, d.rootmode);
	sb->s_d_op = &fuse_root_dentry_operations;
	root_dentry = d_make_root(root);
	/* Root dentry doesn't have .d_revalidate */
	sb->s_d_op = &fuse_dentry_operations;
	init_req = fuse_request_alloc(0);
	__set_bit(FR_BACKGROUND, &init_req->flags);
	/* pre-allocate the DESTROY request so unmount cannot fail on OOM */
	fc->destroy_req = fuse_request_alloc(0);
	if (!fc->destroy_req)
		goto err_free_init_req;
	mutex_lock(&fuse_mutex);
	/* reject a device fd that is already bound to another mount */
	if (file->private_data)
	err = fuse_ctl_add_conn(fc);
	list_add_tail(&fc->entry, &fuse_conn_list);
	sb->s_root = root_dentry;
	file->private_data = fud;
	mutex_unlock(&fuse_mutex);
	 * atomic_dec_and_test() in fput() provides the necessary
	 * memory barrier for file->private_data to be visible on all
	fuse_send_init(fc, init_req);
	mutex_unlock(&fuse_mutex);
	fuse_request_free(init_req);
	fuse_bdi_destroy(fc);
	sb->s_fs_info = NULL;
/* Mount entry point for the plain "fuse" filesystem (no backing device). */
static struct dentry *fuse_mount(struct file_system_type *fs_type,
				 int flags, const char *dev_name,
	return mount_nodev(fs_type, flags, raw_data, fuse_fill_super);

/*
 * Common unmount teardown: tell the server DESTROY, abort and drain all
 * requests, then take killsb exclusively to fence reverse invalidations
 * that might still reference the superblock.
 */
static void fuse_sb_destroy(struct super_block *sb)
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	fuse_send_destroy(fc);
	fuse_abort_conn(fc);
	fuse_wait_aborted(fc);
	down_write(&fc->killsb);
	up_write(&fc->killsb);

/* kill_sb for the no-device variant. */
static void fuse_kill_sb_anon(struct super_block *sb)
	fuse_sb_destroy(sb);
	kill_anon_super(sb);
/* The "fuse" filesystem type — mounted on an anonymous device. */
static struct file_system_type fuse_fs_type = {
	.owner = THIS_MODULE,
	.fs_flags = FS_HAS_SUBTYPE,
	.mount = fuse_mount,
	.kill_sb = fuse_kill_sb_anon,
MODULE_ALIAS_FS("fuse");

/* Mount entry point for "fuseblk" — fuse backed by a real block device. */
static struct dentry *fuse_mount_blk(struct file_system_type *fs_type,
				     int flags, const char *dev_name,
	return mount_bdev(fs_type, flags, dev_name, raw_data, fuse_fill_super);

/* kill_sb for the block-device variant. */
static void fuse_kill_sb_blk(struct super_block *sb)
	fuse_sb_destroy(sb);
	kill_block_super(sb);

static struct file_system_type fuseblk_fs_type = {
	.owner = THIS_MODULE,
	.mount = fuse_mount_blk,
	.kill_sb = fuse_kill_sb_blk,
	.fs_flags = FS_REQUIRES_DEV | FS_HAS_SUBTYPE,
MODULE_ALIAS_FS("fuseblk");
/* Real fuseblk registration (CONFIG_BLOCK builds). */
static inline int register_fuseblk(void)
	return register_filesystem(&fuseblk_fs_type);

static inline void unregister_fuseblk(void)
	unregister_filesystem(&fuseblk_fs_type);

/*
 * NOTE(review): the pair below are presumably the no-op stubs from the
 * !CONFIG_BLOCK branch (the #ifdef/#else lines are elided here) —
 * confirm against the full source.
 */
static inline int register_fuseblk(void)

static inline void unregister_fuseblk(void)

/* Slab constructor: one-time inode initialization for the cache. */
static void fuse_inode_init_once(void *foo)
	struct inode *inode = foo;

	inode_init_once(inode);
/*
 * Create the inode slab and register both filesystem types; on partial
 * failure the tail of the function unwinds what was already registered.
 */
static int __init fuse_fs_init(void)
	fuse_inode_cachep = kmem_cache_create("fuse_inode",
					      sizeof(struct fuse_inode), 0,
					      SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT,
					      fuse_inode_init_once);
	if (!fuse_inode_cachep)

	err = register_fuseblk();
	err = register_filesystem(&fuse_fs_type);
	/* error unwind */
	unregister_fuseblk();
	kmem_cache_destroy(fuse_inode_cachep);

/* Undo fuse_fs_init(): unregister filesystems, then destroy the slab. */
static void fuse_fs_cleanup(void)
	unregister_filesystem(&fuse_fs_type);
	unregister_fuseblk();

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 */
	kmem_cache_destroy(fuse_inode_cachep);
/* /sys/fs/fuse kobject; holds the "connections" mount point. */
static struct kobject *fuse_kobj;

/* Create /sys/fs/fuse and /sys/fs/fuse/connections. */
static int fuse_sysfs_init(void)
	fuse_kobj = kobject_create_and_add("fuse", fs_kobj);

	err = sysfs_create_mount_point(fuse_kobj, "connections");
		goto out_fuse_unregister;

 out_fuse_unregister:
	kobject_put(fuse_kobj);

/* Remove the sysfs entries created by fuse_sysfs_init(). */
static void fuse_sysfs_cleanup(void)
	sysfs_remove_mount_point(fuse_kobj, "connections");
	kobject_put(fuse_kobj);
/*
 * Module init: bring up the filesystem types, /dev/fuse, sysfs and the
 * control filesystem in order, unwinding on failure; finally seed the
 * global background-request limits from available RAM.
 */
static int __init fuse_init(void)
	printk(KERN_INFO "fuse init (API version %i.%i)\n",
	       FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);

	INIT_LIST_HEAD(&fuse_conn_list);
	res = fuse_fs_init();
	res = fuse_dev_init();
		goto err_fs_cleanup;
	res = fuse_sysfs_init();
		goto err_dev_cleanup;
	res = fuse_ctl_init();
		goto err_sysfs_cleanup;
	sanitize_global_limit(&max_user_bgreq);
	sanitize_global_limit(&max_user_congthresh);
	/* error unwind */
	fuse_sysfs_cleanup();

/* Module exit: tear everything down in reverse of fuse_init(). */
static void __exit fuse_exit(void)
	printk(KERN_DEBUG "fuse exit\n");
	fuse_sysfs_cleanup();

module_init(fuse_init);
module_exit(fuse_exit);