/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
*/
#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/falloc.h>
#include <linux/uio.h>
static struct page **fuse_pages_alloc(unsigned int npages, gfp_t flags,
				      struct fuse_page_desc **desc)
{
	struct page **pages;

	pages = kzalloc(npages * (sizeof(struct page *) +
				  sizeof(struct fuse_page_desc)), flags);
	*desc = (void *) (pages + npages);

	return pages;
}
static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
			  int opcode, struct fuse_open_out *outargp)
{
	struct fuse_open_in inarg;
	FUSE_ARGS(args);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;
	args.opcode = opcode;
	args.nodeid = nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(*outargp);
	args.out_args[0].value = outargp;

	return fuse_simple_request(fc, &args);
}
struct fuse_release_args {
	struct fuse_args args;
	struct fuse_release_in inarg;
	struct inode *inode;
};
struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
	struct fuse_file *ff;

	ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL_ACCOUNT);
	if (unlikely(!ff))
		return NULL;

	ff->fc = fc;
	ff->release_args = kzalloc(sizeof(*ff->release_args),
				   GFP_KERNEL_ACCOUNT);
	if (!ff->release_args) {
		kfree(ff);
		return NULL;
	}

	INIT_LIST_HEAD(&ff->write_entry);
	mutex_init(&ff->readdir.lock);
	refcount_set(&ff->count, 1);
	RB_CLEAR_NODE(&ff->polled_node);
	init_waitqueue_head(&ff->poll_wait);

	ff->kh = atomic64_inc_return(&fc->khctr);

	return ff;
}
void fuse_file_free(struct fuse_file *ff)
{
	kfree(ff->release_args);
	mutex_destroy(&ff->readdir.lock);
	kfree(ff);
}

static struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	refcount_inc(&ff->count);
	return ff;
}
static void fuse_release_end(struct fuse_conn *fc, struct fuse_args *args,
			     int error)
{
	struct fuse_release_args *ra = container_of(args, typeof(*ra), args);

	iput(ra->inode);
	kfree(ra);
}

static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir)
{
	if (refcount_dec_and_test(&ff->count)) {
		struct fuse_args *args = &ff->release_args->args;

		if (isdir ? ff->fc->no_opendir : ff->fc->no_open) {
			/* Do nothing when client does not implement 'open' */
			fuse_release_end(ff->fc, args, 0);
		} else if (sync) {
			fuse_simple_request(ff->fc, args);
			fuse_release_end(ff->fc, args, 0);
		} else {
			args->end = fuse_release_end;
			if (fuse_simple_background(ff->fc, args,
						   GFP_KERNEL | __GFP_NOFAIL))
				fuse_release_end(ff->fc, args, -ENOTCONN);
		}
		kfree(ff);
	}
}
int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
	struct fuse_file *ff;
	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;

	ff = fuse_file_alloc(fc);

	/* Default for no-open */
	ff->open_flags = FOPEN_KEEP_CACHE | (isdir ? FOPEN_CACHE_DIR : 0);
	if (isdir ? !fc->no_opendir : !fc->no_open) {
		struct fuse_open_out outarg;

		err = fuse_send_open(fc, nodeid, file, opcode, &outarg);
			ff->open_flags = outarg.open_flags;
		} else if (err != -ENOSYS) {

		ff->open_flags &= ~FOPEN_DIRECT_IO;

	file->private_data = ff;

EXPORT_SYMBOL_GPL(fuse_do_open);
static void fuse_link_write_file(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_file *ff = file->private_data;
	/*
	 * file may be written through mmap, so chain it onto the
	 * inode's write_file list
	 */
	spin_lock(&fi->lock);
	if (list_empty(&ff->write_entry))
		list_add(&ff->write_entry, &fi->write_files);
	spin_unlock(&fi->lock);
}
void fuse_finish_open(struct inode *inode, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (ff->open_flags & FOPEN_STREAM)
		stream_open(inode, file);
	else if (ff->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);

	if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
		struct fuse_inode *fi = get_fuse_inode(inode);

		spin_lock(&fi->lock);
		fi->attr_version = atomic64_inc_return(&fc->attr_version);
		i_size_write(inode, 0);
		spin_unlock(&fi->lock);
		truncate_pagecache(inode, 0);
		fuse_invalidate_attr(inode);
		if (fc->writeback_cache)
			file_update_time(file);
	} else if (!(ff->open_flags & FOPEN_KEEP_CACHE)) {
		invalidate_inode_pages2(inode->i_mapping);
	}

	if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
		fuse_link_write_file(file);
}
int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
	struct fuse_conn *fc = get_fuse_conn(inode);
	bool is_wb_truncate = (file->f_flags & O_TRUNC) &&
			      fc->atomic_o_trunc &&

	if (fuse_is_bad(inode))

	err = generic_file_open(inode, file);

	if (is_wb_truncate) {
		fuse_set_nowrite(inode);

	err = fuse_do_open(fc, get_node_id(inode), file, isdir);
		fuse_finish_open(inode, file);

	if (is_wb_truncate) {
		fuse_release_nowrite(inode);
static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff,
				 int flags, int opcode)
{
	struct fuse_conn *fc = ff->fc;
	struct fuse_release_args *ra = ff->release_args;

	/* Inode is NULL on error path of fuse_create_open() */
	if (likely(fi)) {
		spin_lock(&fi->lock);
		list_del(&ff->write_entry);
		spin_unlock(&fi->lock);
	}
	spin_lock(&fc->lock);
	if (!RB_EMPTY_NODE(&ff->polled_node))
		rb_erase(&ff->polled_node, &fc->polled_files);
	spin_unlock(&fc->lock);

	wake_up_interruptible_all(&ff->poll_wait);

	ra->inarg.fh = ff->fh;
	ra->inarg.flags = flags;
	ra->args.in_numargs = 1;
	ra->args.in_args[0].size = sizeof(struct fuse_release_in);
	ra->args.in_args[0].value = &ra->inarg;
	ra->args.opcode = opcode;
	ra->args.nodeid = ff->nodeid;
	ra->args.force = true;
	ra->args.nocreds = true;
}
void fuse_release_common(struct file *file, bool isdir)
{
	struct fuse_inode *fi = get_fuse_inode(file_inode(file));
	struct fuse_file *ff = file->private_data;
	struct fuse_release_args *ra = ff->release_args;
	int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;

	fuse_prepare_release(fi, ff, file->f_flags, opcode);

	if (ff->flock) {
		ra->inarg.release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
		ra->inarg.lock_owner = fuse_lock_owner_id(ff->fc,
							  (fl_owner_t) file);
	}
	/* Hold inode until release is finished */
	ra->inode = igrab(file_inode(file));

	/*
	 * Normally this will send the RELEASE request, however if
	 * some asynchronous READ or WRITE requests are outstanding,
	 * the sending will be delayed.
	 *
	 * Make the release synchronous if this is a fuseblk mount,
	 * synchronous RELEASE is allowed (and desirable) in this case
	 * because the server can be trusted not to screw up.
	 */
	fuse_file_put(ff, ff->fc->destroy, isdir);
}
static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, false);
}

static int fuse_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	/* see fuse_vma_close() for !writeback_cache case */
	if (fc->writeback_cache)
		write_inode_now(inode, 1);

	fuse_release_common(file, false);

	/* return value is ignored by VFS */
	return 0;
}
void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff, int flags)
{
	WARN_ON(refcount_read(&ff->count) > 1);
	fuse_prepare_release(fi, ff, flags, FUSE_RELEASE);
	/*
	 * iput(NULL) is a no-op and since the refcount is 1 and everything's
	 * synchronous, we are fine with not doing igrab() here
	 */
	fuse_file_put(ff, true, false);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);
/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}
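
/*
 * Note on the construction above: this is a straight 32-round XTEA
 * encipherment of the 64-bit owner value, keyed by the connection's
 * random scramble_key.  The mapping is stable for the lifetime of the
 * connection, so equal owners still compare equal in userspace; the
 * cipher is simply never run in reverse.
 */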
struct fuse_writepage_args {
	struct fuse_io_args ia;
	struct list_head writepages_entry;
	struct list_head queue_entry;
	struct fuse_writepage_args *next;
	struct inode *inode;
};
static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi,
						       pgoff_t idx_from,
						       pgoff_t idx_to)
{
	struct fuse_writepage_args *wpa;

	list_for_each_entry(wpa, &fi->writepages, writepages_entry) {
		pgoff_t curr_index;

		WARN_ON(get_fuse_inode(wpa->inode) != fi);
		curr_index = wpa->ia.write.in.offset >> PAGE_SHIFT;
		if (idx_from < curr_index + wpa->ia.ap.num_pages &&
		    curr_index <= idx_to) {
			return wpa;
		}
	}
	return NULL;
}
/*
 * Check if any page in a range is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
				    pgoff_t idx_to)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool found;

	spin_lock(&fi->lock);
	found = fuse_find_writeback(fi, idx_from, idx_to);
	spin_unlock(&fi->lock);

	return found;
}

static inline bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	return fuse_range_is_writeback(inode, index, index);
}
/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static void fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
}

/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}
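
/*
 * Rough sketch of the FUSE_NOWRITE mechanism relied on above (the
 * real fuse_set_nowrite()/fuse_release_nowrite() live in dir.c; this
 * is a simplified illustration, not their exact code):
 *
 *	fuse_set_nowrite(inode):
 *		bias fi->writectr by a large negative constant so that
 *		fuse_flush_writepages() queues new requests instead of
 *		sending them, then wait until every in-flight write has
 *		completed
 *
 *	fuse_release_nowrite(inode):
 *		reset fi->writectr to zero and call
 *		fuse_flush_writepages() to send everything that queued
 *		up in the meantime
 *
 * Calling the two back to back therefore acts as a write barrier.
 */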
static int fuse_flush(struct file *file, fl_owner_t id)
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_flush_in inarg;

	if (fuse_is_bad(inode))

	err = write_inode_now(inode, 1);

	fuse_sync_writes(inode);

	err = filemap_check_errors(file->f_mapping);

	memset(&inarg, 0, sizeof(inarg));
	inarg.lock_owner = fuse_lock_owner_id(fc, id);
	args.opcode = FUSE_FLUSH;
	args.nodeid = get_node_id(inode);
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;

	err = fuse_simple_request(fc, &args);
	if (err == -ENOSYS) {
int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
		      int datasync, int opcode)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	FUSE_ARGS(args);
	struct fuse_fsync_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? FUSE_FSYNC_FDATASYNC : 0;
	args.opcode = opcode;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	return fuse_simple_request(fc, &args);
}
static int fuse_fsync(struct file *file, loff_t start, loff_t end,
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (fuse_is_bad(inode))

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = file_write_and_wait_range(file, start, end);

	fuse_sync_writes(inode);

	/*
	 * Due to the implementation of fuse writeback,
	 * file_write_and_wait_range() does not catch errors.
	 * We have to do this directly after fuse_sync_writes()
	 */
	err = file_check_and_advance_wb_err(file);

	err = sync_inode_metadata(inode, 1);

	err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNC);
	if (err == -ENOSYS) {
void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos,
			 size_t count, int opcode)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_args *args = &ia->ap.args;

	ia->read.in.fh = ff->fh;
	ia->read.in.offset = pos;
	ia->read.in.size = count;
	ia->read.in.flags = file->f_flags;
	args->opcode = opcode;
	args->nodeid = ff->nodeid;
	args->in_numargs = 1;
	args->in_args[0].size = sizeof(ia->read.in);
	args->in_args[0].value = &ia->read.in;
	args->out_argvar = true;
	args->out_numargs = 1;
	args->out_args[0].size = count;
}
static void fuse_release_user_pages(struct fuse_args_pages *ap,
				    bool should_dirty)
{
	unsigned int i;

	for (i = 0; i < ap->num_pages; i++) {
		if (should_dirty)
			set_page_dirty_lock(ap->pages[i]);
		put_page(ap->pages[i]);
	}
}
static void fuse_io_release(struct kref *kref)
{
	kfree(container_of(kref, struct fuse_io_priv, refcnt));
}
static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
{
	if (io->err)
		return io->err;

	if (io->bytes >= 0 && io->write)
		return -EIO;

	return io->bytes < 0 ? io->size : io->bytes;
}
/*
 * In case of short read, the caller sets 'pos' to the position of
 * actual end of fuse request in IO request. Otherwise, if bytes_requested
 * == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1.
 *
 * An example:
 * User requested DIO read of 64K. It was split into two 32K fuse requests,
 * both submitted asynchronously. The first of them was ACKed by userspace as
 * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
 * second request was ACKed as short, e.g. only 1K was read, resulting in
 * pos == 33K.
 *
 * Thus, when all fuse requests are completed, the minimal non-negative 'pos'
 * will be equal to the length of the longest contiguous fragment of
 * transferred data starting from the beginning of IO request.
 */
static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
{
	int left;

	spin_lock(&io->lock);
	if (err)
		io->err = io->err ? : err;
	else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
		io->bytes = pos;

	left = --io->reqs;
	if (!left && io->blocking)
		complete(io->done);
	spin_unlock(&io->lock);

	if (!left && !io->blocking) {
		ssize_t res = fuse_get_res_by_io(io);

		if (res >= 0) {
			struct inode *inode = file_inode(io->iocb->ki_filp);
			struct fuse_conn *fc = get_fuse_conn(inode);
			struct fuse_inode *fi = get_fuse_inode(inode);

			spin_lock(&fi->lock);
			fi->attr_version = atomic64_inc_return(&fc->attr_version);
			spin_unlock(&fi->lock);
		}

		io->iocb->ki_complete(io->iocb, res, 0);
	}

	kref_put(&io->refcnt, fuse_io_release);
}
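
/*
 * Tracing the 64K example from the comment before fuse_aio_complete()
 * through this function: the fully completed 32K request reports
 * pos == -1 and leaves io->bytes alone, while the short request
 * reports pos == 33K, which is stored in io->bytes.  When the last
 * request drops io->reqs to zero, fuse_get_res_by_io() returns 33K,
 * the contiguous prefix that was actually transferred.
 */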
static struct fuse_io_args *fuse_io_alloc(struct fuse_io_priv *io,
	struct fuse_io_args *ia;

	ia = kzalloc(sizeof(*ia), GFP_KERNEL);
		ia->ap.pages = fuse_pages_alloc(npages, GFP_KERNEL,
static void fuse_io_free(struct fuse_io_args *ia)
{
	kfree(ia->ap.pages);
	kfree(ia);
}
static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_args *args,
				  int err)
{
	struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
	struct fuse_io_priv *io = ia->io;
	ssize_t pos = -1;

	fuse_release_user_pages(&ia->ap, io->should_dirty);

	if (err) {
		/* Nothing */
	} else if (io->write) {
		if (ia->write.out.size > ia->write.in.size) {
			err = -EIO;
		} else if (ia->write.in.size != ia->write.out.size) {
			pos = ia->write.in.offset - io->offset +
				ia->write.out.size;
		}
	} else {
		u32 outsize = args->out_args[0].size;

		if (ia->read.in.size != outsize)
			pos = ia->read.in.offset - io->offset + outsize;
	}

	fuse_aio_complete(io, err, pos);
	fuse_io_free(ia);
}
static ssize_t fuse_async_req_send(struct fuse_conn *fc,
				   struct fuse_io_args *ia, size_t num_bytes)
{
	ssize_t err;
	struct fuse_io_priv *io = ia->io;

	spin_lock(&io->lock);
	kref_get(&io->refcnt);
	io->size += num_bytes;
	io->reqs++;
	spin_unlock(&io->lock);

	ia->ap.args.end = fuse_aio_complete_req;
	ia->ap.args.may_block = io->should_dirty;
	err = fuse_simple_background(fc, &ia->ap.args, GFP_KERNEL);
	if (err)
		fuse_aio_complete_req(fc, &ia->ap.args, err);

	return err;
}
static ssize_t fuse_send_read(struct fuse_io_args *ia, loff_t pos, size_t count,
			      fl_owner_t owner)
{
	struct file *file = ia->io->iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;

	fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
	if (owner != NULL) {
		ia->read.in.read_flags |= FUSE_READ_LOCKOWNER;
		ia->read.in.lock_owner = fuse_lock_owner_id(fc, owner);
	}

	if (ia->io->async)
		return fuse_async_req_send(fc, ia, count);

	return fuse_simple_request(fc, &ia->ap.args);
}
static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fi->lock);
	if (attr_ver == fi->attr_version && size < inode->i_size &&
	    !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
		fi->attr_version = atomic64_inc_return(&fc->attr_version);
		i_size_write(inode, size);
	}
	spin_unlock(&fi->lock);
}
static void fuse_short_read(struct inode *inode, u64 attr_ver, size_t num_read,
			    struct fuse_args_pages *ap)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (fc->writeback_cache) {
		/*
		 * A hole in a file. Some data after the hole are in page cache,
		 * but have not reached the client fs yet. So, the hole is not
		 * present there.
		 */
		int i;
		int start_idx = num_read >> PAGE_SHIFT;
		size_t off = num_read & (PAGE_SIZE - 1);

		for (i = start_idx; i < ap->num_pages; i++) {
			zero_user_segment(ap->pages[i], off, PAGE_SIZE);
			off = 0;
		}
	} else {
		loff_t pos = page_offset(ap->pages[0]) + num_read;
		fuse_read_update_size(inode, pos, attr_ver);
	}
}
static int fuse_do_readpage(struct file *file, struct page *page)
	struct inode *inode = page->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	loff_t pos = page_offset(page);
	struct fuse_page_desc desc = { .length = PAGE_SIZE };
	struct fuse_io_args ia = {
		.ap.args.page_zeroing = true,
		.ap.args.out_pages = true,

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	attr_ver = fuse_get_attr_version(fc);

	/* Don't overflow end offset */
	if (pos + (desc.length - 1) == LLONG_MAX)

	fuse_read_args_fill(&ia, file, pos, desc.length, FUSE_READ);
	res = fuse_simple_request(fc, &ia.ap.args);

	/*
	 * Short read means EOF. If file size is larger, truncate it
	 */
	if (res < desc.length)
		fuse_short_read(inode, attr_ver, res, &ia.ap);

	SetPageUptodate(page);
static int fuse_readpage(struct file *file, struct page *page)
	struct inode *inode = page->mapping->host;

	if (fuse_is_bad(inode))

	err = fuse_do_readpage(file, page);
	fuse_invalidate_atime(inode);
static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_args *args,
	struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
	struct fuse_args_pages *ap = &ia->ap;
	size_t count = ia->read.in.size;
	size_t num_read = args->out_args[0].size;
	struct address_space *mapping = NULL;

	for (i = 0; mapping == NULL && i < ap->num_pages; i++)
		mapping = ap->pages[i]->mapping;

		struct inode *inode = mapping->host;

		/*
		 * Short read means EOF. If file size is larger, truncate it
		 */
		if (!err && num_read < count)
			fuse_short_read(inode, ia->read.attr_ver, num_read, ap);

		fuse_invalidate_atime(inode);

	for (i = 0; i < ap->num_pages; i++) {
		struct page *page = ap->pages[i];

			SetPageUptodate(page);

		fuse_file_put(ia->ff, false, false);
static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_args_pages *ap = &ia->ap;
	loff_t pos = page_offset(ap->pages[0]);
	size_t count = ap->num_pages << PAGE_SHIFT;
	ssize_t res;
	int err;

	ap->args.out_pages = true;
	ap->args.page_zeroing = true;
	ap->args.page_replace = true;

	/* Don't overflow end offset */
	if (pos + (count - 1) == LLONG_MAX) {
		count--;
		ap->descs[ap->num_pages - 1].length--;
	}
	WARN_ON((loff_t) (pos + count) < 0);

	fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
	ia->read.attr_ver = fuse_get_attr_version(fc);
	if (fc->async_read) {
		ia->ff = fuse_file_get(ff);
		ap->args.end = fuse_readpages_end;
		err = fuse_simple_background(fc, &ap->args, GFP_KERNEL);
		if (!err)
			return;
	} else {
		res = fuse_simple_request(fc, &ap->args);
		err = res < 0 ? res : 0;
	}
	fuse_readpages_end(fc, &ap->args, err);
}
struct fuse_fill_data {
	struct fuse_io_args *ia;
	struct file *file;
	struct inode *inode;
	unsigned int nr_pages;
	unsigned int max_pages;
};
static int fuse_readpages_fill(void *_data, struct page *page)
	struct fuse_fill_data *data = _data;
	struct fuse_io_args *ia = data->ia;
	struct fuse_args_pages *ap = &ia->ap;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_wait_on_page_writeback(inode, page->index);

	    (ap->num_pages == fc->max_pages ||
	     (ap->num_pages + 1) * PAGE_SIZE > fc->max_read ||
	     ap->pages[ap->num_pages - 1]->index + 1 != page->index)) {
		data->max_pages = min_t(unsigned int, data->nr_pages,
		fuse_send_readpages(ia, data->file);
		data->ia = ia = fuse_io_alloc(NULL, data->max_pages);

	if (WARN_ON(ap->num_pages >= data->max_pages)) {

	ap->pages[ap->num_pages] = page;
	ap->descs[ap->num_pages].length = PAGE_SIZE;
static int fuse_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_data data;

	if (fuse_is_bad(inode))

	data.nr_pages = nr_pages;
	data.max_pages = min_t(unsigned int, nr_pages, fc->max_pages);
	data.ia = fuse_io_alloc(NULL, data.max_pages);

	err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
		if (data.ia->ap.num_pages)
			fuse_send_readpages(data.ia, file);
		fuse_io_free(data.ia);
static ssize_t fuse_cache_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * In auto invalidate mode, always update attributes on read.
	 * Otherwise, only update if we attempt to read past EOF (to ensure
	 * i_size is up to date).
	 */
	if (fc->auto_inval_data ||
	    (iocb->ki_pos + iov_iter_count(to) > i_size_read(inode))) {
		int err;

		err = fuse_update_attributes(inode, iocb->ki_filp);
		if (err)
			return err;
	}

	return generic_file_read_iter(iocb, to);
}
static void fuse_write_args_fill(struct fuse_io_args *ia, struct fuse_file *ff,
				 loff_t pos, size_t count)
{
	struct fuse_args *args = &ia->ap.args;

	ia->write.in.fh = ff->fh;
	ia->write.in.offset = pos;
	ia->write.in.size = count;
	args->opcode = FUSE_WRITE;
	args->nodeid = ff->nodeid;
	args->in_numargs = 2;
	if (ff->fc->minor < 9)
		args->in_args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		args->in_args[0].size = sizeof(ia->write.in);
	args->in_args[0].value = &ia->write.in;
	args->in_args[1].size = count;
	args->out_numargs = 1;
	args->out_args[0].size = sizeof(ia->write.out);
	args->out_args[0].value = &ia->write.out;
}
static unsigned int fuse_write_flags(struct kiocb *iocb)
{
	unsigned int flags = iocb->ki_filp->f_flags;

	if (iocb->ki_flags & IOCB_DSYNC)
		flags |= O_DSYNC;
	if (iocb->ki_flags & IOCB_SYNC)
		flags |= O_SYNC;

	return flags;
}
static ssize_t fuse_send_write(struct fuse_io_args *ia, loff_t pos,
			       size_t count, fl_owner_t owner)
{
	struct kiocb *iocb = ia->io->iocb;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_write_in *inarg = &ia->write.in;
	ssize_t err;

	fuse_write_args_fill(ia, ff, pos, count);
	inarg->flags = fuse_write_flags(iocb);
	if (owner != NULL) {
		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}

	if (ia->io->async)
		return fuse_async_req_send(fc, ia, count);

	err = fuse_simple_request(fc, &ia->ap.args);
	if (!err && ia->write.out.size > count)
		err = -EIO;

	return err ?: ia->write.out.size;
}
bool fuse_write_update_size(struct inode *inode, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool ret = false;

	spin_lock(&fi->lock);
	fi->attr_version = atomic64_inc_return(&fc->attr_version);
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		ret = true;
	}
	spin_unlock(&fi->lock);

	return ret;
}
static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
				     struct kiocb *iocb, struct inode *inode,
				     loff_t pos, size_t count)
	struct fuse_args_pages *ap = &ia->ap;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	unsigned int offset, i;

	for (i = 0; i < ap->num_pages; i++)
		fuse_wait_on_page_writeback(inode, ap->pages[i]->index);

	fuse_write_args_fill(ia, ff, pos, count);
	ia->write.in.flags = fuse_write_flags(iocb);

	err = fuse_simple_request(fc, &ap->args);
	if (!err && ia->write.out.size > count)

	short_write = ia->write.out.size < count;
	offset = ap->descs[0].offset;
	count = ia->write.out.size;
	for (i = 0; i < ap->num_pages; i++) {
		struct page *page = ap->pages[i];

			ClearPageUptodate(page);

			if (count >= PAGE_SIZE - offset)
				count -= PAGE_SIZE - offset;

					ClearPageUptodate(page);

		if (ia->write.page_locked && (i == ap->num_pages - 1))
static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
				     struct address_space *mapping,
				     struct iov_iter *ii, loff_t pos,
				     unsigned int max_pages)
	struct fuse_args_pages *ap = &ia->ap;
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_SIZE - 1);

	ap->args.in_pages = true;
	ap->descs[0].offset = offset;

		pgoff_t index = pos >> PAGE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

		if (iov_iter_fault_in_readable(ii, bytes))

		page = grab_cache_page_write_begin(mapping, index, 0);

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
		flush_dcache_page(page);

		iov_iter_advance(ii, tmp);

			bytes = min(bytes, iov_iter_single_seg_count(ii));

		ap->pages[ap->num_pages] = page;
		ap->descs[ap->num_pages].length = tmp;

		if (offset == PAGE_SIZE)

		/* If we copied full page, mark it uptodate */
		if (tmp == PAGE_SIZE)
			SetPageUptodate(page);

		if (PageUptodate(page)) {
			ia->write.page_locked = true;

		if (!fc->big_writes)
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 ap->num_pages < max_pages && offset == 0);

	return count > 0 ? count : err;
static inline unsigned int fuse_wr_pages(loff_t pos, size_t len,
					 unsigned int max_pages)
{
	return min_t(unsigned int,
		     ((pos + len - 1) >> PAGE_SHIFT) -
		     (pos >> PAGE_SHIFT) + 1,
		     max_pages);
}
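
/*
 * Worked example, assuming 4K pages: pos = 4090 and len = 10 touch
 * bytes 4090..4099, i.e. the tail of page 0 and the head of page 1,
 * and indeed (4099 >> PAGE_SHIFT) - (4090 >> PAGE_SHIFT) + 1 == 2.
 * The result is then clamped to max_pages.
 */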
static ssize_t fuse_perform_write(struct kiocb *iocb,
				  struct address_space *mapping,
				  struct iov_iter *ii, loff_t pos)
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	if (inode->i_size < pos + iov_iter_count(ii))
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

		struct fuse_io_args ia = {};
		struct fuse_args_pages *ap = &ia.ap;
		unsigned int nr_pages = fuse_wr_pages(pos, iov_iter_count(ii),

		ap->pages = fuse_pages_alloc(nr_pages, GFP_KERNEL, &ap->descs);

		count = fuse_fill_write_pages(&ia, mapping, ii, pos, nr_pages);

			err = fuse_send_write_pages(&ia, iocb, inode,
				size_t num_written = ia.write.out.size;

				/* break out of the loop on short write */
				if (num_written != count)
	} while (!err && iov_iter_count(ii));

		fuse_write_update_size(inode, pos);

	clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
	fuse_invalidate_attr(inode);

	return res > 0 ? res : err;
static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	ssize_t written = 0;
	ssize_t written_buffered = 0;
	struct inode *inode = mapping->host;

	if (get_fuse_conn(inode)->writeback_cache) {
		/* Update size (EOF optimization) and mode (SUID clearing) */
		err = fuse_update_attributes(mapping->host, file);

		return generic_file_write_iter(iocb, from);
	}

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	err = generic_write_checks(iocb, from);

	err = file_remove_privs(file);

	err = file_update_time(file);

	if (iocb->ki_flags & IOCB_DIRECT) {
		loff_t pos = iocb->ki_pos;
		written = generic_file_direct_write(iocb, from);
		if (written < 0 || !iov_iter_count(from))

		written_buffered = fuse_perform_write(iocb, mapping, from, pos);
		if (written_buffered < 0) {
			err = written_buffered;

		endbyte = pos + written_buffered - 1;

		err = filemap_write_and_wait_range(file->f_mapping, pos,

		invalidate_mapping_pages(file->f_mapping,
					 endbyte >> PAGE_SHIFT);

		written += written_buffered;
		iocb->ki_pos = pos + written_buffered;
		written = fuse_perform_write(iocb, mapping, from, iocb->ki_pos);
			iocb->ki_pos += written;

	current->backing_dev_info = NULL;
	inode_unlock(inode);

	written = generic_write_sync(iocb, written);

	return written ? written : err;
static inline void fuse_page_descs_length_init(struct fuse_page_desc *descs,
					       unsigned int index,
					       unsigned int nr_pages)
{
	unsigned int i;

	for (i = index; i < index + nr_pages; i++)
		descs[i].length = PAGE_SIZE - descs[i].offset;
}

static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
{
	return (unsigned long)ii->iov->iov_base + ii->iov_offset;
}

static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
					size_t max_size)
{
	return min(iov_iter_single_seg_count(ii), max_size);
}
static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
			       size_t *nbytesp, int write,
			       unsigned int max_pages)
	size_t nbytes = 0;  /* # bytes already packed in req */

	/* Special case for kernel I/O: can copy directly into the buffer */
	if (iov_iter_is_kvec(ii)) {
		unsigned long user_addr = fuse_get_user_addr(ii);
		size_t frag_size = fuse_get_frag_size(ii, *nbytesp);

			ap->args.in_args[1].value = (void *) user_addr;
			ap->args.out_args[0].value = (void *) user_addr;

		iov_iter_advance(ii, frag_size);
		*nbytesp = frag_size;

	while (nbytes < *nbytesp && ap->num_pages < max_pages) {
		ret = iov_iter_get_pages(ii, &ap->pages[ap->num_pages],
					 max_pages - ap->num_pages,

		iov_iter_advance(ii, ret);

		npages = (ret + PAGE_SIZE - 1) / PAGE_SIZE;

		ap->descs[ap->num_pages].offset = start;
		fuse_page_descs_length_init(ap->descs, ap->num_pages, npages);

		ap->num_pages += npages;
		ap->descs[ap->num_pages - 1].length -=
			(PAGE_SIZE - ret) & (PAGE_SIZE - 1);

	ap->args.user_pages = true;
		ap->args.in_pages = 1;
		ap->args.out_pages = 1;

	return ret < 0 ? ret : 0;
ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
		       loff_t *ppos, int flags)
	int write = flags & FUSE_DIO_WRITE;
	int cuse = flags & FUSE_DIO_CUSE;
	struct file *file = io->iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	size_t nmax = write ? fc->max_write : fc->max_read;
	size_t count = iov_iter_count(iter);
	pgoff_t idx_from = pos >> PAGE_SHIFT;
	pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT;
	struct fuse_io_args *ia;
	unsigned int max_pages;

	max_pages = iov_iter_npages(iter, fc->max_pages);
	ia = fuse_io_alloc(io, max_pages);

	if (!cuse && fuse_range_is_writeback(inode, idx_from, idx_to)) {
		fuse_sync_writes(inode);
		inode_unlock(inode);

	io->should_dirty = !write && iter_is_iovec(iter);
		fl_owner_t owner = current->files;
		size_t nbytes = min(count, nmax);

		err = fuse_get_user_pages(&ia->ap, iter, &nbytes, write,

			if (!capable(CAP_FSETID))
				ia->write.in.write_flags |= FUSE_WRITE_KILL_PRIV;

			nres = fuse_send_write(ia, pos, nbytes, owner);
			nres = fuse_send_read(ia, pos, nbytes, owner);

		if (!io->async || nres < 0) {
			fuse_release_user_pages(&ia->ap, io->should_dirty);

			iov_iter_revert(iter, nbytes);

		WARN_ON(nres > nbytes);

		if (nres != nbytes) {
			iov_iter_revert(iter, nbytes - nres);

			max_pages = iov_iter_npages(iter, fc->max_pages);
			ia = fuse_io_alloc(io, max_pages);

	return res > 0 ? res : err;
EXPORT_SYMBOL_GPL(fuse_direct_io);
static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
				  struct iov_iter *iter,
	struct inode *inode = file_inode(io->iocb->ki_filp);

	res = fuse_direct_io(io, iter, ppos, 0);

	fuse_invalidate_atime(inode);

static ssize_t fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter);

static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
	if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
		res = fuse_direct_IO(iocb, to);
		struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);

		res = __fuse_direct_read(&io, to, &iocb->ki_pos);
static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
	struct inode *inode = file_inode(iocb->ki_filp);
	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);

	/* Don't allow parallel writes to the same file */
	res = generic_write_checks(iocb, from);
	if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
		res = fuse_direct_IO(iocb, from);
		res = fuse_direct_io(&io, from, &iocb->ki_pos,
	fuse_invalidate_attr(inode);
		fuse_write_update_size(inode, iocb->ki_pos);
	inode_unlock(inode);
static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;

	if (fuse_is_bad(file_inode(file)))
		return -EIO;

	if (!(ff->open_flags & FOPEN_DIRECT_IO))
		return fuse_cache_read_iter(iocb, to);
	else
		return fuse_direct_read_iter(iocb, to);
}

static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;

	if (fuse_is_bad(file_inode(file)))
		return -EIO;

	if (!(ff->open_flags & FOPEN_DIRECT_IO))
		return fuse_cache_write_iter(iocb, from);
	else
		return fuse_direct_write_iter(iocb, from);
}
static void fuse_writepage_free(struct fuse_writepage_args *wpa)
	struct fuse_args_pages *ap = &wpa->ia.ap;

	for (i = 0; i < ap->num_pages; i++)
		__free_page(ap->pages[i]);

		fuse_file_put(wpa->ia.ff, false, false);

static void fuse_writepage_finish(struct fuse_conn *fc,
				  struct fuse_writepage_args *wpa)
	struct fuse_args_pages *ap = &wpa->ia.ap;
	struct inode *inode = wpa->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	list_del(&wpa->writepages_entry);
	for (i = 0; i < ap->num_pages; i++) {
		dec_wb_stat(&bdi->wb, WB_WRITEBACK);
		dec_node_page_state(ap->pages[i], NR_WRITEBACK_TEMP);
		wb_writeout_inc(&bdi->wb);
	wake_up(&fi->page_waitq);
/* Called under fi->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc,
				struct fuse_writepage_args *wpa, loff_t size)
__releases(fi->lock)
__acquires(fi->lock)
	struct fuse_writepage_args *aux, *next;
	struct fuse_inode *fi = get_fuse_inode(wpa->inode);
	struct fuse_write_in *inarg = &wpa->ia.write.in;
	struct fuse_args *args = &wpa->ia.ap.args;
	__u64 data_size = wpa->ia.ap.num_pages * PAGE_SIZE;

	if (inarg->offset + data_size <= size) {
		inarg->size = data_size;
	} else if (inarg->offset < size) {
		inarg->size = size - inarg->offset;
		/* Got truncated off completely */

	args->in_args[1].size = inarg->size;
	args->nocreds = true;

	err = fuse_simple_background(fc, args, GFP_ATOMIC);
	if (err == -ENOMEM) {
		spin_unlock(&fi->lock);
		err = fuse_simple_background(fc, args, GFP_NOFS | __GFP_NOFAIL);
		spin_lock(&fi->lock);

	/* Fails on broken connection only */

	fuse_writepage_finish(fc, wpa);
	spin_unlock(&fi->lock);

	/* After fuse_writepage_finish() aux request list is private */
	for (aux = wpa->next; aux; aux = next) {
		fuse_writepage_free(aux);

	fuse_writepage_free(wpa);
	spin_lock(&fi->lock);
/*
 * If fi->writectr is positive (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fi->lock
 */
void fuse_flush_writepages(struct inode *inode)
__releases(fi->lock)
__acquires(fi->lock)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	loff_t crop = i_size_read(inode);
	struct fuse_writepage_args *wpa;

	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
		wpa = list_entry(fi->queued_writes.next,
				 struct fuse_writepage_args, queue_entry);
		list_del_init(&wpa->queue_entry);
		fuse_send_writepage(fc, wpa, crop);
	}
}
static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_args *args,
	struct fuse_writepage_args *wpa =
		container_of(args, typeof(*wpa), ia.ap.args);
	struct inode *inode = wpa->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);

	mapping_set_error(inode->i_mapping, error);
	spin_lock(&fi->lock);
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_write_in *inarg = &wpa->ia.write.in;
		struct fuse_writepage_args *next = wpa->next;

		wpa->next = next->next;
		next->ia.ff = fuse_file_get(wpa->ia.ff);
		list_add(&next->writepages_entry, &fi->writepages);

		/*
		 * Skip fuse_flush_writepages() to make it easy to crop requests
		 * based on primary request size.
		 *
		 * 1st case (trivial): there are no concurrent activities using
		 * fuse_set/release_nowrite.  Then we're on safe side because
		 * fuse_flush_writepages() would call fuse_send_writepage()
		 * anyway.
		 *
		 * 2nd case: someone called fuse_set_nowrite and it is waiting
		 * now for completion of all in-flight requests.  This happens
		 * rarely and no more than once per page, so this should be
		 * acceptable.
		 *
		 * 3rd case: someone (e.g. fuse_do_setattr()) is in the middle
		 * of fuse_set_nowrite..fuse_release_nowrite section.  The fact
		 * that fuse_set_nowrite returned implies that all in-flight
		 * requests were completed along with all of their secondary
		 * requests.  Further primary requests are blocked by negative
		 * writectr.  Hence there cannot be any in-flight requests and
		 * no invocations of fuse_writepage_end() while we're in
		 * fuse_set_nowrite..fuse_release_nowrite section.
		 */
		fuse_send_writepage(fc, next, inarg->offset + inarg->size);

	fuse_writepage_finish(fc, wpa);
	spin_unlock(&fi->lock);
	fuse_writepage_free(wpa);
static struct fuse_file *__fuse_write_file_get(struct fuse_conn *fc,
					       struct fuse_inode *fi)
{
	struct fuse_file *ff = NULL;

	spin_lock(&fi->lock);
	if (!list_empty(&fi->write_files)) {
		ff = list_entry(fi->write_files.next, struct fuse_file,
				write_entry);
		fuse_file_get(ff);
	}
	spin_unlock(&fi->lock);

	return ff;
}

static struct fuse_file *fuse_write_file_get(struct fuse_conn *fc,
					     struct fuse_inode *fi)
{
	struct fuse_file *ff = __fuse_write_file_get(fc, fi);
	WARN_ON(!ff);
	return ff;
}
int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_file *ff;

	ff = __fuse_write_file_get(fc, fi);
	err = fuse_flush_times(inode, ff);
		fuse_file_put(ff, false, false);

static struct fuse_writepage_args *fuse_writepage_args_alloc(void)
	struct fuse_writepage_args *wpa;
	struct fuse_args_pages *ap;

	wpa = kzalloc(sizeof(*wpa), GFP_NOFS);
	ap->pages = fuse_pages_alloc(1, GFP_NOFS, &ap->descs);

static int fuse_writepage_locked(struct page *page)
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_writepage_args *wpa;
	struct fuse_args_pages *ap;
	struct page *tmp_page;
	int error = -ENOMEM;

	set_page_writeback(page);

	wpa = fuse_writepage_args_alloc();

	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);

	wpa->ia.ff = fuse_write_file_get(fc, fi);

	fuse_write_args_fill(&wpa->ia, wpa->ia.ff, page_offset(page), 0);

	copy_highpage(tmp_page, page);
	wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
	ap->args.in_pages = true;
	ap->pages[0] = tmp_page;
	ap->descs[0].offset = 0;
	ap->descs[0].length = PAGE_SIZE;
	ap->args.end = fuse_writepage_end;

	inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
	inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);

	spin_lock(&fi->lock);
	list_add(&wpa->writepages_entry, &fi->writepages);
	list_add_tail(&wpa->queue_entry, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fi->lock);

	end_page_writeback(page);

	__free_page(tmp_page);

	mapping_set_error(page->mapping, error);
	end_page_writeback(page);

static int fuse_writepage(struct page *page, struct writeback_control *wbc)
	if (fuse_page_is_writeback(page->mapping->host, page->index)) {
		/*
		 * ->writepages() should be called for sync() and friends.  We
		 * should only get here on direct reclaim and then we are
		 * allowed to skip a page which is already in flight
		 */
		WARN_ON(wbc->sync_mode == WB_SYNC_ALL);

		redirty_page_for_writepage(wbc, page);

	err = fuse_writepage_locked(page);
struct fuse_fill_wb_data {
	struct fuse_writepage_args *wpa;
	struct fuse_file *ff;
	struct inode *inode;
	struct page **orig_pages;
	unsigned int max_pages;
};
static bool fuse_pages_realloc(struct fuse_fill_wb_data *data)
{
	struct fuse_args_pages *ap = &data->wpa->ia.ap;
	struct fuse_conn *fc = get_fuse_conn(data->inode);
	struct page **pages;
	struct fuse_page_desc *descs;
	unsigned int npages = min_t(unsigned int,
				    max_t(unsigned int, data->max_pages * 2,
					  FUSE_DEFAULT_MAX_PAGES_PER_REQ),
				    fc->max_pages);
	WARN_ON(npages <= data->max_pages);

	pages = fuse_pages_alloc(npages, GFP_NOFS, &descs);
	if (!pages)
		return false;

	memcpy(pages, ap->pages, sizeof(struct page *) * ap->num_pages);
	memcpy(descs, ap->descs, sizeof(struct fuse_page_desc) * ap->num_pages);
	kfree(ap->pages);
	ap->pages = pages;
	ap->descs = descs;
	data->max_pages = npages;

	return true;
}
static void fuse_writepages_send(struct fuse_fill_wb_data *data)
{
	struct fuse_writepage_args *wpa = data->wpa;
	struct inode *inode = data->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	int num_pages = wpa->ia.ap.num_pages;
	int i;

	wpa->ia.ff = fuse_file_get(data->ff);
	spin_lock(&fi->lock);
	list_add_tail(&wpa->queue_entry, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fi->lock);

	for (i = 0; i < num_pages; i++)
		end_page_writeback(data->orig_pages[i]);
}
/*
 * First recheck under fi->lock if the offending offset is still under
 * writeback. If yes, then iterate auxiliary write requests, to see if there's
 * one already added for a page at this offset. If there's none, then insert
 * this new request onto the auxiliary list, otherwise reuse the existing one by
 * copying the new page contents over to the old temporary page.
 */
static bool fuse_writepage_in_flight(struct fuse_writepage_args *new_wpa,
	struct fuse_inode *fi = get_fuse_inode(new_wpa->inode);
	struct fuse_writepage_args *tmp;
	struct fuse_writepage_args *old_wpa;
	struct fuse_args_pages *new_ap = &new_wpa->ia.ap;

	WARN_ON(new_ap->num_pages != 0);

	spin_lock(&fi->lock);
	list_del(&new_wpa->writepages_entry);
	old_wpa = fuse_find_writeback(fi, page->index, page->index);
		list_add(&new_wpa->writepages_entry, &fi->writepages);
		spin_unlock(&fi->lock);

	new_ap->num_pages = 1;
	for (tmp = old_wpa->next; tmp; tmp = tmp->next) {
		WARN_ON(tmp->inode != new_wpa->inode);
		curr_index = tmp->ia.write.in.offset >> PAGE_SHIFT;
		if (curr_index == page->index) {
			WARN_ON(tmp->ia.ap.num_pages != 1);
			swap(tmp->ia.ap.pages[0], new_ap->pages[0]);

		new_wpa->next = old_wpa->next;
		old_wpa->next = new_wpa;

	spin_unlock(&fi->lock);

		struct backing_dev_info *bdi = inode_to_bdi(new_wpa->inode);

		dec_wb_stat(&bdi->wb, WB_WRITEBACK);
		dec_node_page_state(new_ap->pages[0], NR_WRITEBACK_TEMP);
		wb_writeout_inc(&bdi->wb);
		fuse_writepage_free(new_wpa);
static int fuse_writepages_fill(struct page *page,
				struct writeback_control *wbc, void *_data)
	struct fuse_fill_wb_data *data = _data;
	struct fuse_writepage_args *wpa = data->wpa;
	struct fuse_args_pages *ap = &wpa->ia.ap;
	struct inode *inode = data->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct page *tmp_page;

		data->ff = fuse_write_file_get(fc, fi);

	/*
	 * Being under writeback is unlikely but possible.  For example direct
	 * read to an mmaped fuse file will set the page dirty twice; once when
	 * the pages are faulted with get_user_pages(), and then after the read
	 * completed.
	 */
	is_writeback = fuse_page_is_writeback(inode, page->index);

	if (wpa && ap->num_pages &&
	    (is_writeback || ap->num_pages == fc->max_pages ||
	     (ap->num_pages + 1) * PAGE_SIZE > fc->max_write ||
	     data->orig_pages[ap->num_pages - 1]->index + 1 != page->index)) {
		fuse_writepages_send(data);
	} else if (wpa && ap->num_pages == data->max_pages) {
		if (!fuse_pages_realloc(data)) {
			fuse_writepages_send(data);

	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);

	/*
	 * The page must not be redirtied until the writeout is completed
	 * (i.e. userspace has sent a reply to the write request).  Otherwise
	 * there could be more than one temporary page instance for each real
	 * page.
	 *
	 * This is ensured by holding the page lock in page_mkwrite() while
	 * checking fuse_page_is_writeback().  We already hold the page lock
	 * since clear_page_dirty_for_io() and keep it held until we add the
	 * request to the fi->writepages list and increment ap->num_pages.
	 * After this fuse_page_is_writeback() will indicate that the page is
	 * under writeback, so we can release the page lock.
	 */
	if (data->wpa == NULL) {
		wpa = fuse_writepage_args_alloc();
			__free_page(tmp_page);
		data->max_pages = 1;

		fuse_write_args_fill(&wpa->ia, data->ff, page_offset(page), 0);
		wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
		ap->args.in_pages = true;
		ap->args.end = fuse_writepage_end;

		spin_lock(&fi->lock);
		list_add(&wpa->writepages_entry, &fi->writepages);
		spin_unlock(&fi->lock);

	set_page_writeback(page);

	copy_highpage(tmp_page, page);
	ap->pages[ap->num_pages] = tmp_page;
	ap->descs[ap->num_pages].offset = 0;
	ap->descs[ap->num_pages].length = PAGE_SIZE;

	inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
	inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);

	if (is_writeback && fuse_writepage_in_flight(wpa, page)) {
		end_page_writeback(page);

	data->orig_pages[ap->num_pages] = page;

	/*
	 * Protected by fi->lock against concurrent access by
	 * fuse_page_is_writeback().
	 */
	spin_lock(&fi->lock);
	spin_unlock(&fi->lock);
static int fuse_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_wb_data data;

	if (fuse_is_bad(inode))

	data.orig_pages = kcalloc(fc->max_pages,
				  sizeof(struct page *),
	if (!data.orig_pages)

	err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
		WARN_ON(!data.wpa->ia.ap.num_pages);
		fuse_writepages_send(&data);
		fuse_file_put(data.ff, false, false);

	kfree(data.orig_pages);
/*
 * It would be worth making sure that space is reserved on disk for the
 * write, but how to implement that without killing performance needs
 * more thought.
 */
static int fuse_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
	pgoff_t index = pos >> PAGE_SHIFT;
	struct fuse_conn *fc = get_fuse_conn(file_inode(file));

	WARN_ON(!fc->writeback_cache);

	page = grab_cache_page_write_begin(mapping, index, flags);

	fuse_wait_on_page_writeback(mapping->host, page->index);

	if (PageUptodate(page) || len == PAGE_SIZE)

	/*
	 * Check if the start of this page comes after the end of file,
	 * in which case the readpage can be optimized away.
	 */
	fsize = i_size_read(mapping->host);
	if (fsize <= (pos & PAGE_MASK)) {
		size_t off = pos & ~PAGE_MASK;
			zero_user_segment(page, 0, off);

	err = fuse_do_readpage(file, page);
static int fuse_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
	struct inode *inode = page->mapping->host;

	/* Haven't copied anything?  Skip zeroing, size extending, dirtying. */

	if (!PageUptodate(page)) {
		/* Zero any unwritten bytes at the end of the page */
		size_t endoff = (pos + copied) & ~PAGE_MASK;
			zero_user_segment(page, endoff, PAGE_SIZE);
		SetPageUptodate(page);

	fuse_write_update_size(inode, pos + copied);
	set_page_dirty(page);

static int fuse_launder_page(struct page *page)
	if (clear_page_dirty_for_io(page)) {
		struct inode *inode = page->mapping->host;
		err = fuse_writepage_locked(page);
			fuse_wait_on_page_writeback(inode, page->index);
/*
 * Write back dirty pages now, because there may not be any suitable
 * open file later
 */
static void fuse_vma_close(struct vm_area_struct *vma)
{
	filemap_write_and_wait(vma->vm_file->f_mapping);
}
/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * before the previous writepage completed.
 *
 * Block here, instead of in ->writepage(), so that the userspace fs
 * can only block processes actually operating on the filesystem.
 *
 * Otherwise unprivileged userspace fs would be able to block
 * unrelated:
 *
 * - page migration
 * - sync(2)
 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
 */
static vm_fault_t fuse_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);

	file_update_time(vmf->vma->vm_file);
	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}

	fuse_wait_on_page_writeback(inode, page->index);
	return VM_FAULT_LOCKED;
}
static const struct vm_operations_struct fuse_file_vm_ops = {
	.close		= fuse_vma_close,
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= fuse_page_mkwrite,
};
static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct fuse_file *ff = file->private_data;

	if (ff->open_flags & FOPEN_DIRECT_IO) {
		/* Can't provide the coherency needed for MAP_SHARED */
		if (vma->vm_flags & VM_MAYSHARE)
			return -ENODEV;

		invalidate_inode_pages2(file->f_mapping);

		return generic_file_mmap(file, vma);
	}

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		fuse_link_write_file(file);

	file_accessed(file);
	vma->vm_ops = &fuse_file_vm_ops;
	return 0;
}
static int convert_fuse_file_lock(struct fuse_conn *fc,
				  const struct fuse_file_lock *ffl,
				  struct file_lock *fl)
	switch (ffl->type) {

		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
		    ffl->end < ffl->start)

		fl->fl_start = ffl->start;
		fl->fl_end = ffl->end;

		/*
		 * Convert pid into init's pid namespace.  The locks API will
		 * translate it into the caller's pid namespace.
		 */
		fl->fl_pid = pid_nr_ns(find_pid_ns(ffl->pid, fc->pid_ns), &init_pid_ns);

	fl->fl_type = ffl->type;
static void fuse_lk_fill(struct fuse_args *args, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock, struct fuse_lk_in *inarg)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;

	memset(inarg, 0, sizeof(*inarg));
	inarg->fh = ff->fh;
	inarg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	inarg->lk.start = fl->fl_start;
	inarg->lk.end = fl->fl_end;
	inarg->lk.type = fl->fl_type;
	inarg->lk.pid = pid;
	if (flock)
		inarg->lk_flags |= FUSE_LK_FLOCK;
	args->opcode = opcode;
	args->nodeid = get_node_id(inode);
	args->in_numargs = 1;
	args->in_args[0].size = sizeof(*inarg);
	args->in_args[0].value = inarg;
}
static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	FUSE_ARGS(args);
	struct fuse_lk_in inarg;
	struct fuse_lk_out outarg;
	int err;

	fuse_lk_fill(&args, file, fl, FUSE_GETLK, 0, 0, &inarg);
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fc, &args);
	if (!err)
		err = convert_fuse_file_lock(fc, &outarg.lk, fl);

	return err;
}
static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_lk_in inarg;
	int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
	struct pid *pid = fl->fl_type != F_UNLCK ? task_tgid(current) : NULL;
	pid_t pid_nr = pid_nr_ns(pid, fc->pid_ns);

	if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
		/* NLM needs asynchronous locks, which we don't support yet */

	/* Unlock on close is handled by the flush method */
	if ((fl->fl_flags & FL_CLOSE_POSIX) == FL_CLOSE_POSIX)

	fuse_lk_fill(&args, file, fl, opcode, pid_nr, flock, &inarg);
	err = fuse_simple_request(fc, &args);

	/* locking is restartable */

static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (cmd == F_CANCELLK) {
	} else if (cmd == F_GETLK) {
			posix_test_lock(file, fl);
			err = fuse_getlk(file, fl);
			err = posix_lock_file(file, fl, NULL);
			err = fuse_setlk(file, fl, 0);

static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);

		err = locks_lock_file_wait(file, fl);
		struct fuse_file *ff = file->private_data;

		/* emulate flock with POSIX locks */
		err = fuse_setlk(file, fl, 1);
static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	FUSE_ARGS(args);
	struct fuse_bmap_in inarg;
	struct fuse_bmap_out outarg;
	int err;

	if (!inode->i_sb->s_bdev || fc->no_bmap)
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	inarg.block = block;
	inarg.blocksize = inode->i_sb->s_blocksize;
	args.opcode = FUSE_BMAP;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fc, &args);
	if (err == -ENOSYS)
		fc->no_bmap = 1;

	return err ? 0 : outarg.block;
}
2524 static loff_t fuse_lseek(struct file *file, loff_t offset, int whence)
2526 struct inode *inode = file->f_mapping->host;
2527 struct fuse_conn *fc = get_fuse_conn(inode);
2528 struct fuse_file *ff = file->private_data;
2530 struct fuse_lseek_in inarg = {
2535 struct fuse_lseek_out outarg;
2541 args.opcode = FUSE_LSEEK;
2542 args.nodeid = ff->nodeid;
2543 args.in_numargs = 1;
2544 args.in_args[0].size = sizeof(inarg);
2545 args.in_args[0].value = &inarg;
2546 args.out_numargs = 1;
2547 args.out_args[0].size = sizeof(outarg);
2548 args.out_args[0].value = &outarg;
2549 err = fuse_simple_request(fc, &args);
2551 if (err == -ENOSYS) {
2558 return vfs_setpos(file, outarg.offset, inode->i_sb->s_maxbytes);
2561 err = fuse_update_attributes(inode, file);
2563 return generic_file_llseek(file, offset, whence);
static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence)
{
	loff_t retval;
	struct inode *inode = file_inode(file);

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */
		retval = generic_file_llseek(file, offset, whence);
		break;
	case SEEK_END:
		inode_lock(inode);
		retval = fuse_update_attributes(inode, file);
		if (!retval)
			retval = generic_file_llseek(file, offset, whence);
		inode_unlock(inode);
		break;
	case SEEK_HOLE:
	case SEEK_DATA:
		inode_lock(inode);
		retval = fuse_lseek(file, offset, whence);
		inode_unlock(inode);
		break;
	default:
		retval = -EINVAL;
	}

	return retval;
}
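
/*
 * Illustrative sketch (not part of this file's build): a hypothetical
 * userspace caller exercising the SEEK_HOLE path above.  lseek() on a
 * FUSE file reaches fuse_lseek() and the FUSE_LSEEK request; if the
 * server replied ENOSYS once, later calls fall back to
 * generic_file_llseek() after refreshing attributes.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd;
	off_t hole;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;
	/* Maps to FUSE_LSEEK when the server implements it */
	hole = lseek(fd, 0, SEEK_HOLE);
	if (hole == (off_t) -1)
		perror("lseek");
	else
		printf("first hole at %lld\n", (long long) hole);
	close(fd);
	return 0;
}
#endif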
/*
 * CUSE servers compiled on 32bit broke on 64bit kernels because the
 * ABI was defined to be 'struct iovec' which is different on 32bit
 * and 64bit.  Fortunately we can determine which structure the server
 * used from the size of the reply.
 */
static int fuse_copy_ioctl_iovec_old(struct iovec *dst, void *src,
				     size_t transferred, unsigned count,
				     bool is_compat)
{
#ifdef CONFIG_COMPAT
	if (count * sizeof(struct compat_iovec) == transferred) {
		struct compat_iovec *ciov = src;
		unsigned i;

		/*
		 * With this interface a 32bit server cannot support
		 * non-compat (i.e. ones coming from 64bit apps) ioctl
		 * requests
		 */
		if (!is_compat)
			return -EINVAL;

		for (i = 0; i < count; i++) {
			dst[i].iov_base = compat_ptr(ciov[i].iov_base);
			dst[i].iov_len = ciov[i].iov_len;
		}
		return 0;
	}
#endif

	if (count * sizeof(struct iovec) != transferred)
		return -EIO;

	memcpy(dst, src, transferred);
	return 0;
}
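
/*
 * Illustrative sketch (not part of this file's build): why the reply
 * size is enough to tell the two layouts apart.  On a 64-bit kernel a
 * compat iovec is 8 bytes (two 32-bit fields) while a native iovec is
 * 16 bytes, so for a known iovec count the candidate sizes never
 * collide.  The mock struct and the x86-64 sizes are assumptions for
 * the example.
 */
#if 0
#include <assert.h>
#include <stdint.h>
#include <sys/uio.h>

struct compat_iovec {		/* userspace mock of the kernel struct */
	uint32_t iov_base;
	uint32_t iov_len;
};

int main(void)
{
	unsigned count = 3;	/* in_iovs + out_iovs from the reply header */

	/* 32-bit server reply: 3 * 8 == 24 bytes transferred */
	assert(count * sizeof(struct compat_iovec) == 24);
	/* 64-bit server reply: 3 * 16 == 48 bytes transferred */
	assert(count * sizeof(struct iovec) == 48);
	return 0;
}
#endif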
/* Make sure iov_length() won't overflow */
static int fuse_verify_ioctl_iov(struct fuse_conn *fc, struct iovec *iov,
				 size_t count)
{
	size_t n;
	u32 max = fc->max_pages << PAGE_SHIFT;

	for (n = 0; n < count; n++, iov++) {
		if (iov->iov_len > (size_t) max)
			return -ENOMEM;
		max -= iov->iov_len;
	}
	return 0;
}
static int fuse_copy_ioctl_iovec(struct fuse_conn *fc, struct iovec *dst,
				 void *src, size_t transferred, unsigned count,
				 bool is_compat)
{
	unsigned i;
	struct fuse_ioctl_iovec *fiov = src;

	if (fc->minor < 16) {
		return fuse_copy_ioctl_iovec_old(dst, src, transferred,
						 count, is_compat);
	}

	if (count * sizeof(struct fuse_ioctl_iovec) != transferred)
		return -EIO;

	for (i = 0; i < count; i++) {
		/* Did the server supply an inappropriate value? */
		if (fiov[i].base != (unsigned long) fiov[i].base ||
		    fiov[i].len != (unsigned long) fiov[i].len)
			return -EIO;

		dst[i].iov_base = (void __user *) (unsigned long) fiov[i].base;
		dst[i].iov_len = (size_t) fiov[i].len;

#ifdef CONFIG_COMPAT
		if (is_compat &&
		    (ptr_to_compat(dst[i].iov_base) != fiov[i].base ||
		     (compat_size_t) dst[i].iov_len != fiov[i].len))
			return -EIO;
#endif
	}

	return 0;
}
/*
 * For ioctls, there is no generic way to determine how much memory
 * needs to be read and/or written.  Furthermore, ioctls are allowed
 * to dereference the passed pointer, so the parameter requires deep
 * copying but FUSE has no idea whatsoever about what to copy in and
 * out.
 *
 * This is solved by allowing FUSE server to retry ioctl with
 * necessary in/out iovecs.  Let's assume the ioctl implementation
 * needs to read in the following structure.
 *
 * struct a {
 *	char	*buf;
 *	size_t	buflen;
 * };
 *
 * On the first callout to FUSE server, inarg->in_size and
 * inarg->out_size will be zero; then, the server completes the ioctl
 * with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and
 * the actual iov array to
 *
 * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a) } }
 *
 * which tells FUSE to copy in the requested area and retry the ioctl.
 * On the second round, the server has access to the structure and
 * from that it can tell what to look for next, so on the invocation,
 * it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and iov array to
 *
 * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a)	},
 *   { .iov_base = a.buf,	.iov_len = a.buflen		} }
 *
 * FUSE will copy both struct a and the pointed buffer from the
 * process doing the ioctl and retry ioctl with both struct a and the
 * buffer.
 *
 * This time, FUSE server has everything it needs and completes ioctl
 * without FUSE_IOCTL_RETRY which finishes the ioctl call.
 *
 * Copying data out (writing) works the same way.
 *
 * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel
 * automatically initializes in and out iovs by decoding @cmd with
 * _IOC_* macros and the server is not allowed to request RETRY.  This
 * limits ioctl data transfers to well-formed ioctls and is the forced
 * behavior for all FUSE servers.
 */
long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
		   unsigned int flags)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_ioctl_in inarg = {
		.fh = ff->fh,
		.cmd = cmd,
		.arg = arg,
		.flags = flags
	};
	struct fuse_ioctl_out outarg;
	struct iovec *iov_page = NULL;
	struct iovec *in_iov = NULL, *out_iov = NULL;
	unsigned int in_iovs = 0, out_iovs = 0, max_pages;
	size_t in_size, out_size, c;
	ssize_t transferred;
	int err, i;
	struct iov_iter ii;
	struct fuse_args_pages ap = {};

#if BITS_PER_LONG == 32
	inarg.flags |= FUSE_IOCTL_32BIT;
#else
	if (flags & FUSE_IOCTL_COMPAT) {
		inarg.flags |= FUSE_IOCTL_32BIT;
#ifdef CONFIG_X86_X32
		if (in_x32_syscall())
			inarg.flags |= FUSE_IOCTL_COMPAT_X32;
#endif
	}
#endif

	/* assume all the iovs returned by the client always fit in a page */
	BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);

	err = -ENOMEM;
	ap.pages = fuse_pages_alloc(fc->max_pages, GFP_KERNEL, &ap.descs);
	iov_page = (struct iovec *) __get_free_page(GFP_KERNEL);
	if (!ap.pages || !iov_page)
		goto out;

	fuse_page_descs_length_init(ap.descs, 0, fc->max_pages);

	/*
	 * If restricted, initialize IO parameters as encoded in @cmd.
	 * RETRY from server is not allowed.
	 */
	if (!(flags & FUSE_IOCTL_UNRESTRICTED)) {
		struct iovec *iov = iov_page;

		iov->iov_base = (void __user *)arg;

		switch (cmd) {
		case FS_IOC_GETFLAGS:
		case FS_IOC_SETFLAGS:
			iov->iov_len = sizeof(int);
			break;
		default:
			iov->iov_len = _IOC_SIZE(cmd);
			break;
		}

		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			in_iov = iov;
			in_iovs = 1;
		}

		if (_IOC_DIR(cmd) & _IOC_READ) {
			out_iov = iov;
			out_iovs = 1;
		}
	}

retry:
	inarg.in_size = in_size = iov_length(in_iov, in_iovs);
	inarg.out_size = out_size = iov_length(out_iov, out_iovs);

	/*
	 * Out data can be used either for actual out data or iovs,
	 * make sure there always is at least one page.
	 */
	out_size = max_t(size_t, out_size, PAGE_SIZE);
	max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE);

	/* make sure there are enough buffer pages and init request with them */
	err = -ENOMEM;
	if (max_pages > fc->max_pages)
		goto out;
	while (ap.num_pages < max_pages) {
		ap.pages[ap.num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
		if (!ap.pages[ap.num_pages])
			goto out;
		ap.num_pages++;
	}

	/* okay, let's send it to the client */
	ap.args.opcode = FUSE_IOCTL;
	ap.args.nodeid = ff->nodeid;
	ap.args.in_numargs = 1;
	ap.args.in_args[0].size = sizeof(inarg);
	ap.args.in_args[0].value = &inarg;
	if (in_size) {
		ap.args.in_numargs++;
		ap.args.in_args[1].size = in_size;
		ap.args.in_pages = true;

		err = -EFAULT;
		iov_iter_init(&ii, WRITE, in_iov, in_iovs, in_size);
		for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_pages); i++) {
			c = copy_page_from_iter(ap.pages[i], 0, PAGE_SIZE, &ii);
			if (c != PAGE_SIZE && iov_iter_count(&ii))
				goto out;
		}
	}

	ap.args.out_numargs = 2;
	ap.args.out_args[0].size = sizeof(outarg);
	ap.args.out_args[0].value = &outarg;
	ap.args.out_args[1].size = out_size;
	ap.args.out_pages = true;
	ap.args.out_argvar = true;

	transferred = fuse_simple_request(fc, &ap.args);
	err = transferred;
	if (transferred < 0)
		goto out;

	/* did it ask for retry? */
	if (outarg.flags & FUSE_IOCTL_RETRY) {
		void *vaddr;

		/* no retry if in restricted mode */
		err = -EIO;
		if (!(flags & FUSE_IOCTL_UNRESTRICTED))
			goto out;

		in_iovs = outarg.in_iovs;
		out_iovs = outarg.out_iovs;

		/*
		 * Make sure things are within bounds; the separate checks
		 * protect against overflow.
		 */
		err = -ENOMEM;
		if (in_iovs > FUSE_IOCTL_MAX_IOV ||
		    out_iovs > FUSE_IOCTL_MAX_IOV ||
		    in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
			goto out;

		vaddr = kmap_atomic(ap.pages[0]);
		err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr,
					    transferred, in_iovs + out_iovs,
					    (flags & FUSE_IOCTL_COMPAT) != 0);
		kunmap_atomic(vaddr);
		if (err)
			goto out;

		in_iov = iov_page;
		out_iov = in_iov + in_iovs;

		err = fuse_verify_ioctl_iov(fc, in_iov, in_iovs);
		if (err)
			goto out;

		err = fuse_verify_ioctl_iov(fc, out_iov, out_iovs);
		if (err)
			goto out;

		goto retry;
	}

	err = -EIO;
	if (transferred > inarg.out_size)
		goto out;

	err = -EFAULT;
	iov_iter_init(&ii, READ, out_iov, out_iovs, transferred);
	for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_pages); i++) {
		c = copy_page_to_iter(ap.pages[i], 0, PAGE_SIZE, &ii);
		if (c != PAGE_SIZE && iov_iter_count(&ii))
			goto out;
	}
	err = 0;
out:
	free_page((unsigned long) iov_page);
	while (ap.num_pages)
		__free_page(ap.pages[--ap.num_pages]);
	kfree(ap.pages);

	return err ? err : outarg.result;
}
EXPORT_SYMBOL_GPL(fuse_do_ioctl);
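
/*
 * Illustrative sketch (not part of this file's build): how a userspace
 * server might drive the retry protocol described above, using
 * libfuse's low-level API.  Note that the kernel only permits RETRY
 * for unrestricted ioctls (i.e. CUSE).  The handler name, "struct a"
 * and the two-step iovec setup mirror the comment's example; all names
 * other than the libfuse API are hypothetical.
 */
#if 0
#include <fuse_lowlevel.h>
#include <sys/uio.h>

struct a {
	char	*buf;
	size_t	buflen;
};

static void xmp_ioctl(fuse_req_t req, fuse_ino_t ino, unsigned int cmd,
		      void *arg, struct fuse_file_info *fi, unsigned flags,
		      const void *in_buf, size_t in_bufsz, size_t out_bufsz)
{
	if (in_bufsz == 0) {
		/* First callout: ask the kernel to copy in struct a */
		struct iovec iov = {
			.iov_base = arg,
			.iov_len = sizeof(struct a)
		};

		fuse_reply_ioctl_retry(req, &iov, 1, NULL, 0);
	} else if (in_bufsz == sizeof(struct a)) {
		/* Second callout: also ask for the pointed-to buffer */
		const struct a *a = in_buf;
		struct iovec iov[2] = {
			{ .iov_base = arg,    .iov_len = sizeof(struct a) },
			{ .iov_base = a->buf, .iov_len = a->buflen        },
		};

		fuse_reply_ioctl_retry(req, iov, 2, NULL, 0);
	} else {
		/* Third callout: everything available, finish the ioctl */
		fuse_reply_ioctl(req, 0, NULL, 0);
	}
}
#endif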
long fuse_ioctl_common(struct file *file, unsigned int cmd,
		       unsigned long arg, unsigned int flags)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (!fuse_allow_current_process(fc))
		return -EACCES;

	if (fuse_is_bad(inode))
		return -EIO;

	return fuse_do_ioctl(file, cmd, arg, flags);
}

static long fuse_file_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	return fuse_ioctl_common(file, cmd, arg, 0);
}

static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT);
}
/*
 * All files which have been polled are linked to RB tree
 * fuse_conn->polled_files which is indexed by kh.  Walk the tree and
 * find the matching one.
 */
static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
					      struct rb_node **parent_out)
{
	struct rb_node **link = &fc->polled_files.rb_node;
	struct rb_node *last = NULL;

	while (*link) {
		struct fuse_file *ff;

		last = *link;
		ff = rb_entry(last, struct fuse_file, polled_node);

		if (kh < ff->kh)
			link = &last->rb_left;
		else if (kh > ff->kh)
			link = &last->rb_right;
		else
			return link;
	}

	if (parent_out)
		*parent_out = last;
	return link;
}
/*
 * The file is about to be polled.  Make sure it's on the polled_files
 * RB tree.  Note that files once added to the polled_files tree are
 * not removed before the file is released.  This is because a file
 * polled once is likely to be polled again.
 */
static void fuse_register_polled_file(struct fuse_conn *fc,
				      struct fuse_file *ff)
{
	spin_lock(&fc->lock);
	if (RB_EMPTY_NODE(&ff->polled_node)) {
		struct rb_node **link, *parent;

		link = fuse_find_polled_node(fc, ff->kh, &parent);
		BUG_ON(*link);
		rb_link_node(&ff->polled_node, parent, link);
		rb_insert_color(&ff->polled_node, &fc->polled_files);
	}
	spin_unlock(&fc->lock);
}
__poll_t fuse_file_poll(struct file *file, poll_table *wait)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
	struct fuse_poll_out outarg;
	FUSE_ARGS(args);
	int err;

	if (fc->no_poll)
		return DEFAULT_POLLMASK;

	poll_wait(file, &ff->poll_wait, wait);
	inarg.events = mangle_poll(poll_requested_events(wait));

	/*
	 * Ask for notification iff there's someone waiting for it.
	 * The client may ignore the flag and always notify.
	 */
	if (waitqueue_active(&ff->poll_wait)) {
		inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
		fuse_register_polled_file(fc, ff);
	}

	args.opcode = FUSE_POLL;
	args.nodeid = ff->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fc, &args);

	if (!err)
		return demangle_poll(outarg.revents);
	if (err == -ENOSYS) {
		fc->no_poll = 1;
		return DEFAULT_POLLMASK;
	}
	return EPOLLERR;
}
EXPORT_SYMBOL_GPL(fuse_file_poll);
/*
 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
 * wakes up the poll waiters.
 */
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
			    struct fuse_notify_poll_wakeup_out *outarg)
{
	u64 kh = outarg->kh;
	struct rb_node **link;

	spin_lock(&fc->lock);

	link = fuse_find_polled_node(fc, kh, NULL);
	if (*link) {
		struct fuse_file *ff;

		ff = rb_entry(*link, struct fuse_file, polled_node);
		wake_up_interruptible_sync(&ff->poll_wait);
	}

	spin_unlock(&fc->lock);
	return 0;
}
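
/*
 * Illustrative sketch (not part of this file's build): the userspace
 * half of the poll protocol above.  A libfuse server stores the poll
 * handle it receives in its .poll handler and later calls
 * fuse_lowlevel_notify_poll(), which arrives in the kernel as
 * FUSE_NOTIFY_POLL and ends up in fuse_notify_poll_wakeup().  Names
 * other than the libfuse API are hypothetical.
 */
#if 0
#include <fuse_lowlevel.h>

static struct fuse_pollhandle *saved_ph;	/* one fd, for brevity */

static void xmp_poll(fuse_req_t req, fuse_ino_t ino,
		     struct fuse_file_info *fi, struct fuse_pollhandle *ph)
{
	if (ph) {
		/* FUSE_POLL_SCHEDULE_NOTIFY was set: remember the handle */
		if (saved_ph)
			fuse_pollhandle_destroy(saved_ph);
		saved_ph = ph;
	}
	fuse_reply_poll(req, 0);	/* nothing ready yet */
}

/* Called by the server when data becomes available */
static void data_ready(void)
{
	if (saved_ph)
		fuse_lowlevel_notify_poll(saved_ph);
}
#endif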
static void fuse_do_truncate(struct file *file)
{
	struct inode *inode = file->f_mapping->host;
	struct iattr attr;

	attr.ia_valid = ATTR_SIZE;
	attr.ia_size = i_size_read(inode);

	attr.ia_file = file;
	attr.ia_valid |= ATTR_FILE;

	fuse_do_setattr(file_dentry(file), &attr, file);
}
static inline loff_t fuse_round_up(struct fuse_conn *fc, loff_t off)
{
	return round_up(off, fc->max_pages << PAGE_SHIFT);
}
static ssize_t
fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	ssize_t ret = 0;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	loff_t pos = 0;
	struct inode *inode;
	loff_t i_size;
	size_t count = iov_iter_count(iter), shortened = 0;
	loff_t offset = iocb->ki_pos;
	struct fuse_io_priv *io;

	pos = offset;
	inode = file->f_mapping->host;
	i_size = i_size_read(inode);

	if ((iov_iter_rw(iter) == READ) && (offset >= i_size))
		return 0;

	io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
	if (!io)
		return -ENOMEM;
	spin_lock_init(&io->lock);
	kref_init(&io->refcnt);
	io->reqs = 1;
	io->bytes = -1;
	io->size = 0;
	io->offset = offset;
	io->write = (iov_iter_rw(iter) == WRITE);
	io->err = 0;
	/*
	 * By default, we want to optimize all I/Os with async request
	 * submission to the client filesystem if supported.
	 */
	io->async = ff->fc->async_dio;
	io->iocb = iocb;
	io->blocking = is_sync_kiocb(iocb);

	/* optimization for short read */
	if (io->async && !io->write && offset + count > i_size) {
		iov_iter_truncate(iter, fuse_round_up(ff->fc, i_size - offset));
		shortened = count - iov_iter_count(iter);
		count -= shortened;
	}

	/*
	 * We cannot asynchronously extend the size of a file.
	 * In such case the aio will behave exactly like sync io.
	 */
	if ((offset + count > i_size) && io->write)
		io->blocking = true;

	if (io->async && io->blocking) {
		/*
		 * Additional reference to keep io around after
		 * calling fuse_aio_complete()
		 */
		kref_get(&io->refcnt);
		io->done = &wait;
	}

	if (iov_iter_rw(iter) == WRITE) {
		ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
		fuse_invalidate_attr(inode);
	} else {
		ret = __fuse_direct_read(io, iter, &pos);
	}
	iov_iter_reexpand(iter, iov_iter_count(iter) + shortened);

	if (io->async) {
		bool blocking = io->blocking;

		fuse_aio_complete(io, ret < 0 ? ret : 0, -1);

		/* we have a non-extending, async request, so return */
		if (!blocking)
			return -EIOCBQUEUED;

		wait_for_completion(&wait);
		ret = fuse_get_res_by_io(io);
	}

	kref_put(&io->refcnt, fuse_io_release);

	if (iov_iter_rw(iter) == WRITE) {
		if (ret > 0)
			fuse_write_update_size(inode, pos);
		else if (ret < 0 && offset + count > i_size)
			fuse_do_truncate(file);
	}

	return ret;
}
static int fuse_writeback_range(struct inode *inode, loff_t start, loff_t end)
{
	int err = filemap_write_and_wait_range(inode->i_mapping, start, LLONG_MAX);

	if (!err)
		fuse_sync_writes(inode);

	return err;
}
static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
				loff_t length)
{
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(file);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = ff->fc;
	FUSE_ARGS(args);
	struct fuse_fallocate_in inarg = {
		.fh = ff->fh,
		.offset = offset,
		.length = length,
		.mode = mode
	};
	int err;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (fc->no_fallocate)
		return -EOPNOTSUPP;

	inode_lock(inode);
	if (mode & FALLOC_FL_PUNCH_HOLE) {
		loff_t endbyte = offset + length - 1;

		err = fuse_writeback_range(inode, offset, endbyte);
		if (err)
			goto out;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    offset + length > i_size_read(inode)) {
		err = inode_newsize_ok(inode, offset + length);
		if (err)
			goto out;
	}

	err = file_modified(file);
	if (err)
		goto out;

	if (!(mode & FALLOC_FL_KEEP_SIZE))
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	args.opcode = FUSE_FALLOCATE;
	args.nodeid = ff->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	err = fuse_simple_request(fc, &args);
	if (err == -ENOSYS) {
		fc->no_fallocate = 1;
		err = -EOPNOTSUPP;
	}
	if (err)
		goto out;

	/* we could have extended the file */
	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
		bool changed = fuse_write_update_size(inode, offset + length);

		if (changed && fc->writeback_cache)
			file_update_time(file);
	}

	if (mode & FALLOC_FL_PUNCH_HOLE)
		truncate_pagecache_range(inode, offset, offset + length - 1);

	fuse_invalidate_attr(inode);

out:
	if (!(mode & FALLOC_FL_KEEP_SIZE))
		clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	inode_unlock(inode);

	return err;
}
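
/*
 * Illustrative sketch (not part of this file's build): the only modes
 * accepted above are FALLOC_FL_KEEP_SIZE and FALLOC_FL_PUNCH_HOLE;
 * anything else fails with EOPNOTSUPP before a request is sent.  A
 * hypothetical userspace caller punching a hole:
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_WRONLY);
	if (fd < 0)
		return 1;
	/* PUNCH_HOLE must be combined with KEEP_SIZE per fallocate(2) */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      0, 4096) != 0)
		perror("fallocate");
	close(fd);
	return 0;
}
#endif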
static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
				      struct file *file_out, loff_t pos_out,
				      size_t len, unsigned int flags)
{
	struct fuse_file *ff_in = file_in->private_data;
	struct fuse_file *ff_out = file_out->private_data;
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	struct fuse_inode *fi_out = get_fuse_inode(inode_out);
	struct fuse_conn *fc = ff_in->fc;
	FUSE_ARGS(args);
	struct fuse_copy_file_range_in inarg = {
		.fh_in = ff_in->fh,
		.off_in = pos_in,
		.nodeid_out = ff_out->nodeid,
		.fh_out = ff_out->fh,
		.off_out = pos_out,
		.len = len,
		.flags = flags
	};
	struct fuse_write_out outarg;
	ssize_t err;
	/* mark unstable when write-back is not used, and file_out gets
	 * extended */
	bool is_unstable = (!fc->writeback_cache) &&
			   ((pos_out + len) > inode_out->i_size);

	if (fc->no_copy_file_range)
		return -EOPNOTSUPP;

	if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb)
		return -EXDEV;

	inode_lock(inode_in);
	err = fuse_writeback_range(inode_in, pos_in, pos_in + len - 1);
	inode_unlock(inode_in);
	if (err)
		return err;

	inode_lock(inode_out);

	err = file_modified(file_out);
	if (err)
		goto out;

	/*
	 * Write out dirty pages in the destination file before sending the COPY
	 * request to userspace.  After the request is completed, truncate off
	 * pages (including partial ones) from the cache that have been copied,
	 * since these contain stale data at that point.
	 *
	 * This should be mostly correct, but if the COPY writes to partial
	 * pages (at the start or end) and the parts not covered by the COPY are
	 * written through a memory map after calling fuse_writeback_range(),
	 * then these partial page modifications will be lost on truncation.
	 *
	 * It is unlikely that someone would rely on such mixed style
	 * modifications.  Yet this does give fewer guarantees than if the
	 * copying was performed with write(2).
	 *
	 * To fix this an i_mmap_sem style lock could be used to prevent new
	 * faults while the copy is ongoing.
	 */
	err = fuse_writeback_range(inode_out, pos_out, pos_out + len - 1);
	if (err)
		goto out;

	if (is_unstable)
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);

	args.opcode = FUSE_COPY_FILE_RANGE;
	args.nodeid = ff_in->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fc, &args);
	if (err == -ENOSYS) {
		fc->no_copy_file_range = 1;
		err = -EOPNOTSUPP;
	}
	if (err)
		goto out;

	truncate_inode_pages_range(inode_out->i_mapping,
				   ALIGN_DOWN(pos_out, PAGE_SIZE),
				   ALIGN(pos_out + outarg.size, PAGE_SIZE) - 1);

	if (fc->writeback_cache) {
		fuse_write_update_size(inode_out, pos_out + outarg.size);
		file_update_time(file_out);
	}

	fuse_invalidate_attr(inode_out);

	err = outarg.size;
out:
	if (is_unstable)
		clear_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);

	inode_unlock(inode_out);
	file_accessed(file_in);

	return err;
}
static ssize_t fuse_copy_file_range(struct file *src_file, loff_t src_off,
				    struct file *dst_file, loff_t dst_off,
				    size_t len, unsigned int flags)
{
	ssize_t ret;

	ret = __fuse_copy_file_range(src_file, src_off, dst_file, dst_off,
				     len, flags);

	if (ret == -EOPNOTSUPP || ret == -EXDEV)
		ret = generic_copy_file_range(src_file, src_off, dst_file,
					      dst_off, len, flags);
	return ret;
}
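
/*
 * Illustrative sketch (not part of this file's build): a hypothetical
 * userspace caller.  On a FUSE mount the call first tries
 * FUSE_COPY_FILE_RANGE via __fuse_copy_file_range(); if the server
 * does not implement it, or the files live on different superblocks,
 * the kernel falls back to generic_copy_file_range() as seen above.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int in, out;
	ssize_t n;

	if (argc < 3)
		return 1;
	in = open(argv[1], O_RDONLY);
	out = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (in < 0 || out < 0)
		return 1;
	/* Copy up to 1 MiB from the current offsets of both files */
	n = copy_file_range(in, NULL, out, NULL, 1024 * 1024, 0);
	if (n < 0)
		perror("copy_file_range");
	else
		printf("copied %zd bytes\n", n);
	close(in);
	close(out);
	return 0;
}
#endif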
static const struct file_operations fuse_file_operations = {
	.llseek		= fuse_file_llseek,
	.read_iter	= fuse_file_read_iter,
	.write_iter	= fuse_file_write_iter,
	.mmap		= fuse_file_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
	.fallocate	= fuse_file_fallocate,
	.copy_file_range = fuse_copy_file_range,
};
static const struct address_space_operations fuse_file_aops = {
	.readpage	= fuse_readpage,
	.writepage	= fuse_writepage,
	.writepages	= fuse_writepages,
	.launder_page	= fuse_launder_page,
	.readpages	= fuse_readpages,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.bmap		= fuse_bmap,
	.direct_IO	= fuse_direct_IO,
	.write_begin	= fuse_write_begin,
	.write_end	= fuse_write_end,
};
void fuse_init_file_inode(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;

	INIT_LIST_HEAD(&fi->write_files);
	INIT_LIST_HEAD(&fi->queued_writes);
	fi->writectr = 0;
	init_waitqueue_head(&fi->page_waitq);
	INIT_LIST_HEAD(&fi->writepages);
}