// SPDX-License-Identifier: GPL-2.0
/*
 * virtio-fs: Virtio Filesystem
 * Copyright (C) 2018 Red Hat, Inc.
 */

#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/pci.h>
#include <linux/pfn_t.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_fs.h>
#include <linux/delay.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/highmem.h>
#include <linux/uio.h>
#include "fuse_i.h"

/* Used to help calculate the FUSE connection's max_pages limit for a request's
 * size. Parts of the struct fuse_req are sliced into scatter-gather lists in
 * addition to the pages used, so this can help account for that overhead.
 */
#define FUSE_HEADER_OVERHEAD    4

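/* (The four slots roughly correspond to the fuse_in_header, one packed
 * in-argument segment, the fuse_out_header, and one packed out-argument
 * segment that sg_count_fuse_req() below may generate per request.)
 */
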
/* List of virtio-fs device instances and a lock for the list. Also provides
 * mutual exclusion in the device removal and mounting paths.
 */
static DEFINE_MUTEX(virtio_fs_mutex);
static LIST_HEAD(virtio_fs_instances);

enum {
        VQ_HIPRIO,
        VQ_REQUEST
};

#define VQ_NAME_LEN     24

/* Per-virtqueue state */
struct virtio_fs_vq {
        spinlock_t lock;
        struct virtqueue *vq;     /* protected by ->lock */
        struct work_struct done_work;
        struct list_head queued_reqs;
        struct list_head end_reqs;      /* End these requests */
        struct delayed_work dispatch_work;
        struct fuse_dev *fud;
        bool connected;
        long in_flight;
        struct completion in_flight_zero; /* No in-flight requests */
        char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

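/* ____cacheline_aligned_in_smp keeps each queue's state on its own
 * cacheline, presumably so the hiprio and request queues do not
 * false-share when serviced on different CPUs.
 */
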
/* A virtio-fs device instance */
struct virtio_fs {
        struct kref refcount;
        struct list_head list;    /* on virtio_fs_instances */
        char *tag;
        struct virtio_fs_vq *vqs;
        unsigned int nvqs;               /* number of virtqueues */
        unsigned int num_request_queues; /* number of request queues */
        struct dax_device *dax_dev;

        /* DAX memory window where file contents are mapped */
        void *window_kaddr;
        phys_addr_t window_phys_addr;
        size_t window_len;
};

struct virtio_fs_forget_req {
        struct fuse_in_header ih;
        struct fuse_forget_in arg;
};

struct virtio_fs_forget {
        /* This request can be temporarily queued on the virtqueue */
        struct list_head list;
        struct virtio_fs_forget_req req;
};

struct virtio_fs_req_work {
        struct fuse_req *req;
        struct virtio_fs_vq *fsvq;
        struct work_struct done_work;
};

static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
                                 struct fuse_req *req, bool in_flight);

enum {
        OPT_DAX,
};

static const struct fs_parameter_spec virtio_fs_parameters[] = {
        fsparam_flag("dax", OPT_DAX),
        {}
};

static int virtio_fs_parse_param(struct fs_context *fc,
                                 struct fs_parameter *param)
{
        struct fs_parse_result result;
        struct fuse_fs_context *ctx = fc->fs_private;
        int opt;

        opt = fs_parse(fc, virtio_fs_parameters, param, &result);
        if (opt < 0)
                return opt;

        switch (opt) {
        case OPT_DAX:
                ctx->dax = 1;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static void virtio_fs_free_fc(struct fs_context *fc)
{
        struct fuse_fs_context *ctx = fc->fs_private;

        kfree(ctx);
}

static inline struct virtio_fs_vq *vq_to_fsvq(struct virtqueue *vq)
{
        struct virtio_fs *fs = vq->vdev->priv;

        return &fs->vqs[vq->index];
}

/* Should be called with fsvq->lock held. */
static inline void inc_in_flight_req(struct virtio_fs_vq *fsvq)
{
        fsvq->in_flight++;
}

/* Should be called with fsvq->lock held. */
static inline void dec_in_flight_req(struct virtio_fs_vq *fsvq)
{
        WARN_ON(fsvq->in_flight <= 0);
        fsvq->in_flight--;
        if (!fsvq->in_flight)
                complete(&fsvq->in_flight_zero);
}

static void release_virtio_fs_obj(struct kref *ref)
{
        struct virtio_fs *vfs = container_of(ref, struct virtio_fs, refcount);

        kfree(vfs->vqs);
        kfree(vfs);
}

/* Make sure virtio_fs_mutex is held */
static void virtio_fs_put(struct virtio_fs *fs)
{
        kref_put(&fs->refcount, release_virtio_fs_obj);
}

static void virtio_fs_fiq_release(struct fuse_iqueue *fiq)
{
        struct virtio_fs *vfs = fiq->priv;

        mutex_lock(&virtio_fs_mutex);
        virtio_fs_put(vfs);
        mutex_unlock(&virtio_fs_mutex);
}

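/* Drain protocol (inferred from the code below): the drainer reinitializes
 * in_flight_zero under fsvq->lock, drops the lock, and sleeps until
 * dec_in_flight_req() completes it. Holding virtio_fs_mutex guarantees a
 * single drainer, so reinit_completion() never races with a waiter.
 */
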
static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
{
        WARN_ON(fsvq->in_flight < 0);

        /* Wait for in-flight requests to finish. */
        spin_lock(&fsvq->lock);
        if (fsvq->in_flight) {
                /* We are holding virtio_fs_mutex. There should not be any
                 * waiters waiting for completion.
                 */
                reinit_completion(&fsvq->in_flight_zero);
                spin_unlock(&fsvq->lock);
                wait_for_completion(&fsvq->in_flight_zero);
        } else {
                spin_unlock(&fsvq->lock);
        }

        flush_work(&fsvq->done_work);
        flush_delayed_work(&fsvq->dispatch_work);
}

static void virtio_fs_drain_all_queues_locked(struct virtio_fs *fs)
{
        struct virtio_fs_vq *fsvq;
        int i;

        for (i = 0; i < fs->nvqs; i++) {
                fsvq = &fs->vqs[i];
                virtio_fs_drain_queue(fsvq);
        }
}

static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
{
        /* Provides mutual exclusion between the ->remove and ->kill_sb
         * paths. We don't want both of these draining queues at the
         * same time. The completion logic reinits the completion, which
         * means no other thread may be doing a reinit or waiting for
         * the completion already.
         */
        mutex_lock(&virtio_fs_mutex);
        virtio_fs_drain_all_queues_locked(fs);
        mutex_unlock(&virtio_fs_mutex);
}

static void virtio_fs_start_all_queues(struct virtio_fs *fs)
{
        struct virtio_fs_vq *fsvq;
        int i;

        for (i = 0; i < fs->nvqs; i++) {
                fsvq = &fs->vqs[i];
                spin_lock(&fsvq->lock);
                fsvq->connected = true;
                spin_unlock(&fsvq->lock);
        }
}

/* Add a new instance to the list or return -EEXIST if tag name exists */
static int virtio_fs_add_instance(struct virtio_fs *fs)
{
        struct virtio_fs *fs2;
        bool duplicate = false;

        mutex_lock(&virtio_fs_mutex);

        list_for_each_entry(fs2, &virtio_fs_instances, list) {
                if (strcmp(fs->tag, fs2->tag) == 0)
                        duplicate = true;
        }

        if (!duplicate)
                list_add_tail(&fs->list, &virtio_fs_instances);

        mutex_unlock(&virtio_fs_mutex);

        if (duplicate)
                return -EEXIST;

        return 0;
}

/* Return the virtio_fs with a given tag, or NULL */
static struct virtio_fs *virtio_fs_find_instance(const char *tag)
{
        struct virtio_fs *fs;

        mutex_lock(&virtio_fs_mutex);

        list_for_each_entry(fs, &virtio_fs_instances, list) {
                if (strcmp(fs->tag, tag) == 0) {
                        kref_get(&fs->refcount);
                        goto found;
                }
        }

        fs = NULL; /* not found */

found:
        mutex_unlock(&virtio_fs_mutex);

        return fs;
}

static void virtio_fs_free_devs(struct virtio_fs *fs)
{
        unsigned int i;

        for (i = 0; i < fs->nvqs; i++) {
                struct virtio_fs_vq *fsvq = &fs->vqs[i];

                if (!fsvq->fud)
                        continue;

                fuse_dev_free(fsvq->fud);
                fsvq->fud = NULL;
        }
}

/* Read filesystem name from virtio config into fs->tag (devm-allocated,
 * freed with the device).
 */
static int virtio_fs_read_tag(struct virtio_device *vdev, struct virtio_fs *fs)
{
        char tag_buf[sizeof_field(struct virtio_fs_config, tag)];
        char *end;
        size_t len;

        virtio_cread_bytes(vdev, offsetof(struct virtio_fs_config, tag),
                           &tag_buf, sizeof(tag_buf));
        end = memchr(tag_buf, '\0', sizeof(tag_buf));
        if (end == tag_buf)
                return -EINVAL; /* empty tag */
        if (!end)
                end = &tag_buf[sizeof(tag_buf)];

        len = end - tag_buf;
        fs->tag = devm_kmalloc(&vdev->dev, len + 1, GFP_KERNEL);
        if (!fs->tag)
                return -ENOMEM;
        memcpy(fs->tag, tag_buf, len);
        fs->tag[len] = '\0';

        return 0;
}

/* Work function for hiprio completion */
static void virtio_fs_hiprio_done_work(struct work_struct *work)
{
        struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
                                                 done_work);
        struct virtqueue *vq = fsvq->vq;

        /* Free completed FUSE_FORGET requests */
        spin_lock(&fsvq->lock);
        do {
                unsigned int len;
                void *req;

                virtqueue_disable_cb(vq);

                while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
                        kfree(req);
                        dec_in_flight_req(fsvq);
                }
        } while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
        spin_unlock(&fsvq->lock);
}

static void virtio_fs_request_dispatch_work(struct work_struct *work)
{
        struct fuse_req *req;
        struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
                                                 dispatch_work.work);
        int ret;

        pr_debug("virtio-fs: worker %s called.\n", __func__);
        while (1) {
                spin_lock(&fsvq->lock);
                req = list_first_entry_or_null(&fsvq->end_reqs, struct fuse_req,
                                               list);
                if (!req) {
                        spin_unlock(&fsvq->lock);
                        break;
                }

                list_del_init(&req->list);
                spin_unlock(&fsvq->lock);
                fuse_request_end(req);
        }

        /* Dispatch pending requests */
        while (1) {
                spin_lock(&fsvq->lock);
                req = list_first_entry_or_null(&fsvq->queued_reqs,
                                               struct fuse_req, list);
                if (!req) {
                        spin_unlock(&fsvq->lock);
                        return;
                }
                list_del_init(&req->list);
                spin_unlock(&fsvq->lock);

                ret = virtio_fs_enqueue_req(fsvq, req, true);
                if (ret < 0) {
                        if (ret == -ENOMEM || ret == -ENOSPC) {
                                spin_lock(&fsvq->lock);
                                list_add_tail(&req->list, &fsvq->queued_reqs);
                                schedule_delayed_work(&fsvq->dispatch_work,
                                                      msecs_to_jiffies(1));
                                spin_unlock(&fsvq->lock);
                                return;
                        }
                        req->out.h.error = ret;
                        spin_lock(&fsvq->lock);
                        dec_in_flight_req(fsvq);
                        spin_unlock(&fsvq->lock);
                        pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n",
                               ret);
                        fuse_request_end(req);
                }
        }
}

/*
 * Returns 1 if the queue is full and the sender should wait a bit before
 * sending the next request, 0 otherwise.
 */
static int send_forget_request(struct virtio_fs_vq *fsvq,
                               struct virtio_fs_forget *forget,
                               bool in_flight)
{
        struct scatterlist sg;
        struct virtqueue *vq;
        int ret = 0;
        bool notify;
        struct virtio_fs_forget_req *req = &forget->req;

        spin_lock(&fsvq->lock);
        if (!fsvq->connected) {
                if (in_flight)
                        dec_in_flight_req(fsvq);
                kfree(forget);
                goto out;
        }

        sg_init_one(&sg, req, sizeof(*req));
        vq = fsvq->vq;
        dev_dbg(&vq->vdev->dev, "%s\n", __func__);

        ret = virtqueue_add_outbuf(vq, &sg, 1, forget, GFP_ATOMIC);
        if (ret < 0) {
                if (ret == -ENOMEM || ret == -ENOSPC) {
                        pr_debug("virtio-fs: Could not queue FORGET: err=%d. Will try later\n",
                                 ret);
                        list_add_tail(&forget->list, &fsvq->queued_reqs);
                        schedule_delayed_work(&fsvq->dispatch_work,
                                              msecs_to_jiffies(1));
                        if (!in_flight)
                                inc_in_flight_req(fsvq);
                        /* Queue is full */
                        ret = 1;
                } else {
                        pr_debug("virtio-fs: Could not queue FORGET: err=%d. Dropping it.\n",
                                 ret);
                        kfree(forget);
                        if (in_flight)
                                dec_in_flight_req(fsvq);
                }
                goto out;
        }

        if (!in_flight)
                inc_in_flight_req(fsvq);
        notify = virtqueue_kick_prepare(vq);
        spin_unlock(&fsvq->lock);

        if (notify)
                virtqueue_notify(vq);
        return ret;
out:
        spin_unlock(&fsvq->lock);
        return ret;
}

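/* A pattern worth noting (used here and in virtio_fs_enqueue_req() below):
 * virtqueue_kick_prepare() runs under fsvq->lock, but the potentially
 * expensive virtqueue_notify() exit to the device happens only after the
 * lock is dropped, keeping the critical section short.
 */
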
static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
{
        struct virtio_fs_forget *forget;
        struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
                                                 dispatch_work.work);
        pr_debug("virtio-fs: worker %s called.\n", __func__);
        while (1) {
                spin_lock(&fsvq->lock);
                forget = list_first_entry_or_null(&fsvq->queued_reqs,
                                                  struct virtio_fs_forget, list);
                if (!forget) {
                        spin_unlock(&fsvq->lock);
                        return;
                }

                list_del(&forget->list);
                spin_unlock(&fsvq->lock);
                if (send_forget_request(fsvq, forget, true))
                        return;
        }
}

/* Allocate and copy args into req->argbuf */
static int copy_args_to_argbuf(struct fuse_req *req)
{
        struct fuse_args *args = req->args;
        unsigned int offset = 0;
        unsigned int num_in;
        unsigned int num_out;
        unsigned int len;
        unsigned int i;

        num_in = args->in_numargs - args->in_pages;
        num_out = args->out_numargs - args->out_pages;
        len = fuse_len_args(num_in, (struct fuse_arg *) args->in_args) +
              fuse_len_args(num_out, args->out_args);

        req->argbuf = kmalloc(len, GFP_ATOMIC);
        if (!req->argbuf)
                return -ENOMEM;

        for (i = 0; i < num_in; i++) {
                memcpy(req->argbuf + offset,
                       args->in_args[i].value,
                       args->in_args[i].size);
                offset += args->in_args[i].size;
        }

        return 0;
}

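/* Layout of req->argbuf (as implied by the copy loops above and below):
 * the packed in-arguments come first, immediately followed by space for
 * the out-arguments. Page-backed args are excluded because their pages
 * are mapped into the scatter-gather list directly.
 */
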
/* Copy args out of and free req->argbuf */
static void copy_args_from_argbuf(struct fuse_args *args, struct fuse_req *req)
{
        unsigned int remaining;
        unsigned int offset;
        unsigned int num_in;
        unsigned int num_out;
        unsigned int i;

        remaining = req->out.h.len - sizeof(req->out.h);
        num_in = args->in_numargs - args->in_pages;
        num_out = args->out_numargs - args->out_pages;
        offset = fuse_len_args(num_in, (struct fuse_arg *)args->in_args);

        for (i = 0; i < num_out; i++) {
                unsigned int argsize = args->out_args[i].size;

                if (args->out_argvar &&
                    i == args->out_numargs - 1 &&
                    argsize > remaining) {
                        argsize = remaining;
                }

                memcpy(args->out_args[i].value, req->argbuf + offset, argsize);
                offset += argsize;

                if (i != args->out_numargs - 1)
                        remaining -= argsize;
        }

        /* Store the actual size of the variable-length arg */
        if (args->out_argvar)
                args->out_args[args->out_numargs - 1].size = remaining;

        kfree(req->argbuf);
        req->argbuf = NULL;
}

/* Work function for request completion */
static void virtio_fs_request_complete(struct fuse_req *req,
                                       struct virtio_fs_vq *fsvq)
{
        struct fuse_pqueue *fpq = &fsvq->fud->pq;
        struct fuse_args *args;
        struct fuse_args_pages *ap;
        unsigned int len, i, thislen;
        struct page *page;

        /*
         * TODO verify that server properly follows FUSE protocol
         * (oh.uniq, oh.len)
         */
        args = req->args;
        copy_args_from_argbuf(args, req);

        if (args->out_pages && args->page_zeroing) {
                len = args->out_args[args->out_numargs - 1].size;
                ap = container_of(args, typeof(*ap), args);
                for (i = 0; i < ap->num_pages; i++) {
                        thislen = ap->descs[i].length;
                        if (len < thislen) {
                                WARN_ON(ap->descs[i].offset);
                                page = ap->pages[i];
                                zero_user_segment(page, len, thislen);
                                len = 0;
                        } else {
                                len -= thislen;
                        }
                }
        }

        spin_lock(&fpq->lock);
        clear_bit(FR_SENT, &req->flags);
        spin_unlock(&fpq->lock);

        fuse_request_end(req);
        spin_lock(&fsvq->lock);
        dec_in_flight_req(fsvq);
        spin_unlock(&fsvq->lock);
}

static void virtio_fs_complete_req_work(struct work_struct *work)
{
        struct virtio_fs_req_work *w =
                container_of(work, typeof(*w), done_work);

        virtio_fs_request_complete(w->req, w->fsvq);
        kfree(w);
}

static void virtio_fs_requests_done_work(struct work_struct *work)
{
        struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
                                                 done_work);
        struct fuse_pqueue *fpq = &fsvq->fud->pq;
        struct virtqueue *vq = fsvq->vq;
        struct fuse_req *req;
        struct fuse_req *next;
        unsigned int len;
        LIST_HEAD(reqs);

        /* Collect completed requests off the virtqueue */
        spin_lock(&fsvq->lock);
        do {
                virtqueue_disable_cb(vq);

                while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
                        spin_lock(&fpq->lock);
                        list_move_tail(&req->list, &reqs);
                        spin_unlock(&fpq->lock);
                }
        } while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
        spin_unlock(&fsvq->lock);

        /* End requests */
        list_for_each_entry_safe(req, next, &reqs, list) {
                list_del_init(&req->list);

                /* blocking async request completes in a worker context */
                if (req->args->may_block) {
                        struct virtio_fs_req_work *w;

                        w = kzalloc(sizeof(*w), GFP_NOFS | __GFP_NOFAIL);
                        INIT_WORK(&w->done_work, virtio_fs_complete_req_work);
                        w->fsvq = fsvq;
                        w->req = req;
                        schedule_work(&w->done_work);
                } else {
                        virtio_fs_request_complete(req, fsvq);
                }
        }
}

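/* Note the two-phase completion above: buffers are harvested from the
 * virtqueue with fsvq->lock held, but fuse_request_end() runs on the
 * private reqs list after the lock is dropped, so completing a request
 * never sleeps or re-enters FUSE code inside the queue lock.
 */
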
/* Virtqueue interrupt handler */
static void virtio_fs_vq_done(struct virtqueue *vq)
{
        struct virtio_fs_vq *fsvq = vq_to_fsvq(vq);

        dev_dbg(&vq->vdev->dev, "%s %s\n", __func__, fsvq->name);

        schedule_work(&fsvq->done_work);
}

static void virtio_fs_init_vq(struct virtio_fs_vq *fsvq, char *name,
                              int vq_type)
{
        /* strscpy() guarantees NUL-termination, unlike strncpy() */
        strscpy(fsvq->name, name, VQ_NAME_LEN);
        spin_lock_init(&fsvq->lock);
        INIT_LIST_HEAD(&fsvq->queued_reqs);
        INIT_LIST_HEAD(&fsvq->end_reqs);
        init_completion(&fsvq->in_flight_zero);

        if (vq_type == VQ_REQUEST) {
                INIT_WORK(&fsvq->done_work, virtio_fs_requests_done_work);
                INIT_DELAYED_WORK(&fsvq->dispatch_work,
                                  virtio_fs_request_dispatch_work);
        } else {
                INIT_WORK(&fsvq->done_work, virtio_fs_hiprio_done_work);
                INIT_DELAYED_WORK(&fsvq->dispatch_work,
                                  virtio_fs_hiprio_dispatch_work);
        }
}

/* Initialize virtqueues */
static int virtio_fs_setup_vqs(struct virtio_device *vdev,
                               struct virtio_fs *fs)
{
        struct virtqueue **vqs;
        vq_callback_t **callbacks;
        const char **names;
        unsigned int i;
        int ret = 0;

        virtio_cread_le(vdev, struct virtio_fs_config, num_request_queues,
                        &fs->num_request_queues);
        if (fs->num_request_queues == 0)
                return -EINVAL;

        fs->nvqs = VQ_REQUEST + fs->num_request_queues;
        fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL);
        if (!fs->vqs)
                return -ENOMEM;

        vqs = kmalloc_array(fs->nvqs, sizeof(vqs[VQ_HIPRIO]), GFP_KERNEL);
        callbacks = kmalloc_array(fs->nvqs, sizeof(callbacks[VQ_HIPRIO]),
                                  GFP_KERNEL);
        names = kmalloc_array(fs->nvqs, sizeof(names[VQ_HIPRIO]), GFP_KERNEL);
        if (!vqs || !callbacks || !names) {
                ret = -ENOMEM;
                goto out;
        }

        /* Initialize the hiprio/forget request virtqueue */
        callbacks[VQ_HIPRIO] = virtio_fs_vq_done;
        virtio_fs_init_vq(&fs->vqs[VQ_HIPRIO], "hiprio", VQ_HIPRIO);
        names[VQ_HIPRIO] = fs->vqs[VQ_HIPRIO].name;

        /* Initialize the requests virtqueues */
        for (i = VQ_REQUEST; i < fs->nvqs; i++) {
                char vq_name[VQ_NAME_LEN];

                snprintf(vq_name, VQ_NAME_LEN, "requests.%u", i - VQ_REQUEST);
                virtio_fs_init_vq(&fs->vqs[i], vq_name, VQ_REQUEST);
                callbacks[i] = virtio_fs_vq_done;
                names[i] = fs->vqs[i].name;
        }

        ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, NULL);
        if (ret < 0)
                goto out;

        for (i = 0; i < fs->nvqs; i++)
                fs->vqs[i].vq = vqs[i];

        virtio_fs_start_all_queues(fs);
out:
        kfree(names);
        kfree(callbacks);
        kfree(vqs);
        if (ret)
                kfree(fs->vqs);
        return ret;
}

/* Free virtqueues (device must already be reset) */
static void virtio_fs_cleanup_vqs(struct virtio_device *vdev,
                                  struct virtio_fs *fs)
{
        vdev->config->del_vqs(vdev);
}

/* Map a window offset to a page frame number. The window offset will have
 * been produced by .iomap_begin(), which maps a file offset to a window
 * offset.
 */
static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
                                    long nr_pages, void **kaddr, pfn_t *pfn)
{
        struct virtio_fs *fs = dax_get_private(dax_dev);
        phys_addr_t offset = PFN_PHYS(pgoff);
        size_t max_nr_pages = fs->window_len / PAGE_SIZE - pgoff;

        if (kaddr)
                *kaddr = fs->window_kaddr + offset;
        if (pfn)
                *pfn = phys_to_pfn_t(fs->window_phys_addr + offset,
                                     PFN_DEV | PFN_MAP);
        return nr_pages > max_nr_pages ? max_nr_pages : nr_pages;
}

static size_t virtio_fs_copy_from_iter(struct dax_device *dax_dev,
                                       pgoff_t pgoff, void *addr,
                                       size_t bytes, struct iov_iter *i)
{
        return copy_from_iter(addr, bytes, i);
}

static size_t virtio_fs_copy_to_iter(struct dax_device *dax_dev,
                                     pgoff_t pgoff, void *addr,
                                     size_t bytes, struct iov_iter *i)
{
        return copy_to_iter(addr, bytes, i);
}

static int virtio_fs_zero_page_range(struct dax_device *dax_dev,
                                     pgoff_t pgoff, size_t nr_pages)
{
        long rc;
        void *kaddr;

        rc = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, NULL);
        if (rc < 0)
                return rc;
        memset(kaddr, 0, nr_pages << PAGE_SHIFT);
        dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT);
        return 0;
}

static const struct dax_operations virtio_fs_dax_ops = {
        .direct_access = virtio_fs_direct_access,
        .copy_from_iter = virtio_fs_copy_from_iter,
        .copy_to_iter = virtio_fs_copy_to_iter,
        .zero_page_range = virtio_fs_zero_page_range,
};

static void virtio_fs_cleanup_dax(void *data)
{
        struct dax_device *dax_dev = data;

        kill_dax(dax_dev);
        put_dax(dax_dev);
}

static int virtio_fs_setup_dax(struct virtio_device *vdev, struct virtio_fs *fs)
{
        struct virtio_shm_region cache_reg;
        struct dev_pagemap *pgmap;
        bool have_cache;

        if (!IS_ENABLED(CONFIG_FUSE_DAX))
                return 0;

        /* Get cache region */
        have_cache = virtio_get_shm_region(vdev, &cache_reg,
                                           (u8)VIRTIO_FS_SHMCAP_ID_CACHE);
        if (!have_cache) {
                dev_notice(&vdev->dev, "%s: No cache capability\n", __func__);
                return 0;
        }

        if (!devm_request_mem_region(&vdev->dev, cache_reg.addr, cache_reg.len,
                                     dev_name(&vdev->dev))) {
                dev_warn(&vdev->dev, "could not reserve region addr=0x%llx len=0x%llx\n",
                         cache_reg.addr, cache_reg.len);
                return -EBUSY;
        }

        dev_notice(&vdev->dev, "Cache len: 0x%llx @ 0x%llx\n", cache_reg.len,
                   cache_reg.addr);

        pgmap = devm_kzalloc(&vdev->dev, sizeof(*pgmap), GFP_KERNEL);
        if (!pgmap)
                return -ENOMEM;

        pgmap->type = MEMORY_DEVICE_FS_DAX;

        /* Ideally we would directly use the PCI BAR resource but
         * devm_memremap_pages() wants its own copy in pgmap. So
         * initialize a struct range from scratch (only the start
         * and end fields will be used).
         */
        pgmap->range = (struct range) {
                .start = (phys_addr_t) cache_reg.addr,
                .end = (phys_addr_t) cache_reg.addr + cache_reg.len - 1,
        };
        pgmap->nr_range = 1;

        fs->window_kaddr = devm_memremap_pages(&vdev->dev, pgmap);
        if (IS_ERR(fs->window_kaddr))
                return PTR_ERR(fs->window_kaddr);

        fs->window_phys_addr = (phys_addr_t) cache_reg.addr;
        fs->window_len = (phys_addr_t) cache_reg.len;

        dev_dbg(&vdev->dev, "%s: window kaddr 0x%px phys_addr 0x%llx len 0x%llx\n",
                __func__, fs->window_kaddr, cache_reg.addr, cache_reg.len);

        fs->dax_dev = alloc_dax(fs, NULL, &virtio_fs_dax_ops, 0);
        if (IS_ERR(fs->dax_dev))
                return PTR_ERR(fs->dax_dev);

        return devm_add_action_or_reset(&vdev->dev, virtio_fs_cleanup_dax,
                                        fs->dax_dev);
}

static int virtio_fs_probe(struct virtio_device *vdev)
{
        struct virtio_fs *fs;
        int ret;

        fs = kzalloc(sizeof(*fs), GFP_KERNEL);
        if (!fs)
                return -ENOMEM;
        kref_init(&fs->refcount);
        vdev->priv = fs;

        ret = virtio_fs_read_tag(vdev, fs);
        if (ret < 0)
                goto out;

        ret = virtio_fs_setup_vqs(vdev, fs);
        if (ret < 0)
                goto out;

        /* TODO vq affinity */

        ret = virtio_fs_setup_dax(vdev, fs);
        if (ret < 0)
                goto out_vqs;

        /* Bring the device online in case the filesystem is mounted and
         * requests need to be sent before we return.
         */
        virtio_device_ready(vdev);

        ret = virtio_fs_add_instance(fs);
        if (ret < 0)
                goto out_vqs;

        return 0;

out_vqs:
        vdev->config->reset(vdev);
        virtio_fs_cleanup_vqs(vdev, fs);
        kfree(fs->vqs);

out:
        vdev->priv = NULL;
        kfree(fs);
        return ret;
}

static void virtio_fs_stop_all_queues(struct virtio_fs *fs)
{
        struct virtio_fs_vq *fsvq;
        int i;

        for (i = 0; i < fs->nvqs; i++) {
                fsvq = &fs->vqs[i];
                spin_lock(&fsvq->lock);
                fsvq->connected = false;
                spin_unlock(&fsvq->lock);
        }
}

static void virtio_fs_remove(struct virtio_device *vdev)
{
        struct virtio_fs *fs = vdev->priv;

        mutex_lock(&virtio_fs_mutex);
        /* This device is going away. No one should get a new reference */
        list_del_init(&fs->list);
        virtio_fs_stop_all_queues(fs);
        virtio_fs_drain_all_queues_locked(fs);
        vdev->config->reset(vdev);
        virtio_fs_cleanup_vqs(vdev, fs);

        vdev->priv = NULL;
        /* Put device reference on virtio_fs object */
        virtio_fs_put(fs);
        mutex_unlock(&virtio_fs_mutex);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_fs_freeze(struct virtio_device *vdev)
{
        /* TODO need to save state here */
        pr_warn("virtio-fs: suspend/resume not yet supported\n");
        return -EOPNOTSUPP;
}

static int virtio_fs_restore(struct virtio_device *vdev)
{
        /* TODO need to restore state here */
        return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct virtio_device_id id_table[] = {
        { VIRTIO_ID_FS, VIRTIO_DEV_ANY_ID },
        {},
};

static const unsigned int feature_table[] = {};

static struct virtio_driver virtio_fs_driver = {
        .driver.name = KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table = id_table,
        .feature_table = feature_table,
        .feature_table_size = ARRAY_SIZE(feature_table),
        .probe = virtio_fs_probe,
        .remove = virtio_fs_remove,
#ifdef CONFIG_PM_SLEEP
        .freeze = virtio_fs_freeze,
        .restore = virtio_fs_restore,
#endif
};

static void virtio_fs_wake_forget_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
        struct fuse_forget_link *link;
        struct virtio_fs_forget *forget;
        struct virtio_fs_forget_req *req;
        struct virtio_fs *fs;
        struct virtio_fs_vq *fsvq;
        u64 unique;

        link = fuse_dequeue_forget(fiq, 1, NULL);
        unique = fuse_get_unique(fiq);

        fs = fiq->priv;
        fsvq = &fs->vqs[VQ_HIPRIO];
        spin_unlock(&fiq->lock);

        /* Allocate a buffer for the request */
        forget = kmalloc(sizeof(*forget), GFP_NOFS | __GFP_NOFAIL);
        req = &forget->req;

        req->ih = (struct fuse_in_header){
                .opcode = FUSE_FORGET,
                .nodeid = link->forget_one.nodeid,
                .unique = unique,
                .len = sizeof(*req),
        };
        req->arg = (struct fuse_forget_in){
                .nlookup = link->forget_one.nlookup,
        };

        send_forget_request(fsvq, forget, false);
        kfree(link);
}

static void virtio_fs_wake_interrupt_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
        /*
         * TODO interrupts.
         *
         * Normal fs operations on a local filesystem aren't interruptible.
         * Exceptions are blocking lock operations; for example fcntl(F_SETLKW)
         * with a shared lock between host and guest.
         */
        spin_unlock(&fiq->lock);
}

/* Count number of scatter-gather elements required */
static unsigned int sg_count_fuse_pages(struct fuse_page_desc *page_descs,
                                        unsigned int num_pages,
                                        unsigned int total_len)
{
        unsigned int i;
        unsigned int this_len;

        for (i = 0; i < num_pages && total_len; i++) {
                this_len = min(page_descs[i].length, total_len);
                total_len -= this_len;
        }

        return i;
}

/* Return the number of scatter-gather list elements required */
static unsigned int sg_count_fuse_req(struct fuse_req *req)
{
        struct fuse_args *args = req->args;
        struct fuse_args_pages *ap = container_of(args, typeof(*ap), args);
        unsigned int size, total_sgs = 1 /* fuse_in_header */;

        if (args->in_numargs - args->in_pages)
                total_sgs += 1;

        if (args->in_pages) {
                size = args->in_args[args->in_numargs - 1].size;
                total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages,
                                                 size);
        }

        if (!test_bit(FR_ISREPLY, &req->flags))
                return total_sgs;

        total_sgs += 1 /* fuse_out_header */;

        if (args->out_numargs - args->out_pages)
                total_sgs += 1;

        if (args->out_pages) {
                size = args->out_args[args->out_numargs - 1].size;
                total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages,
                                                 size);
        }

        return total_sgs;
}

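/* Worked example (illustrative): a request contributes at most four
 * non-page elements - the fuse_in_header, one packed in-argument segment,
 * the fuse_out_header, and one packed out-argument segment - plus one
 * element per payload page. That worst case is the FUSE_HEADER_OVERHEAD
 * slack of 4 applied in virtio_fs_get_tree().
 */
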
/* Add pages to scatter-gather list and return number of elements used */
static unsigned int sg_init_fuse_pages(struct scatterlist *sg,
                                       struct page **pages,
                                       struct fuse_page_desc *page_descs,
                                       unsigned int num_pages,
                                       unsigned int total_len)
{
        unsigned int i;
        unsigned int this_len;

        for (i = 0; i < num_pages && total_len; i++) {
                sg_init_table(&sg[i], 1);
                this_len = min(page_descs[i].length, total_len);
                sg_set_page(&sg[i], pages[i], this_len, page_descs[i].offset);
                total_len -= this_len;
        }

        return i;
}

/* Add args to scatter-gather list and return number of elements used */
static unsigned int sg_init_fuse_args(struct scatterlist *sg,
                                      struct fuse_req *req,
                                      struct fuse_arg *args,
                                      unsigned int numargs,
                                      bool argpages,
                                      void *argbuf,
                                      unsigned int *len_used)
{
        struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);
        unsigned int total_sgs = 0;
        unsigned int len;

        len = fuse_len_args(numargs - argpages, args);
        if (len)
                sg_init_one(&sg[total_sgs++], argbuf, len);

        if (argpages)
                total_sgs += sg_init_fuse_pages(&sg[total_sgs],
                                                ap->pages, ap->descs,
                                                ap->num_pages,
                                                args[numargs - 1].size);

        if (len_used)
                *len_used = len;

        return total_sgs;
}

/* Add a request to a virtqueue and kick the device */
static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
                                 struct fuse_req *req, bool in_flight)
{
        /* requests need at least 4 elements */
        struct scatterlist *stack_sgs[6];
        struct scatterlist stack_sg[ARRAY_SIZE(stack_sgs)];
        struct scatterlist **sgs = stack_sgs;
        struct scatterlist *sg = stack_sg;
        struct virtqueue *vq;
        struct fuse_args *args = req->args;
        unsigned int argbuf_used = 0;
        unsigned int out_sgs = 0;
        unsigned int in_sgs = 0;
        unsigned int total_sgs;
        unsigned int i;
        int ret;
        bool notify;
        struct fuse_pqueue *fpq;

        /* Does the sglist fit on the stack? */
        total_sgs = sg_count_fuse_req(req);
        if (total_sgs > ARRAY_SIZE(stack_sgs)) {
                sgs = kmalloc_array(total_sgs, sizeof(sgs[0]), GFP_ATOMIC);
                sg = kmalloc_array(total_sgs, sizeof(sg[0]), GFP_ATOMIC);
                if (!sgs || !sg) {
                        ret = -ENOMEM;
                        goto out;
                }
        }

        /* Use a bounce buffer since stack args cannot be mapped */
        ret = copy_args_to_argbuf(req);
        if (ret < 0)
                goto out;

        /* Request elements */
        sg_init_one(&sg[out_sgs++], &req->in.h, sizeof(req->in.h));
        out_sgs += sg_init_fuse_args(&sg[out_sgs], req,
                                     (struct fuse_arg *)args->in_args,
                                     args->in_numargs, args->in_pages,
                                     req->argbuf, &argbuf_used);

        /* Reply elements */
        if (test_bit(FR_ISREPLY, &req->flags)) {
                sg_init_one(&sg[out_sgs + in_sgs++],
                            &req->out.h, sizeof(req->out.h));
                in_sgs += sg_init_fuse_args(&sg[out_sgs + in_sgs], req,
                                            args->out_args, args->out_numargs,
                                            args->out_pages,
                                            req->argbuf + argbuf_used, NULL);
        }

        WARN_ON(out_sgs + in_sgs != total_sgs);

        for (i = 0; i < total_sgs; i++)
                sgs[i] = &sg[i];

        spin_lock(&fsvq->lock);

        if (!fsvq->connected) {
                spin_unlock(&fsvq->lock);
                ret = -ENOTCONN;
                goto out;
        }

        vq = fsvq->vq;
        ret = virtqueue_add_sgs(vq, sgs, out_sgs, in_sgs, req, GFP_ATOMIC);
        if (ret < 0) {
                spin_unlock(&fsvq->lock);
                goto out;
        }

        /* Request successfully sent. */
        fpq = &fsvq->fud->pq;
        spin_lock(&fpq->lock);
        list_add_tail(&req->list, fpq->processing);
        spin_unlock(&fpq->lock);
        set_bit(FR_SENT, &req->flags);
        /* matches barrier in request_wait_answer() */
        smp_mb__after_atomic();

        if (!in_flight)
                inc_in_flight_req(fsvq);
        notify = virtqueue_kick_prepare(vq);

        spin_unlock(&fsvq->lock);

        if (notify)
                virtqueue_notify(vq);

out:
        if (ret < 0 && req->argbuf) {
                kfree(req->argbuf);
                req->argbuf = NULL;
        }
        if (sgs != stack_sgs) {
                kfree(sgs);
                kfree(sg);
        }

        return ret;
}

static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
        unsigned int queue_id = VQ_REQUEST; /* TODO multiqueue */
        struct virtio_fs *fs;
        struct fuse_req *req;
        struct virtio_fs_vq *fsvq;
        int ret;

        WARN_ON(list_empty(&fiq->pending));
        req = list_last_entry(&fiq->pending, struct fuse_req, list);
        clear_bit(FR_PENDING, &req->flags);
        list_del_init(&req->list);
        WARN_ON(!list_empty(&fiq->pending));
        spin_unlock(&fiq->lock);

        fs = fiq->priv;

        pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u\n",
                 __func__, req->in.h.opcode, req->in.h.unique,
                 req->in.h.nodeid, req->in.h.len,
                 fuse_len_args(req->args->out_numargs, req->args->out_args));

        fsvq = &fs->vqs[queue_id];
        ret = virtio_fs_enqueue_req(fsvq, req, false);
        if (ret < 0) {
                if (ret == -ENOMEM || ret == -ENOSPC) {
                        /*
                         * Virtqueue full. Retry submission from worker
                         * context as we might be holding fc->bg_lock.
                         */
                        spin_lock(&fsvq->lock);
                        list_add_tail(&req->list, &fsvq->queued_reqs);
                        inc_in_flight_req(fsvq);
                        schedule_delayed_work(&fsvq->dispatch_work,
                                              msecs_to_jiffies(1));
                        spin_unlock(&fsvq->lock);
                        return;
                }
                req->out.h.error = ret;
                pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n", ret);

                /* Can't end request in submission context. Use a worker */
                spin_lock(&fsvq->lock);
                list_add_tail(&req->list, &fsvq->end_reqs);
                schedule_delayed_work(&fsvq->dispatch_work, 0);
                spin_unlock(&fsvq->lock);
                return;
        }
}

static const struct fuse_iqueue_ops virtio_fs_fiq_ops = {
        .wake_forget_and_unlock = virtio_fs_wake_forget_and_unlock,
        .wake_interrupt_and_unlock = virtio_fs_wake_interrupt_and_unlock,
        .wake_pending_and_unlock = virtio_fs_wake_pending_and_unlock,
        .release = virtio_fs_fiq_release,
};

static inline void virtio_fs_ctx_set_defaults(struct fuse_fs_context *ctx)
{
        ctx->rootmode = S_IFDIR;
        ctx->default_permissions = 1;
        ctx->allow_other = 1;
        ctx->max_read = UINT_MAX;
        ctx->blksize = 512;
        ctx->destroy = true;
        ctx->no_control = true;
        ctx->no_force_umount = true;
}

static int virtio_fs_fill_super(struct super_block *sb, struct fs_context *fsc)
{
        struct fuse_mount *fm = get_fuse_mount_super(sb);
        struct fuse_conn *fc = fm->fc;
        struct virtio_fs *fs = fc->iq.priv;
        struct fuse_fs_context *ctx = fsc->fs_private;
        unsigned int i;
        int err;

        virtio_fs_ctx_set_defaults(ctx);
        mutex_lock(&virtio_fs_mutex);

        /* After holding the mutex, make sure the virtiofs device is still
         * there. Though we are holding a reference to it, driver ->remove
         * might still have cleaned up the virtqueues. In that case, bail out.
         */
        err = -EINVAL;
        if (list_empty(&fs->list)) {
                pr_info("virtio-fs: tag <%s> not found\n", fs->tag);
                goto err;
        }

        err = -ENOMEM;
        /* Allocate fuse_dev for hiprio and notification queues */
        for (i = 0; i < fs->nvqs; i++) {
                struct virtio_fs_vq *fsvq = &fs->vqs[i];

                fsvq->fud = fuse_dev_alloc();
                if (!fsvq->fud)
                        goto err_free_fuse_devs;
        }

        /* virtiofs allocates and installs its own fuse devices */
        ctx->fudptr = NULL;
        if (ctx->dax) {
                if (!fs->dax_dev) {
                        err = -EINVAL;
                        pr_err("virtio-fs: dax can't be enabled as filesystem"
                               " device does not support it.\n");
                        goto err_free_fuse_devs;
                }
                ctx->dax_dev = fs->dax_dev;
        }
        err = fuse_fill_super_common(sb, ctx);
        if (err < 0)
                goto err_free_fuse_devs;

        for (i = 0; i < fs->nvqs; i++) {
                struct virtio_fs_vq *fsvq = &fs->vqs[i];

                fuse_dev_install(fsvq->fud, fc);
        }

        /* Previous unmount will stop all queues. Start these again */
        virtio_fs_start_all_queues(fs);
        fuse_send_init(fm);
        mutex_unlock(&virtio_fs_mutex);
        return 0;

err_free_fuse_devs:
        virtio_fs_free_devs(fs);
err:
        mutex_unlock(&virtio_fs_mutex);
        return err;
}

static void virtio_fs_conn_destroy(struct fuse_mount *fm)
{
        struct fuse_conn *fc = fm->fc;
        struct virtio_fs *vfs = fc->iq.priv;
        struct virtio_fs_vq *fsvq = &vfs->vqs[VQ_HIPRIO];

        /* Stop dax worker. Soon evict_inodes() will be called which
         * will free all memory ranges belonging to all inodes.
         */
        if (IS_ENABLED(CONFIG_FUSE_DAX))
                fuse_dax_cancel_work(fc);

        /* Stop forget queue. Soon destroy will be sent */
        spin_lock(&fsvq->lock);
        fsvq->connected = false;
        spin_unlock(&fsvq->lock);
        virtio_fs_drain_all_queues(vfs);

        fuse_conn_destroy(fm);

        /* fuse_conn_destroy() must have sent destroy. Stop all queues,
         * drain one more time, and free the fuse devices. Freeing the fuse
         * devices drops their reference on the fuse_conn, which in turn
         * drops its reference on the virtio_fs object.
         */
        virtio_fs_stop_all_queues(vfs);
        virtio_fs_drain_all_queues(vfs);
        virtio_fs_free_devs(vfs);
}

static void virtio_kill_sb(struct super_block *sb)
{
        struct fuse_mount *fm = get_fuse_mount_super(sb);
        bool last;

        /* If mount failed, we can still be called without any fc */
        if (fm) {
                last = fuse_mount_remove(fm);
                if (last)
                        virtio_fs_conn_destroy(fm);
        }
        kill_anon_super(sb);
}

static int virtio_fs_test_super(struct super_block *sb,
                                struct fs_context *fsc)
{
        struct fuse_mount *fsc_fm = fsc->s_fs_info;
        struct fuse_mount *sb_fm = get_fuse_mount_super(sb);

        return fsc_fm->fc->iq.priv == sb_fm->fc->iq.priv;
}

static int virtio_fs_get_tree(struct fs_context *fsc)
{
        struct virtio_fs *fs;
        struct super_block *sb;
        struct fuse_conn *fc = NULL;
        struct fuse_mount *fm;
        unsigned int virtqueue_size;
        int err = -EIO;

        /* This gets a reference on the virtio_fs object. This ptr gets
         * installed in fc->iq->priv. Once fuse_conn is going away, it
         * calls ->put() to drop the reference to this object.
         */
        fs = virtio_fs_find_instance(fsc->source);
        if (!fs) {
                pr_info("virtio-fs: tag <%s> not found\n", fsc->source);
                return -EINVAL;
        }

        virtqueue_size = virtqueue_get_vring_size(fs->vqs[VQ_REQUEST].vq);
        if (WARN_ON(virtqueue_size <= FUSE_HEADER_OVERHEAD))
                goto out_err;

        err = -ENOMEM;
        fc = kzalloc(sizeof(struct fuse_conn), GFP_KERNEL);
        if (!fc)
                goto out_err;

        fm = kzalloc(sizeof(struct fuse_mount), GFP_KERNEL);
        if (!fm)
                goto out_err;

        fuse_conn_init(fc, fm, fsc->user_ns, &virtio_fs_fiq_ops, fs);
        fc->release = fuse_free_conn;
        fc->delete_stale = true;
        fc->auto_submounts = true;

        /* Tell FUSE to split requests that exceed the virtqueue's size */
        fc->max_pages_limit = min_t(unsigned int, fc->max_pages_limit,
                                    virtqueue_size - FUSE_HEADER_OVERHEAD);

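        /* For example (illustrative numbers): with a 256-descriptor vring,
         * max_pages_limit is capped at 252, so a single request's payload
         * never needs more scatter-gather elements than the ring can hold.
         */
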
        fsc->s_fs_info = fm;
        sb = sget_fc(fsc, virtio_fs_test_super, set_anon_super_fc);
        if (fsc->s_fs_info) {
                fuse_conn_put(fc);
                kfree(fm);
        }
        if (IS_ERR(sb))
                return PTR_ERR(sb);

        if (!sb->s_root) {
                err = virtio_fs_fill_super(sb, fsc);
                if (err) {
                        fuse_conn_put(fc);
                        kfree(fm);
                        sb->s_fs_info = NULL;
                        deactivate_locked_super(sb);
                        return err;
                }

                sb->s_flags |= SB_ACTIVE;
        }

        WARN_ON(fsc->root);
        fsc->root = dget(sb->s_root);
        return 0;

out_err:
        kfree(fc);
        mutex_lock(&virtio_fs_mutex);
        virtio_fs_put(fs);
        mutex_unlock(&virtio_fs_mutex);
        return err;
}

static const struct fs_context_operations virtio_fs_context_ops = {
        .free = virtio_fs_free_fc,
        .parse_param = virtio_fs_parse_param,
        .get_tree = virtio_fs_get_tree,
};

static int virtio_fs_init_fs_context(struct fs_context *fsc)
{
        struct fuse_fs_context *ctx;

        ctx = kzalloc(sizeof(struct fuse_fs_context), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
        fsc->fs_private = ctx;
        fsc->ops = &virtio_fs_context_ops;
        return 0;
}

static struct file_system_type virtio_fs_type = {
        .owner = THIS_MODULE,
        .name = "virtiofs",
        .init_fs_context = virtio_fs_init_fs_context,
        .kill_sb = virtio_kill_sb,
};

static int __init virtio_fs_init(void)
{
        int ret;

        ret = register_virtio_driver(&virtio_fs_driver);
        if (ret < 0)
                return ret;

        ret = register_filesystem(&virtio_fs_type);
        if (ret < 0) {
                unregister_virtio_driver(&virtio_fs_driver);
                return ret;
        }

        return 0;
}
module_init(virtio_fs_init);

static void __exit virtio_fs_exit(void)
{
        unregister_filesystem(&virtio_fs_type);
        unregister_virtio_driver(&virtio_fs_driver);
}
module_exit(virtio_fs_exit);

MODULE_AUTHOR("Stefan Hajnoczi <stefanha@redhat.com>");
MODULE_DESCRIPTION("Virtio Filesystem");
MODULE_LICENSE("GPL");
MODULE_ALIAS_FS(KBUILD_MODNAME);
MODULE_DEVICE_TABLE(virtio, id_table);