// SPDX-License-Identifier: GPL-2.0
/*
 * virtio-fs: Virtio Filesystem
 * Copyright (C) 2018 Red Hat, Inc.
 */

#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/pci.h>
#include <linux/pfn_t.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_fs.h>
#include <linux/delay.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/highmem.h>
#include <linux/uio.h>
#include "fuse_i.h"

/* Used to help calculate the FUSE connection's max_pages limit for a request's
 * size. Parts of the struct fuse_req are sliced into scattergather lists in
 * addition to the pages used, so this can help account for that overhead.
 */
#define FUSE_HEADER_OVERHEAD 4
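/* See virtio_fs_get_tree(), which subtracts this overhead from the virtqueue
 * size when clamping fc->max_pages_limit.
 */
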
/* List of virtio-fs device instances and a lock for the list. Also provides
 * mutual exclusion in device removal and mounting path.
 */
static DEFINE_MUTEX(virtio_fs_mutex);
static LIST_HEAD(virtio_fs_instances);

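/* Virtqueue layout: VQ_HIPRIO carries FUSE_FORGET requests, VQ_REQUEST and
 * any further request queues carry normal FUSE requests.
 */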
enum {
        VQ_HIPRIO,
        VQ_REQUEST
};

#define VQ_NAME_LEN 24

/* Per-virtqueue state */
struct virtio_fs_vq {
        spinlock_t lock;
        struct virtqueue *vq;              /* protected by ->lock */
        struct work_struct done_work;
        struct list_head queued_reqs;
        struct list_head end_reqs;         /* End these requests */
        struct delayed_work dispatch_work;
        struct fuse_dev *fud;
        bool connected;
        long in_flight;
        struct completion in_flight_zero;  /* No inflight requests */
        char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

/* A virtio-fs device instance */
struct virtio_fs {
        struct kref refcount;
        struct list_head list;             /* on virtio_fs_instances */
        char *tag;
        struct virtio_fs_vq *vqs;
        unsigned int nvqs;                 /* number of virtqueues */
        unsigned int num_request_queues;   /* number of request queues */
        struct dax_device *dax_dev;

        /* DAX memory window where file contents are mapped */
        void *window_kaddr;
        phys_addr_t window_phys_addr;
        size_t window_len;
};

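/* Instances are refcounted: the instance list holds the initial reference
 * from probe and each mount holds another through fc->iq.priv, dropped via
 * virtio_fs_fiq_release().
 */
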
struct virtio_fs_forget_req {
        struct fuse_in_header ih;
        struct fuse_forget_in arg;
};

struct virtio_fs_forget {
        /* This request can be temporarily queued on virt queue */
        struct list_head list;
        struct virtio_fs_forget_req req;
};

struct virtio_fs_req_work {
        struct fuse_req *req;
        struct virtio_fs_vq *fsvq;
        struct work_struct done_work;
};

static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
                                 struct fuse_req *req, bool in_flight);

enum {
        OPT_DAX,
};

static const struct fs_parameter_spec virtio_fs_parameters[] = {
        fsparam_flag("dax", OPT_DAX),
        {}
};

static int virtio_fs_parse_param(struct fs_context *fc,
                                 struct fs_parameter *param)
{
        struct fs_parse_result result;
        struct fuse_fs_context *ctx = fc->fs_private;
        int opt;

        opt = fs_parse(fc, virtio_fs_parameters, param, &result);
        if (opt < 0)
                return opt;

        switch (opt) {
        case OPT_DAX:
                ctx->dax = 1;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static void virtio_fs_free_fc(struct fs_context *fc)
{
        struct fuse_fs_context *ctx = fc->fs_private;

        kfree(ctx);
}

static inline struct virtio_fs_vq *vq_to_fsvq(struct virtqueue *vq)
{
        struct virtio_fs *fs = vq->vdev->priv;

        return &fs->vqs[vq->index];
}

static inline struct fuse_pqueue *vq_to_fpq(struct virtqueue *vq)
{
        return &vq_to_fsvq(vq)->fud->pq;
}

/* Should be called with fsvq->lock held. */
static inline void inc_in_flight_req(struct virtio_fs_vq *fsvq)
{
        fsvq->in_flight++;
}

/* Should be called with fsvq->lock held. */
static inline void dec_in_flight_req(struct virtio_fs_vq *fsvq)
{
        WARN_ON(fsvq->in_flight <= 0);
        fsvq->in_flight--;
        if (!fsvq->in_flight)
                complete(&fsvq->in_flight_zero);
}

static void release_virtio_fs_obj(struct kref *ref)
{
        struct virtio_fs *vfs = container_of(ref, struct virtio_fs, refcount);

        kfree(vfs->vqs);
        kfree(vfs);
}

/* Make sure virtio_fs_mutex is held */
static void virtio_fs_put(struct virtio_fs *fs)
{
        kref_put(&fs->refcount, release_virtio_fs_obj);
}

static void virtio_fs_fiq_release(struct fuse_iqueue *fiq)
{
        struct virtio_fs *vfs = fiq->priv;

        mutex_lock(&virtio_fs_mutex);
        virtio_fs_put(vfs);
        mutex_unlock(&virtio_fs_mutex);
}

static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
{
        WARN_ON(fsvq->in_flight < 0);

        /* Wait for in flight requests to finish. */
        spin_lock(&fsvq->lock);
        if (fsvq->in_flight) {
                /* We are holding virtio_fs_mutex. There should not be any
                 * waiters waiting for completion.
                 */
                reinit_completion(&fsvq->in_flight_zero);
                spin_unlock(&fsvq->lock);
                wait_for_completion(&fsvq->in_flight_zero);
        } else {
                spin_unlock(&fsvq->lock);
        }

        flush_work(&fsvq->done_work);
        flush_delayed_work(&fsvq->dispatch_work);
}

static void virtio_fs_drain_all_queues_locked(struct virtio_fs *fs)
{
        struct virtio_fs_vq *fsvq;
        int i;

        for (i = 0; i < fs->nvqs; i++) {
                fsvq = &fs->vqs[i];
                virtio_fs_drain_queue(fsvq);
        }
}

static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
{
        /* Provides mutual exclusion between ->remove and ->kill_sb
         * paths. We don't want both of these draining queues at the
         * same time. Current completion logic reinits completion
         * and that means there should not be any other thread
         * doing reinit or waiting for completion already.
         */
        mutex_lock(&virtio_fs_mutex);
        virtio_fs_drain_all_queues_locked(fs);
        mutex_unlock(&virtio_fs_mutex);
}

static void virtio_fs_start_all_queues(struct virtio_fs *fs)
{
        struct virtio_fs_vq *fsvq;
        int i;

        for (i = 0; i < fs->nvqs; i++) {
                fsvq = &fs->vqs[i];
                spin_lock(&fsvq->lock);
                fsvq->connected = true;
                spin_unlock(&fsvq->lock);
        }
}

/* Add a new instance to the list or return -EEXIST if tag name exists */
static int virtio_fs_add_instance(struct virtio_fs *fs)
{
        struct virtio_fs *fs2;
        bool duplicate = false;

        mutex_lock(&virtio_fs_mutex);

        list_for_each_entry(fs2, &virtio_fs_instances, list) {
                if (strcmp(fs->tag, fs2->tag) == 0)
                        duplicate = true;
        }

        if (!duplicate)
                list_add_tail(&fs->list, &virtio_fs_instances);

        mutex_unlock(&virtio_fs_mutex);

        if (duplicate)
                return -EEXIST;
        return 0;
}

/* Return the virtio_fs with a given tag, or NULL */
static struct virtio_fs *virtio_fs_find_instance(const char *tag)
{
        struct virtio_fs *fs;

        mutex_lock(&virtio_fs_mutex);

        list_for_each_entry(fs, &virtio_fs_instances, list) {
                if (strcmp(fs->tag, tag) == 0) {
                        kref_get(&fs->refcount);
                        goto found;
                }
        }

        fs = NULL; /* not found */

found:
        mutex_unlock(&virtio_fs_mutex);

        return fs;
}

static void virtio_fs_free_devs(struct virtio_fs *fs)
{
        unsigned int i;

        for (i = 0; i < fs->nvqs; i++) {
                struct virtio_fs_vq *fsvq = &fs->vqs[i];

                if (!fsvq->fud)
                        continue;

                fuse_dev_free(fsvq->fud);
                fsvq->fud = NULL;
        }
}

/* Read filesystem name from virtio config into fs->tag (devm-allocated). */
static int virtio_fs_read_tag(struct virtio_device *vdev, struct virtio_fs *fs)
{
        char tag_buf[sizeof_field(struct virtio_fs_config, tag)];
        char *end;
        size_t len;

        virtio_cread_bytes(vdev, offsetof(struct virtio_fs_config, tag),
                           &tag_buf, sizeof(tag_buf));
        end = memchr(tag_buf, '\0', sizeof(tag_buf));
        if (end == tag_buf)
                return -EINVAL; /* empty tag */
        if (!end)
                end = &tag_buf[sizeof(tag_buf)];

        len = end - tag_buf;
        fs->tag = devm_kmalloc(&vdev->dev, len + 1, GFP_KERNEL);
        if (!fs->tag)
                return -ENOMEM;
        memcpy(fs->tag, tag_buf, len);
        fs->tag[len] = '\0';
        return 0;
}

/* Work function for hiprio completion */
static void virtio_fs_hiprio_done_work(struct work_struct *work)
{
        struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
                                                 done_work);
        struct virtqueue *vq = fsvq->vq;

        /* Free completed FUSE_FORGET requests */
        spin_lock(&fsvq->lock);
        do {
                unsigned int len;
                void *req;

                virtqueue_disable_cb(vq);

                while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
                        kfree(req);
                        dec_in_flight_req(fsvq);
                }
        } while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
        spin_unlock(&fsvq->lock);
}

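/* Work function for the request queue's dispatch_work: end requests queued
 * on ->end_reqs and retry requests on ->queued_reqs that earlier failed
 * with -ENOMEM or -ENOSPC.
 */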
static void virtio_fs_request_dispatch_work(struct work_struct *work)
{
        struct fuse_req *req;
        struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
                                                 dispatch_work.work);
        int ret;

        pr_debug("virtio-fs: worker %s called.\n", __func__);
        while (1) {
                spin_lock(&fsvq->lock);
                req = list_first_entry_or_null(&fsvq->end_reqs, struct fuse_req,
                                               list);
                if (!req) {
                        spin_unlock(&fsvq->lock);
                        break;
                }

                list_del_init(&req->list);
                spin_unlock(&fsvq->lock);
                fuse_request_end(req);
        }

        /* Dispatch pending requests */
        while (1) {
                spin_lock(&fsvq->lock);
                req = list_first_entry_or_null(&fsvq->queued_reqs,
                                               struct fuse_req, list);
                if (!req) {
                        spin_unlock(&fsvq->lock);
                        return;
                }
                list_del_init(&req->list);
                spin_unlock(&fsvq->lock);

                ret = virtio_fs_enqueue_req(fsvq, req, true);
                if (ret < 0) {
                        if (ret == -ENOMEM || ret == -ENOSPC) {
                                spin_lock(&fsvq->lock);
                                list_add_tail(&req->list, &fsvq->queued_reqs);
                                schedule_delayed_work(&fsvq->dispatch_work,
                                                      msecs_to_jiffies(1));
                                spin_unlock(&fsvq->lock);
                                return;
                        }
                        req->out.h.error = ret;
                        spin_lock(&fsvq->lock);
                        dec_in_flight_req(fsvq);
                        spin_unlock(&fsvq->lock);
                        pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n",
                               ret);
                        fuse_request_end(req);
                }
        }
}

/*
 * Returns 1 if queue is full and sender should wait a bit before sending
 * next request, 0 otherwise.
 */
static int send_forget_request(struct virtio_fs_vq *fsvq,
                               struct virtio_fs_forget *forget,
                               bool in_flight)
{
        struct scatterlist sg;
        struct virtqueue *vq;
        int ret = 0;
        bool notify;
        struct virtio_fs_forget_req *req = &forget->req;

        spin_lock(&fsvq->lock);
        if (!fsvq->connected) {
                if (in_flight)
                        dec_in_flight_req(fsvq);
                kfree(forget);
                goto out;
        }

        sg_init_one(&sg, req, sizeof(*req));
        vq = fsvq->vq;
        dev_dbg(&vq->vdev->dev, "%s\n", __func__);

        ret = virtqueue_add_outbuf(vq, &sg, 1, forget, GFP_ATOMIC);
        if (ret < 0) {
                if (ret == -ENOMEM || ret == -ENOSPC) {
                        pr_debug("virtio-fs: Could not queue FORGET: err=%d. Will try later\n",
                                 ret);
                        list_add_tail(&forget->list, &fsvq->queued_reqs);
                        schedule_delayed_work(&fsvq->dispatch_work,
                                              msecs_to_jiffies(1));
                        if (!in_flight)
                                inc_in_flight_req(fsvq);
                        /* Queue is full */
                        ret = 1;
                } else {
                        pr_debug("virtio-fs: Could not queue FORGET: err=%d. Dropping it.\n",
                                 ret);
                        kfree(forget);
                        if (in_flight)
                                dec_in_flight_req(fsvq);
                }
                goto out;
        }

        if (!in_flight)
                inc_in_flight_req(fsvq);
        notify = virtqueue_kick_prepare(vq);
        spin_unlock(&fsvq->lock);

        if (notify)
                virtqueue_notify(vq);
        return ret;
out:
        spin_unlock(&fsvq->lock);
        return ret;
}

static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
{
        struct virtio_fs_forget *forget;
        struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
                                                 dispatch_work.work);
        pr_debug("virtio-fs: worker %s called.\n", __func__);
        while (1) {
                spin_lock(&fsvq->lock);
                forget = list_first_entry_or_null(&fsvq->queued_reqs,
                                        struct virtio_fs_forget, list);
                if (!forget) {
                        spin_unlock(&fsvq->lock);
                        return;
                }

                list_del(&forget->list);
                spin_unlock(&fsvq->lock);
                if (send_forget_request(fsvq, forget, true))
                        return;
        }
}

/* Allocate and copy args into req->argbuf */
static int copy_args_to_argbuf(struct fuse_req *req)
{
        struct fuse_args *args = req->args;
        unsigned int offset = 0;
        unsigned int num_in;
        unsigned int num_out;
        unsigned int len;
        unsigned int i;

        num_in = args->in_numargs - args->in_pages;
        num_out = args->out_numargs - args->out_pages;
        len = fuse_len_args(num_in, (struct fuse_arg *) args->in_args) +
              fuse_len_args(num_out, args->out_args);

        req->argbuf = kmalloc(len, GFP_ATOMIC);
        if (!req->argbuf)
                return -ENOMEM;

        for (i = 0; i < num_in; i++) {
                memcpy(req->argbuf + offset,
                       args->in_args[i].value,
                       args->in_args[i].size);
                offset += args->in_args[i].size;
        }

        return 0;
}

/* Copy args out of and free req->argbuf */
static void copy_args_from_argbuf(struct fuse_args *args, struct fuse_req *req)
{
        unsigned int remaining;
        unsigned int offset;
        unsigned int num_in;
        unsigned int num_out;
        unsigned int i;

        remaining = req->out.h.len - sizeof(req->out.h);
        num_in = args->in_numargs - args->in_pages;
        num_out = args->out_numargs - args->out_pages;
        offset = fuse_len_args(num_in, (struct fuse_arg *)args->in_args);

        for (i = 0; i < num_out; i++) {
                unsigned int argsize = args->out_args[i].size;

                if (args->out_argvar &&
                    i == args->out_numargs - 1 &&
                    argsize > remaining) {
                        argsize = remaining;
                }

                memcpy(args->out_args[i].value, req->argbuf + offset, argsize);
                offset += argsize;

                if (i != args->out_numargs - 1)
                        remaining -= argsize;
        }

        /* Store the actual size of the variable-length arg */
        if (args->out_argvar)
                args->out_args[args->out_numargs - 1].size = remaining;

        kfree(req->argbuf);
        req->argbuf = NULL;
}

/* Work function for request completion */
static void virtio_fs_request_complete(struct fuse_req *req,
                                       struct virtio_fs_vq *fsvq)
{
        struct fuse_pqueue *fpq = &fsvq->fud->pq;
        struct fuse_args *args;
        struct fuse_args_pages *ap;
        unsigned int len, i, thislen;
        struct page *page;

        /*
         * TODO verify that server properly follows FUSE protocol
         * (oh.uniq, oh.len)
         */
        args = req->args;
        copy_args_from_argbuf(args, req);

        if (args->out_pages && args->page_zeroing) {
                len = args->out_args[args->out_numargs - 1].size;
                ap = container_of(args, typeof(*ap), args);
                for (i = 0; i < ap->num_pages; i++) {
                        thislen = ap->descs[i].length;
                        if (len < thislen) {
                                WARN_ON(ap->descs[i].offset);
                                page = ap->pages[i];
                                zero_user_segment(page, len, thislen);
                                len = 0;
                        } else {
                                len -= thislen;
                        }
                }
        }

        spin_lock(&fpq->lock);
        clear_bit(FR_SENT, &req->flags);
        spin_unlock(&fpq->lock);

        fuse_request_end(req);
        spin_lock(&fsvq->lock);
        dec_in_flight_req(fsvq);
        spin_unlock(&fsvq->lock);
}

static void virtio_fs_complete_req_work(struct work_struct *work)
{
        struct virtio_fs_req_work *w =
                container_of(work, typeof(*w), done_work);

        virtio_fs_request_complete(w->req, w->fsvq);
        kfree(w);
}

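/* Work function for the request queue's done_work: collect completed
 * requests off the virtqueue and end them; requests whose completion may
 * block are deferred to a separate worker.
 */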
static void virtio_fs_requests_done_work(struct work_struct *work)
{
        struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
                                                 done_work);
        struct fuse_pqueue *fpq = &fsvq->fud->pq;
        struct virtqueue *vq = fsvq->vq;
        struct fuse_req *req;
        struct fuse_req *next;
        unsigned int len;
        LIST_HEAD(reqs);

        /* Collect completed requests off the virtqueue */
        spin_lock(&fsvq->lock);
        do {
                virtqueue_disable_cb(vq);

                while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
                        spin_lock(&fpq->lock);
                        list_move_tail(&req->list, &reqs);
                        spin_unlock(&fpq->lock);
                }
        } while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
        spin_unlock(&fsvq->lock);

        /* End requests */
        list_for_each_entry_safe(req, next, &reqs, list) {
                list_del_init(&req->list);

                /* blocking async request completes in a worker context */
                if (req->args->may_block) {
                        struct virtio_fs_req_work *w;

                        w = kzalloc(sizeof(*w), GFP_NOFS | __GFP_NOFAIL);
                        INIT_WORK(&w->done_work, virtio_fs_complete_req_work);
                        w->fsvq = fsvq;
                        w->req = req;
                        schedule_work(&w->done_work);
                } else {
                        virtio_fs_request_complete(req, fsvq);
                }
        }
}

/* Virtqueue interrupt handler */
static void virtio_fs_vq_done(struct virtqueue *vq)
{
        struct virtio_fs_vq *fsvq = vq_to_fsvq(vq);

        dev_dbg(&vq->vdev->dev, "%s %s\n", __func__, fsvq->name);

        schedule_work(&fsvq->done_work);
}

static void virtio_fs_init_vq(struct virtio_fs_vq *fsvq, char *name,
                              int vq_type)
{
        strncpy(fsvq->name, name, VQ_NAME_LEN);
        spin_lock_init(&fsvq->lock);
        INIT_LIST_HEAD(&fsvq->queued_reqs);
        INIT_LIST_HEAD(&fsvq->end_reqs);
        init_completion(&fsvq->in_flight_zero);

        if (vq_type == VQ_REQUEST) {
                INIT_WORK(&fsvq->done_work, virtio_fs_requests_done_work);
                INIT_DELAYED_WORK(&fsvq->dispatch_work,
                                  virtio_fs_request_dispatch_work);
        } else {
                INIT_WORK(&fsvq->done_work, virtio_fs_hiprio_done_work);
                INIT_DELAYED_WORK(&fsvq->dispatch_work,
                                  virtio_fs_hiprio_dispatch_work);
        }
}

/* Initialize virtqueues */
static int virtio_fs_setup_vqs(struct virtio_device *vdev,
                               struct virtio_fs *fs)
{
        struct virtqueue **vqs;
        vq_callback_t **callbacks;
        const char **names;
        unsigned int i;
        int ret = 0;

        virtio_cread_le(vdev, struct virtio_fs_config, num_request_queues,
                        &fs->num_request_queues);
        if (fs->num_request_queues == 0)
                return -EINVAL;

        fs->nvqs = VQ_REQUEST + fs->num_request_queues;
        fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL);
        if (!fs->vqs)
                return -ENOMEM;

        vqs = kmalloc_array(fs->nvqs, sizeof(vqs[VQ_HIPRIO]), GFP_KERNEL);
        callbacks = kmalloc_array(fs->nvqs, sizeof(callbacks[VQ_HIPRIO]),
                                  GFP_KERNEL);
        names = kmalloc_array(fs->nvqs, sizeof(names[VQ_HIPRIO]), GFP_KERNEL);
        if (!vqs || !callbacks || !names) {
                ret = -ENOMEM;
                goto out;
        }

        /* Initialize the hiprio/forget request virtqueue */
        callbacks[VQ_HIPRIO] = virtio_fs_vq_done;
        virtio_fs_init_vq(&fs->vqs[VQ_HIPRIO], "hiprio", VQ_HIPRIO);
        names[VQ_HIPRIO] = fs->vqs[VQ_HIPRIO].name;

        /* Initialize the requests virtqueues */
        for (i = VQ_REQUEST; i < fs->nvqs; i++) {
                char vq_name[VQ_NAME_LEN];

                snprintf(vq_name, VQ_NAME_LEN, "requests.%u", i - VQ_REQUEST);
                virtio_fs_init_vq(&fs->vqs[i], vq_name, VQ_REQUEST);
                callbacks[i] = virtio_fs_vq_done;
                names[i] = fs->vqs[i].name;
        }

        ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, NULL);
        if (ret < 0)
                goto out;

        for (i = 0; i < fs->nvqs; i++)
                fs->vqs[i].vq = vqs[i];

        virtio_fs_start_all_queues(fs);
out:
        kfree(names);
        kfree(callbacks);
        kfree(vqs);
        if (ret) {
                kfree(fs->vqs);
                fs->vqs = NULL;
        }
        return ret;
}

/* Free virtqueues (device must already be reset) */
static void virtio_fs_cleanup_vqs(struct virtio_device *vdev,
                                  struct virtio_fs *fs)
{
        vdev->config->del_vqs(vdev);
}

/* Map a window offset to a page frame number. The window offset will have
 * been produced by .iomap_begin(), which maps a file offset to a window
 * offset.
 */
static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
                                    long nr_pages, void **kaddr, pfn_t *pfn)
{
        struct virtio_fs *fs = dax_get_private(dax_dev);
        phys_addr_t offset = PFN_PHYS(pgoff);
        size_t max_nr_pages = fs->window_len / PAGE_SIZE - pgoff;

        if (kaddr)
                *kaddr = fs->window_kaddr + offset;
        if (pfn)
                *pfn = phys_to_pfn_t(fs->window_phys_addr + offset,
                                     PFN_DEV | PFN_MAP);
        return nr_pages > max_nr_pages ? max_nr_pages : nr_pages;
}

static size_t virtio_fs_copy_from_iter(struct dax_device *dax_dev,
                                       pgoff_t pgoff, void *addr,
                                       size_t bytes, struct iov_iter *i)
{
        return copy_from_iter(addr, bytes, i);
}

static size_t virtio_fs_copy_to_iter(struct dax_device *dax_dev,
                                     pgoff_t pgoff, void *addr,
                                     size_t bytes, struct iov_iter *i)
{
        return copy_to_iter(addr, bytes, i);
}

static int virtio_fs_zero_page_range(struct dax_device *dax_dev,
                                     pgoff_t pgoff, size_t nr_pages)
{
        long rc;
        void *kaddr;

        rc = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, NULL);
        if (rc < 0)
                return rc;
        memset(kaddr, 0, nr_pages << PAGE_SHIFT);
        dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT);
        return 0;
}

static const struct dax_operations virtio_fs_dax_ops = {
        .direct_access = virtio_fs_direct_access,
        .copy_from_iter = virtio_fs_copy_from_iter,
        .copy_to_iter = virtio_fs_copy_to_iter,
        .zero_page_range = virtio_fs_zero_page_range,
};

static void virtio_fs_cleanup_dax(void *data)
{
        struct dax_device *dax_dev = data;

        kill_dax(dax_dev);
        put_dax(dax_dev);
}

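/* Set up the DAX window: the device exposes file contents through a shared
 * memory region (VIRTIO_FS_SHMCAP_ID_CACHE) that is memremapped and then
 * handed to the FUSE DAX code as a dax_device.
 */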
static int virtio_fs_setup_dax(struct virtio_device *vdev, struct virtio_fs *fs)
{
        struct virtio_shm_region cache_reg;
        struct dev_pagemap *pgmap;
        bool have_cache;

        if (!IS_ENABLED(CONFIG_FUSE_DAX))
                return 0;

        /* Get cache region */
        have_cache = virtio_get_shm_region(vdev, &cache_reg,
                                           (u8)VIRTIO_FS_SHMCAP_ID_CACHE);
        if (!have_cache) {
                dev_notice(&vdev->dev, "%s: No cache capability\n", __func__);
                return 0;
        }

        if (!devm_request_mem_region(&vdev->dev, cache_reg.addr, cache_reg.len,
                                     dev_name(&vdev->dev))) {
                dev_warn(&vdev->dev, "could not reserve region addr=0x%llx len=0x%llx\n",
                         cache_reg.addr, cache_reg.len);
                return -EBUSY;
        }

        dev_notice(&vdev->dev, "Cache len: 0x%llx @ 0x%llx\n", cache_reg.len,
                   cache_reg.addr);

        pgmap = devm_kzalloc(&vdev->dev, sizeof(*pgmap), GFP_KERNEL);
        if (!pgmap)
                return -ENOMEM;

        pgmap->type = MEMORY_DEVICE_FS_DAX;

        /* Ideally we would directly use the PCI BAR resource but
         * devm_memremap_pages() wants its own copy in pgmap. So
         * initialize a struct resource from scratch (only the start
         * and end fields will be used).
         */
        pgmap->range = (struct range) {
                .start = (phys_addr_t) cache_reg.addr,
                .end = (phys_addr_t) cache_reg.addr + cache_reg.len - 1,
        };
        pgmap->nr_range = 1;

        fs->window_kaddr = devm_memremap_pages(&vdev->dev, pgmap);
        if (IS_ERR(fs->window_kaddr))
                return PTR_ERR(fs->window_kaddr);

        fs->window_phys_addr = (phys_addr_t) cache_reg.addr;
        fs->window_len = (phys_addr_t) cache_reg.len;

        dev_dbg(&vdev->dev, "%s: window kaddr 0x%px phys_addr 0x%llx len 0x%llx\n",
                __func__, fs->window_kaddr, cache_reg.addr, cache_reg.len);

        fs->dax_dev = alloc_dax(fs, NULL, &virtio_fs_dax_ops, 0);
        if (IS_ERR(fs->dax_dev))
                return PTR_ERR(fs->dax_dev);

        return devm_add_action_or_reset(&vdev->dev, virtio_fs_cleanup_dax,
                                        fs->dax_dev);
}

static int virtio_fs_probe(struct virtio_device *vdev)
{
        struct virtio_fs *fs;
        int ret;

        fs = kzalloc(sizeof(*fs), GFP_KERNEL);
        if (!fs)
                return -ENOMEM;
        kref_init(&fs->refcount);
        vdev->priv = fs;

        ret = virtio_fs_read_tag(vdev, fs);
        if (ret < 0)
                goto out;

        ret = virtio_fs_setup_vqs(vdev, fs);
        if (ret < 0)
                goto out;

        /* TODO vq affinity */

        ret = virtio_fs_setup_dax(vdev, fs);
        if (ret < 0)
                goto out_vqs;

        /* Bring the device online in case the filesystem is mounted and
         * requests need to be sent before we return.
         */
        virtio_device_ready(vdev);

        ret = virtio_fs_add_instance(fs);
        if (ret < 0)
                goto out_vqs;

        return 0;

out_vqs:
        vdev->config->reset(vdev);
        virtio_fs_cleanup_vqs(vdev, fs);
        kfree(fs->vqs);

out:
        vdev->priv = NULL;
        kfree(fs);
        return ret;
}

static void virtio_fs_stop_all_queues(struct virtio_fs *fs)
{
        struct virtio_fs_vq *fsvq;
        int i;

        for (i = 0; i < fs->nvqs; i++) {
                fsvq = &fs->vqs[i];
                spin_lock(&fsvq->lock);
                fsvq->connected = false;
                spin_unlock(&fsvq->lock);
        }
}

static void virtio_fs_remove(struct virtio_device *vdev)
{
        struct virtio_fs *fs = vdev->priv;

        mutex_lock(&virtio_fs_mutex);
        /* This device is going away. No one should get new reference */
        list_del_init(&fs->list);
        virtio_fs_stop_all_queues(fs);
        virtio_fs_drain_all_queues_locked(fs);
        vdev->config->reset(vdev);
        virtio_fs_cleanup_vqs(vdev, fs);

        vdev->priv = NULL;
        /* Put device reference on virtio_fs object */
        virtio_fs_put(fs);
        mutex_unlock(&virtio_fs_mutex);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_fs_freeze(struct virtio_device *vdev)
{
        /* TODO need to save state here */
        pr_warn("virtio-fs: suspend/resume not yet supported\n");
        return -EOPNOTSUPP;
}

static int virtio_fs_restore(struct virtio_device *vdev)
{
        /* TODO need to restore state here */
        return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct virtio_device_id id_table[] = {
        { VIRTIO_ID_FS, VIRTIO_DEV_ANY_ID },
        {},
};

static const unsigned int feature_table[] = {};

static struct virtio_driver virtio_fs_driver = {
        .driver.name = KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table = id_table,
        .feature_table = feature_table,
        .feature_table_size = ARRAY_SIZE(feature_table),
        .probe = virtio_fs_probe,
        .remove = virtio_fs_remove,
#ifdef CONFIG_PM_SLEEP
        .freeze = virtio_fs_freeze,
        .restore = virtio_fs_restore,
#endif
};

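/* FUSE iqueue callback: turn a queued FUSE_FORGET into a virtio_fs_forget
 * request and send it on the hiprio queue. Called with fiq->lock held,
 * which is dropped before sending.
 */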
static void virtio_fs_wake_forget_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
        struct fuse_forget_link *link;
        struct virtio_fs_forget *forget;
        struct virtio_fs_forget_req *req;
        struct virtio_fs *fs;
        struct virtio_fs_vq *fsvq;
        u64 unique;

        link = fuse_dequeue_forget(fiq, 1, NULL);
        unique = fuse_get_unique(fiq);

        fs = fiq->priv;
        fsvq = &fs->vqs[VQ_HIPRIO];
        spin_unlock(&fiq->lock);

        /* Allocate a buffer for the request */
        forget = kmalloc(sizeof(*forget), GFP_NOFS | __GFP_NOFAIL);
        req = &forget->req;

        req->ih = (struct fuse_in_header){
                .opcode = FUSE_FORGET,
                .nodeid = link->forget_one.nodeid,
                .unique = unique,
                .len = sizeof(*req),
        };
        req->arg = (struct fuse_forget_in){
                .nlookup = link->forget_one.nlookup,
        };

        send_forget_request(fsvq, forget, false);
        kfree(link);
}

static void virtio_fs_wake_interrupt_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
        /*
         * TODO interrupts.
         *
         * Normal fs operations on a local filesystem aren't interruptible.
         * Exceptions are blocking lock operations; for example fcntl(F_SETLKW)
         * with shared lock between host and guest.
         */
        spin_unlock(&fiq->lock);
}

/* Count number of scatter-gather elements required */
static unsigned int sg_count_fuse_pages(struct fuse_page_desc *page_descs,
                                        unsigned int num_pages,
                                        unsigned int total_len)
{
        unsigned int i;
        unsigned int this_len;

        for (i = 0; i < num_pages && total_len; i++) {
                this_len = min(page_descs[i].length, total_len);
                total_len -= this_len;
        }

        return i;
}

/* Return the number of scatter-gather list elements required */
static unsigned int sg_count_fuse_req(struct fuse_req *req)
{
        struct fuse_args *args = req->args;
        struct fuse_args_pages *ap = container_of(args, typeof(*ap), args);
        unsigned int size, total_sgs = 1 /* fuse_in_header */;

        if (args->in_numargs - args->in_pages)
                total_sgs += 1;

        if (args->in_pages) {
                size = args->in_args[args->in_numargs - 1].size;
                total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages,
                                                 size);
        }

        if (!test_bit(FR_ISREPLY, &req->flags))
                return total_sgs;

        total_sgs += 1 /* fuse_out_header */;

        if (args->out_numargs - args->out_pages)
                total_sgs += 1;

        if (args->out_pages) {
                size = args->out_args[args->out_numargs - 1].size;
                total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages,
                                                 size);
        }

        return total_sgs;
}

/* Add pages to scatter-gather list and return number of elements used */
static unsigned int sg_init_fuse_pages(struct scatterlist *sg,
                                       struct page **pages,
                                       struct fuse_page_desc *page_descs,
                                       unsigned int num_pages,
                                       unsigned int total_len)
{
        unsigned int i;
        unsigned int this_len;

        for (i = 0; i < num_pages && total_len; i++) {
                sg_init_table(&sg[i], 1);
                this_len = min(page_descs[i].length, total_len);
                sg_set_page(&sg[i], pages[i], this_len, page_descs[i].offset);
                total_len -= this_len;
        }

        return i;
}

/* Add args to scatter-gather list and return number of elements used */
static unsigned int sg_init_fuse_args(struct scatterlist *sg,
                                      struct fuse_req *req,
                                      struct fuse_arg *args,
                                      unsigned int numargs,
                                      bool argpages,
                                      void *argbuf,
                                      unsigned int *len_used)
{
        struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);
        unsigned int total_sgs = 0;
        unsigned int len;

        len = fuse_len_args(numargs - argpages, args);
        if (len)
                sg_init_one(&sg[total_sgs++], argbuf, len);

        if (argpages)
                total_sgs += sg_init_fuse_pages(&sg[total_sgs],
                                                ap->pages, ap->descs,
                                                ap->num_pages,
                                                args[numargs - 1].size);

        if (len_used)
                *len_used = len;

        return total_sgs;
}

/* Add a request to a virtqueue and kick the device */
static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
                                 struct fuse_req *req, bool in_flight)
{
        /* requests need at least 4 elements */
        struct scatterlist *stack_sgs[6];
        struct scatterlist stack_sg[ARRAY_SIZE(stack_sgs)];
        struct scatterlist **sgs = stack_sgs;
        struct scatterlist *sg = stack_sg;
        struct virtqueue *vq;
        struct fuse_args *args = req->args;
        unsigned int argbuf_used = 0;
        unsigned int out_sgs = 0;
        unsigned int in_sgs = 0;
        unsigned int total_sgs;
        unsigned int i;
        int ret;
        bool notify;
        struct fuse_pqueue *fpq;

        /* Does the sglist fit on the stack? */
        total_sgs = sg_count_fuse_req(req);
        if (total_sgs > ARRAY_SIZE(stack_sgs)) {
                sgs = kmalloc_array(total_sgs, sizeof(sgs[0]), GFP_ATOMIC);
                sg = kmalloc_array(total_sgs, sizeof(sg[0]), GFP_ATOMIC);
                if (!sgs || !sg) {
                        ret = -ENOMEM;
                        goto out;
                }
        }

        /* Use a bounce buffer since stack args cannot be mapped */
        ret = copy_args_to_argbuf(req);
        if (ret < 0)
                goto out;

        /* Request elements */
        sg_init_one(&sg[out_sgs++], &req->in.h, sizeof(req->in.h));
        out_sgs += sg_init_fuse_args(&sg[out_sgs], req,
                                     (struct fuse_arg *)args->in_args,
                                     args->in_numargs, args->in_pages,
                                     req->argbuf, &argbuf_used);

        /* Reply elements */
        if (test_bit(FR_ISREPLY, &req->flags)) {
                sg_init_one(&sg[out_sgs + in_sgs++],
                            &req->out.h, sizeof(req->out.h));
                in_sgs += sg_init_fuse_args(&sg[out_sgs + in_sgs], req,
                                            args->out_args, args->out_numargs,
                                            args->out_pages,
                                            req->argbuf + argbuf_used, NULL);
        }

        WARN_ON(out_sgs + in_sgs != total_sgs);

        for (i = 0; i < total_sgs; i++)
                sgs[i] = &sg[i];

        spin_lock(&fsvq->lock);

        if (!fsvq->connected) {
                spin_unlock(&fsvq->lock);
                ret = -ENOTCONN;
                goto out;
        }

        vq = fsvq->vq;
        ret = virtqueue_add_sgs(vq, sgs, out_sgs, in_sgs, req, GFP_ATOMIC);
        if (ret < 0) {
                spin_unlock(&fsvq->lock);
                goto out;
        }

        /* Request successfully sent. */
        fpq = &fsvq->fud->pq;
        spin_lock(&fpq->lock);
        list_add_tail(&req->list, fpq->processing);
        spin_unlock(&fpq->lock);
        set_bit(FR_SENT, &req->flags);
        /* matches barrier in request_wait_answer() */
        smp_mb__after_atomic();

        if (!in_flight)
                inc_in_flight_req(fsvq);
        notify = virtqueue_kick_prepare(vq);

        spin_unlock(&fsvq->lock);

        if (notify)
                virtqueue_notify(vq);

out:
        if (ret < 0 && req->argbuf) {
                kfree(req->argbuf);
                req->argbuf = NULL;
        }
        if (sgs != stack_sgs) {
                kfree(sgs);
                kfree(sg);
        }

        return ret;
}

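/* FUSE iqueue callback: take the pending request off fiq->pending and
 * enqueue it on a request queue. Called with fiq->lock held, which is
 * dropped before enqueuing.
 */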
static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
        unsigned int queue_id = VQ_REQUEST; /* TODO multiqueue */
        struct virtio_fs *fs;
        struct fuse_req *req;
        struct virtio_fs_vq *fsvq;
        int ret;

        WARN_ON(list_empty(&fiq->pending));
        req = list_last_entry(&fiq->pending, struct fuse_req, list);
        clear_bit(FR_PENDING, &req->flags);
        list_del_init(&req->list);
        WARN_ON(!list_empty(&fiq->pending));
        spin_unlock(&fiq->lock);

        fs = fiq->priv;

        pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u\n",
                 __func__, req->in.h.opcode, req->in.h.unique,
                 req->in.h.nodeid, req->in.h.len,
                 fuse_len_args(req->args->out_numargs, req->args->out_args));

        fsvq = &fs->vqs[queue_id];
        ret = virtio_fs_enqueue_req(fsvq, req, false);
        if (ret < 0) {
                if (ret == -ENOMEM || ret == -ENOSPC) {
                        /*
                         * Virtqueue full. Retry submission from worker
                         * context as we might be holding fc->bg_lock.
                         */
                        spin_lock(&fsvq->lock);
                        list_add_tail(&req->list, &fsvq->queued_reqs);
                        inc_in_flight_req(fsvq);
                        schedule_delayed_work(&fsvq->dispatch_work,
                                              msecs_to_jiffies(1));
                        spin_unlock(&fsvq->lock);
                        return;
                }
                req->out.h.error = ret;
                pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n", ret);

                /* Can't end request in submission context. Use a worker */
                spin_lock(&fsvq->lock);
                list_add_tail(&req->list, &fsvq->end_reqs);
                schedule_delayed_work(&fsvq->dispatch_work, 0);
                spin_unlock(&fsvq->lock);
                return;
        }
}

static const struct fuse_iqueue_ops virtio_fs_fiq_ops = {
        .wake_forget_and_unlock = virtio_fs_wake_forget_and_unlock,
        .wake_interrupt_and_unlock = virtio_fs_wake_interrupt_and_unlock,
        .wake_pending_and_unlock = virtio_fs_wake_pending_and_unlock,
        .release = virtio_fs_fiq_release,
};

static inline void virtio_fs_ctx_set_defaults(struct fuse_fs_context *ctx)
{
        ctx->rootmode = S_IFDIR;
        ctx->default_permissions = 1;
        ctx->allow_other = 1;
        ctx->max_read = UINT_MAX;
        ctx->blksize = 512;
        ctx->destroy = true;
        ctx->no_control = true;
        ctx->no_force_umount = true;
}

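/* Fill the superblock: allocate a fuse_dev per virtqueue, fill the common
 * FUSE superblock, install the devices and (re)start the queues.
 */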
static int virtio_fs_fill_super(struct super_block *sb, struct fs_context *fsc)
{
        struct fuse_mount *fm = get_fuse_mount_super(sb);
        struct fuse_conn *fc = fm->fc;
        struct virtio_fs *fs = fc->iq.priv;
        struct fuse_fs_context *ctx = fsc->fs_private;
        unsigned int i;
        int err;

        virtio_fs_ctx_set_defaults(ctx);
        mutex_lock(&virtio_fs_mutex);

        /* After holding mutex, make sure virtiofs device is still there.
         * Though we are holding a reference to it, driver ->remove might
         * still have cleaned up virtual queues. In that case bail out.
         */
        err = -EINVAL;
        if (list_empty(&fs->list)) {
                pr_info("virtio-fs: tag <%s> not found\n", fs->tag);
                goto err;
        }

        err = -ENOMEM;
        /* Allocate fuse_dev for hiprio and notification queues */
        for (i = 0; i < fs->nvqs; i++) {
                struct virtio_fs_vq *fsvq = &fs->vqs[i];

                fsvq->fud = fuse_dev_alloc();
                if (!fsvq->fud)
                        goto err_free_fuse_devs;
        }

        /* virtiofs allocates and installs its own fuse devices */
        ctx->fudptr = NULL;
        if (ctx->dax) {
                if (!fs->dax_dev) {
                        err = -EINVAL;
                        pr_err("virtio-fs: dax can't be enabled as filesystem"
                               " device does not support it.\n");
                        goto err_free_fuse_devs;
                }
                ctx->dax_dev = fs->dax_dev;
        }
        err = fuse_fill_super_common(sb, ctx);
        if (err < 0)
                goto err_free_fuse_devs;

        for (i = 0; i < fs->nvqs; i++) {
                struct virtio_fs_vq *fsvq = &fs->vqs[i];

                fuse_dev_install(fsvq->fud, fc);
        }

        /* Previous unmount will stop all queues. Start these again */
        virtio_fs_start_all_queues(fs);
        fuse_send_init(fm);
        mutex_unlock(&virtio_fs_mutex);
        return 0;

err_free_fuse_devs:
        virtio_fs_free_devs(fs);
err:
        mutex_unlock(&virtio_fs_mutex);
        return err;
}

static void virtio_fs_conn_destroy(struct fuse_mount *fm)
{
        struct fuse_conn *fc = fm->fc;
        struct virtio_fs *vfs = fc->iq.priv;
        struct virtio_fs_vq *fsvq = &vfs->vqs[VQ_HIPRIO];

        /* Stop dax worker. Soon evict_inodes() will be called which
         * will free all memory ranges belonging to all inodes.
         */
        if (IS_ENABLED(CONFIG_FUSE_DAX))
                fuse_dax_cancel_work(fc);

        /* Stop forget queue. Soon destroy will be sent */
        spin_lock(&fsvq->lock);
        fsvq->connected = false;
        spin_unlock(&fsvq->lock);
        virtio_fs_drain_all_queues(vfs);

        fuse_conn_destroy(fm);

        /* fuse_conn_destroy() must have sent destroy. Stop all queues
         * and drain one more time and free fuse devices. Freeing fuse
         * devices will drop their reference on fuse_conn and that in
         * turn will drop its reference on virtio_fs object.
         */
        virtio_fs_stop_all_queues(vfs);
        virtio_fs_drain_all_queues(vfs);
        virtio_fs_free_devs(vfs);
}

static void virtio_kill_sb(struct super_block *sb)
{
        struct fuse_mount *fm = get_fuse_mount_super(sb);
        bool last;

        /* If mount failed, we can still be called without any fc */
        if (fm) {
                last = fuse_mount_remove(fm);
                if (last)
                        virtio_fs_conn_destroy(fm);
        }
        kill_anon_super(sb);
}

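/* Superblocks are shared per device instance: two mounts match if they
 * resolve to the same virtio_fs object, installed in fc->iq.priv.
 */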
static int virtio_fs_test_super(struct super_block *sb,
                                struct fs_context *fsc)
{
        struct fuse_mount *fsc_fm = fsc->s_fs_info;
        struct fuse_mount *sb_fm = get_fuse_mount_super(sb);

        return fsc_fm->fc->iq.priv == sb_fm->fc->iq.priv;
}

static int virtio_fs_set_super(struct super_block *sb,
                               struct fs_context *fsc)
{
        int err;

        err = get_anon_bdev(&sb->s_dev);
        if (!err)
                fuse_mount_get(fsc->s_fs_info);

        return err;
}

static int virtio_fs_get_tree(struct fs_context *fsc)
{
        struct virtio_fs *fs;
        struct super_block *sb;
        struct fuse_conn *fc = NULL;
        struct fuse_mount *fm;
        unsigned int virtqueue_size;
        int err = -EIO;

        /* This gets a reference on virtio_fs object. This ptr gets installed
         * in fc->iq->priv. Once fuse_conn is going away, it calls ->put()
         * to drop the reference to this object.
         */
        fs = virtio_fs_find_instance(fsc->source);
        if (!fs) {
                pr_info("virtio-fs: tag <%s> not found\n", fsc->source);
                return -EINVAL;
        }

        virtqueue_size = virtqueue_get_vring_size(fs->vqs[VQ_REQUEST].vq);
        if (WARN_ON(virtqueue_size <= FUSE_HEADER_OVERHEAD))
                goto out_err;

        err = -ENOMEM;
        fc = kzalloc(sizeof(struct fuse_conn), GFP_KERNEL);
        if (!fc)
                goto out_err;

        fm = kzalloc(sizeof(struct fuse_mount), GFP_KERNEL);
        if (!fm)
                goto out_err;

        fuse_conn_init(fc, fm, fsc->user_ns, &virtio_fs_fiq_ops, fs);
        fc->release = fuse_free_conn;
        fc->delete_stale = true;
        fc->auto_submounts = true;

        /* Tell FUSE to split requests that exceed the virtqueue's size */
        fc->max_pages_limit = min_t(unsigned int, fc->max_pages_limit,
                                    virtqueue_size - FUSE_HEADER_OVERHEAD);

        fsc->s_fs_info = fm;
        sb = sget_fc(fsc, virtio_fs_test_super, virtio_fs_set_super);
        fuse_mount_put(fm);
        if (IS_ERR(sb))
                return PTR_ERR(sb);

        if (!sb->s_root) {
                err = virtio_fs_fill_super(sb, fsc);
                if (err) {
                        fuse_mount_put(fm);
                        sb->s_fs_info = NULL;
                        deactivate_locked_super(sb);
                        return err;
                }

                sb->s_flags |= SB_ACTIVE;
        }

        WARN_ON(fsc->root);
        fsc->root = dget(sb->s_root);
        return 0;

out_err:
        kfree(fc);
        mutex_lock(&virtio_fs_mutex);
        virtio_fs_put(fs);
        mutex_unlock(&virtio_fs_mutex);
        return err;
}

static const struct fs_context_operations virtio_fs_context_ops = {
        .free = virtio_fs_free_fc,
        .parse_param = virtio_fs_parse_param,
        .get_tree = virtio_fs_get_tree,
};

static int virtio_fs_init_fs_context(struct fs_context *fsc)
{
        struct fuse_fs_context *ctx;

        ctx = kzalloc(sizeof(struct fuse_fs_context), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
        fsc->fs_private = ctx;
        fsc->ops = &virtio_fs_context_ops;
        return 0;
}

static struct file_system_type virtio_fs_type = {
        .owner = THIS_MODULE,
        .name = "virtiofs",
        .init_fs_context = virtio_fs_init_fs_context,
        .kill_sb = virtio_kill_sb,
};

static int __init virtio_fs_init(void)
{
        int ret;

        ret = register_virtio_driver(&virtio_fs_driver);
        if (ret < 0)
                return ret;

        ret = register_filesystem(&virtio_fs_type);
        if (ret < 0) {
                unregister_virtio_driver(&virtio_fs_driver);
                return ret;
        }

        return 0;
}
module_init(virtio_fs_init);

static void __exit virtio_fs_exit(void)
{
        unregister_filesystem(&virtio_fs_type);
        unregister_virtio_driver(&virtio_fs_driver);
}
module_exit(virtio_fs_exit);

MODULE_AUTHOR("Stefan Hajnoczi <stefanha@redhat.com>");
MODULE_DESCRIPTION("Virtio Filesystem");
MODULE_LICENSE("GPL");
MODULE_ALIAS_FS(KBUILD_MODNAME);
MODULE_DEVICE_TABLE(virtio, id_table);