// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Intel Corporation.
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * Author: Tiwei Bie <tiwei.bie@intel.com>
 *         Jason Wang <jasowang@redhat.com>
 *
 * Thanks to Michael S. Tsirkin for the valuable comments and
 * suggestions, and to Cunming Liang and Zhihong Wang for all
 * their support.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>

#include "vhost.h"
enum {
        VHOST_VDPA_BACKEND_FEATURES =
                (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
                (1ULL << VHOST_BACKEND_F_IOTLB_BATCH),
};

#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)
struct vhost_vdpa {
        struct vhost_dev vdev;
        struct iommu_domain *domain;
        struct vhost_virtqueue *vqs;
        struct completion completion;
        struct vdpa_device *vdpa;
        struct device dev;
        struct cdev cdev;
        atomic_t opened;
        int nvqs;
        int virtio_id;
        int minor;
        struct eventfd_ctx *config_ctx;
        bool in_batch;
        struct vdpa_iova_range range;
};

static DEFINE_IDA(vhost_vdpa_ida);

static dev_t vhost_vdpa_major;
static void handle_vq_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                  poll.work);
        struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
        const struct vdpa_config_ops *ops = v->vdpa->config;

        ops->kick_vq(v->vdpa, vq - v->vqs);
}
static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
{
        struct vhost_virtqueue *vq = private;
        struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;

        if (call_ctx)
                eventfd_signal(call_ctx, 1);

        return IRQ_HANDLED;
}
static irqreturn_t vhost_vdpa_config_cb(void *private)
{
        struct vhost_vdpa *v = private;
        struct eventfd_ctx *config_ctx = v->config_ctx;

        if (config_ctx)
                eventfd_signal(config_ctx, 1);

        return IRQ_HANDLED;
}
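/*
 * Try to connect a virtqueue's call eventfd to the parent device's
 * per-virtqueue interrupt (if it exposes one) through the IRQ bypass
 * manager, so that consumers such as KVM's irqfd can skip the
 * vhost_vdpa_virtqueue_cb() relay.
 */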
static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
        struct vhost_virtqueue *vq = &v->vqs[qid];
        const struct vdpa_config_ops *ops = v->vdpa->config;
        struct vdpa_device *vdpa = v->vdpa;
        int ret, irq;

        if (!ops->get_vq_irq)
                return;

        irq = ops->get_vq_irq(vdpa, qid);
        if (irq < 0)
                return;

        irq_bypass_unregister_producer(&vq->call_ctx.producer);
        if (!vq->call_ctx.ctx)
                return;

        vq->call_ctx.producer.token = vq->call_ctx.ctx;
        vq->call_ctx.producer.irq = irq;
        ret = irq_bypass_register_producer(&vq->call_ctx.producer);
        if (unlikely(ret))
                dev_info(&v->dev, "vq %u, irq bypass producer (token %p) registration fails, ret = %d\n",
                         qid, vq->call_ctx.producer.token, ret);
}
static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
        struct vhost_virtqueue *vq = &v->vqs[qid];

        irq_bypass_unregister_producer(&vq->call_ctx.producer);
}
static void vhost_vdpa_reset(struct vhost_vdpa *v)
{
        struct vdpa_device *vdpa = v->vdpa;

        vdpa_reset(vdpa);
        v->in_batch = 0;
}
static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
{
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        u32 device_id;

        device_id = ops->get_device_id(vdpa);

        if (copy_to_user(argp, &device_id, sizeof(device_id)))
                return -EFAULT;

        return 0;
}
static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
{
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        u8 status;

        status = ops->get_status(vdpa);

        if (copy_to_user(statusp, &status, sizeof(status)))
                return -EFAULT;

        return 0;
}
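/*
 * A status write may only add bits; clearing individual bits is
 * rejected and a full device reset (writing 0) is the only way to
 * remove them. Crossing DRIVER_OK in either direction also sets up or
 * tears down the per-virtqueue interrupt bypass.
 */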
static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
{
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        u8 status, status_old;
        int nvqs = v->nvqs;
        u16 i;

        if (copy_from_user(&status, statusp, sizeof(status)))
                return -EFAULT;

        status_old = ops->get_status(vdpa);

        /*
         * Userspace shouldn't remove status bits unless it resets the
         * status to 0.
         */
        if (status != 0 && (ops->get_status(vdpa) & ~status) != 0)
                return -EINVAL;

        ops->set_status(vdpa, status);

        if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
                for (i = 0; i < nvqs; i++)
                        vhost_vdpa_setup_vq_irq(v, i);

        if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
                for (i = 0; i < nvqs; i++)
                        vhost_vdpa_unsetup_vq_irq(v, i);

        return 0;
}
static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
                                      struct vhost_vdpa_config *c)
{
        long size = 0;

        switch (v->virtio_id) {
        case VIRTIO_ID_NET:
                size = sizeof(struct virtio_net_config);
                break;
        }

        if (c->len == 0 || c->off > size)
                return -EINVAL;

        if (c->len > size - c->off)
                return -E2BIG;

        return 0;
}
static long vhost_vdpa_get_config(struct vhost_vdpa *v,
                                  struct vhost_vdpa_config __user *c)
{
        struct vdpa_device *vdpa = v->vdpa;
        struct vhost_vdpa_config config;
        unsigned long size = offsetof(struct vhost_vdpa_config, buf);
        u8 *buf;

        if (copy_from_user(&config, c, size))
                return -EFAULT;
        if (vhost_vdpa_config_validate(v, &config))
                return -EINVAL;
        buf = kvzalloc(config.len, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        vdpa_get_config(vdpa, config.off, buf, config.len);

        if (copy_to_user(c->buf, buf, config.len)) {
                kvfree(buf);
                return -EFAULT;
        }

        kvfree(buf);
        return 0;
}
static long vhost_vdpa_set_config(struct vhost_vdpa *v,
                                  struct vhost_vdpa_config __user *c)
{
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        struct vhost_vdpa_config config;
        unsigned long size = offsetof(struct vhost_vdpa_config, buf);
        u8 *buf;

        if (copy_from_user(&config, c, size))
                return -EFAULT;
        if (vhost_vdpa_config_validate(v, &config))
                return -EINVAL;
        buf = kvzalloc(config.len, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        if (copy_from_user(buf, c->buf, config.len)) {
                kvfree(buf);
                return -EFAULT;
        }

        ops->set_config(vdpa, config.off, buf, config.len);

        kvfree(buf);
        return 0;
}
static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        u64 features;

        features = ops->get_features(vdpa);

        if (copy_to_user(featurep, &features, sizeof(features)))
                return -EFAULT;

        return 0;
}
static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        u64 features;

        /*
         * It's not allowed to change the features after they have
         * been negotiated.
         */
        if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
                return -EBUSY;

        if (copy_from_user(&features, featurep, sizeof(features)))
                return -EFAULT;

        if (vdpa_set_features(vdpa, features))
                return -EINVAL;

        return 0;
}
static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
{
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        u16 num;

        num = ops->get_vq_num_max(vdpa);

        if (copy_to_user(argp, &num, sizeof(num)))
                return -EFAULT;

        return 0;
}
static void vhost_vdpa_config_put(struct vhost_vdpa *v)
{
        if (v->config_ctx) {
                eventfd_ctx_put(v->config_ctx);
                v->config_ctx = NULL;
        }
}
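/*
 * Install (or remove, with VHOST_FILE_UNBIND) the eventfd that
 * vhost_vdpa_config_cb() signals when the device reports a config
 * space change.
 */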
static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
{
        struct vdpa_callback cb;
        int fd;
        struct eventfd_ctx *ctx;

        cb.callback = vhost_vdpa_config_cb;
        cb.private = v;
        if (copy_from_user(&fd, argp, sizeof(fd)))
                return -EFAULT;

        ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
        swap(ctx, v->config_ctx);

        if (!IS_ERR_OR_NULL(ctx))
                eventfd_ctx_put(ctx);

        if (IS_ERR(v->config_ctx)) {
                long ret = PTR_ERR(v->config_ctx);

                v->config_ctx = NULL;
                return ret;
        }

        v->vdpa->config->set_config_cb(v->vdpa, &cb);

        return 0;
}
static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
{
        struct vhost_vdpa_iova_range range = {
                .first = v->range.first,
                .last = v->range.last,
        };

        if (copy_to_user(argp, &range, sizeof(range)))
                return -EFAULT;
        return 0;
}
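/*
 * Virtqueue ioctls: vDPA-only commands are handled here directly,
 * while the generic vring ioctls go through vhost_vring_ioctl() first
 * and the resulting state is then propagated to the parent device via
 * the config ops.
 */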
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
                                   void __user *argp)
{
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        struct vdpa_vq_state vq_state;
        struct vdpa_callback cb;
        struct vhost_virtqueue *vq;
        struct vhost_vring_state s;
        u32 idx;
        long r;

        r = get_user(idx, (u32 __user *)argp);
        if (r < 0)
                return r;

        if (idx >= v->nvqs)
                return -ENOBUFS;

        idx = array_index_nospec(idx, v->nvqs);
        vq = &v->vqs[idx];

        switch (cmd) {
        case VHOST_VDPA_SET_VRING_ENABLE:
                if (copy_from_user(&s, argp, sizeof(s)))
                        return -EFAULT;
                ops->set_vq_ready(vdpa, idx, s.num);
                return 0;
        case VHOST_GET_VRING_BASE:
                r = ops->get_vq_state(v->vdpa, idx, &vq_state);
                if (r)
                        return r;

                vq->last_avail_idx = vq_state.avail_index;
                break;
        }

        r = vhost_vring_ioctl(&v->vdev, cmd, argp);
        if (r)
                return r;

        switch (cmd) {
        case VHOST_SET_VRING_ADDR:
                if (ops->set_vq_address(vdpa, idx,
                                        (u64)(uintptr_t)vq->desc,
                                        (u64)(uintptr_t)vq->avail,
                                        (u64)(uintptr_t)vq->used))
                        r = -EINVAL;
                break;

        case VHOST_SET_VRING_BASE:
                vq_state.avail_index = vq->last_avail_idx;
                if (ops->set_vq_state(vdpa, idx, &vq_state))
                        r = -EINVAL;
                break;

        case VHOST_SET_VRING_CALL:
                if (vq->call_ctx.ctx) {
                        cb.callback = vhost_vdpa_virtqueue_cb;
                        cb.private = vq;
                } else {
                        cb.callback = NULL;
                        cb.private = NULL;
                }
                ops->set_vq_cb(vdpa, idx, &cb);
                vhost_vdpa_setup_vq_irq(v, idx);
                break;

        case VHOST_SET_VRING_NUM:
                ops->set_vq_num(vdpa, idx, vq->num);
                break;
        }

        return r;
}
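/*
 * Main ioctl dispatcher of the vhost-vdpa char device.
 * VHOST_SET_BACKEND_FEATURES is handled before taking the vhost
 * mutex; everything else is serialized under it.
 */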
static long vhost_vdpa_unlocked_ioctl(struct file *filep,
                                      unsigned int cmd, unsigned long arg)
{
        struct vhost_vdpa *v = filep->private_data;
        struct vhost_dev *d = &v->vdev;
        void __user *argp = (void __user *)arg;
        u64 __user *featurep = argp;
        u64 features;
        long r = 0;

        if (cmd == VHOST_SET_BACKEND_FEATURES) {
                if (copy_from_user(&features, featurep, sizeof(features)))
                        return -EFAULT;
                if (features & ~VHOST_VDPA_BACKEND_FEATURES)
                        return -EOPNOTSUPP;
                vhost_set_backend_features(&v->vdev, features);
                return 0;
        }

        mutex_lock(&d->mutex);

        switch (cmd) {
        case VHOST_VDPA_GET_DEVICE_ID:
                r = vhost_vdpa_get_device_id(v, argp);
                break;
        case VHOST_VDPA_GET_STATUS:
                r = vhost_vdpa_get_status(v, argp);
                break;
        case VHOST_VDPA_SET_STATUS:
                r = vhost_vdpa_set_status(v, argp);
                break;
        case VHOST_VDPA_GET_CONFIG:
                r = vhost_vdpa_get_config(v, argp);
                break;
        case VHOST_VDPA_SET_CONFIG:
                r = vhost_vdpa_set_config(v, argp);
                break;
        case VHOST_GET_FEATURES:
                r = vhost_vdpa_get_features(v, argp);
                break;
        case VHOST_SET_FEATURES:
                r = vhost_vdpa_set_features(v, argp);
                break;
        case VHOST_VDPA_GET_VRING_NUM:
                r = vhost_vdpa_get_vring_num(v, argp);
                break;
        case VHOST_SET_LOG_BASE:
        case VHOST_SET_LOG_FD:
                r = -ENOIOCTLCMD;
                break;
        case VHOST_VDPA_SET_CONFIG_CALL:
                r = vhost_vdpa_set_config_call(v, argp);
                break;
        case VHOST_GET_BACKEND_FEATURES:
                features = VHOST_VDPA_BACKEND_FEATURES;
                if (copy_to_user(featurep, &features, sizeof(features)))
                        r = -EFAULT;
                break;
        case VHOST_VDPA_GET_IOVA_RANGE:
                r = vhost_vdpa_get_iova_range(v, argp);
                break;
        default:
                r = vhost_dev_ioctl(&v->vdev, cmd, argp);
                if (r == -ENOIOCTLCMD)
                        r = vhost_vdpa_vring_ioctl(v, cmd, argp);
                break;
        }

        mutex_unlock(&d->mutex);
        return r;
}
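/*
 * Drop every IOTLB entry intersecting [start, last]: unpin the backing
 * pages (marking them dirty if they were mapped writable), fix up the
 * pinned-page accounting and free the entries.
 */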
static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
        struct vhost_dev *dev = &v->vdev;
        struct vhost_iotlb *iotlb = dev->iotlb;
        struct vhost_iotlb_map *map;
        struct page *page;
        unsigned long pfn, pinned;

        while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
                pinned = map->size >> PAGE_SHIFT;
                for (pfn = map->addr >> PAGE_SHIFT;
                     pinned > 0; pfn++, pinned--) {
                        page = pfn_to_page(pfn);
                        if (map->perm & VHOST_ACCESS_WO)
                                set_page_dirty_lock(page);
                        unpin_user_page(page);
                }
                atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
                vhost_iotlb_map_free(iotlb, map);
        }
}
static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
{
        struct vhost_dev *dev = &v->vdev;

        vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
        kfree(dev->iotlb);
        dev->iotlb = NULL;
}
static int perm_to_iommu_flags(u32 perm)
{
        int flags = 0;

        switch (perm) {
        case VHOST_ACCESS_WO:
                flags |= IOMMU_WRITE;
                break;
        case VHOST_ACCESS_RO:
                flags |= IOMMU_READ;
                break;
        case VHOST_ACCESS_RW:
                flags |= (IOMMU_WRITE | IOMMU_READ);
                break;
        default:
                WARN(1, "invalid vhost IOTLB permission\n");
                break;
        }

        return flags | IOMMU_CACHE;
}
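/*
 * Record a translation in the vhost IOTLB and program it into the
 * device: via ops->dma_map when the parent translates by itself, via
 * ops->set_map (deferred while batching) or, failing both, via the
 * platform IOMMU domain.
 */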
static int vhost_vdpa_map(struct vhost_vdpa *v,
                          u64 iova, u64 size, u64 pa, u32 perm)
{
        struct vhost_dev *dev = &v->vdev;
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        int r = 0;

        r = vhost_iotlb_add_range(dev->iotlb, iova, iova + size - 1,
                                  pa, perm);
        if (r)
                return r;

        if (ops->dma_map) {
                r = ops->dma_map(vdpa, iova, size, pa, perm);
        } else if (ops->set_map) {
                if (!v->in_batch)
                        r = ops->set_map(vdpa, dev->iotlb);
        } else {
                r = iommu_map(v->domain, iova, pa, size,
                              perm_to_iommu_flags(perm));
        }

        if (r)
                vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
        else
                atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);

        return r;
}
static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
{
        struct vhost_dev *dev = &v->vdev;
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;

        vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);

        if (ops->dma_map) {
                ops->dma_unmap(vdpa, iova, size);
        } else if (ops->set_map) {
                if (!v->in_batch)
                        ops->set_map(vdpa, dev->iotlb);
        } else {
                iommu_unmap(v->domain, iova, size);
        }
}
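/*
 * Handle a VHOST_IOTLB_UPDATE message: validate the IOVA range, pin
 * the userspace pages in page-sized batches and map each physically
 * contiguous run of pages with a single vhost_vdpa_map() call,
 * unwinding all pins on failure.
 */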
static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
                                           struct vhost_iotlb_msg *msg)
{
        struct vhost_dev *dev = &v->vdev;
        struct vhost_iotlb *iotlb = dev->iotlb;
        struct page **page_list;
        unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
        unsigned int gup_flags = FOLL_LONGTERM;
        unsigned long npages, cur_base, map_pfn, last_pfn = 0;
        unsigned long lock_limit, sz2pin, nchunks, i;
        u64 iova = msg->iova;
        long pinned;
        int ret = 0;

        if (msg->iova < v->range.first || !msg->size ||
            msg->iova > U64_MAX - msg->size + 1 ||
            msg->iova + msg->size - 1 > v->range.last)
                return -EINVAL;

        if (vhost_iotlb_itree_first(iotlb, msg->iova,
                                    msg->iova + msg->size - 1))
                return -EEXIST;

        /* Limit the use of memory for bookkeeping */
        page_list = (struct page **) __get_free_page(GFP_KERNEL);
        if (!page_list)
                return -ENOMEM;

        if (msg->perm & VHOST_ACCESS_WO)
                gup_flags |= FOLL_WRITE;

        npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
        if (!npages) {
                ret = -EINVAL;
                goto free;
        }

        mmap_read_lock(dev->mm);

        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
                ret = -ENOMEM;
                goto unlock;
        }

        cur_base = msg->uaddr & PAGE_MASK;
        iova &= PAGE_MASK;
        nchunks = 0;

        while (npages) {
                sz2pin = min_t(unsigned long, npages, list_size);
                pinned = pin_user_pages(cur_base, sz2pin,
                                        gup_flags, page_list, NULL);
                if (sz2pin != pinned) {
                        if (pinned < 0) {
                                ret = pinned;
                        } else {
                                unpin_user_pages(page_list, pinned);
                                ret = -ENOMEM;
                        }
                        goto out;
                }
                nchunks++;

                if (!last_pfn)
                        map_pfn = page_to_pfn(page_list[0]);

                for (i = 0; i < pinned; i++) {
                        unsigned long this_pfn = page_to_pfn(page_list[i]);
                        u64 csize;

                        if (last_pfn && (this_pfn != last_pfn + 1)) {
                                /* Map the contiguous chunk of memory pinned so far */
                                csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
                                ret = vhost_vdpa_map(v, iova, csize,
                                                     map_pfn << PAGE_SHIFT,
                                                     msg->perm);
                                if (ret) {
                                        /*
                                         * Unpin the pages that are left unmapped
                                         * from this point on in the current
                                         * page_list. The remaining outstanding
                                         * ones which may stride across several
                                         * chunks will be covered in the common
                                         * error path subsequently.
                                         */
                                        unpin_user_pages(&page_list[i],
                                                         pinned - i);
                                        goto out;
                                }

                                map_pfn = this_pfn;
                                iova += csize;
                                nchunks = 0;
                        }

                        last_pfn = this_pfn;
                }

                cur_base += pinned << PAGE_SHIFT;
                npages -= pinned;
        }

        /* Map the last chunk */
        ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
                             map_pfn << PAGE_SHIFT, msg->perm);
out:
        if (ret) {
                if (nchunks) {
                        unsigned long pfn;

                        /*
                         * Unpin the outstanding pages which are yet to be
                         * mapped but haven't due to vdpa_map() or
                         * pin_user_pages() failure.
                         *
                         * Mapped pages are accounted in vdpa_map(), hence
                         * the corresponding unpinning will be handled by
                         * vdpa_unmap().
                         */
                        for (pfn = map_pfn; pfn <= last_pfn; pfn++)
                                unpin_user_page(pfn_to_page(pfn));
                }
                vhost_vdpa_unmap(v, msg->iova, msg->size);
        }
unlock:
        mmap_read_unlock(dev->mm);
free:
        free_page((unsigned long)page_list);
        return ret;
}
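/*
 * Entry point for IOTLB messages written to the char device; batching
 * defers the (potentially expensive) ops->set_map update until
 * VHOST_IOTLB_BATCH_END.
 */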
static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
                                        struct vhost_iotlb_msg *msg)
{
        struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        int r = 0;

        mutex_lock(&dev->mutex);

        r = vhost_dev_check_owner(dev);
        if (r)
                goto unlock;

        switch (msg->type) {
        case VHOST_IOTLB_UPDATE:
                r = vhost_vdpa_process_iotlb_update(v, msg);
                break;
        case VHOST_IOTLB_INVALIDATE:
                vhost_vdpa_unmap(v, msg->iova, msg->size);
                break;
        case VHOST_IOTLB_BATCH_BEGIN:
                v->in_batch = true;
                break;
        case VHOST_IOTLB_BATCH_END:
                if (v->in_batch && ops->set_map)
                        ops->set_map(vdpa, dev->iotlb);
                v->in_batch = false;
                break;
        default:
                r = -EINVAL;
                break;
        }
unlock:
        mutex_unlock(&dev->mutex);

        return r;
}
static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
                                         struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct vhost_vdpa *v = file->private_data;
        struct vhost_dev *dev = &v->vdev;

        return vhost_chr_write_iter(dev, from);
}
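/*
 * Only allocate and attach an IOMMU domain when the parent device has
 * no DMA translation ops of its own (neither dma_map nor set_map).
 */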
static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        struct device *dma_dev = vdpa_get_dma_dev(vdpa);
        struct bus_type *bus;
        int ret;

        /* Device wants to do DMA by itself */
        if (ops->set_map || ops->dma_map)
                return 0;

        bus = dma_dev->bus;
        if (!bus)
                return -EFAULT;

        if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
                return -ENOTSUPP;

        v->domain = iommu_domain_alloc(bus);
        if (!v->domain)
                return -EIO;

        ret = iommu_attach_device(v->domain, dma_dev);
        if (ret)
                goto err_attach;

        return 0;

err_attach:
        iommu_domain_free(v->domain);
        return ret;
}
static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
{
        struct vdpa_device *vdpa = v->vdpa;
        struct device *dma_dev = vdpa_get_dma_dev(vdpa);

        if (v->domain) {
                iommu_detach_device(v->domain, dma_dev);
                iommu_domain_free(v->domain);
        }

        v->domain = NULL;
}
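/*
 * Advertise the usable IOVA range: taken from the device if it
 * reports one, otherwise from the IOMMU aperture, falling back to
 * [0, ULLONG_MAX].
 */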
static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
{
        struct vdpa_iova_range *range = &v->range;
        struct iommu_domain_geometry geo;
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;

        if (ops->get_iova_range) {
                *range = ops->get_iova_range(vdpa);
        } else if (v->domain &&
                   !iommu_domain_get_attr(v->domain,
                                          DOMAIN_ATTR_GEOMETRY, &geo) &&
                   geo.force_aperture) {
                range->first = geo.aperture_start;
                range->last = geo.aperture_end;
        } else {
                range->first = 0;
                range->last = ULLONG_MAX;
        }
}
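/*
 * First open of the char device: reset the device, initialize the
 * vhost device, its virtqueues and IOTLB, and allocate the IOMMU
 * domain if needed. Only one opener is allowed at a time.
 */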
static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
        struct vhost_vdpa *v;
        struct vhost_dev *dev;
        struct vhost_virtqueue **vqs;
        int nvqs, i, r, opened;

        v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);

        opened = atomic_cmpxchg(&v->opened, 0, 1);
        if (opened)
                return -EBUSY;

        nvqs = v->nvqs;
        vhost_vdpa_reset(v);

        vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
        if (!vqs) {
                r = -ENOMEM;
                goto err;
        }

        dev = &v->vdev;
        for (i = 0; i < nvqs; i++) {
                vqs[i] = &v->vqs[i];
                vqs[i]->handle_kick = handle_vq_kick;
        }
        vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
                       vhost_vdpa_process_iotlb_msg);

        dev->iotlb = vhost_iotlb_alloc(0, 0);
        if (!dev->iotlb) {
                r = -ENOMEM;
                goto err_init_iotlb;
        }

        r = vhost_vdpa_alloc_domain(v);
        if (r)
                goto err_init_iotlb;

        vhost_vdpa_set_iova_range(v);

        filep->private_data = v;

        return 0;

err_init_iotlb:
        vhost_dev_cleanup(&v->vdev);
        kfree(vqs);
err:
        atomic_dec(&v->opened);
        return r;
}
static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
{
        int i;

        for (i = 0; i < v->nvqs; i++)
                vhost_vdpa_unsetup_vq_irq(v, i);
}
static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{
        struct vhost_vdpa *v = filep->private_data;
        struct vhost_dev *d = &v->vdev;

        mutex_lock(&d->mutex);
        filep->private_data = NULL;
        vhost_vdpa_reset(v);
        vhost_dev_stop(&v->vdev);
        vhost_vdpa_iotlb_free(v);
        vhost_vdpa_free_domain(v);
        vhost_vdpa_config_put(v);
        vhost_vdpa_clean_irq(v);
        vhost_dev_cleanup(&v->vdev);
        kfree(v->vdev.vqs);
        mutex_unlock(&d->mutex);

        atomic_dec(&v->opened);
        complete(&v->completion);

        return 0;
}

#ifdef CONFIG_MMU
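/*
 * Lazily map the selected virtqueue doorbell page into userspace on
 * first access; the vma offset encodes the virtqueue index.
 */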
static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
{
        struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        struct vdpa_notification_area notify;
        struct vm_area_struct *vma = vmf->vma;
        u16 index = vma->vm_pgoff;

        notify = ops->get_vq_notification(vdpa, index);

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
                            notify.addr >> PAGE_SHIFT, PAGE_SIZE,
                            vma->vm_page_prot))
                return VM_FAULT_SIGBUS;

        return VM_FAULT_NOPAGE;
}
static const struct vm_operations_struct vhost_vdpa_vm_ops = {
        .fault = vhost_vdpa_fault,
};
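/*
 * mmap() of the char device exposes a virtqueue doorbell: the mapping
 * must be shared, write-only and exactly one page long, with the file
 * offset (in pages) selecting the virtqueue.
 */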
static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct vhost_vdpa *v = vma->vm_file->private_data;
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        struct vdpa_notification_area notify;
        unsigned long index = vma->vm_pgoff;

        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                return -EINVAL;
        if ((vma->vm_flags & VM_SHARED) == 0)
                return -EINVAL;
        if (vma->vm_flags & VM_READ)
                return -EINVAL;
        if (index > 65535)
                return -EINVAL;
        if (!ops->get_vq_notification)
                return -ENOTSUPP;

        /* To be safe and easily modelled by userspace, we only
         * support a doorbell that sits on its own page boundary and
         * does not share the page with other registers.
         */
        notify = ops->get_vq_notification(vdpa, index);
        if (notify.addr & (PAGE_SIZE - 1))
                return -EINVAL;
        if (vma->vm_end - vma->vm_start != notify.size)
                return -ENOTSUPP;

        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = &vhost_vdpa_vm_ops;
        return 0;
}
#endif /* CONFIG_MMU */
static const struct file_operations vhost_vdpa_fops = {
        .owner          = THIS_MODULE,
        .open           = vhost_vdpa_open,
        .release        = vhost_vdpa_release,
        .write_iter     = vhost_vdpa_chr_write_iter,
        .unlocked_ioctl = vhost_vdpa_unlocked_ioctl,
#ifdef CONFIG_MMU
        .mmap           = vhost_vdpa_mmap,
#endif /* CONFIG_MMU */
        .compat_ioctl   = compat_ptr_ioctl,
};
static void vhost_vdpa_release_dev(struct device *device)
{
        struct vhost_vdpa *v =
               container_of(device, struct vhost_vdpa, dev);

        ida_simple_remove(&vhost_vdpa_ida, v->minor);
        kfree(v->vqs);
        kfree(v);
}
static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{
        const struct vdpa_config_ops *ops = vdpa->config;
        struct vhost_vdpa *v;
        int minor;
        int r;

        /* Currently, we only accept network devices. */
        if (ops->get_device_id(vdpa) != VIRTIO_ID_NET)
                return -ENOTSUPP;

        v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (!v)
                return -ENOMEM;

        minor = ida_simple_get(&vhost_vdpa_ida, 0,
                               VHOST_VDPA_DEV_MAX, GFP_KERNEL);
        if (minor < 0) {
                kfree(v);
                return minor;
        }

        atomic_set(&v->opened, 0);
        v->minor = minor;
        v->vdpa = vdpa;
        v->nvqs = vdpa->nvqs;
        v->virtio_id = ops->get_device_id(vdpa);

        device_initialize(&v->dev);
        v->dev.release = vhost_vdpa_release_dev;
        v->dev.parent = &vdpa->dev;
        v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
        v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
                               GFP_KERNEL);
        if (!v->vqs) {
                r = -ENOMEM;
                goto err;
        }

        r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
        if (r)
                goto err;

        cdev_init(&v->cdev, &vhost_vdpa_fops);
        v->cdev.owner = THIS_MODULE;

        r = cdev_device_add(&v->cdev, &v->dev);
        if (r)
                goto err;

        init_completion(&v->completion);
        vdpa_set_drvdata(vdpa, v);

        return 0;

err:
        put_device(&v->dev);
        return r;
}
static void vhost_vdpa_remove(struct vdpa_device *vdpa)
{
        struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
        int opened;

        cdev_device_del(&v->cdev, &v->dev);

        do {
                opened = atomic_cmpxchg(&v->opened, 0, 1);
                if (!opened)
                        break;
                wait_for_completion(&v->completion);
        } while (1);

        put_device(&v->dev);
}
static struct vdpa_driver vhost_vdpa_driver = {
        .driver = {
                .name   = "vhost_vdpa",
        },
        .probe  = vhost_vdpa_probe,
        .remove = vhost_vdpa_remove,
};
static int __init vhost_vdpa_init(void)
{
        int r;

        r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
                                "vhost-vdpa");
        if (r)
                goto err_alloc_chrdev;

        r = vdpa_register_driver(&vhost_vdpa_driver);
        if (r)
                goto err_vdpa_register_driver;

        return 0;

err_vdpa_register_driver:
        unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
err_alloc_chrdev:
        return r;
}
module_init(vhost_vdpa_init);
static void __exit vhost_vdpa_exit(void)
{
        vdpa_unregister_driver(&vhost_vdpa_driver);
        unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
}
module_exit(vhost_vdpa_exit);
MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");