GNU Linux-libre 5.15.54-gnu
drivers/vhost/vdpa.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Intel Corporation.
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * Author: Tiwei Bie <tiwei.bie@intel.com>
 *         Jason Wang <jasowang@redhat.com>
 *
 * Thanks to Michael S. Tsirkin for the valuable comments and
 * suggestions, and to Cunming Liang and Zhihong Wang for all
 * their support.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>

#include "vhost.h"

enum {
	VHOST_VDPA_BACKEND_FEATURES =
	(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
	(1ULL << VHOST_BACKEND_F_IOTLB_BATCH),
};

#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)

struct vhost_vdpa {
	struct vhost_dev vdev;
	struct iommu_domain *domain;
	struct vhost_virtqueue *vqs;
	struct completion completion;
	struct vdpa_device *vdpa;
	struct device dev;
	struct cdev cdev;
	atomic_t opened;
	int nvqs;
	int virtio_id;
	int minor;
	struct eventfd_ctx *config_ctx;
	int in_batch;
	struct vdpa_iova_range range;
};

static DEFINE_IDA(vhost_vdpa_ida);

static dev_t vhost_vdpa_major;

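/*
 * Relay a guest kick to the vDPA device. Runs from the vhost worker;
 * the virtqueue index is recovered by pointer arithmetic against the
 * vq array embedded in struct vhost_vdpa.
 */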
static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
	const struct vdpa_config_ops *ops = v->vdpa->config;

	ops->kick_vq(v->vdpa, vq - v->vqs);
}

static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
{
	struct vhost_virtqueue *vq = private;
	struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;

	if (call_ctx)
		eventfd_signal(call_ctx, 1);

	return IRQ_HANDLED;
}

static irqreturn_t vhost_vdpa_config_cb(void *private)
{
	struct vhost_vdpa *v = private;
	struct eventfd_ctx *config_ctx = v->config_ctx;

	if (config_ctx)
		eventfd_signal(config_ctx, 1);

	return IRQ_HANDLED;
}

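/*
 * Best-effort wiring of the device's per-vq interrupt to the call
 * eventfd via the IRQ bypass manager. On failure we only log and fall
 * back to the ordinary eventfd signalling path driven by
 * vhost_vdpa_virtqueue_cb().
 */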
static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];
	const struct vdpa_config_ops *ops = v->vdpa->config;
	struct vdpa_device *vdpa = v->vdpa;
	int ret, irq;

	if (!ops->get_vq_irq)
		return;

	irq = ops->get_vq_irq(vdpa, qid);
	if (irq < 0)
		return;

	irq_bypass_unregister_producer(&vq->call_ctx.producer);
	if (!vq->call_ctx.ctx)
		return;

	vq->call_ctx.producer.token = vq->call_ctx.ctx;
	vq->call_ctx.producer.irq = irq;
	ret = irq_bypass_register_producer(&vq->call_ctx.producer);
	if (unlikely(ret))
		dev_info(&v->dev, "vq %u, irq bypass producer (token %p) registration failed, ret = %d\n",
			 qid, vq->call_ctx.producer.token, ret);
}

static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];

	irq_bypass_unregister_producer(&vq->call_ctx.producer);
}

static int vhost_vdpa_reset(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;

	v->in_batch = 0;

	return vdpa_reset(vdpa);
}

static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 device_id;

	device_id = ops->get_device_id(vdpa);

	if (copy_to_user(argp, &device_id, sizeof(device_id)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status;

	status = ops->get_status(vdpa);

	if (copy_to_user(statusp, &status, sizeof(status)))
		return -EFAULT;

	return 0;
}

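/*
 * Write the device status. Clearing individual bits is only allowed
 * via a full reset (status == 0), and the vq IRQ bypass producers are
 * torn down or set up when DRIVER_OK is cleared or set respectively.
 */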
static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status, status_old;
	int ret, nvqs = v->nvqs;
	u16 i;

	if (copy_from_user(&status, statusp, sizeof(status)))
		return -EFAULT;

	status_old = ops->get_status(vdpa);

	/*
	 * Userspace shouldn't remove status bits unless it resets the
	 * status to 0.
	 */
	if (status != 0 && (ops->get_status(vdpa) & ~status) != 0)
		return -EINVAL;

	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_unsetup_vq_irq(v, i);

	if (status == 0) {
		ret = ops->reset(vdpa);
		if (ret)
			return ret;
	} else
		ops->set_status(vdpa, status);

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_setup_vq_irq(v, i);

	return 0;
}

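/*
 * Validate a config space access against the size reported by the
 * device. Note the "c->len > size - c->off" form avoids overflowing
 * "c->off + c->len".
 */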
static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
				      struct vhost_vdpa_config *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	long size = vdpa->config->get_config_size(vdpa);

	if (c->len == 0 || c->off > size)
		return -EINVAL;

	if (c->len > size - c->off)
		return -E2BIG;

	return 0;
}

static long vhost_vdpa_get_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	vdpa_get_config(vdpa, config.off, buf, config.len);

	if (copy_to_user(c->buf, buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_set_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;

	buf = vmemdup_user(c->buf, config.len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	ops->set_config(vdpa, config.off, buf, config.len);

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	features = ops->get_features(vdpa);

	if (copy_to_user(featurep, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	/*
	 * It's not allowed to change the features after they have
	 * been negotiated.
	 */
	if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
		return -EBUSY;

	if (copy_from_user(&features, featurep, sizeof(features)))
		return -EFAULT;

	if (vdpa_set_features(vdpa, features))
		return -EINVAL;

	return 0;
}

static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u16 num;

	num = ops->get_vq_num_max(vdpa);

	if (copy_to_user(argp, &num, sizeof(num)))
		return -EFAULT;

	return 0;
}

static void vhost_vdpa_config_put(struct vhost_vdpa *v)
{
	if (v->config_ctx) {
		eventfd_ctx_put(v->config_ctx);
		v->config_ctx = NULL;
	}
}

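/*
 * Install (or tear down, on VHOST_FILE_UNBIND) the eventfd used for
 * config space change notification. swap() lets the old context be
 * released after the error checks on the new one.
 */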
static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_callback cb;
	int fd;
	struct eventfd_ctx *ctx;

	cb.callback = vhost_vdpa_config_cb;
	cb.private = v;
	if (copy_from_user(&fd, argp, sizeof(fd)))
		return -EFAULT;

	ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
	swap(ctx, v->config_ctx);

	if (!IS_ERR_OR_NULL(ctx))
		eventfd_ctx_put(ctx);

	if (IS_ERR(v->config_ctx)) {
		long ret = PTR_ERR(v->config_ctx);

		v->config_ctx = NULL;
		return ret;
	}

	v->vdpa->config->set_config_cb(v->vdpa, &cb);

	return 0;
}

static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vhost_vdpa_iova_range range = {
		.first = v->range.first,
		.last = v->range.last,
	};

	if (copy_to_user(argp, &range, sizeof(range)))
		return -EFAULT;
	return 0;
}

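/*
 * Per-virtqueue ioctls. The queue index is read first and clamped
 * with array_index_nospec() to block speculative out-of-bounds use;
 * vhost core handles the generic part via vhost_vring_ioctl() and the
 * result is then propagated to the vDPA device through config ops.
 */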
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
				   void __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_vq_state vq_state;
	struct vdpa_callback cb;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	u32 idx;
	long r;

	r = get_user(idx, (u32 __user *)argp);
	if (r < 0)
		return r;

	if (idx >= v->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, v->nvqs);
	vq = &v->vqs[idx];

	switch (cmd) {
	case VHOST_VDPA_SET_VRING_ENABLE:
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		ops->set_vq_ready(vdpa, idx, s.num);
		return 0;
	case VHOST_GET_VRING_BASE:
		r = ops->get_vq_state(v->vdpa, idx, &vq_state);
		if (r)
			return r;

		vq->last_avail_idx = vq_state.split.avail_index;
		break;
	}

	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
	if (r)
		return r;

	switch (cmd) {
	case VHOST_SET_VRING_ADDR:
		if (ops->set_vq_address(vdpa, idx,
					(u64)(uintptr_t)vq->desc,
					(u64)(uintptr_t)vq->avail,
					(u64)(uintptr_t)vq->used))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_BASE:
		vq_state.split.avail_index = vq->last_avail_idx;
		if (ops->set_vq_state(vdpa, idx, &vq_state))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_CALL:
		if (vq->call_ctx.ctx) {
			cb.callback = vhost_vdpa_virtqueue_cb;
			cb.private = vq;
		} else {
			cb.callback = NULL;
			cb.private = NULL;
		}
		ops->set_vq_cb(vdpa, idx, &cb);
		vhost_vdpa_setup_vq_irq(v, idx);
		break;

	case VHOST_SET_VRING_NUM:
		ops->set_vq_num(vdpa, idx, vq->num);
		break;
	}

	return r;
}

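/*
 * Top-level ioctl dispatcher for the character device. Everything but
 * VHOST_SET_BACKEND_FEATURES runs under the vhost device mutex.
 *
 * An illustrative (hypothetical) userspace call sequence might be:
 *
 *	int fd = open("/dev/vhost-vdpa-0", O_RDWR);
 *	ioctl(fd, VHOST_GET_FEATURES, &features);
 *	ioctl(fd, VHOST_SET_FEATURES, &features);
 *	ioctl(fd, VHOST_VDPA_SET_STATUS, &status);
 *
 * The node name follows the "vhost-vdpa-%u" pattern used in
 * vhost_vdpa_probe(); the actual path depends on udev policy.
 */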
static long vhost_vdpa_unlocked_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u64 features;
	long r = 0;

	if (cmd == VHOST_SET_BACKEND_FEATURES) {
		if (copy_from_user(&features, featurep, sizeof(features)))
			return -EFAULT;
		if (features & ~VHOST_VDPA_BACKEND_FEATURES)
			return -EOPNOTSUPP;
		vhost_set_backend_features(&v->vdev, features);
		return 0;
	}

	mutex_lock(&d->mutex);

	switch (cmd) {
	case VHOST_VDPA_GET_DEVICE_ID:
		r = vhost_vdpa_get_device_id(v, argp);
		break;
	case VHOST_VDPA_GET_STATUS:
		r = vhost_vdpa_get_status(v, argp);
		break;
	case VHOST_VDPA_SET_STATUS:
		r = vhost_vdpa_set_status(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG:
		r = vhost_vdpa_get_config(v, argp);
		break;
	case VHOST_VDPA_SET_CONFIG:
		r = vhost_vdpa_set_config(v, argp);
		break;
	case VHOST_GET_FEATURES:
		r = vhost_vdpa_get_features(v, argp);
		break;
	case VHOST_SET_FEATURES:
		r = vhost_vdpa_set_features(v, argp);
		break;
	case VHOST_VDPA_GET_VRING_NUM:
		r = vhost_vdpa_get_vring_num(v, argp);
		break;
	case VHOST_SET_LOG_BASE:
	case VHOST_SET_LOG_FD:
		r = -ENOIOCTLCMD;
		break;
	case VHOST_VDPA_SET_CONFIG_CALL:
		r = vhost_vdpa_set_config_call(v, argp);
		break;
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_VDPA_BACKEND_FEATURES;
		if (copy_to_user(featurep, &features, sizeof(features)))
			r = -EFAULT;
		break;
	case VHOST_VDPA_GET_IOVA_RANGE:
		r = vhost_vdpa_get_iova_range(v, argp);
		break;
	default:
		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vdpa_vring_ioctl(v, cmd, argp);
		break;
	}

	mutex_unlock(&d->mutex);
	return r;
}

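/*
 * Drop PA-mode mappings in [start, last]: mark writable pages dirty,
 * unpin them, and subtract the pages from the owner mm's pinned_vm
 * accounting before freeing the IOTLB entry.
 */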
static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct vhost_iotlb_map *map;
	struct page *page;
	unsigned long pfn, pinned;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		pinned = PFN_DOWN(map->size);
		for (pfn = PFN_DOWN(map->addr);
		     pinned > 0; pfn++, pinned--) {
			page = pfn_to_page(pfn);
			if (map->perm & VHOST_ACCESS_WO)
				set_page_dirty_lock(page);
			unpin_user_page(page);
		}
		atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct vhost_iotlb_map *map;
	struct vdpa_map_file *map_file;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		map_file = (struct vdpa_map_file *)map->opaque;
		fput(map_file->file);
		kfree(map_file);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
	struct vdpa_device *vdpa = v->vdpa;

	if (vdpa->use_va)
		return vhost_vdpa_va_unmap(v, start, last);

	return vhost_vdpa_pa_unmap(v, start, last);
}

static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
{
	struct vhost_dev *dev = &v->vdev;

	vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
	kfree(dev->iotlb);
	dev->iotlb = NULL;
}

static int perm_to_iommu_flags(u32 perm)
{
	int flags = 0;

	switch (perm) {
	case VHOST_ACCESS_WO:
		flags |= IOMMU_WRITE;
		break;
	case VHOST_ACCESS_RO:
		flags |= IOMMU_READ;
		break;
	case VHOST_ACCESS_RW:
		flags |= (IOMMU_WRITE | IOMMU_READ);
		break;
	default:
		WARN(1, "invalid vhost IOTLB permission\n");
		break;
	}

	return flags | IOMMU_CACHE;
}

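/*
 * Record a mapping in the vhost IOTLB and propagate it to whichever
 * DMA backend the device provides: a direct dma_map op, a whole-table
 * set_map op (deferred while batching), or the platform IOMMU domain.
 */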
static int vhost_vdpa_map(struct vhost_vdpa *v, u64 iova,
			  u64 size, u64 pa, u32 perm, void *opaque)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	r = vhost_iotlb_add_range_ctx(dev->iotlb, iova, iova + size - 1,
				      pa, perm, opaque);
	if (r)
		return r;

	if (ops->dma_map) {
		r = ops->dma_map(vdpa, iova, size, pa, perm, opaque);
	} else if (ops->set_map) {
		if (!v->in_batch)
			r = ops->set_map(vdpa, dev->iotlb);
	} else {
		r = iommu_map(v->domain, iova, pa, size,
			      perm_to_iommu_flags(perm));
	}
	if (r) {
		vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
		return r;
	}

	if (!vdpa->use_va)
		atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm);

	return 0;
}

static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);

	if (ops->dma_map) {
		ops->dma_unmap(vdpa, iova, size);
	} else if (ops->set_map) {
		if (!v->in_batch)
			ops->set_map(vdpa, dev->iotlb);
	} else {
		iommu_unmap(v->domain, iova, size);
	}
}

static int vhost_vdpa_va_map(struct vhost_vdpa *v,
			     u64 iova, u64 size, u64 uaddr, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	u64 offset, map_size, map_iova = iova;
	struct vdpa_map_file *map_file;
	struct vm_area_struct *vma;
	int ret = 0;

	mmap_read_lock(dev->mm);

	while (size) {
		vma = find_vma(dev->mm, uaddr);
		if (!vma) {
			ret = -EINVAL;
			break;
		}
		map_size = min(size, vma->vm_end - uaddr);
		if (!(vma->vm_file && (vma->vm_flags & VM_SHARED) &&
			!(vma->vm_flags & (VM_IO | VM_PFNMAP))))
			goto next;

		map_file = kzalloc(sizeof(*map_file), GFP_KERNEL);
		if (!map_file) {
			ret = -ENOMEM;
			break;
		}
		offset = (vma->vm_pgoff << PAGE_SHIFT) + uaddr - vma->vm_start;
		map_file->offset = offset;
		map_file->file = get_file(vma->vm_file);
		ret = vhost_vdpa_map(v, map_iova, map_size, uaddr,
				     perm, map_file);
		if (ret) {
			fput(map_file->file);
			kfree(map_file);
			break;
		}
next:
		size -= map_size;
		uaddr += map_size;
		map_iova += map_size;
	}
	if (ret)
		vhost_vdpa_unmap(v, iova, map_iova - iova);

	mmap_read_unlock(dev->mm);

	return ret;
}

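/*
 * PA mode: pin the userspace range with pin_user_pages() under
 * RLIMIT_MEMLOCK accounting, then map each physically contiguous
 * run of pages as a single IOTLB entry.
 */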
static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
			     u64 iova, u64 size, u64 uaddr, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	struct page **page_list;
	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
	unsigned int gup_flags = FOLL_LONGTERM;
	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
	unsigned long lock_limit, sz2pin, nchunks, i;
	u64 start = iova;
	long pinned;
	int ret = 0;

	/* Limit the use of memory for bookkeeping */
	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	if (perm & VHOST_ACCESS_WO)
		gup_flags |= FOLL_WRITE;

	npages = PFN_UP(size + (iova & ~PAGE_MASK));
	if (!npages) {
		ret = -EINVAL;
		goto free;
	}

	mmap_read_lock(dev->mm);

	lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
		ret = -ENOMEM;
		goto unlock;
	}

	cur_base = uaddr & PAGE_MASK;
	iova &= PAGE_MASK;
	nchunks = 0;

	while (npages) {
		sz2pin = min_t(unsigned long, npages, list_size);
		pinned = pin_user_pages(cur_base, sz2pin,
					gup_flags, page_list, NULL);
		if (sz2pin != pinned) {
			if (pinned < 0) {
				ret = pinned;
			} else {
				unpin_user_pages(page_list, pinned);
				ret = -ENOMEM;
			}
			goto out;
		}
		nchunks++;

		if (!last_pfn)
			map_pfn = page_to_pfn(page_list[0]);

		for (i = 0; i < pinned; i++) {
			unsigned long this_pfn = page_to_pfn(page_list[i]);
			u64 csize;

			if (last_pfn && (this_pfn != last_pfn + 1)) {
				/* Map a contiguous chunk of pinned pages */
				csize = PFN_PHYS(last_pfn - map_pfn + 1);
				ret = vhost_vdpa_map(v, iova, csize,
						     PFN_PHYS(map_pfn),
						     perm, NULL);
				if (ret) {
					/*
					 * Unpin the pages that are left unmapped
					 * from this point on in the current
					 * page_list. The remaining outstanding
					 * ones which may stride across several
					 * chunks will be covered in the common
					 * error path subsequently.
					 */
					unpin_user_pages(&page_list[i],
							 pinned - i);
					goto out;
				}

				map_pfn = this_pfn;
				iova += csize;
				nchunks = 0;
			}

			last_pfn = this_pfn;
		}

		cur_base += PFN_PHYS(pinned);
		npages -= pinned;
	}

	/* Map the remaining chunk */
	ret = vhost_vdpa_map(v, iova, PFN_PHYS(last_pfn - map_pfn + 1),
			     PFN_PHYS(map_pfn), perm, NULL);
out:
	if (ret) {
		if (nchunks) {
			unsigned long pfn;

			/*
			 * Unpin the outstanding pages which were pinned but
			 * not yet mapped, due to vhost_vdpa_map() or
			 * pin_user_pages() failure.
			 *
			 * Mapped pages are accounted in vhost_vdpa_map(),
			 * hence the corresponding unpinning will be handled
			 * by vhost_vdpa_unmap().
			 */
			WARN_ON(!last_pfn);
			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
				unpin_user_page(pfn_to_page(pfn));
		}
		vhost_vdpa_unmap(v, start, size);
	}
unlock:
	mmap_read_unlock(dev->mm);
free:
	free_page((unsigned long)page_list);
	return ret;
}

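/*
 * Handle a VHOST_IOTLB_UPDATE message: reject zero-sized, wrapping or
 * out-of-range requests and overlaps with existing entries, then map
 * in VA or PA mode depending on the device's use_va capability.
 */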
static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb_msg *msg)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	struct vhost_iotlb *iotlb = dev->iotlb;

	if (msg->iova < v->range.first || !msg->size ||
	    msg->iova > U64_MAX - msg->size + 1 ||
	    msg->iova + msg->size - 1 > v->range.last)
		return -EINVAL;

	if (vhost_iotlb_itree_first(iotlb, msg->iova,
				    msg->iova + msg->size - 1))
		return -EEXIST;

	if (vdpa->use_va)
		return vhost_vdpa_va_map(v, msg->iova, msg->size,
					 msg->uaddr, msg->perm);

	return vhost_vdpa_pa_map(v, msg->iova, msg->size, msg->uaddr,
				 msg->perm);
}

static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
					struct vhost_iotlb_msg *msg)
{
	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	mutex_lock(&dev->mutex);

	r = vhost_dev_check_owner(dev);
	if (r)
		goto unlock;

	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		r = vhost_vdpa_process_iotlb_update(v, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		vhost_vdpa_unmap(v, msg->iova, msg->size);
		break;
	case VHOST_IOTLB_BATCH_BEGIN:
		v->in_batch = true;
		break;
	case VHOST_IOTLB_BATCH_END:
		if (v->in_batch && ops->set_map)
			ops->set_map(vdpa, dev->iotlb);
		v->in_batch = false;
		break;
	default:
		r = -EINVAL;
		break;
	}
unlock:
	mutex_unlock(&dev->mutex);

	return r;
}

static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vdpa *v = file->private_data;
	struct vhost_dev *dev = &v->vdev;

	return vhost_chr_write_iter(dev, from);
}

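/*
 * Allocate and attach an IOMMU domain for devices that do no DMA
 * translation of their own (neither set_map nor dma_map is provided).
 * Cache-coherent DMA is required since the IOTLB entries are mapped
 * with IOMMU_CACHE.
 */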
static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
	struct bus_type *bus;
	int ret;

	/* Device wants to do DMA by itself */
	if (ops->set_map || ops->dma_map)
		return 0;

	bus = dma_dev->bus;
	if (!bus)
		return -EFAULT;

	if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		return -ENOTSUPP;

	v->domain = iommu_domain_alloc(bus);
	if (!v->domain)
		return -EIO;

	ret = iommu_attach_device(v->domain, dma_dev);
	if (ret)
		goto err_attach;

	return 0;

err_attach:
	iommu_domain_free(v->domain);
	return ret;
}

static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);

	if (v->domain) {
		iommu_detach_device(v->domain, dma_dev);
		iommu_domain_free(v->domain);
	}

	v->domain = NULL;
}

static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
{
	struct vdpa_iova_range *range = &v->range;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (ops->get_iova_range) {
		*range = ops->get_iova_range(vdpa);
	} else if (v->domain && v->domain->geometry.force_aperture) {
		range->first = v->domain->geometry.aperture_start;
		range->last = v->domain->geometry.aperture_end;
	} else {
		range->first = 0;
		range->last = ULLONG_MAX;
	}
}

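/*
 * Open is exclusive: a single opener is enforced with an atomic
 * compare-and-exchange on v->opened. The device is reset, the vhost
 * device and IOTLB are initialized, and the usable IOVA range is
 * computed before the fd is handed back.
 */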
static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int nvqs, i, r, opened;

	v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);

	opened = atomic_cmpxchg(&v->opened, 0, 1);
	if (opened)
		return -EBUSY;

	nvqs = v->nvqs;
	r = vhost_vdpa_reset(v);
	if (r)
		goto err;

	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		r = -ENOMEM;
		goto err;
	}

	dev = &v->vdev;
	for (i = 0; i < nvqs; i++) {
		vqs[i] = &v->vqs[i];
		vqs[i]->handle_kick = handle_vq_kick;
	}
	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
		       vhost_vdpa_process_iotlb_msg);

	dev->iotlb = vhost_iotlb_alloc(0, 0);
	if (!dev->iotlb) {
		r = -ENOMEM;
		goto err_init_iotlb;
	}

	r = vhost_vdpa_alloc_domain(v);
	if (r)
		goto err_init_iotlb;

	vhost_vdpa_set_iova_range(v);

	filep->private_data = v;

	return 0;

err_init_iotlb:
	vhost_dev_cleanup(&v->vdev);
	kfree(vqs);
err:
	atomic_dec(&v->opened);
	return r;
}

static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
{
	int i;

	for (i = 0; i < v->nvqs; i++)
		vhost_vdpa_unsetup_vq_irq(v, i);
}

static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;

	mutex_lock(&d->mutex);
	filep->private_data = NULL;
	vhost_vdpa_reset(v);
	vhost_dev_stop(&v->vdev);
	vhost_vdpa_iotlb_free(v);
	vhost_vdpa_free_domain(v);
	vhost_vdpa_config_put(v);
	vhost_vdpa_clean_irq(v);
	vhost_dev_cleanup(&v->vdev);
	kfree(v->vdev.vqs);
	mutex_unlock(&d->mutex);

	atomic_dec(&v->opened);
	complete(&v->completion);

	return 0;
}

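/*
 * Doorbell mapping (CONFIG_MMU only): userspace may mmap() a single
 * page per virtqueue, selected by vm_pgoff, that is remapped at fault
 * time to the device's vq notification area.
 */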
#ifdef CONFIG_MMU
static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
{
	struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	struct vm_area_struct *vma = vmf->vma;
	u16 index = vma->vm_pgoff;

	notify = ops->get_vq_notification(vdpa, index);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
			    PFN_DOWN(notify.addr), PAGE_SIZE,
			    vma->vm_page_prot))
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct vhost_vdpa_vm_ops = {
	.fault = vhost_vdpa_fault,
};

static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vhost_vdpa *v = vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	unsigned long index = vma->vm_pgoff;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (vma->vm_flags & VM_READ)
		return -EINVAL;
	if (index > 65535)
		return -EINVAL;
	if (!ops->get_vq_notification)
		return -ENOTSUPP;

	/*
	 * To be safe and easily modelled by userspace, we only support
	 * a doorbell that sits on a page boundary and does not share
	 * the page with other registers.
	 */
	notify = ops->get_vq_notification(vdpa, index);
	if (notify.addr & (PAGE_SIZE - 1))
		return -EINVAL;
	if (vma->vm_end - vma->vm_start != notify.size)
		return -ENOTSUPP;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &vhost_vdpa_vm_ops;
	return 0;
}
#endif /* CONFIG_MMU */

static const struct file_operations vhost_vdpa_fops = {
	.owner		= THIS_MODULE,
	.open		= vhost_vdpa_open,
	.release	= vhost_vdpa_release,
	.write_iter	= vhost_vdpa_chr_write_iter,
	.unlocked_ioctl	= vhost_vdpa_unlocked_ioctl,
#ifdef CONFIG_MMU
	.mmap		= vhost_vdpa_mmap,
#endif /* CONFIG_MMU */
	.compat_ioctl	= compat_ptr_ioctl,
};

static void vhost_vdpa_release_dev(struct device *device)
{
	struct vhost_vdpa *v =
	       container_of(device, struct vhost_vdpa, dev);

	ida_simple_remove(&vhost_vdpa_ida, v->minor);
	kfree(v->vqs);
	kfree(v);
}

static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa *v;
	int minor;
	int r;

	v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!v)
		return -ENOMEM;

	minor = ida_simple_get(&vhost_vdpa_ida, 0,
			       VHOST_VDPA_DEV_MAX, GFP_KERNEL);
	if (minor < 0) {
		kfree(v);
		return minor;
	}

	atomic_set(&v->opened, 0);
	v->minor = minor;
	v->vdpa = vdpa;
	v->nvqs = vdpa->nvqs;
	v->virtio_id = ops->get_device_id(vdpa);

	device_initialize(&v->dev);
	v->dev.release = vhost_vdpa_release_dev;
	v->dev.parent = &vdpa->dev;
	v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
	v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
			       GFP_KERNEL);
	if (!v->vqs) {
		r = -ENOMEM;
		goto err;
	}

	r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
	if (r)
		goto err;

	cdev_init(&v->cdev, &vhost_vdpa_fops);
	v->cdev.owner = THIS_MODULE;

	r = cdev_device_add(&v->cdev, &v->dev);
	if (r)
		goto err;

	init_completion(&v->completion);
	vdpa_set_drvdata(vdpa, v);

	return 0;

err:
	put_device(&v->dev);
	return r;
}

static void vhost_vdpa_remove(struct vdpa_device *vdpa)
{
	struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
	int opened;

	cdev_device_del(&v->cdev, &v->dev);

	do {
		opened = atomic_cmpxchg(&v->opened, 0, 1);
		if (!opened)
			break;
		wait_for_completion(&v->completion);
	} while (1);

	put_device(&v->dev);
}

static struct vdpa_driver vhost_vdpa_driver = {
	.driver = {
		.name	= "vhost_vdpa",
	},
	.probe	= vhost_vdpa_probe,
	.remove = vhost_vdpa_remove,
};

static int __init vhost_vdpa_init(void)
{
	int r;

	r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
				"vhost-vdpa");
	if (r)
		goto err_alloc_chrdev;

	r = vdpa_register_driver(&vhost_vdpa_driver);
	if (r)
		goto err_vdpa_register_driver;

	return 0;

err_vdpa_register_driver:
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
err_alloc_chrdev:
	return r;
}
module_init(vhost_vdpa_init);

static void __exit vhost_vdpa_exit(void)
{
	vdpa_unregister_driver(&vhost_vdpa_driver);
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
}
module_exit(vhost_vdpa_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");