// SPDX-License-Identifier: GPL-2.0-only
/*
 * MMU-based software IOTLB.
 *
 * Copyright (C) 2020-2021 Bytedance Inc. and/or its affiliates. All rights reserved.
 *
 * Author: Xie Yongji <xieyongji@bytedance.com>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/vdpa.h>

#include "iova_domain.h"
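
/*
 * Add a [start, last] -> addr translation to the software IOTLB.
 * Each entry pins a reference to the backing file so that userspace
 * can keep the region mmap()ed for as long as the mapping exists.
 */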
static int vduse_iotlb_add_range(struct vduse_iova_domain *domain,
				 u64 start, u64 last,
				 u64 addr, unsigned int perm,
				 struct file *file, u64 offset)
{
	struct vdpa_map_file *map_file;
	int ret;

	map_file = kmalloc(sizeof(*map_file), GFP_ATOMIC);
	if (!map_file)
		return -ENOMEM;

	map_file->file = get_file(file);
	map_file->offset = offset;

	ret = vhost_iotlb_add_range_ctx(domain->iotlb, start, last,
					addr, perm, map_file);
	if (ret) {
		fput(map_file->file);
		kfree(map_file);
		return ret;
	}
	return 0;
}

static void vduse_iotlb_del_range(struct vduse_iova_domain *domain,
				  u64 start, u64 last)
{
	struct vdpa_map_file *map_file;
	struct vhost_iotlb_map *map;

	while ((map = vhost_iotlb_itree_first(domain->iotlb, start, last))) {
		map_file = (struct vdpa_map_file *)map->opaque;
		fput(map_file->file);
		kfree(map_file);
		vhost_iotlb_map_free(domain->iotlb, map);
	}
}
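
/*
 * Replace the whole IOTLB with the translations described by @iotlb.
 * On failure, every entry added so far is rolled back so the domain
 * ends up empty rather than partially populated.
 */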
int vduse_domain_set_map(struct vduse_iova_domain *domain,
			 struct vhost_iotlb *iotlb)
{
	struct vdpa_map_file *map_file;
	struct vhost_iotlb_map *map;
	u64 start = 0ULL, last = ULLONG_MAX;
	int ret;

	spin_lock(&domain->iotlb_lock);
	vduse_iotlb_del_range(domain, start, last);

	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		map_file = (struct vdpa_map_file *)map->opaque;
		ret = vduse_iotlb_add_range(domain, map->start, map->last,
					    map->addr, map->perm,
					    map_file->file,
					    map_file->offset);
		if (ret)
			goto err;
	}
	spin_unlock(&domain->iotlb_lock);

	return 0;
err:
	vduse_iotlb_del_range(domain, start, last);
	spin_unlock(&domain->iotlb_lock);
	return ret;
}

void vduse_domain_clear_map(struct vduse_iova_domain *domain,
			    struct vhost_iotlb *iotlb)
{
	struct vhost_iotlb_map *map;
	u64 start = 0ULL, last = ULLONG_MAX;

	spin_lock(&domain->iotlb_lock);
	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		vduse_iotlb_del_range(domain, map->start, map->last);
	}
	spin_unlock(&domain->iotlb_lock);
}
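
/*
 * Record the original physical address for each page-sized slot in
 * [iova, iova + size), allocating the backing bounce page on first
 * use.
 */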
static int vduse_domain_map_bounce_page(struct vduse_iova_domain *domain,
					u64 iova, u64 size, u64 paddr)
{
	struct vduse_bounce_map *map;
	u64 last = iova + size - 1;

	while (iova <= last) {
		map = &domain->bounce_maps[iova >> PAGE_SHIFT];
		if (!map->bounce_page) {
			map->bounce_page = alloc_page(GFP_ATOMIC);
			if (!map->bounce_page)
				return -ENOMEM;
		}
		map->orig_phys = paddr;
		paddr += PAGE_SIZE;
		iova += PAGE_SIZE;
	}
	return 0;
}

static void vduse_domain_unmap_bounce_page(struct vduse_iova_domain *domain,
					   u64 iova, u64 size)
{
	struct vduse_bounce_map *map;
	u64 last = iova + size - 1;

	while (iova <= last) {
		map = &domain->bounce_maps[iova >> PAGE_SHIFT];
		map->orig_phys = INVALID_PHYS_ADDR;
		iova += PAGE_SIZE;
	}
}
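
/*
 * Copy between the original buffer at physical address @orig and the
 * kmapped bounce buffer at @addr, page by page, since the original
 * buffer may cross page boundaries.
 */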
static void do_bounce(phys_addr_t orig, void *addr, size_t size,
		      enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(orig);
	unsigned int offset = offset_in_page(orig);
	struct page *page;
	unsigned int sz = 0;

	while (size) {
		sz = min_t(size_t, PAGE_SIZE - offset, size);

		page = pfn_to_page(pfn);
		if (dir == DMA_TO_DEVICE)
			memcpy_from_page(addr, page, offset, sz);
		else
			memcpy_to_page(page, offset, addr, sz);

		size -= sz;
		pfn++;
		addr += sz;
		offset = 0;
	}
}
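
/*
 * Sync a bounce-buffered IOVA range with its original pages:
 * DMA_TO_DEVICE copies original -> bounce, DMA_FROM_DEVICE copies
 * bounce -> original. Callers hold bounce_lock (read side).
 */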
static void vduse_domain_bounce(struct vduse_iova_domain *domain,
				dma_addr_t iova, size_t size,
				enum dma_data_direction dir)
{
	struct vduse_bounce_map *map;
	unsigned int offset;
	void *addr;
	size_t sz;

	if (iova >= domain->bounce_size)
		return;

	while (size) {
		map = &domain->bounce_maps[iova >> PAGE_SHIFT];
		offset = offset_in_page(iova);
		sz = min_t(size_t, PAGE_SIZE - offset, size);

		if (WARN_ON(!map->bounce_page ||
			    map->orig_phys == INVALID_PHYS_ADDR))
			return;

		addr = kmap_local_page(map->bounce_page);
		do_bounce(map->orig_phys + offset, addr + offset, sz, dir);
		kunmap_local(addr);
		size -= sz;
		iova += sz;
	}
}
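
/*
 * Look up the kernel page backing a coherent allocation at @iova,
 * returning it with an extra reference, or NULL if nothing is mapped
 * there.
 */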
static struct page *
vduse_domain_get_coherent_page(struct vduse_iova_domain *domain, u64 iova)
{
	u64 start = iova & PAGE_MASK;
	u64 last = start + PAGE_SIZE - 1;
	struct vhost_iotlb_map *map;
	struct page *page = NULL;

	spin_lock(&domain->iotlb_lock);
	map = vhost_iotlb_itree_first(domain->iotlb, start, last);
	if (!map)
		goto out;

	page = pfn_to_page((map->addr + iova - map->start) >> PAGE_SHIFT);
	get_page(page);
out:
	spin_unlock(&domain->iotlb_lock);

	return page;
}
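
/*
 * Look up the kernel bounce page for @iova. Returns NULL once
 * userspace has registered its own bounce pages, since the kernel
 * pages are no longer the canonical copy at that point.
 */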
static struct page *
vduse_domain_get_bounce_page(struct vduse_iova_domain *domain, u64 iova)
{
	struct vduse_bounce_map *map;
	struct page *page = NULL;

	read_lock(&domain->bounce_lock);
	map = &domain->bounce_maps[iova >> PAGE_SHIFT];
	if (domain->user_bounce_pages || !map->bounce_page)
		goto out;

	page = map->bounce_page;
	get_page(page);
out:
	read_unlock(&domain->bounce_lock);

	return page;
}

static void
vduse_domain_free_kernel_bounce_pages(struct vduse_iova_domain *domain)
{
	struct vduse_bounce_map *map;
	unsigned long pfn, bounce_pfns;

	bounce_pfns = domain->bounce_size >> PAGE_SHIFT;

	for (pfn = 0; pfn < bounce_pfns; pfn++) {
		map = &domain->bounce_maps[pfn];
		if (WARN_ON(map->orig_phys != INVALID_PHYS_ADDR))
			continue;

		if (!map->bounce_page)
			continue;

		__free_page(map->bounce_page);
		map->bounce_page = NULL;
	}
}
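
/*
 * Switch the bounce buffer over to userspace-supplied pages. Kernel
 * bounce pages that are currently in use are copied into the matching
 * user pages first, so in-flight DMA data is preserved.
 */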
int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
				       struct page **pages, int count)
{
	struct vduse_bounce_map *map;
	int i, ret;

	/* Now we don't support partial mapping */
	if (count != (domain->bounce_size >> PAGE_SHIFT))
		return -EINVAL;

	write_lock(&domain->bounce_lock);
	ret = -EEXIST;
	if (domain->user_bounce_pages)
		goto out;

	for (i = 0; i < count; i++) {
		map = &domain->bounce_maps[i];
		if (map->bounce_page) {
			/* Copy kernel page to user page if it's in use */
			if (map->orig_phys != INVALID_PHYS_ADDR)
				memcpy_to_page(pages[i], 0,
					       page_address(map->bounce_page),
					       PAGE_SIZE);
			__free_page(map->bounce_page);
		}
		map->bounce_page = pages[i];
		get_page(pages[i]);
	}
	domain->user_bounce_pages = true;
	ret = 0;
out:
	write_unlock(&domain->bounce_lock);

	return ret;
}

void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
{
	struct vduse_bounce_map *map;
	unsigned long i, count;

	write_lock(&domain->bounce_lock);
	if (!domain->user_bounce_pages)
		goto out;

	count = domain->bounce_size >> PAGE_SHIFT;
	for (i = 0; i < count; i++) {
		struct page *page = NULL;

		map = &domain->bounce_maps[i];
		if (WARN_ON(!map->bounce_page))
			continue;

		/* Copy user page to kernel page if it's in use */
		if (map->orig_phys != INVALID_PHYS_ADDR) {
			page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL);
			memcpy_from_page(page_address(page),
					 map->bounce_page, 0, PAGE_SIZE);
		}
		put_page(map->bounce_page);
		map->bounce_page = page;
	}
	domain->user_bounce_pages = false;
out:
	write_unlock(&domain->bounce_lock);
}
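
/*
 * Drop the IOTLB entry that exposes the bounce buffer, rechecking
 * bounce_map under the lock in case another path already reset it.
 */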
void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain)
{
	if (!domain->bounce_map)
		return;

	spin_lock(&domain->iotlb_lock);
	if (!domain->bounce_map)
		goto unlock;

	vduse_iotlb_del_range(domain, 0, domain->bounce_size - 1);
	domain->bounce_map = 0;
unlock:
	spin_unlock(&domain->iotlb_lock);
}

static int vduse_domain_init_bounce_map(struct vduse_iova_domain *domain)
{
	int ret = 0;

	if (domain->bounce_map)
		return 0;

	spin_lock(&domain->iotlb_lock);
	if (domain->bounce_map)
		goto unlock;

	ret = vduse_iotlb_add_range(domain, 0, domain->bounce_size - 1,
				    0, VHOST_MAP_RW, domain->file, 0);
	if (ret)
		goto unlock;

	domain->bounce_map = 1;
unlock:
	spin_unlock(&domain->iotlb_lock);
	return ret;
}
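
/*
 * Thin wrappers around the IOVA allocator: sizes are aligned to the
 * iova_domain granule and converted to page-frame counts. A return
 * value of 0 from the allocator means failure.
 */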
static dma_addr_t
vduse_domain_alloc_iova(struct iova_domain *iovad,
			unsigned long size, unsigned long limit)
{
	unsigned long shift = iova_shift(iovad);
	unsigned long iova_len = iova_align(iovad, size) >> shift;
	unsigned long iova_pfn;

	iova_pfn = alloc_iova_fast(iovad, iova_len, limit >> shift, true);

	return (dma_addr_t)iova_pfn << shift;
}

static void vduse_domain_free_iova(struct iova_domain *iovad,
				   dma_addr_t iova, size_t size)
{
	unsigned long shift = iova_shift(iovad);
	unsigned long iova_len = iova_align(iovad, size) >> shift;

	free_iova_fast(iovad, iova >> shift, iova_len);
}
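
/*
 * Streaming DMA map: allocate an IOVA within the bounce range, attach
 * the page's physical address to the bounce map and, for transfers
 * towards the device, copy the data into the bounce pages up front.
 */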
dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain,
				 struct page *page, unsigned long offset,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	struct iova_domain *iovad = &domain->stream_iovad;
	unsigned long limit = domain->bounce_size - 1;
	phys_addr_t pa = page_to_phys(page) + offset;
	dma_addr_t iova = vduse_domain_alloc_iova(iovad, size, limit);

	if (!iova)
		return DMA_MAPPING_ERROR;

	if (vduse_domain_init_bounce_map(domain))
		goto err;

	read_lock(&domain->bounce_lock);
	if (vduse_domain_map_bounce_page(domain, (u64)iova, (u64)size, pa))
		goto err_unlock;

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		vduse_domain_bounce(domain, iova, size, DMA_TO_DEVICE);

	read_unlock(&domain->bounce_lock);

	return iova;
err_unlock:
	read_unlock(&domain->bounce_lock);
err:
	vduse_domain_free_iova(iovad, iova, size);
	return DMA_MAPPING_ERROR;
}

void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
			     dma_addr_t dma_addr, size_t size,
			     enum dma_data_direction dir, unsigned long attrs)
{
	struct iova_domain *iovad = &domain->stream_iovad;

	read_lock(&domain->bounce_lock);
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		vduse_domain_bounce(domain, dma_addr, size, DMA_FROM_DEVICE);

	vduse_domain_unmap_bounce_page(domain, (u64)dma_addr, (u64)size);
	read_unlock(&domain->bounce_lock);
	vduse_domain_free_iova(iovad, dma_addr, size);
}
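
/*
 * Coherent DMA alloc: back the buffer with kernel pages and publish
 * the IOVA -> file-offset translation in the IOTLB so userspace can
 * mmap() the same memory; no bouncing happens on this path.
 */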
void *vduse_domain_alloc_coherent(struct vduse_iova_domain *domain,
				  size_t size, dma_addr_t *dma_addr,
				  gfp_t flag, unsigned long attrs)
{
	struct iova_domain *iovad = &domain->consistent_iovad;
	unsigned long limit = domain->iova_limit;
	dma_addr_t iova = vduse_domain_alloc_iova(iovad, size, limit);
	void *orig = alloc_pages_exact(size, flag);

	if (!iova || !orig)
		goto err;

	spin_lock(&domain->iotlb_lock);
	if (vduse_iotlb_add_range(domain, (u64)iova, (u64)iova + size - 1,
				  virt_to_phys(orig), VHOST_MAP_RW,
				  domain->file, (u64)iova)) {
		spin_unlock(&domain->iotlb_lock);
		goto err;
	}
	spin_unlock(&domain->iotlb_lock);

	*dma_addr = iova;

	return orig;
err:
	*dma_addr = DMA_MAPPING_ERROR;
	if (orig)
		free_pages_exact(orig, size);
	if (iova)
		vduse_domain_free_iova(iovad, iova, size);

	return NULL;
}

void vduse_domain_free_coherent(struct vduse_iova_domain *domain, size_t size,
				void *vaddr, dma_addr_t dma_addr,
				unsigned long attrs)
{
	struct iova_domain *iovad = &domain->consistent_iovad;
	struct vhost_iotlb_map *map;
	struct vdpa_map_file *map_file;
	phys_addr_t pa;

	spin_lock(&domain->iotlb_lock);
	map = vhost_iotlb_itree_first(domain->iotlb, (u64)dma_addr,
				      (u64)dma_addr + size - 1);
	if (WARN_ON(!map)) {
		spin_unlock(&domain->iotlb_lock);
		return;
	}
	map_file = (struct vdpa_map_file *)map->opaque;
	fput(map_file->file);
	kfree(map_file);
	pa = map->addr;
	vhost_iotlb_map_free(domain->iotlb, map);
	spin_unlock(&domain->iotlb_lock);

	vduse_domain_free_iova(iovad, dma_addr, size);
	free_pages_exact(phys_to_virt(pa), size);
}
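
/*
 * Fault handler for userspace mappings of the domain file: offsets
 * below bounce_size resolve to bounce pages, anything above to
 * coherent allocations.
 */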
static vm_fault_t vduse_domain_mmap_fault(struct vm_fault *vmf)
{
	struct vduse_iova_domain *domain = vmf->vma->vm_private_data;
	unsigned long iova = vmf->pgoff << PAGE_SHIFT;
	struct page *page;

	if (!domain)
		return VM_FAULT_SIGBUS;

	if (iova < domain->bounce_size)
		page = vduse_domain_get_bounce_page(domain, iova);
	else
		page = vduse_domain_get_coherent_page(domain, iova);

	if (!page)
		return VM_FAULT_SIGBUS;

	vmf->page = page;

	return 0;
}

static const struct vm_operations_struct vduse_domain_mmap_ops = {
	.fault = vduse_domain_mmap_fault,
};

static int vduse_domain_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vduse_iova_domain *domain = file->private_data;

	vm_flags_set(vma, VM_DONTDUMP | VM_DONTEXPAND);
	vma->vm_private_data = domain;
	vma->vm_ops = &vduse_domain_mmap_ops;

	return 0;
}

static int vduse_domain_release(struct inode *inode, struct file *file)
{
	struct vduse_iova_domain *domain = file->private_data;

	spin_lock(&domain->iotlb_lock);
	vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
	vduse_domain_remove_user_bounce_pages(domain);
	vduse_domain_free_kernel_bounce_pages(domain);
	spin_unlock(&domain->iotlb_lock);
	put_iova_domain(&domain->stream_iovad);
	put_iova_domain(&domain->consistent_iovad);
	vhost_iotlb_free(domain->iotlb);
	vfree(domain->bounce_maps);
	kfree(domain);

	return 0;
}

static const struct file_operations vduse_domain_fops = {
	.owner = THIS_MODULE,
	.mmap = vduse_domain_mmap,
	.release = vduse_domain_release,
};

void vduse_domain_destroy(struct vduse_iova_domain *domain)
{
	fput(domain->file);
}
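
/*
 * The IOVA space is split in two: [0, bounce_size) is served by
 * stream_iovad and backed by bounce pages, while the rest up to
 * iova_limit is served by consistent_iovad for coherent allocations.
 * The anon inode file is what userspace mmap()s to reach both.
 */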
struct vduse_iova_domain *
vduse_domain_create(unsigned long iova_limit, size_t bounce_size)
{
	struct vduse_iova_domain *domain;
	struct file *file;
	struct vduse_bounce_map *map;
	unsigned long pfn, bounce_pfns;
	int ret;

	bounce_pfns = PAGE_ALIGN(bounce_size) >> PAGE_SHIFT;
	if (iova_limit <= bounce_size)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->iotlb = vhost_iotlb_alloc(0, 0);
	if (!domain->iotlb)
		goto err_iotlb;

	domain->iova_limit = iova_limit;
	domain->bounce_size = PAGE_ALIGN(bounce_size);
	domain->bounce_maps = vzalloc(bounce_pfns *
				sizeof(struct vduse_bounce_map));
	if (!domain->bounce_maps)
		goto err_map;

	for (pfn = 0; pfn < bounce_pfns; pfn++) {
		map = &domain->bounce_maps[pfn];
		map->orig_phys = INVALID_PHYS_ADDR;
	}
	file = anon_inode_getfile("[vduse-domain]", &vduse_domain_fops,
				domain, O_RDWR);
	if (IS_ERR(file))
		goto err_file;

	domain->file = file;
	rwlock_init(&domain->bounce_lock);
	spin_lock_init(&domain->iotlb_lock);
	init_iova_domain(&domain->stream_iovad,
			PAGE_SIZE, IOVA_START_PFN);
	ret = iova_domain_init_rcaches(&domain->stream_iovad);
	if (ret)
		goto err_iovad_stream;
	init_iova_domain(&domain->consistent_iovad,
			PAGE_SIZE, bounce_pfns);
	ret = iova_domain_init_rcaches(&domain->consistent_iovad);
	if (ret)
		goto err_iovad_consistent;

	return domain;
err_iovad_consistent:
	put_iova_domain(&domain->stream_iovad);
err_iovad_stream:
	fput(file);
err_file:
	vfree(domain->bounce_maps);
err_map:
	vhost_iotlb_free(domain->iotlb);
err_iotlb:
	kfree(domain);
	return NULL;
}

int vduse_domain_init(void)
{
	return iova_cache_get();
}

void vduse_domain_exit(void)
{
	iova_cache_put();
}