/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies.  All rights reserved.
 * Copyright (c) 2013 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
#include <linux/dma-attrs.h>
#include <linux/iommu.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/pci.h>

#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_uiom_interval_tree.h"
static struct workqueue_struct *usnic_uiom_wq;
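/*
 * Number of scatterlist entries that fit in the remainder of a page
 * after the usnic_uiom_chunk header: (page size minus the offset of
 * page_list) divided by the size of one page_list entry.
 */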
#define USNIC_UIOM_PAGE_CHUNK						\
	((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list))	/\
	((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] -	\
	(void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))
static void usnic_uiom_reg_account(struct work_struct *work)
{
	struct usnic_uiom_reg *umem = container_of(work,
						struct usnic_uiom_reg, work);

	down_write(&umem->mm->mmap_sem);
	umem->mm->locked_vm -= umem->diff;
	up_write(&umem->mm->mmap_sem);
	mmput(umem->mm);
	kfree(umem);
}
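/*
 * IOMMU fault handler: called when a device in this domain DMAs to an
 * IOVA with no valid translation.  Log the fault and report it as
 * unhandled.
 */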
static int usnic_uiom_dma_fault(struct iommu_domain *domain,
				struct device *dev,
				unsigned long iova, int flags,
				void *token)
{
	usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n",
		dev_name(dev),
		domain, iova, flags);
	return -ENOSYS;
}
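/*
 * Drop the page references taken by usnic_uiom_get_pages(), marking
 * the pages dirty first if requested, then free the chunk bookkeeping.
 */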
static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
{
	struct usnic_uiom_chunk *chunk, *tmp;
	struct page *page;
	struct scatterlist *sg;
	int i;
	dma_addr_t pa;

	list_for_each_entry_safe(chunk, tmp, chunk_list, list) {
		for_each_sg(chunk->page_list, sg, chunk->nents, i) {
			page = sg_page(sg);
			pa = sg_phys(sg);
			if (dirty)
				set_page_dirty_lock(page);
			put_page(page);
			usnic_dbg("pa: %pa\n", &pa);
		}
		kfree(chunk);
	}
}
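/*
 * Pin the user pages backing [addr, addr + size) and record them as
 * scatterlist chunks on chunk_list.  The pinned pages are charged
 * against RLIMIT_MEMLOCK under mmap_sem; on failure everything pinned
 * so far is released.
 */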
static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
				int dmasync, struct list_head *chunk_list)
{
	struct page **page_list;
	struct scatterlist *sg;
	struct usnic_uiom_chunk *chunk;
	unsigned long locked;
	unsigned long lock_limit;
	unsigned long cur_base;
	unsigned long npages;
	int ret;
	int off;
	int i;
	int flags;
	dma_addr_t pa;
	DEFINE_DMA_ATTRS(attrs);
	unsigned int gup_flags;

	if (dmasync)
		dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);

	if (!can_do_mlock())
		return -EPERM;

	INIT_LIST_HEAD(chunk_list);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;

	down_write(&current->mm->mmap_sem);

	locked = npages + current->mm->locked_vm;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;
		goto out;
	}

	flags = IOMMU_READ | IOMMU_CACHE;
	flags |= (writable) ? IOMMU_WRITE : 0;
	gup_flags = FOLL_WRITE;
	gup_flags |= (writable) ? 0 : FOLL_FORCE;
	cur_base = addr & PAGE_MASK;
	ret = 0;

	while (npages) {
		ret = get_user_pages(current, current->mm, cur_base,
					min_t(unsigned long, npages,
					PAGE_SIZE / sizeof(struct page *)),
					gup_flags, page_list, NULL);

		if (ret < 0)
			goto out;

		npages -= ret;
		off = 0;

		while (ret) {
			chunk = kmalloc(sizeof(*chunk) +
					sizeof(struct scatterlist) *
					min_t(int, ret, USNIC_UIOM_PAGE_CHUNK),
					GFP_KERNEL);
			if (!chunk) {
				ret = -ENOMEM;
				goto out;
			}

			chunk->nents = min_t(int, ret, USNIC_UIOM_PAGE_CHUNK);
			sg_init_table(chunk->page_list, chunk->nents);
			for_each_sg(chunk->page_list, sg, chunk->nents, i) {
				sg_set_page(sg, page_list[i + off],
						PAGE_SIZE, 0);
				pa = sg_phys(sg);
				usnic_dbg("va: 0x%lx pa: %pa\n",
						cur_base + i*PAGE_SIZE, &pa);
			}
			cur_base += chunk->nents * PAGE_SIZE;
			ret -= chunk->nents;
			off += chunk->nents;
			list_add_tail(&chunk->list, chunk_list);
		}

		ret = 0;
	}

out:
	if (ret < 0)
		usnic_uiom_put_pages(chunk_list, 0);
	else
		current->mm->locked_vm = locked;

	up_write(&current->mm->mmap_sem);
	free_page((unsigned long) page_list);
	return ret;
}
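/*
 * Unmap every interval in the list, one page per iommu_unmap() call
 * (see the RH 970401 workaround below) rather than one call per
 * interval.
 */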
static void usnic_uiom_unmap_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_interval_node *interval, *tmp;
	long unsigned va, size;

	list_for_each_entry_safe(interval, tmp, intervals, link) {
		va = interval->start << PAGE_SHIFT;
		size = ((interval->last - interval->start) + 1) << PAGE_SHIFT;
		while (size > 0) {
			/* Workaround for RH 970401 */
			usnic_dbg("va 0x%lx size 0x%lx", va, PAGE_SIZE);
			iommu_unmap(pd->domain, va, PAGE_SIZE);
			va += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
}
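/*
 * Tear down a registration: remove its intervals from the PD's tree,
 * unmap them from the IOMMU domain, and unpin the pages.  Pages are
 * only marked dirty if the mapping was writable.
 */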
static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd,
					struct usnic_uiom_reg *uiomr,
					int dirty)
{
	int npages;
	unsigned long vpn_start, vpn_last;
	struct usnic_uiom_interval_node *interval, *tmp;
	int writable = 0;
	LIST_HEAD(rm_intervals);

	npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
	vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	spin_lock(&pd->lock);
	usnic_uiom_remove_interval(&pd->rb_root, vpn_start,
					vpn_last, &rm_intervals);
	usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd);

	list_for_each_entry_safe(interval, tmp, &rm_intervals, link) {
		if (interval->flags & IOMMU_WRITE)
			writable = 1;
		list_del(&interval->link);
		kfree(interval);
	}

	usnic_uiom_put_pages(&uiomr->chunk_list, dirty & writable);
	spin_unlock(&pd->lock);
}
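/*
 * Walk the pinned-page chunks and the sorted interval list in lockstep,
 * coalescing physically contiguous pages so each iommu_map() call
 * covers the largest possible range.  On failure, everything mapped so
 * far is unmapped again.
 */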
static int usnic_uiom_map_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_reg *uiomr)
{
	int i, err;
	size_t size;
	struct usnic_uiom_chunk *chunk;
	struct usnic_uiom_interval_node *interval_node;
	dma_addr_t pa;
	dma_addr_t pa_start = 0;
	dma_addr_t pa_end = 0;
	long int va_start = -EINVAL;
	struct usnic_uiom_pd *pd = uiomr->pd;
	long int va = uiomr->va & PAGE_MASK;
	int flags = IOMMU_READ | IOMMU_CACHE;

	flags |= (uiomr->writable) ? IOMMU_WRITE : 0;
	chunk = list_first_entry(&uiomr->chunk_list, struct usnic_uiom_chunk,
									list);
	list_for_each_entry(interval_node, intervals, link) {
iter_chunk:
		for (i = 0; i < chunk->nents; i++, va += PAGE_SIZE) {
			pa = sg_phys(&chunk->page_list[i]);
			if ((va >> PAGE_SHIFT) < interval_node->start)
				continue;

			if ((va >> PAGE_SHIFT) == interval_node->start) {
				/* First page of the interval */
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			WARN_ON(va_start == -EINVAL);

			if ((pa_end + PAGE_SIZE != pa) &&
					(pa != pa_start)) {
				/* PAs are not contiguous */
				size = pa_end - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
							size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			if ((va >> PAGE_SHIFT) == interval_node->last) {
				/* Last page of the interval */
				size = pa - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
						size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				break;
			}

			if (pa != pa_start)
				pa_end += PAGE_SIZE;
		}

		if (i == chunk->nents) {
			/*
			 * Hit last entry of the chunk,
			 * hence advance to next chunk
			 */
			chunk = list_first_entry(&chunk->list,
						struct usnic_uiom_chunk,
						list);
			goto iter_chunk;
		}
	}

	return 0;

err_out:
	usnic_uiom_unmap_sorted_intervals(intervals, pd);
	return err;
}
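/*
 * Register [addr, addr + size) with the PD: pin the pages, compute the
 * intervals not already mapped, map them, and insert the new interval
 * into the PD's tree.
 */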
struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
						unsigned long addr, size_t size,
						int writable, int dmasync)
{
	struct usnic_uiom_reg *uiomr;
	unsigned long va_base, vpn_start, vpn_last;
	unsigned long npages;
	int offset, err;
	LIST_HEAD(sorted_diff_intervals);

	/*
	 * Intel IOMMU map throws an error if a translation entry is
	 * changed from read to write.  This module may not unmap
	 * and then remap the entry after fixing the permission
	 * because this opens up a small window where hw DMA may
	 * page fault.  Hence, make all entries writable.
	 */
	writable = 1;

	va_base = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT;
	vpn_start = (addr & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	uiomr = kmalloc(sizeof(*uiomr), GFP_KERNEL);
	if (!uiomr)
		return ERR_PTR(-ENOMEM);

	uiomr->va = va_base;
	uiomr->offset = offset;
	uiomr->length = size;
	uiomr->writable = writable;
	uiomr->pd = pd;

	err = usnic_uiom_get_pages(addr, size, writable, dmasync,
					&uiomr->chunk_list);
	if (err) {
		usnic_err("Failed get_pages vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_free_uiomr;
	}

	spin_lock(&pd->lock);
	err = usnic_uiom_get_intervals_diff(vpn_start, vpn_last,
						(writable) ? IOMMU_WRITE : 0,
						IOMMU_WRITE,
						&pd->rb_root,
						&sorted_diff_intervals);
	if (err) {
		usnic_err("Failed disjoint interval vpn [0x%lx,0x%lx] err %d\n",
						vpn_start, vpn_last, err);
		goto out_put_pages;
	}

	err = usnic_uiom_map_sorted_intervals(&sorted_diff_intervals, uiomr);
	if (err) {
		usnic_err("Failed map interval vpn [0x%lx,0x%lx] err %d\n",
						vpn_start, vpn_last, err);
		goto out_put_intervals;
	}

	err = usnic_uiom_insert_interval(&pd->rb_root, vpn_start, vpn_last,
					(writable) ? IOMMU_WRITE : 0);
	if (err) {
		usnic_err("Failed insert interval vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_unmap_intervals;
	}

	usnic_uiom_put_interval_set(&sorted_diff_intervals);
	spin_unlock(&pd->lock);

	return uiomr;

out_unmap_intervals:
	usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd);
out_put_intervals:
	usnic_uiom_put_interval_set(&sorted_diff_intervals);
out_put_pages:
	usnic_uiom_put_pages(&uiomr->chunk_list, 0);
	spin_unlock(&pd->lock);
out_free_uiomr:
	kfree(uiomr);
	return ERR_PTR(err);
}
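/*
 * Release a registration: tear down the IOMMU mappings and page pins,
 * then subtract the pages from the owning mm's locked_vm accounting.
 */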
void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing)
{
	struct mm_struct *mm;
	unsigned long diff;

	__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);

	mm = get_task_mm(current);
	if (!mm) {
		kfree(uiomr);
		return;
	}

	diff = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;

	/*
	 * We may be called with the mm's mmap_sem already held.  This
	 * can happen when a userspace munmap() is the call that drops
	 * the last reference to our file and calls our release
	 * method.  If there are memory regions to destroy, we'll end
	 * up here and not be able to take the mmap_sem.  In that case
	 * we defer the vm_locked accounting to the system workqueue.
	 */
	if (closing) {
		if (!down_write_trylock(&mm->mmap_sem)) {
			INIT_WORK(&uiomr->work, usnic_uiom_reg_account);
			uiomr->mm = mm;
			uiomr->diff = diff;

			queue_work(usnic_uiom_wq, &uiomr->work);
			return;
		}
	} else
		down_write(&mm->mmap_sem);

	current->mm->locked_vm -= diff;
	up_write(&mm->mmap_sem);
	mmput(mm);
	kfree(uiomr);
}
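/*
 * Allocate a protection domain backed by a new IOMMU domain on the PCI
 * bus, with usnic_uiom_dma_fault() installed as its fault handler.
 */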
struct usnic_uiom_pd *usnic_uiom_alloc_pd(void)
{
	struct usnic_uiom_pd *pd;
	void *domain;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->domain = domain = iommu_domain_alloc(&pci_bus_type);
	if (!domain) {
		usnic_err("Failed to allocate IOMMU domain");
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL);

	spin_lock_init(&pd->lock);
	INIT_LIST_HEAD(&pd->devs);

	return pd;
}
void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd)
{
	iommu_domain_free(pd->domain);
	kfree(pd);
}
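/*
 * Attach a device to the PD's IOMMU domain.  Devices whose IOMMU
 * cannot provide cache-coherent DMA are rejected, since mappings in
 * this domain are created with IOMMU_CACHE.
 */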
int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int err;

	uiom_dev = kzalloc(sizeof(*uiom_dev), GFP_ATOMIC);
	if (!uiom_dev)
		return -ENOMEM;
	uiom_dev->dev = dev;

	err = iommu_attach_device(pd->domain, dev);
	if (err)
		goto out_free_dev;

	if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) {
		usnic_err("IOMMU of %s does not support cache coherency\n",
				dev_name(dev));
		err = -EINVAL;
		goto out_detach_device;
	}

	spin_lock(&pd->lock);
	list_add_tail(&uiom_dev->link, &pd->devs);
	pd->dev_cnt++;
	spin_unlock(&pd->lock);

	return 0;

out_detach_device:
	iommu_detach_device(pd->domain, dev);
out_free_dev:
	kfree(uiom_dev);
	return err;
}
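/*
 * Remove a device from the PD's device list and detach it from the
 * IOMMU domain.  Detaching a device that was never attached only logs
 * an error.
 */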
void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int found = 0;

	spin_lock(&pd->lock);
	list_for_each_entry(uiom_dev, &pd->devs, link) {
		if (uiom_dev->dev == dev) {
			found = 1;
			break;
		}
	}

	if (!found) {
		usnic_err("Unable to free dev %s - not found\n",
				dev_name(dev));
		spin_unlock(&pd->lock);
		return;
	}

	list_del(&uiom_dev->link);
	pd->dev_cnt--;
	spin_unlock(&pd->lock);

	return iommu_detach_device(pd->domain, dev);
}
struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_dev *uiom_dev;
	struct device **devs;
	int i = 0;

	spin_lock(&pd->lock);
	devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC);
	if (!devs) {
		devs = ERR_PTR(-ENOMEM);
		goto out;
	}

	list_for_each_entry(uiom_dev, &pd->devs, link) {
		devs[i++] = uiom_dev->dev;
	}
out:
	spin_unlock(&pd->lock);
	return devs;
}
void usnic_uiom_free_dev_list(struct device **devs)
{
	kfree(devs);
}
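/*
 * One-time module setup: require an IOMMU on the PCI bus and create
 * the workqueue used for deferred locked_vm accounting.
 */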
int usnic_uiom_init(char *drv_name)
{
	if (!iommu_present(&pci_bus_type)) {
		usnic_err("IOMMU required but not present or enabled.  USNIC QPs will not function w/o enabling IOMMU\n");
		return -EPERM;
	}

	usnic_uiom_wq = create_workqueue(drv_name);
	if (!usnic_uiom_wq) {
		usnic_err("Unable to alloc wq for drv %s\n", drv_name);
		return -ENOMEM;
	}

	return 0;
}
void usnic_uiom_fini(void)
{
	flush_workqueue(usnic_uiom_wq);
	destroy_workqueue(usnic_uiom_wq);
}