/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pid.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <linux/interval_tree_generic.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>

/*
 * The ib_umem list keeps track of memory regions for which the HW
 * device requests to receive notification when the related memory
 * mapping is changed.
 *
 * ib_umem_lock protects the list.
 */

static u64 node_start(struct umem_odp_node *n)
{
	struct ib_umem_odp *umem_odp =
			container_of(n, struct ib_umem_odp, interval_tree);
	return ib_umem_start(umem_odp->umem);
}

/* Note that the representation of the intervals in the interval tree
 * considers the ending point as contained in the interval, while the
 * function ib_umem_end returns the first address which is not contained
 * in the umem.
 */
static u64 node_last(struct umem_odp_node *n)
{
	struct ib_umem_odp *umem_odp =
			container_of(n, struct ib_umem_odp, interval_tree);
	return ib_umem_end(umem_odp->umem) - 1;
}

INTERVAL_TREE_DEFINE(struct umem_odp_node, rb, u64, __subtree_last,
		     node_start, node_last, static, rbt_ib_umem)
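
/*
 * Per-umem notifier accounting: notifiers_count tracks how many mmu notifier
 * invalidations are currently running against a umem, and notifiers_seq is
 * bumped each time one completes. The ODP page-fault path compares these
 * (see ib_umem_mmu_notifier_retry()) to detect a racing invalidation, and
 * waits on notifier_completion until all running notifiers have finished.
 */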
static void ib_umem_notifier_start_account(struct ib_umem *item)
{
	mutex_lock(&item->odp_data->umem_mutex);

	/* Only update private counters for this umem if it has them.
	 * Otherwise skip it. All page faults will be delayed for this umem. */
	if (item->odp_data->mn_counters_active) {
		int notifiers_count = item->odp_data->notifiers_count++;

		if (notifiers_count == 0)
			/* Initialize the completion object for waiting on
			 * notifiers. Since notifier_count is zero, no one
			 * should be waiting right now. */
			reinit_completion(&item->odp_data->notifier_completion);
	}
	mutex_unlock(&item->odp_data->umem_mutex);
}

static void ib_umem_notifier_end_account(struct ib_umem *item)
{
	mutex_lock(&item->odp_data->umem_mutex);

	/* Only update private counters for this umem if it has them.
	 * Otherwise skip it. All page faults will be delayed for this umem. */
	if (item->odp_data->mn_counters_active) {
		/*
		 * This sequence increase will notify the QP page fault that
		 * the page that is going to be mapped in the spte could have
		 * been freed.
		 */
		++item->odp_data->notifiers_seq;
		if (--item->odp_data->notifiers_count == 0)
			complete_all(&item->odp_data->notifier_completion);
	}
	mutex_unlock(&item->odp_data->umem_mutex);
}

/* Account for a new mmu notifier in an ib_ucontext. */
static void ib_ucontext_notifier_start_account(struct ib_ucontext *context)
{
	atomic_inc(&context->notifier_count);
}

/* Account for a terminating mmu notifier in an ib_ucontext.
 *
 * Must be called with the ib_ucontext->umem_rwsem semaphore unlocked, since
 * the function takes the semaphore itself. */
static void ib_ucontext_notifier_end_account(struct ib_ucontext *context)
{
	int zero_notifiers = atomic_dec_and_test(&context->notifier_count);

	if (zero_notifiers &&
	    !list_empty(&context->no_private_counters)) {
		/* No currently running mmu notifiers. Now is the chance to
		 * add private accounting to all previously added umems. */
		struct ib_umem_odp *odp_data, *next;

		/* Prevent concurrent mmu notifiers from working on the
		 * no_private_counters list. */
		down_write(&context->umem_rwsem);

		/* Read the notifier_count again, with the umem_rwsem
		 * semaphore taken for write. */
		if (!atomic_read(&context->notifier_count)) {
			list_for_each_entry_safe(odp_data, next,
						 &context->no_private_counters,
						 no_private_counters) {
				mutex_lock(&odp_data->umem_mutex);
				odp_data->mn_counters_active = true;
				list_del(&odp_data->no_private_counters);
				complete_all(&odp_data->notifier_completion);
				mutex_unlock(&odp_data->umem_mutex);
			}
		}

		up_write(&context->umem_rwsem);
	}
}
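
/*
 * Trampoline passed to rbt_ib_umem_for_each_in_range() from the ->release
 * notifier below: marks one umem as dying, wakes any fault handler waiting
 * on notifier_completion, and invalidates the umem's entire range.
 */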
static int ib_umem_notifier_release_trampoline(struct ib_umem *item, u64 start,
					       u64 end, void *cookie) {
	/*
	 * Increase the number of notifiers running, to
	 * prevent any further fault handling on this MR.
	 */
	ib_umem_notifier_start_account(item);
	item->odp_data->dying = 1;
	/* Make sure that the fact the umem is dying is out before we release
	 * all pending page faults. */
	smp_wmb();
	complete_all(&item->odp_data->notifier_completion);
	item->context->invalidate_range(item, ib_umem_start(item),
					ib_umem_end(item));
	return 0;
}
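
/*
 * ->release runs when the owning address space is being torn down (process
 * exit). Walk every ODP umem registered in this ucontext and invalidate it,
 * so the HW stops accessing pages that are about to be freed.
 */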
static void ib_umem_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

	if (!context->invalidate_range)
		return;

	ib_ucontext_notifier_start_account(context);
	down_read(&context->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&context->umem_tree, 0,
				      ULLONG_MAX,
				      ib_umem_notifier_release_trampoline,
				      true, NULL);
	up_read(&context->umem_rwsem);
}

static int invalidate_page_trampoline(struct ib_umem *item, u64 start,
				      u64 end, void *cookie)
{
	ib_umem_notifier_start_account(item);
	item->context->invalidate_range(item, start, start + PAGE_SIZE);
	ib_umem_notifier_end_account(item);
	return 0;
}

static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
					     u64 end, void *cookie)
{
	ib_umem_notifier_start_account(item);
	item->context->invalidate_range(item, start, end);
	return 0;
}
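
/*
 * ->invalidate_range_start: invalidate every ODP umem overlapping
 * [start, end). When the notifier is not allowed to block, the rwsem is only
 * tried; returning -EAGAIN tells the mmu notifier core to retry later.
 */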
static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
						   struct mm_struct *mm,
						   unsigned long start,
						   unsigned long end, bool blockable)
{
	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
	int ret;

	if (!context->invalidate_range)
		return 0;
	if (blockable)
		down_read(&context->umem_rwsem);
	else if (!down_read_trylock(&context->umem_rwsem))
		return -EAGAIN;
	ib_ucontext_notifier_start_account(context);
	ret = rbt_ib_umem_for_each_in_range(&context->umem_tree, start, end,
					    invalidate_range_start_trampoline,
					    blockable, NULL);
	up_read(&context->umem_rwsem);
	return ret;
}

static int invalidate_range_end_trampoline(struct ib_umem *item, u64 start,
					   u64 end, void *cookie)
{
	ib_umem_notifier_end_account(item);
	return 0;
}
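
/*
 * ->invalidate_range_end: drop the per-umem and per-context notifier counts
 * taken in range_start, waking any page-fault handler that was waiting for
 * the invalidation to finish.
 */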
static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

	if (!context->invalidate_range)
		return;

	/*
	 * TODO: we currently bail out if there is any sleepable work to be done
	 * in ib_umem_notifier_invalidate_range_start so we shouldn't really block
	 * here. But this is ugly and fragile.
	 */
	down_read(&context->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&context->umem_tree, start, end,
				      invalidate_range_end_trampoline, true, NULL);
	up_read(&context->umem_rwsem);
	ib_ucontext_notifier_end_account(context);
}

static const struct mmu_notifier_ops ib_umem_notifiers = {
	.release                    = ib_umem_notifier_release,
	.invalidate_range_start     = ib_umem_notifier_invalidate_range_start,
	.invalidate_range_end       = ib_umem_notifier_invalidate_range_end,
};
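
/*
 * ib_alloc_odp_umem - allocate an ODP-only umem covering [addr, addr + size).
 * No pages are pinned here; page_list/dma_list start out empty and are
 * populated later by ib_umem_odp_map_dma_pages() from the page-fault path.
 * The new umem is inserted into the ucontext's interval tree so that mmu
 * notifier invalidations can find it.
 */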
struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context,
				  unsigned long addr, size_t size)
{
	struct ib_umem *umem;
	struct ib_umem_odp *odp_data;
	int pages = size >> PAGE_SHIFT;
	int ret;

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);
	umem->context    = context;
	umem->length     = size;
	umem->address    = addr;
	umem->page_shift = PAGE_SHIFT;
	umem->writable   = 1;

	odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
	if (!odp_data) {
		ret = -ENOMEM;
		goto out_umem;
	}
	odp_data->umem = umem;
	mutex_init(&odp_data->umem_mutex);
	init_completion(&odp_data->notifier_completion);
	odp_data->page_list =
		vzalloc(array_size(pages, sizeof(*odp_data->page_list)));
	if (!odp_data->page_list) {
		ret = -ENOMEM;
		goto out_odp_data;
	}
	odp_data->dma_list =
		vzalloc(array_size(pages, sizeof(*odp_data->dma_list)));
	if (!odp_data->dma_list) {
		ret = -ENOMEM;
		goto out_page_list;
	}

	down_write(&context->umem_rwsem);
	context->odp_mrs_count++;
	rbt_ib_umem_insert(&odp_data->interval_tree, &context->umem_tree);
	if (likely(!atomic_read(&context->notifier_count)))
		odp_data->mn_counters_active = true;
	else
		list_add(&odp_data->no_private_counters,
			 &context->no_private_counters);
	up_write(&context->umem_rwsem);

	umem->odp_data = odp_data;
	return umem;

out_page_list:
	vfree(odp_data->page_list);
out_odp_data:
	kfree(odp_data);
out_umem:
	kfree(umem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_alloc_odp_umem);
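
/*
 * ib_umem_odp_get - initialize the ODP state of a umem obtained from
 * ib_umem_get(): allocate the per-page bookkeeping, add the umem to the
 * ucontext's interval tree, and register the mmu notifier when this is the
 * first ODP MR in the context.
 */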
int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
		    int access)
{
	int ret_val;
	struct pid *our_pid;
	struct mm_struct *mm = get_task_mm(current);

	if (!mm)
		return -EINVAL;

	if (access & IB_ACCESS_HUGETLB) {
		struct vm_area_struct *vma;
		struct hstate *h;

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, ib_umem_start(umem));
		if (!vma || !is_vm_hugetlb_page(vma)) {
			up_read(&mm->mmap_sem);
			return -EINVAL;
		}
		h = hstate_vma(vma);
		umem->page_shift = huge_page_shift(h);
		up_read(&mm->mmap_sem);
		umem->hugetlb = 1;
	} else {
		umem->hugetlb = 0;
	}

	/* Prevent creating ODP MRs in child processes */
	rcu_read_lock();
	our_pid = get_task_pid(current->group_leader, PIDTYPE_PID);
	rcu_read_unlock();
	put_pid(our_pid);
	if (context->tgid != our_pid) {
		ret_val = -EINVAL;
		goto out_mm;
	}

	umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);
	if (!umem->odp_data) {
		ret_val = -ENOMEM;
		goto out_mm;
	}
	umem->odp_data->umem = umem;
	mutex_init(&umem->odp_data->umem_mutex);
	init_completion(&umem->odp_data->notifier_completion);

	if (ib_umem_num_pages(umem)) {
		umem->odp_data->page_list =
			vzalloc(array_size(sizeof(*umem->odp_data->page_list),
					   ib_umem_num_pages(umem)));
		if (!umem->odp_data->page_list) {
			ret_val = -ENOMEM;
			goto out_odp_data;
		}
		umem->odp_data->dma_list =
			vzalloc(array_size(sizeof(*umem->odp_data->dma_list),
					   ib_umem_num_pages(umem)));
		if (!umem->odp_data->dma_list) {
			ret_val = -ENOMEM;
			goto out_page_list;
		}
	}

	/*
	 * When using MMU notifiers, we will get a
	 * notification before the "current" task (and MM) is
	 * destroyed. We use the umem_rwsem semaphore to synchronize.
	 */
	down_write(&context->umem_rwsem);
	context->odp_mrs_count++;
	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
		rbt_ib_umem_insert(&umem->odp_data->interval_tree,
				   &context->umem_tree);
	if (likely(!atomic_read(&context->notifier_count)) ||
	    context->odp_mrs_count == 1)
		umem->odp_data->mn_counters_active = true;
	else
		list_add(&umem->odp_data->no_private_counters,
			 &context->no_private_counters);
	downgrade_write(&context->umem_rwsem);

	if (context->odp_mrs_count == 1) {
		/*
		 * Note that at this point, no MMU notifier is running
		 * for this context!
		 */
		atomic_set(&context->notifier_count, 0);
		INIT_HLIST_NODE(&context->mn.hlist);
		context->mn.ops = &ib_umem_notifiers;
		/*
		 * Lock-dep detects a false positive for mmap_sem vs.
		 * umem_rwsem, due to not grasping downgrade_write correctly.
		 */
		lockdep_off();
		ret_val = mmu_notifier_register(&context->mn, mm);
		lockdep_on();
		if (ret_val) {
			pr_err("Failed to register mmu_notifier %d\n", ret_val);
			ret_val = -EBUSY;
			goto out_mutex;
		}
	}

	up_read(&context->umem_rwsem);

	/*
	 * Note that doing an mmput can cause a notifier for the relevant mm.
	 * If the notifier is called while we hold the umem_rwsem, this will
	 * cause a deadlock. Therefore, we release the reference only after we
	 * released the semaphore.
	 */
	mmput(mm);
	return 0;

out_mutex:
	up_read(&context->umem_rwsem);
	vfree(umem->odp_data->dma_list);
out_page_list:
	vfree(umem->odp_data->page_list);
out_odp_data:
	kfree(umem->odp_data);
out_mm:
	mmput(mm);
	return ret_val;
}
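
/*
 * ib_umem_odp_release - tear down the ODP state of a umem: unmap any pages
 * still mapped, remove the umem from the interval tree, and, when the last
 * ODP MR of the context goes away, unregister the mmu notifier.
 */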
void ib_umem_odp_release(struct ib_umem *umem)
{
	struct ib_ucontext *context = umem->context;

	/*
	 * Ensure that no more pages are mapped in the umem.
	 *
	 * It is the driver's responsibility to ensure, before calling us,
	 * that the hardware will not attempt to access the MR any more.
	 */
	ib_umem_odp_unmap_dma_pages(umem, ib_umem_start(umem),
				    ib_umem_end(umem));

	down_write(&context->umem_rwsem);
	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
		rbt_ib_umem_remove(&umem->odp_data->interval_tree,
				   &context->umem_tree);
	context->odp_mrs_count--;
	if (!umem->odp_data->mn_counters_active) {
		list_del(&umem->odp_data->no_private_counters);
		complete_all(&umem->odp_data->notifier_completion);
	}

	/*
	 * Downgrade the lock to a read lock. This ensures that the notifiers
	 * (who lock the mutex for reading) will be able to finish, and we
	 * will be able to eventually obtain the mmu notifiers SRCU. Note
	 * that since we are doing it atomically, no other user could register
	 * and unregister while we do the check.
	 */
	downgrade_write(&context->umem_rwsem);
	if (!context->odp_mrs_count) {
		struct task_struct *owning_process = NULL;
		struct mm_struct *owning_mm = NULL;

		owning_process = get_pid_task(context->tgid,
					      PIDTYPE_PID);
		if (owning_process == NULL)
			/*
			 * The process is already dead, the notifiers were
			 * already removed.
			 */
			goto out;

		owning_mm = get_task_mm(owning_process);
		if (owning_mm == NULL)
			/*
			 * The process' mm is already dead, the notifiers were
			 * already removed.
			 */
			goto out_put_task;
		mmu_notifier_unregister(&context->mn, owning_mm);

		mmput(owning_mm);

out_put_task:
		put_task_struct(owning_process);
	}
out:
	up_read(&context->umem_rwsem);

	vfree(umem->odp_data->dma_list);
	vfree(umem->odp_data->page_list);
	kfree(umem->odp_data);
	kfree(umem);
}

/*
 * Map for DMA and insert a single page into the on-demand paging page tables.
 *
 * @umem: the umem to insert the page to.
 * @page_index: index in the umem to add the page to.
 * @page: the page struct to map and add.
 * @access_mask: access permissions needed for this page.
 * @current_seq: sequence number for synchronization with invalidations.
 *               the sequence number is taken from
 *               umem->odp_data->notifiers_seq.
 *
 * The function returns -EFAULT if the DMA mapping operation fails. It returns
 * -EAGAIN if a concurrent invalidation prevents us from updating the page.
 *
 * The page is released via put_page even if the operation failed. For
 * on-demand pinning, the page is released whenever it isn't stored in the
 * umem.
 */
static int ib_umem_odp_map_dma_single_page(
		struct ib_umem *umem, int page_index, struct page *page,
		u64 access_mask, unsigned long current_seq)
{
	struct ib_device *dev = umem->context->device;
	dma_addr_t dma_addr;
	int stored_page = 0;
	int remove_existing_mapping = 0;
	int ret = 0;

	/*
	 * Note: we avoid writing if seq is different from the initial seq, to
	 * handle case of a racing notifier. This check also allows us to bail
	 * early if we have a notifier running in parallel with us.
	 */
	if (ib_umem_mmu_notifier_retry(umem, current_seq)) {
		ret = -EAGAIN;
		goto out;
	}
	if (!(umem->odp_data->dma_list[page_index])) {
		dma_addr = ib_dma_map_page(dev, page,
					   0, BIT(umem->page_shift),
					   DMA_BIDIRECTIONAL);
		if (ib_dma_mapping_error(dev, dma_addr)) {
			ret = -EFAULT;
			goto out;
		}
		umem->odp_data->dma_list[page_index] = dma_addr | access_mask;
		umem->odp_data->page_list[page_index] = page;
		umem->npages++;
		stored_page = 1;
	} else if (umem->odp_data->page_list[page_index] == page) {
		umem->odp_data->dma_list[page_index] |= access_mask;
	} else {
		pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
		       umem->odp_data->page_list[page_index], page);
		/* Better remove the mapping now, to prevent any further
		 * damage. */
		remove_existing_mapping = 1;
	}
out:
	/* On Demand Paging - avoid pinning the page */
	if (umem->context->invalidate_range || !stored_page)
		put_page(page);
	if (remove_existing_mapping && umem->context->invalidate_range) {
		invalidate_page_trampoline(
			umem,
			ib_umem_start(umem) + (page_index << umem->page_shift),
			ib_umem_start(umem) + ((page_index + 1) <<
					       umem->page_shift),
			NULL);
		ret = -EAGAIN;
	}
	return ret;
}

/*
 * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR.
 *
 * Pins the range of pages passed in the argument, and maps them to
 * DMA addresses. The DMA addresses of the mapped pages are updated in
 * umem->odp_data->dma_list.
 *
 * Returns the number of pages mapped on success, and a negative error code
 * on failure.
 * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
 * the function from completing its task.
 * An -ENOENT error code indicates that the userspace process is being
 * terminated and mm was already destroyed.
 * @umem: the umem to map and pin
 * @user_virt: the address from which we need to map.
 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
 *        bigger due to alignment, and may also be smaller in case of an error
 *        pinning or mapping a page. The actual number of pages mapped is
 *        returned in the return value.
 * @access_mask: bit mask of the requested access permissions for the given
 *               range.
 * @current_seq: the MMU notifiers sequence value for synchronization with
 *               invalidations. The sequence number is read from
 *               umem->odp_data->notifiers_seq before calling this function.
 */
int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
			      u64 access_mask, unsigned long current_seq)
{
	struct task_struct *owning_process = NULL;
	struct mm_struct *owning_mm = NULL;
	struct page **local_page_list = NULL;
	u64 page_mask, off;
	int j, k, ret = 0, start_idx, npages = 0, page_shift;
	unsigned int flags = 0;
	phys_addr_t p = 0;

	if (access_mask == 0)
		return -EINVAL;
	if (user_virt < ib_umem_start(umem) ||
	    user_virt + bcnt > ib_umem_end(umem))
		return -EFAULT;

	local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
	if (!local_page_list)
		return -ENOMEM;

	page_shift = umem->page_shift;
	page_mask = ~(BIT(page_shift) - 1);
	off = user_virt & (~page_mask);
	user_virt = user_virt & page_mask;
	bcnt += off; /* Charge for the first page offset as well. */

	owning_process = get_pid_task(umem->context->tgid, PIDTYPE_PID);
	if (owning_process == NULL) {
		ret = -EINVAL;
		goto out_no_task;
	}
	owning_mm = get_task_mm(owning_process);
	if (owning_mm == NULL) {
		ret = -ENOENT;
		goto out_put_task;
	}

	if (access_mask & ODP_WRITE_ALLOWED_BIT)
		flags |= FOLL_WRITE;

	start_idx = (user_virt - ib_umem_start(umem)) >> page_shift;
	k = start_idx;

	while (bcnt > 0) {
		const size_t gup_num_pages = min_t(size_t,
				ALIGN(bcnt, PAGE_SIZE) / PAGE_SIZE,
				PAGE_SIZE / sizeof(struct page *));

		down_read(&owning_mm->mmap_sem);
		/*
		 * Note: this might result in redundant page getting. We can
		 * avoid this by checking dma_list to be 0 before calling
		 * get_user_pages. However, this makes the code much more
		 * complex (and doesn't gain us much performance in most use
		 * cases).
		 */
		npages = get_user_pages_remote(owning_process, owning_mm,
				user_virt, gup_num_pages,
				flags, local_page_list, NULL, NULL);
		up_read(&owning_mm->mmap_sem);
		if (npages < 0)
			break;

		bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
		mutex_lock(&umem->odp_data->umem_mutex);
		for (j = 0; j < npages; j++, user_virt += PAGE_SIZE) {
			if (user_virt & ~page_mask) {
				p += PAGE_SIZE;
				if (page_to_phys(local_page_list[j]) != p) {
					ret = -EFAULT;
					break;
				}
				put_page(local_page_list[j]);
				continue;
			}

			ret = ib_umem_odp_map_dma_single_page(
					umem, k, local_page_list[j],
					access_mask, current_seq);
			if (ret < 0)
				break;

			p = page_to_phys(local_page_list[j]);
			k++;
		}
		mutex_unlock(&umem->odp_data->umem_mutex);

		if (ret < 0) {
			/* Release left over pages when handling errors. */
			for (++j; j < npages; ++j)
				put_page(local_page_list[j]);
			break;
		}
	}

	if (ret >= 0) {
		if (npages < 0 && k == start_idx)
			ret = npages;
		else
			ret = k - start_idx;
	}

	mmput(owning_mm);
out_put_task:
	put_task_struct(owning_process);
out_no_task:
	free_page((unsigned long)local_page_list);
	return ret;
}
EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);
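
/*
 * ib_umem_odp_unmap_dma_pages - DMA-unmap and release the pages backing
 * [virt, bound) in the umem. Callers guarantee that no page fault can race
 * with this (notifiers_count is elevated for the whole run), so each page is
 * freed exactly once.
 */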
void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
				 u64 bound)
{
	int idx;
	u64 addr;
	struct ib_device *dev = umem->context->device;

	virt = max_t(u64, virt, ib_umem_start(umem));
	bound = min_t(u64, bound, ib_umem_end(umem));
	/* Note that during the run of this function, the
	 * notifiers_count of the MR is > 0, preventing any racing
	 * faults from completing. We might be racing with other
	 * invalidations, so we must make sure we free each page only
	 * once. */
	mutex_lock(&umem->odp_data->umem_mutex);
	for (addr = virt; addr < bound; addr += BIT(umem->page_shift)) {
		idx = (addr - ib_umem_start(umem)) >> umem->page_shift;
		if (umem->odp_data->page_list[idx]) {
			struct page *page = umem->odp_data->page_list[idx];
			dma_addr_t dma = umem->odp_data->dma_list[idx];
			dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;

			WARN_ON(!dma_addr);

			ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE,
					  DMA_BIDIRECTIONAL);
			if (dma & ODP_WRITE_ALLOWED_BIT) {
				struct page *head_page = compound_head(page);
				/*
				 * set_page_dirty prefers being called with
				 * the page lock. However, MMU notifiers are
				 * called sometimes with and sometimes without
				 * the lock. We rely on the umem_mutex instead
				 * to prevent other mmu notifiers from
				 * continuing and allowing the page mapping to
				 * be removed.
				 */
				set_page_dirty(head_page);
			}
			/* on demand pinning support */
			if (!umem->context->invalidate_range)
				put_page(page);
			umem->odp_data->page_list[idx] = NULL;
			umem->odp_data->dma_list[idx] = 0;
			umem->npages--;
		}
	}
	mutex_unlock(&umem->odp_data->umem_mutex);
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);

/* @last is not a part of the interval. See comment for function node_last. */
int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
				  u64 start, u64 last, umem_call_back cb,
				  bool blockable, void *cookie)
{
	int ret_val = 0;
	struct umem_odp_node *node, *next;
	struct ib_umem_odp *umem;

	if (unlikely(start == last))
		return ret_val;
	for (node = rbt_ib_umem_iter_first(root, start, last - 1);
			node; node = next) {
		/* TODO move the blockable decision up to the callback */
		if (!blockable)
			return -EAGAIN;
		next = rbt_ib_umem_iter_next(node, start, last - 1);
		umem = container_of(node, struct ib_umem_odp, interval_tree);
		ret_val = cb(umem->umem, start, last, cookie) || ret_val;
	}
	return ret_val;
}
EXPORT_SYMBOL(rbt_ib_umem_for_each_in_range);
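
/*
 * rbt_ib_umem_lookup - return the first ODP umem in the interval tree that
 * overlaps [addr, addr + length), or NULL when there is no overlap.
 */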
struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root,
				       u64 addr, u64 length)
{
	struct umem_odp_node *node;

	node = rbt_ib_umem_iter_first(root, addr, addr + length - 1);
	if (node)
		return container_of(node, struct ib_umem_odp, interval_tree);
	return NULL;
}
EXPORT_SYMBOL(rbt_ib_umem_lookup);