// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
 * Copyright(c) 2016 Intel Corporation.
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include <rdma/rdma_vt.h>
 * rvt_driver_mr_init - Init MR resources per driver
 * @rdi: rvt dev struct
 * Do any initialization needed when a driver registers with rdmavt.
 * Return: 0 on success or errno on failure
int rvt_driver_mr_init(struct rvt_dev_info *rdi)
	unsigned int lkey_table_size = rdi->dparms.lkey_table_size;
	 * The top lkey_table_size bits are used to index the
	 * table. The lower 8 bits can be owned by the user (copied from
	 * the LKEY). The remaining bits act as a generation number or tag.
	spin_lock_init(&rdi->lkey_table.lock);
	/* ensure generation is at least 4 bits */
	if (lkey_table_size > RVT_MAX_LKEY_TABLE_BITS) {
		rvt_pr_warn(rdi, "lkey bits %u too large, reduced to %u\n",
			    lkey_table_size, RVT_MAX_LKEY_TABLE_BITS);
		rdi->dparms.lkey_table_size = RVT_MAX_LKEY_TABLE_BITS;
		lkey_table_size = rdi->dparms.lkey_table_size;
	rdi->lkey_table.max = 1 << lkey_table_size;
	rdi->lkey_table.shift = 32 - lkey_table_size;
	lk_tab_size = rdi->lkey_table.max * sizeof(*rdi->lkey_table.table);
	rdi->lkey_table.table = (struct rvt_mregion __rcu **)
				vmalloc_node(lk_tab_size, rdi->dparms.node);
	if (!rdi->lkey_table.table)
	RCU_INIT_POINTER(rdi->dma_mr, NULL);
	for (i = 0; i < rdi->lkey_table.max; i++)
		RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL);
	rdi->dparms.props.max_mr = rdi->lkey_table.max;
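/*
 * Illustrative sketch, not part of the driver: how the sizing above works
 * out for a hypothetical lkey_table_size of 16. The helper below merely
 * recomputes the values rvt_driver_mr_init() assigns.
 */
static inline void rvt_example_lkey_table_sizing(void)
{
	unsigned int lkey_table_size = 16;	/* hypothetical value */
	u32 max = 1 << lkey_table_size;		/* 65536 table slots */
	u32 shift = 32 - lkey_table_size;	/* top 16 bits of an lkey index the table */
	size_t bytes = max * sizeof(struct rvt_mregion __rcu *); /* 512 KiB on 64-bit */

	(void)shift;
	(void)bytes;
}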
 * rvt_mr_exit - clean up MR
 * @rdi: rvt dev structure
 * Called when a driver unregisters from rdmavt or fails to register with it.
void rvt_mr_exit(struct rvt_dev_info *rdi)
	rvt_pr_err(rdi, "DMA MR not null!\n");
	vfree(rdi->lkey_table.table);
static void rvt_deinit_mregion(struct rvt_mregion *mr)
	percpu_ref_exit(&mr->refcount);
static void __rvt_mregion_complete(struct percpu_ref *ref)
	struct rvt_mregion *mr = container_of(ref, struct rvt_mregion,
					      refcount);
static int rvt_init_mregion(struct rvt_mregion *mr, struct ib_pd *pd,
			    int count, unsigned int percpu_flags)
	struct rvt_dev_info *dev = ib_to_rvt(pd->device);
	m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
	mr->map[i] = kzalloc_node(sizeof(*mr->map[0]), GFP_KERNEL,
				  dev->dparms.node);
	init_completion(&mr->comp);
	/* the initial reference counts the pointer returned to the user */
	if (percpu_ref_init(&mr->refcount, &__rvt_mregion_complete,
			    percpu_flags, GFP_KERNEL))
	atomic_set(&mr->lkey_invalid, 0);
	mr->max_segs = count;
	rvt_deinit_mregion(mr);
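/*
 * Illustrative sketch, not part of the driver: rvt_init_mregion() sizes the
 * two-level segment map so that each map chunk holds RVT_SEGSZ segments; a
 * region of @count segments therefore needs DIV_ROUND_UP(count, RVT_SEGSZ)
 * chunks, which is what the open-coded rounding above computes. The helper
 * name is hypothetical.
 */
static inline int rvt_example_map_chunks(int count)
{
	/* same rounding as (count + RVT_SEGSZ - 1) / RVT_SEGSZ */
	return DIV_ROUND_UP(count, RVT_SEGSZ);
}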
 * rvt_alloc_lkey - allocate an lkey
 * @mr: memory region that this lkey protects
 * @dma_region: 0->normal key, 1->restricted DMA key
 * Returns 0 if successful, otherwise returns -errno.
 * Increments mr reference count as required.
 * Sets the lkey field of mr for non-DMA regions.
static int rvt_alloc_lkey(struct rvt_mregion *mr, int dma_region)
	struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device);
	struct rvt_lkey_table *rkt = &dev->lkey_table;
	spin_lock_irqsave(&rkt->lock, flags);
	/* special case for dma_mr lkey == 0 */
	struct rvt_mregion *tmr;
	tmr = rcu_access_pointer(dev->dma_mr);
	mr->lkey_published = 1;
	/* Ensure published is written first */
	rcu_assign_pointer(dev->dma_mr, mr);
	/* Find the next available LKEY */
	if (!rcu_access_pointer(rkt->table[r]))
	r = (r + 1) & (rkt->max - 1);
	rkt->next = (r + 1) & (rkt->max - 1);
	 * Make sure lkey is never zero which is reserved to indicate an
	 * unrestricted LKEY.
	 * bits are capped to ensure enough bits for generation number
	mr->lkey = (r << (32 - dev->dparms.lkey_table_size)) |
		((((1 << (24 - dev->dparms.lkey_table_size)) - 1) & rkt->gen)
		 << 8);
	mr->lkey_published = 1;
	/* Ensure published is written first */
	rcu_assign_pointer(rkt->table[r], mr);
	spin_unlock_irqrestore(&rkt->lock, flags);
	spin_unlock_irqrestore(&rkt->lock, flags);
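/*
 * Illustrative sketch, not part of the driver: how rvt_alloc_lkey() packs an
 * lkey. The table index occupies the top lkey_table_size bits, the generation
 * sits below it (masked so it never spills into the low byte), and the low
 * 8 bits are left for the user. For a hypothetical 16-bit table, index 0x1234
 * with generation 0x2a yields lkey 0x12342a00.
 */
static inline u32 rvt_example_make_lkey(u32 r, u32 gen, u32 lkey_table_size)
{
	return (r << (32 - lkey_table_size)) |
	       ((((1 << (24 - lkey_table_size)) - 1) & gen) << 8);
}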
 * rvt_free_lkey - free an lkey
 * @mr: mr to free from tables
static void rvt_free_lkey(struct rvt_mregion *mr)
	struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device);
	struct rvt_lkey_table *rkt = &dev->lkey_table;
	spin_lock_irqsave(&rkt->lock, flags);
	if (mr->lkey_published) {
		mr->lkey_published = 0;
		/* ensure published is written before pointer */
		rcu_assign_pointer(dev->dma_mr, NULL);
	if (!mr->lkey_published)
	r = lkey >> (32 - dev->dparms.lkey_table_size);
	mr->lkey_published = 0;
	/* ensure published is written before pointer */
	rcu_assign_pointer(rkt->table[r], NULL);
	spin_unlock_irqrestore(&rkt->lock, flags);
	percpu_ref_kill(&mr->refcount);
static struct rvt_mr *__rvt_alloc_mr(int count, struct ib_pd *pd)
	/* Allocate struct plus pointers to first level page tables. */
	m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
	mr = kzalloc(struct_size(mr, mr.map, m), GFP_KERNEL);
	rval = rvt_init_mregion(&mr->mr, pd, count, 0);
	 * ib_reg_phys_mr() will initialize mr->ibmr except for
	 * lkey and rkey.
	rval = rvt_alloc_lkey(&mr->mr, 0);
	mr->ibmr.lkey = mr->mr.lkey;
	mr->ibmr.rkey = mr->mr.lkey;
	rvt_deinit_mregion(&mr->mr);
static void __rvt_free_mr(struct rvt_mr *mr)
	rvt_free_lkey(&mr->mr);
	rvt_deinit_mregion(&mr->mr);
 * rvt_get_dma_mr - get a DMA memory region
 * @pd: protection domain for this memory region
 * Return: the memory region on success, otherwise returns an errno.
struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc)
	if (ibpd_to_rvtpd(pd)->user)
		return ERR_PTR(-EPERM);
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	ret = ERR_PTR(-ENOMEM);
	rval = rvt_init_mregion(&mr->mr, pd, 0, 0);
	rval = rvt_alloc_lkey(&mr->mr, 1);
	mr->mr.access_flags = acc;
	rvt_deinit_mregion(&mr->mr);
 * rvt_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @start: starting userspace address
 * @length: length of region to register
 * @virt_addr: associated virtual address
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the driver
 * Return: the memory region on success, otherwise returns an errno.
struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int mr_access_flags,
			      struct ib_udata *udata)
	struct ib_umem *umem;
	struct sg_page_iter sg_iter;
		return ERR_PTR(-EINVAL);
	umem = ib_umem_get(pd->device, start, length, mr_access_flags);
	n = ib_umem_num_pages(umem);
	mr = __rvt_alloc_mr(n, pd);
	ret = (struct ib_mr *)mr;
	mr->mr.user_base = start;
	mr->mr.iova = virt_addr;
	mr->mr.length = length;
	mr->mr.offset = ib_umem_offset(umem);
	mr->mr.access_flags = mr_access_flags;
	mr->mr.page_shift = PAGE_SHIFT;
	for_each_sgtable_page (&umem->sgt_append.sgt, &sg_iter, 0) {
		vaddr = page_address(sg_page_iter_page(&sg_iter));
		ret = ERR_PTR(-EINVAL);
		mr->mr.map[m]->segs[n].vaddr = vaddr;
		mr->mr.map[m]->segs[n].length = PAGE_SIZE;
		trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr, PAGE_SIZE);
		if (++n == RVT_SEGSZ) {
	ib_umem_release(umem);
 * rvt_dereg_clean_qp_cb - callback from iterator
 * @v: the mregion (as u64)
 * This routine fields the callback for all QPs; for QPs in the same PD
 * as the MR it calls rvt_qp_mr_clean() to potentially clean up references.
static void rvt_dereg_clean_qp_cb(struct rvt_qp *qp, u64 v)
	struct rvt_mregion *mr = (struct rvt_mregion *)v;
	/* skip PDs that are not ours */
	if (mr->pd != qp->ibqp.pd)
	rvt_qp_mr_clean(qp, mr->lkey);
 * rvt_dereg_clean_qps - find QPs for reference cleanup
 * @mr: the MR that is being deregistered
 * This routine iterates RC QPs looking for references
 * to the lkey noted in mr.
static void rvt_dereg_clean_qps(struct rvt_mregion *mr)
	struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device);
	rvt_qp_iter(rdi, (u64)mr, rvt_dereg_clean_qp_cb);
 * rvt_check_refs - check references
 * @t: the caller identification
 * This routine checks whether an MR still holds references while it is
 * being deregistered.
 * If the count is non-zero, a cleanup routine is called and then we wait
 * up to a timeout for the count to drop to zero.
static int rvt_check_refs(struct rvt_mregion *mr, const char *t)
	unsigned long timeout;
	struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device);
	rvt_dereg_clean_qps(mr);
	/* @mr was indexed on RCU-protected @lkey_table */
	timeout = wait_for_completion_timeout(&mr->comp, 5 * HZ);
		   "%s timeout mr %p pd %p lkey %x refcount %ld\n",
		   t, mr, mr->pd, mr->lkey,
		   atomic_long_read(&mr->refcount.data->count));
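/*
 * Illustrative sketch, not part of the driver: the drain pattern
 * rvt_check_refs() relies on. rvt_free_lkey() has already killed the MR's
 * percpu_ref and the caller drops the initial reference with rvt_put_mr();
 * once the count reaches zero, __rvt_mregion_complete() completes mr->comp
 * and the wait below returns non-zero. The helper name is hypothetical.
 */
static inline bool rvt_example_mr_drained(struct rvt_mregion *mr)
{
	/* wait_for_completion_timeout() returns remaining jiffies, 0 on timeout */
	return wait_for_completion_timeout(&mr->comp, 5 * HZ) != 0;
}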
 * rvt_mr_has_lkey - check whether the MR matches the lkey
bool rvt_mr_has_lkey(struct rvt_mregion *mr, u32 lkey)
	return mr && lkey == mr->lkey;
 * rvt_ss_has_lkey - check whether an lkey is used by an SGE state
 * This code tests for an MR with the given lkey in the indicated
 * SGE state.
bool rvt_ss_has_lkey(struct rvt_sge_state *ss, u32 lkey)
	rval = rvt_mr_has_lkey(ss->sge.mr, lkey);
	for (i = 0; !rval && i < ss->num_sge - 1; i++)
		rval = rvt_mr_has_lkey(ss->sg_list[i].mr, lkey);
 * rvt_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 * @udata: unused by the driver
 * Note that this is called to free MRs created by rvt_get_dma_mr()
 * or rvt_reg_user_mr().
 * Returns 0 on success.
int rvt_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
	struct rvt_mr *mr = to_imr(ibmr);
	rvt_free_lkey(&mr->mr);
	rvt_put_mr(&mr->mr); /* will set completion if last */
	ret = rvt_check_refs(&mr->mr, __func__);
	rvt_deinit_mregion(&mr->mr);
	ib_umem_release(mr->umem);
 * rvt_alloc_mr - Allocate a memory region usable with fast register work requests
 * @pd: protection domain for this memory region
 * @mr_type: mem region type
 * @max_num_sg: Max number of segments allowed
 * Return: the memory region on success, otherwise return an errno.
struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			   u32 max_num_sg)
	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);
	mr = __rvt_alloc_mr(max_num_sg, pd);
	return (struct ib_mr *)mr;
 * rvt_set_page - page assignment function called by ib_sg_to_pages
 * @ibmr: memory region
 * @addr: dma address of mapped page
 * Return: 0 on success
static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
	struct rvt_mr *mr = to_imr(ibmr);
	u32 ps = 1 << mr->mr.page_shift;
	u32 mapped_segs = mr->mr.length >> mr->mr.page_shift;
	if (unlikely(mapped_segs == mr->mr.max_segs))
	m = mapped_segs / RVT_SEGSZ;
	n = mapped_segs % RVT_SEGSZ;
	mr->mr.map[m]->segs[n].vaddr = (void *)addr;
	mr->mr.map[m]->segs[n].length = ps;
	trace_rvt_mr_page_seg(&mr->mr, m, n, (void *)addr, ps);
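/*
 * Illustrative sketch, not part of the driver: where rvt_set_page() stores
 * the next page. mapped_segs counts the pages already placed, so the new
 * page goes into map chunk mapped_segs / RVT_SEGSZ at slot
 * mapped_segs % RVT_SEGSZ. The helper name is hypothetical.
 */
static inline void rvt_example_seg_index(u32 mapped_segs, u32 *m, u32 *n)
{
	*m = mapped_segs / RVT_SEGSZ;	/* which mr->map[] chunk */
	*n = mapped_segs % RVT_SEGSZ;	/* which segs[] slot in that chunk */
}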
 * rvt_map_mr_sg - map an sg list and assign it to the memory region
 * @ibmr: memory region
 * @sg: dma mapped scatterlist
 * @sg_nents: number of entries in sg
 * @sg_offset: offset in bytes into sg
 * Overwrites the rvt_mr length with the length calculated by ib_sg_to_pages.
 * Return: number of sg elements mapped to the memory region
int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
		  int sg_nents, unsigned int *sg_offset)
	struct rvt_mr *mr = to_imr(ibmr);
	mr->mr.page_shift = PAGE_SHIFT;
	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
	mr->mr.user_base = ibmr->iova;
	mr->mr.iova = ibmr->iova;
	mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
	mr->mr.length = (size_t)ibmr->length;
	trace_rvt_map_mr_sg(ibmr, sg_nents, sg_offset);
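/*
 * Illustrative sketch, not part of the driver: how a kernel ULP would reach
 * rvt_alloc_mr() and rvt_map_mr_sg() through the core verbs API. Error
 * handling for ib_map_mr_sg() is omitted and the scatterlist is assumed to
 * be DMA mapped already; the helper name is hypothetical.
 */
static inline struct ib_mr *rvt_example_ulp_reg(struct ib_pd *pd,
						struct scatterlist *sg,
						int sg_nents)
{
	struct ib_mr *ibmr;

	ibmr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, sg_nents);
	if (IS_ERR(ibmr))
		return ibmr;
	/* fills the MR's segment map one page at a time via rvt_set_page() */
	ib_map_mr_sg(ibmr, sg, sg_nents, NULL, PAGE_SIZE);
	return ibmr;
}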
 * rvt_fast_reg_mr - fast register physical MR
 * @qp: the queue pair where the work request comes from
 * @ibmr: the memory region to be registered
 * @key: updated key for this memory region
 * @access: access flags for this memory region
 * Returns 0 on success.
int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
		    int access)
	struct rvt_mr *mr = to_imr(ibmr);
	if (qp->ibqp.pd != mr->mr.pd)
	/* not applicable to dma MR or user MR */
	if (!mr->mr.lkey || mr->umem)
	if ((key & 0xFFFFFF00) != (mr->mr.lkey & 0xFFFFFF00))
	mr->mr.access_flags = access;
	mr->mr.iova = ibmr->iova;
	atomic_set(&mr->mr.lkey_invalid, 0);
EXPORT_SYMBOL(rvt_fast_reg_mr);
 * rvt_invalidate_rkey - invalidate an MR rkey
 * @qp: queue pair associated with the invalidate op
 * @rkey: rkey to invalidate
 * Returns 0 on success.
int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey)
	struct rvt_dev_info *dev = ib_to_rvt(qp->ibqp.device);
	struct rvt_lkey_table *rkt = &dev->lkey_table;
	struct rvt_mregion *mr;
	mr = rcu_dereference(
		rkt->table[(rkey >> (32 - dev->dparms.lkey_table_size))]);
	if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
	atomic_set(&mr->lkey_invalid, 1);
EXPORT_SYMBOL(rvt_invalidate_rkey);
 * rvt_sge_adjacent - is isge compressible
 * @last_sge: last outgoing SGE written
 * If adjacent, last_sge is updated to include the additional length.
 * Return: true if isge is adjacent to last sge
static inline bool rvt_sge_adjacent(struct rvt_sge *last_sge,
				    struct ib_sge *sge)
	if (last_sge && sge->lkey == last_sge->mr->lkey &&
	    ((uint64_t)(last_sge->vaddr + last_sge->length) == sge->addr)) {
		if (unlikely((sge->addr - last_sge->mr->user_base +
			      sge->length > last_sge->mr->length)))
			return false; /* overrun, caller will catch */
		last_sge->length += sge->length;
		last_sge->sge_length += sge->length;
		trace_rvt_sge_adjacent(last_sge, sge);
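/*
 * Illustrative sketch, not part of the driver: two ib_sge entries that
 * rvt_sge_adjacent() would coalesce. They reference the same lkey and the
 * second starts exactly where the first ends, so only last_sge's lengths
 * grow and no new reference is taken. All values are hypothetical.
 */
static inline void rvt_example_adjacent_sges(struct ib_sge *a, struct ib_sge *b)
{
	a->lkey = 0x12342a00;
	a->addr = 0x1000;
	a->length = 0x1000;

	b->lkey = a->lkey;		/* same MR */
	b->addr = a->addr + a->length;	/* contiguous with a */
	b->length = 0x1000;
}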
 * rvt_lkey_ok - check IB SGE for validity and initialize
 * @rkt: table containing lkey to check SGE against
 * @pd: protection domain
 * @isge: outgoing internal SGE
 * @last_sge: last outgoing SGE written
 * Check the IB SGE for validity and initialize our internal version of it.
 * Increments the reference count when a new sge is stored.
 * Return: 0 if compressed, 1 if added, otherwise returns -errno.
int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
		struct rvt_sge *isge, struct rvt_sge *last_sge,
		struct ib_sge *sge, int acc)
	struct rvt_mregion *mr;
	 * We use LKEY == zero for kernel virtual addresses
	 * (see rvt_get_dma_mr()).
	if (sge->lkey == 0) {
		struct rvt_dev_info *dev = ib_to_rvt(pd->ibpd.device);
		if (rvt_sge_adjacent(last_sge, sge))
		mr = rcu_dereference(dev->dma_mr);
		isge->vaddr = (void *)sge->addr;
		isge->length = sge->length;
		isge->sge_length = sge->length;
	if (rvt_sge_adjacent(last_sge, sge))
	mr = rcu_dereference(rkt->table[sge->lkey >> rkt->shift]);
	if (!READ_ONCE(mr->lkey_published))
	if (unlikely(atomic_read(&mr->lkey_invalid) ||
		     mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
	off = sge->addr - mr->user_base;
	if (unlikely(sge->addr < mr->user_base ||
		     off + sge->length > mr->length ||
		     (mr->access_flags & acc) != acc))
	if (mr->page_shift) {
		 * page sizes are a uniform power of 2, so no loop is necessary;
		 * entries_spanned_by_off is the number of times the loop below
		 * would have executed.
		size_t entries_spanned_by_off;
		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off / RVT_SEGSZ;
		n = entries_spanned_by_off % RVT_SEGSZ;
	while (off >= mr->map[m]->segs[n].length) {
		off -= mr->map[m]->segs[n].length;
		if (n >= RVT_SEGSZ) {
	isge->vaddr = mr->map[m]->segs[n].vaddr + off;
	isge->length = mr->map[m]->segs[n].length - off;
	isge->sge_length = sge->length;
	trace_rvt_sge_new(isge, sge);
EXPORT_SYMBOL(rvt_lkey_ok);
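/*
 * Illustrative sketch, not part of the driver: the fast-path offset
 * calculation used when every segment shares one page size. With a 4 KiB
 * page_shift and an offset of 0x6800 into the region, six whole segments
 * are skipped and 0x800 bytes remain within the seventh. The helper name
 * is hypothetical.
 */
static inline void rvt_example_span(u8 page_shift, size_t off,
				    size_t *m, size_t *n, size_t *rem)
{
	size_t entries_spanned_by_off = off >> page_shift;

	*rem = off - (entries_spanned_by_off << page_shift);
	*m = entries_spanned_by_off / RVT_SEGSZ;	/* mr->map[] chunk */
	*n = entries_spanned_by_off % RVT_SEGSZ;	/* segs[] slot in that chunk */
}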
 * rvt_rkey_ok - check the IB virtual address, length, and RKEY
 * @qp: qp for validation
 * @len: length of data
 * @vaddr: virtual address to place data
 * @rkey: rkey to check
 * Return: 1 if successful, otherwise 0.
 * Increments the reference count upon success.
int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc)
	struct rvt_dev_info *dev = ib_to_rvt(qp->ibqp.device);
	struct rvt_lkey_table *rkt = &dev->lkey_table;
	struct rvt_mregion *mr;
	 * We use RKEY == zero for kernel virtual addresses
	 * (see rvt_get_dma_mr()).
		struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd);
		struct rvt_dev_info *rdi = ib_to_rvt(pd->ibpd.device);
		mr = rcu_dereference(rdi->dma_mr);
		sge->vaddr = (void *)vaddr;
		sge->sge_length = len;
	mr = rcu_dereference(rkt->table[rkey >> rkt->shift]);
	/* ensure mr read is before test */
	if (!READ_ONCE(mr->lkey_published))
	if (unlikely(atomic_read(&mr->lkey_invalid) ||
		     mr->lkey != rkey || qp->ibqp.pd != mr->pd))
	off = vaddr - mr->iova;
	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
		     (mr->access_flags & acc) == 0))
	if (mr->page_shift) {
		 * page sizes are a uniform power of 2, so no loop is necessary;
		 * entries_spanned_by_off is the number of times the loop below
		 * would have executed.
		size_t entries_spanned_by_off;
		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off / RVT_SEGSZ;
		n = entries_spanned_by_off % RVT_SEGSZ;
	while (off >= mr->map[m]->segs[n].length) {
		off -= mr->map[m]->segs[n].length;
		if (n >= RVT_SEGSZ) {
	sge->vaddr = mr->map[m]->segs[n].vaddr + off;
	sge->length = mr->map[m]->segs[n].length - off;
	sge->sge_length = len;
EXPORT_SYMBOL(rvt_rkey_ok);