/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include <rdma/rdma_vt.h>
#include "vt.h"
#include "mr.h"
#include "trace.h"
/**
 * rvt_driver_mr_init - Init MR resources per driver
 * @rdi: rvt dev struct
 *
 * Do any initialization needed when a driver registers with rdmavt.
 *
 * Return: 0 on success or errno on failure
 */
int rvt_driver_mr_init(struct rvt_dev_info *rdi)
{
	unsigned int lkey_table_size = rdi->dparms.lkey_table_size;
	unsigned int lk_tab_size;
	int i;

	/*
	 * The top hfi1_lkey_table_size bits are used to index the
	 * table. The lower 8 bits can be owned by the user (copied from
	 * the LKEY). The remaining bits act as a generation number or tag.
	 */
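	/*
	 * Worked example (illustrative sizing, not a driver requirement):
	 * with lkey_table_size = 16 the table below holds
	 * 1 << 16 = 65536 entries and shift = 32 - 16 = 16, so the top
	 * 16 bits of an lkey select a table slot, the low 8 bits remain
	 * user-owned, and the 8 bits in between carry the generation tag.
	 */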
	if (!lkey_table_size)
		return -EINVAL;

	spin_lock_init(&rdi->lkey_table.lock);

	/* ensure generation is at least 4 bits */
	if (lkey_table_size > RVT_MAX_LKEY_TABLE_BITS) {
		rvt_pr_warn(rdi, "lkey bits %u too large, reduced to %u\n",
			    lkey_table_size, RVT_MAX_LKEY_TABLE_BITS);
		rdi->dparms.lkey_table_size = RVT_MAX_LKEY_TABLE_BITS;
		lkey_table_size = rdi->dparms.lkey_table_size;
	}
	rdi->lkey_table.max = 1 << lkey_table_size;
	rdi->lkey_table.shift = 32 - lkey_table_size;
	lk_tab_size = rdi->lkey_table.max * sizeof(*rdi->lkey_table.table);
	rdi->lkey_table.table = (struct rvt_mregion __rcu **)
			       vmalloc_node(lk_tab_size, rdi->dparms.node);
	if (!rdi->lkey_table.table)
		return -ENOMEM;

	RCU_INIT_POINTER(rdi->dma_mr, NULL);
	for (i = 0; i < rdi->lkey_table.max; i++)
		RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL);

	rdi->dparms.props.max_mr = rdi->lkey_table.max;
	rdi->dparms.props.max_fmr = rdi->lkey_table.max;
	return 0;
}
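/*
 * A minimal driver-side sketch (illustrative only, compiled out): rdmavt
 * drivers such as hfi1 or qib size the lkey table through dparms before
 * registering, and rvt_register_device() then calls rvt_driver_mr_init()
 * on their behalf. The field values below are assumptions, not
 * requirements of this API.
 */
#if 0
	rdi->dparms.lkey_table_size = 16;	/* 1 << 16 table slots */
	rdi->dparms.node = NUMA_NO_NODE;	/* NUMA node for allocations */
	ret = rvt_register_device(rdi);		/* invokes rvt_driver_mr_init() */
#endif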
/**
 * rvt_mr_exit - clean up MR
 * @rdi: rvt dev structure
 *
 * called when drivers have unregistered or perhaps failed to register with us
 */
void rvt_mr_exit(struct rvt_dev_info *rdi)
{
	if (rdi->dma_mr)
		rvt_pr_err(rdi, "DMA MR not null!\n");

	vfree(rdi->lkey_table.table);
}
static void rvt_deinit_mregion(struct rvt_mregion *mr)
{
	int i = mr->mapsz;

	mr->mapsz = 0;
	while (i)
		kfree(mr->map[--i]);
	percpu_ref_exit(&mr->refcount);
}
static void __rvt_mregion_complete(struct percpu_ref *ref)
{
	struct rvt_mregion *mr = container_of(ref, struct rvt_mregion,
					      refcount);

	complete(&mr->comp);
}
static int rvt_init_mregion(struct rvt_mregion *mr, struct ib_pd *pd,
			    int count, unsigned int percpu_flags)
{
	int m, i = 0;
	struct rvt_dev_info *dev = ib_to_rvt(pd->device);

	mr->mapsz = 0;
	m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
	for (; i < m; i++) {
		mr->map[i] = kzalloc_node(sizeof(*mr->map[0]), GFP_KERNEL,
					  dev->dparms.node);
		if (!mr->map[i])
			goto bail;
		mr->mapsz++;
	}
	init_completion(&mr->comp);
	/* count returning the ptr to user */
	if (percpu_ref_init(&mr->refcount, &__rvt_mregion_complete,
			    percpu_flags, GFP_KERNEL))
		goto bail;
	atomic_set(&mr->lkey_invalid, 0);
	mr->pd = pd;
	mr->max_segs = count;
	return 0;
bail:
	rvt_deinit_mregion(mr);
	return -ENOMEM;
}
/**
 * rvt_alloc_lkey - allocate an lkey
 * @mr: memory region that this lkey protects
 * @dma_region: 0->normal key, 1->restricted DMA key
 *
 * Returns 0 if successful, otherwise returns -errno.
 *
 * Increments mr reference count as required.
 *
 * Sets the lkey field of mr for non-dma regions.
 */
static int rvt_alloc_lkey(struct rvt_mregion *mr, int dma_region)
{
	unsigned long flags;
	u32 r;
	u32 n;
	int ret = 0;
	struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device);
	struct rvt_lkey_table *rkt = &dev->lkey_table;

	rvt_get_mr(mr);
	spin_lock_irqsave(&rkt->lock, flags);

	/* special case for dma_mr lkey == 0 */
	if (dma_region) {
		struct rvt_mregion *tmr;

		tmr = rcu_access_pointer(dev->dma_mr);
		if (!tmr) {
			mr->lkey_published = 1;
			/* Ensure published is written first */
			rcu_assign_pointer(dev->dma_mr, mr);
			rvt_get_mr(mr);
		}
		goto success;
	}
	/* Find the next available LKEY */
	r = rkt->next;
	n = r;
	for (;;) {
		if (!rcu_access_pointer(rkt->table[r]))
			break;
		r = (r + 1) & (rkt->max - 1);
		if (r == n)
			goto bail;
	}
	rkt->next = (r + 1) & (rkt->max - 1);
	/*
	 * Make sure lkey is never zero which is reserved to indicate an
	 * unrestricted LKEY.
	 */
	rkt->gen++;
	/*
	 * bits are capped to ensure enough bits for generation number
	 */
	mr->lkey = (r << (32 - dev->dparms.lkey_table_size)) |
		((((1 << (24 - dev->dparms.lkey_table_size)) - 1) & rkt->gen)
		 << 8);
	if (mr->lkey == 0) {
		mr->lkey |= 1 << 8;
		rkt->gen++;
	}
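	/*
	 * Worked example of the layout above (illustrative numbers): with
	 * lkey_table_size = 16, slot r = 5 and gen = 3 the key becomes
	 *
	 *	(5 << 16) | ((((1 << 8) - 1) & 3) << 8) = 0x00050300
	 *
	 * i.e. bits 31..16 index the table, bits 15..8 hold the capped
	 * generation tag, and bits 7..0 stay free for the user.
	 */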
	mr->lkey_published = 1;
	/* Ensure published is written first */
	rcu_assign_pointer(rkt->table[r], mr);
success:
	spin_unlock_irqrestore(&rkt->lock, flags);
out:
	return ret;
bail:
	rvt_put_mr(mr);
	spin_unlock_irqrestore(&rkt->lock, flags);
	ret = -ENOMEM;
	goto out;
}
/**
 * rvt_free_lkey - free an lkey
 * @mr: mr to free from tables
 */
static void rvt_free_lkey(struct rvt_mregion *mr)
{
	unsigned long flags;
	u32 lkey = mr->lkey;
	u32 r;
	struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device);
	struct rvt_lkey_table *rkt = &dev->lkey_table;
	int freed = 0;

	spin_lock_irqsave(&rkt->lock, flags);
	if (!lkey) {
		if (mr->lkey_published) {
			mr->lkey_published = 0;
			/* ensure published is written before pointer */
			rcu_assign_pointer(dev->dma_mr, NULL);
			rvt_put_mr(mr);
		}
	} else {
		if (!mr->lkey_published)
			goto out;
		r = lkey >> (32 - dev->dparms.lkey_table_size);
		mr->lkey_published = 0;
		/* ensure published is written before pointer */
		rcu_assign_pointer(rkt->table[r], NULL);
	}
	freed++;
out:
	spin_unlock_irqrestore(&rkt->lock, flags);
	if (freed)
		percpu_ref_kill(&mr->refcount);
}
static struct rvt_mr *__rvt_alloc_mr(int count, struct ib_pd *pd)
{
	struct rvt_mr *mr;
	int rval = -ENOMEM;
	int m;

	/* Allocate struct plus pointers to first level page tables. */
	m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
	mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
	if (!mr)
		goto bail;

	rval = rvt_init_mregion(&mr->mr, pd, count, 0);
	if (rval)
		goto bail;
	/*
	 * ib_reg_phys_mr() will initialize mr->ibmr except for
	 * lkey and rkey.
	 */
	rval = rvt_alloc_lkey(&mr->mr, 0);
	if (rval)
		goto bail_mregion;
	mr->ibmr.lkey = mr->mr.lkey;
	mr->ibmr.rkey = mr->mr.lkey;
done:
	return mr;

bail_mregion:
	rvt_deinit_mregion(&mr->mr);
bail:
	kfree(mr);
	mr = ERR_PTR(rval);
	goto done;
}
static void __rvt_free_mr(struct rvt_mr *mr)
{
	rvt_free_lkey(&mr->mr);
	rvt_deinit_mregion(&mr->mr);
	kfree(mr);
}
/**
 * rvt_get_dma_mr - get a DMA memory region
 * @pd: protection domain for this memory region
 * @acc: access flags
 *
 * Return: the memory region on success, otherwise returns an errno.
 * Note that all DMA addresses should be created via the functions in
 * struct dma_virt_ops.
 */
struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct rvt_mr *mr;
	struct ib_mr *ret;
	int rval;

	if (ibpd_to_rvtpd(pd)->user)
		return ERR_PTR(-EPERM);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	rval = rvt_init_mregion(&mr->mr, pd, 0, 0);
	if (rval) {
		ret = ERR_PTR(rval);
		goto bail;
	}

	rval = rvt_alloc_lkey(&mr->mr, 1);
	if (rval) {
		ret = ERR_PTR(rval);
		goto bail_mregion;
	}

	mr->mr.access_flags = acc;
	ret = &mr->ibmr;
done:
	return ret;

bail_mregion:
	rvt_deinit_mregion(&mr->mr);
bail:
	kfree(mr);
	goto done;
}
/**
 * rvt_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @start: starting userspace address
 * @length: length of region to register
 * @virt_addr: associated virtual address
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the driver
 *
 * Return: the memory region on success, otherwise returns an errno.
 */
struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int mr_access_flags,
			      struct ib_udata *udata)
{
	struct rvt_mr *mr;
	struct ib_umem *umem;
	struct scatterlist *sg;
	int n, m, entry;
	struct ib_mr *ret;

	if (length == 0)
		return ERR_PTR(-EINVAL);

	umem = ib_umem_get(pd->uobject->context, start, length,
			   mr_access_flags, 0);
	if (IS_ERR(umem))
		return (void *)umem;

	n = umem->nmap;

	mr = __rvt_alloc_mr(n, pd);
	if (IS_ERR(mr)) {
		ret = (struct ib_mr *)mr;
		goto bail_umem;
	}

	mr->mr.user_base = start;
	mr->mr.iova = virt_addr;
	mr->mr.length = length;
	mr->mr.offset = ib_umem_offset(umem);
	mr->mr.access_flags = mr_access_flags;
	mr->umem = umem;

	mr->mr.page_shift = umem->page_shift;
	m = 0;
	n = 0;
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		void *vaddr;

		vaddr = page_address(sg_page(sg));
		if (!vaddr) {
			ret = ERR_PTR(-EINVAL);
			goto bail_inval;
		}
		mr->mr.map[m]->segs[n].vaddr = vaddr;
		mr->mr.map[m]->segs[n].length = BIT(umem->page_shift);
		trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr,
				      BIT(umem->page_shift));
		n++;
		if (n == RVT_SEGSZ) {
			m++;
			n = 0;
		}
	}
	return &mr->ibmr;

bail_inval:
	__rvt_free_mr(mr);

bail_umem:
	ib_umem_release(umem);

	return ret;
}
/**
 * rvt_dereg_clean_qp_cb - callback from iterator
 * @qp: the qp
 * @v: the mregion (as u64)
 *
 * This routine fields the callback for all QPs and
 * for QPs in the same PD as the MR will call the
 * rvt_qp_mr_clean() to potentially cleanup references.
 */
static void rvt_dereg_clean_qp_cb(struct rvt_qp *qp, u64 v)
{
	struct rvt_mregion *mr = (struct rvt_mregion *)v;

	/* skip PDs that are not ours */
	if (mr->pd != qp->ibqp.pd)
		return;
	rvt_qp_mr_clean(qp, mr->lkey);
}
/**
 * rvt_dereg_clean_qps - find QPs for reference cleanup
 * @mr: the MR that is being deregistered
 *
 * This routine iterates RC QPs looking for references
 * to the lkey noted in mr.
 */
static void rvt_dereg_clean_qps(struct rvt_mregion *mr)
{
	struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device);

	rvt_qp_iter(rdi, (u64)mr, rvt_dereg_clean_qp_cb);
}
/**
 * rvt_check_refs - check references
 * @mr: the mregion
 * @t: the caller identification
 *
 * This routine checks MRs holding a reference during
 * de-registration.
 *
 * If the count is non-zero, the code calls a clean routine then
 * waits for the timeout for the count to zero.
 */
static int rvt_check_refs(struct rvt_mregion *mr, const char *t)
{
	unsigned long timeout;
	struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device);

	if (mr->lkey) {
		/* avoid dma mr */
		rvt_dereg_clean_qps(mr);
		/* @mr was indexed on rcu protected @lkey_table */
		synchronize_rcu();
	}

	timeout = wait_for_completion_timeout(&mr->comp, 5 * HZ);
	if (!timeout) {
		rvt_pr_err(rdi,
			   "%s timeout mr %p pd %p lkey %x refcount %ld\n",
			   t, mr, mr->pd, mr->lkey,
			   atomic_long_read(&mr->refcount.count));
		rvt_get_mr(mr);
		return -EBUSY;
	}
	return 0;
}
/**
 * rvt_mr_has_lkey - is MR being used by lkey
 * @mr: the mregion
 * @lkey: the lkey
 */
bool rvt_mr_has_lkey(struct rvt_mregion *mr, u32 lkey)
{
	return mr && lkey == mr->lkey;
}
/**
 * rvt_ss_has_lkey - is lkey referenced by the sge state
 * @ss: the sge state
 * @lkey: the lkey
 *
 * This code tests for an MR with a matching lkey in the indicated
 * sge state.
 */
bool rvt_ss_has_lkey(struct rvt_sge_state *ss, u32 lkey)
{
	int i;
	bool rval = false;

	if (!ss->num_sge)
		return rval;
	/* first one */
	rval = rvt_mr_has_lkey(ss->sge.mr, lkey);
	/* any others */
	for (i = 0; !rval && i < ss->num_sge - 1; i++)
		rval = rvt_mr_has_lkey(ss->sg_list[i].mr, lkey);
	return rval;
}
/**
 * rvt_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 *
 * Note that this is called to free MRs created by rvt_get_dma_mr()
 * or rvt_reg_user_mr().
 *
 * Returns 0 on success.
 */
int rvt_dereg_mr(struct ib_mr *ibmr)
{
	struct rvt_mr *mr = to_imr(ibmr);
	int ret;

	rvt_free_lkey(&mr->mr);

	rvt_put_mr(&mr->mr); /* will set completion if last */
	ret = rvt_check_refs(&mr->mr, __func__);
	if (ret)
		goto out;
	rvt_deinit_mregion(&mr->mr);
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);
out:
	return ret;
}
/**
 * rvt_alloc_mr - Allocate a memory region usable with fast register
 * work requests
 * @pd: protection domain for this memory region
 * @mr_type: mem region type
 * @max_num_sg: Max number of segments allowed
 *
 * Return: the memory region on success, otherwise return an errno.
 */
struct ib_mr *rvt_alloc_mr(struct ib_pd *pd,
			   enum ib_mr_type mr_type,
			   u32 max_num_sg)
{
	struct rvt_mr *mr;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = __rvt_alloc_mr(max_num_sg, pd);
	if (IS_ERR(mr))
		return (struct ib_mr *)mr;

	return &mr->ibmr;
}
/**
 * rvt_set_page - page assignment function called by ib_sg_to_pages
 * @ibmr: memory region
 * @addr: dma address of mapped page
 *
 * Return: 0 on success
 */
static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rvt_mr *mr = to_imr(ibmr);
	u32 ps = 1 << mr->mr.page_shift;
	u32 mapped_segs = mr->mr.length >> mr->mr.page_shift;
	int m, n;

	if (unlikely(mapped_segs == mr->mr.max_segs))
		return -ENOMEM;

	m = mapped_segs / RVT_SEGSZ;
	n = mapped_segs % RVT_SEGSZ;
	mr->mr.map[m]->segs[n].vaddr = (void *)addr;
	mr->mr.map[m]->segs[n].length = ps;
	trace_rvt_mr_page_seg(&mr->mr, m, n, (void *)addr, ps);
	mr->mr.length += ps;

	return 0;
}
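/*
 * Note on the two-level index math in rvt_set_page() (illustrative
 * numbers only, RVT_SEGSZ of 8 is a hypothetical value): if 11 pages
 * had already been mapped, the next page would land in
 * map[11 / 8]->segs[11 % 8], i.e. map[1]->segs[3]. The two-level
 * scheme keeps each first-level map allocation small.
 */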
/**
 * rvt_map_mr_sg - map sg list and set it in the memory region
 * @ibmr: memory region
 * @sg: dma mapped scatterlist
 * @sg_nents: number of entries in sg
 * @sg_offset: offset in bytes into sg
 *
 * Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
 *
 * Return: number of sg elements mapped to the memory region
 */
int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
		  int sg_nents, unsigned int *sg_offset)
{
	struct rvt_mr *mr = to_imr(ibmr);
	int ret;

	mr->mr.length = 0;
	mr->mr.page_shift = PAGE_SHIFT;
	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
	mr->mr.user_base = ibmr->iova;
	mr->mr.iova = ibmr->iova;
	mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
	mr->mr.length = (size_t)ibmr->length;

	return ret;
}
/**
 * rvt_fast_reg_mr - fast register physical MR
 * @qp: the queue pair where the work request comes from
 * @ibmr: the memory region to be registered
 * @key: updated key for this memory region
 * @access: access flags for this memory region
 *
 * Returns 0 on success.
 */
int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
		    int access)
{
	struct rvt_mr *mr = to_imr(ibmr);

	if (qp->ibqp.pd != mr->mr.pd)
		return -EACCES;

	/* not applicable to dma MR or user MR */
	if (!mr->mr.lkey || mr->umem)
		return -EINVAL;

	if ((key & 0xFFFFFF00) != (mr->mr.lkey & 0xFFFFFF00))
		return -EINVAL;

	ibmr->lkey = key;
	ibmr->rkey = key;
	mr->mr.lkey = key;
	mr->mr.access_flags = access;
	mr->mr.iova = ibmr->iova;
	atomic_set(&mr->mr.lkey_invalid, 0);

	return 0;
}
EXPORT_SYMBOL(rvt_fast_reg_mr);
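/*
 * Note on the key check in rvt_fast_reg_mr() (illustrative numbers):
 * only the low 8 user-owned bits of the key may change across a fast
 * register. If mr->mr.lkey is 0x00050300, then key 0x000503AB is
 * accepted while 0x000504AB is rejected, since bits 31..8 (table index
 * and generation tag) must match the allocated lkey.
 */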
/**
 * rvt_invalidate_rkey - invalidate an MR rkey
 * @qp: queue pair associated with the invalidate op
 * @rkey: rkey to invalidate
 *
 * Returns 0 on success.
 */
int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey)
{
	struct rvt_dev_info *dev = ib_to_rvt(qp->ibqp.device);
	struct rvt_lkey_table *rkt = &dev->lkey_table;
	struct rvt_mregion *mr;

	if (rkey == 0)
		return -EINVAL;

	rcu_read_lock();
	mr = rcu_dereference(
		rkt->table[(rkey >> (32 - dev->dparms.lkey_table_size))]);
	if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
		goto bail;

	atomic_set(&mr->lkey_invalid, 1);
	rcu_read_unlock();
	return 0;

bail:
	rcu_read_unlock();
	return -EINVAL;
}
EXPORT_SYMBOL(rvt_invalidate_rkey);
/**
 * rvt_alloc_fmr - allocate a fast memory region
 * @pd: the protection domain for this memory region
 * @mr_access_flags: access flags for this memory region
 * @fmr_attr: fast memory region attributes
 *
 * Return: the memory region on success, otherwise returns an errno.
 */
struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			     struct ib_fmr_attr *fmr_attr)
{
	struct rvt_fmr *fmr;
	int m;
	struct ib_fmr *ret;
	int rval = -ENOMEM;

	/* Allocate struct plus pointers to first level page tables. */
	m = (fmr_attr->max_pages + RVT_SEGSZ - 1) / RVT_SEGSZ;
	fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
	if (!fmr)
		goto bail;

	rval = rvt_init_mregion(&fmr->mr, pd, fmr_attr->max_pages,
				PERCPU_REF_INIT_ATOMIC);
	if (rval)
		goto bail;

	/*
	 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
	 * rkey.
	 */
	rval = rvt_alloc_lkey(&fmr->mr, 0);
	if (rval)
		goto bail_mregion;
	fmr->ibfmr.rkey = fmr->mr.lkey;
	fmr->ibfmr.lkey = fmr->mr.lkey;
	/*
	 * Resources are allocated but no valid mapping (RKEY can't be
	 * used).
	 */
	fmr->mr.access_flags = mr_access_flags;
	fmr->mr.max_segs = fmr_attr->max_pages;
	fmr->mr.page_shift = fmr_attr->page_shift;

	ret = &fmr->ibfmr;
done:
	return ret;

bail_mregion:
	rvt_deinit_mregion(&fmr->mr);
bail:
	kfree(fmr);
	ret = ERR_PTR(rval);
	goto done;
}
/**
 * rvt_map_phys_fmr - set up a fast memory region
 * @ibfmr: the fast memory region to set up
 * @page_list: the list of pages to associate with the fast memory region
 * @list_len: the number of pages to associate with the fast memory region
 * @iova: the virtual address of the start of the fast memory region
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success
 */
int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		     int list_len, u64 iova)
{
	struct rvt_fmr *fmr = to_ifmr(ibfmr);
	struct rvt_lkey_table *rkt;
	unsigned long flags;
	int m, n;
	unsigned long i;
	u32 ps;
	struct rvt_dev_info *rdi = ib_to_rvt(ibfmr->device);

	i = atomic_long_read(&fmr->mr.refcount.count);
	if (i > 2)
		return -EBUSY;

	if (list_len > fmr->mr.max_segs)
		return -EINVAL;

	rkt = &rdi->lkey_table;
	spin_lock_irqsave(&rkt->lock, flags);
	fmr->mr.user_base = iova;
	fmr->mr.iova = iova;
	ps = 1 << fmr->mr.page_shift;
	fmr->mr.length = list_len * ps;
	m = 0;
	n = 0;
	for (i = 0; i < list_len; i++) {
		fmr->mr.map[m]->segs[n].vaddr = (void *)page_list[i];
		fmr->mr.map[m]->segs[n].length = ps;
		trace_rvt_mr_fmr_seg(&fmr->mr, m, n, (void *)page_list[i], ps);
		if (++n == RVT_SEGSZ) {
			m++;
			n = 0;
		}
	}
	spin_unlock_irqrestore(&rkt->lock, flags);
	return 0;
}
/**
 * rvt_unmap_fmr - unmap fast memory regions
 * @fmr_list: the list of fast memory regions to unmap
 *
 * Return: 0 on success.
 */
int rvt_unmap_fmr(struct list_head *fmr_list)
{
	struct rvt_fmr *fmr;
	struct rvt_lkey_table *rkt;
	unsigned long flags;
	struct rvt_dev_info *rdi;

	list_for_each_entry(fmr, fmr_list, ibfmr.list) {
		rdi = ib_to_rvt(fmr->ibfmr.device);
		rkt = &rdi->lkey_table;
		spin_lock_irqsave(&rkt->lock, flags);
		fmr->mr.user_base = 0;
		fmr->mr.iova = 0;
		fmr->mr.length = 0;
		spin_unlock_irqrestore(&rkt->lock, flags);
	}
	return 0;
}
/**
 * rvt_dealloc_fmr - deallocate a fast memory region
 * @ibfmr: the fast memory region to deallocate
 *
 * Return: 0 on success.
 */
int rvt_dealloc_fmr(struct ib_fmr *ibfmr)
{
	struct rvt_fmr *fmr = to_ifmr(ibfmr);
	int ret = 0;

	rvt_free_lkey(&fmr->mr);
	rvt_put_mr(&fmr->mr); /* will set completion if last */
	ret = rvt_check_refs(&fmr->mr, __func__);
	if (ret)
		goto out;
	rvt_deinit_mregion(&fmr->mr);
	kfree(fmr);
out:
	return ret;
}
/**
 * rvt_sge_adjacent - is isge compressible
 * @last_sge: last outgoing SGE written
 * @sge: SGE to check
 *
 * If adjacent will update last_sge to add length.
 *
 * Return: true if isge is adjacent to last sge
 */
static inline bool rvt_sge_adjacent(struct rvt_sge *last_sge,
				    struct ib_sge *sge)
{
	if (last_sge && sge->lkey == last_sge->mr->lkey &&
	    ((uint64_t)(last_sge->vaddr + last_sge->length) == sge->addr)) {
		if (sge->lkey) {
			if (unlikely((sge->addr - last_sge->mr->user_base +
			      sge->length > last_sge->mr->length)))
				return false; /* overrun, caller will catch */
		} else {
			last_sge->length += sge->length;
		}
		last_sge->sge_length += sge->length;
		trace_rvt_sge_adjacent(last_sge, sge);
		return true;
	}
	return false;
}
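/*
 * Illustration of the compression above (hypothetical values): an SGE of
 * addr 0x1000 / length 0x100 followed by one of addr 0x1100 under the
 * same lkey is folded into the previous internal SGE, which simply grows
 * by 0x100 bytes instead of consuming a new slot.
 */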
/**
 * rvt_lkey_ok - check IB SGE for validity and initialize
 * @rkt: table containing lkey to check SGE against
 * @pd: protection domain
 * @isge: outgoing internal SGE
 * @last_sge: last outgoing SGE written
 * @sge: SGE to check
 * @acc: access flags
 *
 * Check the IB SGE for validity and initialize our internal version
 * of it.
 *
 * Increments the reference count when a new sge is stored.
 *
 * Return: 0 if compressed, 1 if added, otherwise returns -errno.
 */
int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
		struct rvt_sge *isge, struct rvt_sge *last_sge,
		struct ib_sge *sge, int acc)
{
	struct rvt_mregion *mr;
	unsigned int n, m;
	size_t off;

	/*
	 * We use LKEY == zero for kernel virtual addresses
	 * (see rvt_get_dma_mr() and dma_virt_ops).
	 */
	if (sge->lkey == 0) {
		struct rvt_dev_info *dev = ib_to_rvt(pd->ibpd.device);

		if (pd->user)
			return -EINVAL;
		if (rvt_sge_adjacent(last_sge, sge))
			return 0;
		rcu_read_lock();
		mr = rcu_dereference(dev->dma_mr);
		if (!mr)
			goto bail;
		rvt_get_mr(mr);
		rcu_read_unlock();

		isge->mr = mr;
		isge->vaddr = (void *)sge->addr;
		isge->length = sge->length;
		isge->sge_length = sge->length;
		isge->m = 0;
		isge->n = 0;
		goto ok;
	}
	if (rvt_sge_adjacent(last_sge, sge))
		return 0;
	rcu_read_lock();
	mr = rcu_dereference(rkt->table[sge->lkey >> rkt->shift]);
	if (!mr)
		goto bail;
	rvt_get_mr(mr);
	if (!READ_ONCE(mr->lkey_published))
		goto bail_unref;

	if (unlikely(atomic_read(&mr->lkey_invalid) ||
		     mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
		goto bail_unref;

	off = sge->addr - mr->user_base;
	if (unlikely(sge->addr < mr->user_base ||
		     off + sge->length > mr->length ||
		     (mr->access_flags & acc) != acc))
		goto bail_unref;
	rcu_read_unlock();

	off += mr->offset;
	if (mr->page_shift) {
		/*
		 * page sizes are uniform power of 2 so no loop is necessary
		 * entries_spanned_by_off is the number of times the loop below
		 * would have executed.
		 */
		size_t entries_spanned_by_off;

		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off / RVT_SEGSZ;
		n = entries_spanned_by_off % RVT_SEGSZ;
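		/*
		 * Illustration (hypothetical numbers): with 4 KiB pages
		 * (page_shift = 12) and off = 0x6010, the offset spans
		 * 0x6010 >> 12 = 6 whole pages, leaving off = 0x10 within
		 * the seventh; 6 is then split into map and segment
		 * indices with / and % RVT_SEGSZ, avoiding the per-entry
		 * loop below.
		 */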
	} else {
		m = 0;
		n = 0;
		while (off >= mr->map[m]->segs[n].length) {
			off -= mr->map[m]->segs[n].length;
			n++;
			if (n >= RVT_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	isge->mr = mr;
	isge->vaddr = mr->map[m]->segs[n].vaddr + off;
	isge->length = mr->map[m]->segs[n].length - off;
	isge->sge_length = sge->length;
	isge->m = m;
	isge->n = n;
ok:
	trace_rvt_sge_new(isge, sge);
	return 1;
bail_unref:
	rvt_put_mr(mr);
bail:
	rcu_read_unlock();
	return -EINVAL;
}
EXPORT_SYMBOL(rvt_lkey_ok);
/**
 * rvt_rkey_ok - check the IB virtual address, length, and RKEY
 * @qp: qp for validation
 * @sge: SGE state
 * @len: length of data
 * @vaddr: virtual address to place data
 * @rkey: rkey to check
 * @acc: access flags
 *
 * Return: 1 if successful, otherwise 0.
 *
 * increments the reference count upon success
 */
int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc)
{
	struct rvt_dev_info *dev = ib_to_rvt(qp->ibqp.device);
	struct rvt_lkey_table *rkt = &dev->lkey_table;
	struct rvt_mregion *mr;
	unsigned int n, m;
	size_t off;

	/*
	 * We use RKEY == zero for kernel virtual addresses
	 * (see rvt_get_dma_mr() and dma_virt_ops).
	 */
	rcu_read_lock();
	if (rkey == 0) {
		struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd);
		struct rvt_dev_info *rdi = ib_to_rvt(pd->ibpd.device);

		if (pd->user)
			goto bail;
		mr = rcu_dereference(rdi->dma_mr);
		if (!mr)
			goto bail;
		rvt_get_mr(mr);
		rcu_read_unlock();

		sge->mr = mr;
		sge->vaddr = (void *)vaddr;
		sge->length = len;
		sge->sge_length = len;
		sge->m = 0;
		sge->n = 0;
		goto ok;
	}

	mr = rcu_dereference(rkt->table[rkey >> rkt->shift]);
	if (!mr)
		goto bail;
	rvt_get_mr(mr);
	/* ensure mr read is before test */
	if (!READ_ONCE(mr->lkey_published))
		goto bail_unref;
	if (unlikely(atomic_read(&mr->lkey_invalid) ||
		     mr->lkey != rkey || qp->ibqp.pd != mr->pd))
		goto bail_unref;

	off = vaddr - mr->iova;
	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
		     (mr->access_flags & acc) == 0))
		goto bail_unref;
	rcu_read_unlock();

	off += mr->offset;
	if (mr->page_shift) {
		/*
		 * page sizes are uniform power of 2 so no loop is necessary
		 * entries_spanned_by_off is the number of times the loop below
		 * would have executed.
		 */
		size_t entries_spanned_by_off;

		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off / RVT_SEGSZ;
		n = entries_spanned_by_off % RVT_SEGSZ;
	} else {
		m = 0;
		n = 0;
		while (off >= mr->map[m]->segs[n].length) {
			off -= mr->map[m]->segs[n].length;
			n++;
			if (n >= RVT_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	sge->mr = mr;
	sge->vaddr = mr->map[m]->segs[n].vaddr + off;
	sge->length = mr->map[m]->segs[n].length - off;
	sge->sge_length = len;
	sge->m = m;
	sge->n = n;
ok:
	return 1;
bail_unref:
	rvt_put_mr(mr);
bail:
	rcu_read_unlock();
	return 0;
}
EXPORT_SYMBOL(rvt_rkey_ok);