/*
 * Copyright (c) 2006, 2007, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "qib.h"

/**
 * qib_alloc_lkey - allocate an lkey
 * @mr: memory region that this lkey protects
 * @dma_region: 0->normal key, 1->restricted DMA key
 *
 * Returns 0 if successful, otherwise returns -errno.
 *
 * Increments the mr reference count as required.
 *
 * Sets the lkey field of mr for non-dma regions.
 */
int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
{
	unsigned long flags;
	u32 r;
	u32 n;
	int ret = 0;
	struct qib_ibdev *dev = to_idev(mr->pd->device);
	struct qib_lkey_table *rkt = &dev->lk_table;

	spin_lock_irqsave(&rkt->lock, flags);
	/* special case for dma_mr lkey == 0 */
	if (dma_region) {
		struct qib_mregion *tmr;

		tmr = rcu_access_pointer(dev->dma_mr);
		if (!tmr) {
			qib_get_mr(mr);
			rcu_assign_pointer(dev->dma_mr, mr);
			mr->lkey_published = 1;
		}
		goto success;
	}
	/* Find the next available LKEY */
	r = rkt->next;
	n = r;
	for (;;) {
		if (rkt->table[r] == NULL)
			break;
		r = (r + 1) & (rkt->max - 1);
		if (r == n)
			goto bail;
	}
	rkt->next = (r + 1) & (rkt->max - 1);
	/*
	 * Make sure lkey is never zero which is reserved to indicate an
	 * unrestricted LKEY.
	 */
	rkt->gen++;
	/*
	 * bits are capped in qib_verbs.c to ensure enough bits
	 * for generation number
	 */
	mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
		((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
		 << 8);
	if (mr->lkey == 0) {
		mr->lkey |= 1 << 8;
		rkt->gen++;
	}
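	/*
	 * Illustrative note (editorial, not from the original source):
	 * assuming the default ib_qib_lkey_table_size of 16, the key
	 * layout works out to
	 *
	 *   bits [31:16] = table index r
	 *   bits [15:8]  = generation number ((1 << (24 - 16)) - 1 = 8 bits)
	 *   bits [7:0]   = 0
	 *
	 * e.g. r = 0x0003 and gen = 0x2a give lkey = 0x00032a00. The
	 * generation bits make a stale key that happens to map to a
	 * reused table slot fail the mr->lkey != sge->lkey check in
	 * qib_lkey_ok() below.
	 */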
	qib_get_mr(mr);
	rcu_assign_pointer(rkt->table[r], mr);
	mr->lkey_published = 1;
success:
	spin_unlock_irqrestore(&rkt->lock, flags);
out:
	return ret;
bail:
	spin_unlock_irqrestore(&rkt->lock, flags);
	ret = -ENOMEM;
	goto out;
}
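
/*
 * Editorial note, not in the original file: the dma_region path is assumed
 * to be reached from qib_get_dma_mr() (the kernel-bypass region with
 * lkey == 0), while the table path serves ordinary MR creation. The
 * reference taken when a region is published is dropped again by
 * qib_free_lkey().
 */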

/**
 * qib_free_lkey - free an lkey
 * @mr: mr to free from tables
 */
void qib_free_lkey(struct qib_mregion *mr)
{
	unsigned long flags;
	u32 lkey = mr->lkey;
	u32 r;
	struct qib_ibdev *dev = to_idev(mr->pd->device);
	struct qib_lkey_table *rkt = &dev->lk_table;

	spin_lock_irqsave(&rkt->lock, flags);
	if (!mr->lkey_published)
		goto out;
	if (lkey == 0)
		RCU_INIT_POINTER(dev->dma_mr, NULL);
	else {
		r = lkey >> (32 - ib_qib_lkey_table_size);
		RCU_INIT_POINTER(rkt->table[r], NULL);
	}
	qib_put_mr(mr);
	mr->lkey_published = 0;
out:
	spin_unlock_irqrestore(&rkt->lock, flags);
}
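
/*
 * Editorial note, not in the original file: unpublishing only removes the
 * RCU-visible pointer. A reader that already passed atomic_inc_not_zero()
 * in qib_lkey_ok()/qib_rkey_ok() still holds a reference, so the caller
 * that actually frees the region must drop its own reference and wait for
 * the count to reach zero before the memory behind mr can be reclaimed.
 */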

/**
 * qib_lkey_ok - check IB SGE for validity and initialize
 * @rkt: table containing lkey to check SGE against
 * @pd: protection domain
 * @isge: outgoing internal SGE
 * @sge: SGE to check
 * @acc: access flags
 *
 * Return 1 if valid and successful, otherwise returns 0.
 *
 * Increments the reference count upon success.
 *
 * Check the IB SGE for validity and initialize our internal version
 * of it.
 */
int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
		struct qib_sge *isge, struct ib_sge *sge, int acc)
{
	struct qib_mregion *mr;
	unsigned n, m;
	size_t off;
	/*
	 * We use LKEY == zero for kernel virtual addresses
	 * (see qib_get_dma_mr and qib_dma.c).
	 */
	rcu_read_lock();
	if (sge->lkey == 0) {
		struct qib_ibdev *dev = to_idev(pd->ibpd.device);

		if (pd->user)
			goto bail;
		mr = rcu_dereference(dev->dma_mr);
		if (!mr)
			goto bail;
		if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
			goto bail;
		rcu_read_unlock();

		isge->mr = mr;
		isge->vaddr = (void *) sge->addr;
		isge->length = sge->length;
		isge->sge_length = sge->length;
		isge->m = 0;
		isge->n = 0;
		goto ok;
	}
	mr = rcu_dereference(
		rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))]);
	if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
		goto bail;

	off = sge->addr - mr->user_base;
	if (unlikely(sge->addr < mr->user_base ||
		     off + sge->length > mr->length ||
		     (mr->access_flags & acc) != acc))
		goto bail;
	if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
		goto bail;
	rcu_read_unlock();

	off += mr->offset;
	if (mr->page_shift) {
		/*
		 * Page sizes are a uniform power of 2, so no loop is
		 * necessary; entries_spanned_by_off is the number of
		 * times the loop below would have executed.
		 */
		size_t entries_spanned_by_off;

		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off / QIB_SEGSZ;
		n = entries_spanned_by_off % QIB_SEGSZ;
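		/*
		 * Worked example (editorial, not in the original source):
		 * with page_shift = 12 (4 KiB pages) and off = 0x6234,
		 * entries_spanned_by_off = 0x6234 >> 12 = 6 and the
		 * residual off becomes 0x234. The offset therefore falls
		 * in segment entry 6 (zero-based), found at map chunk
		 * m = 6 / QIB_SEGSZ, slot n = 6 % QIB_SEGSZ.
		 */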
	} else {
		m = 0;
		n = 0;
		while (off >= mr->map[m]->segs[n].length) {
			off -= mr->map[m]->segs[n].length;
			n++;
			if (n >= QIB_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	isge->mr = mr;
	isge->vaddr = mr->map[m]->segs[n].vaddr + off;
	isge->length = mr->map[m]->segs[n].length - off;
	isge->sge_length = sge->length;
	isge->m = m;
	isge->n = n;
ok:
	return 1;
bail:
	rcu_read_unlock();
	return 0;
}
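
/*
 * Sketch of a caller (editorial; the names wqe and rkt and the loop shape
 * are illustrative assumptions, not the driver's exact post-send code):
 * each ib_sge in a work request is validated and converted to an internal
 * qib_sge before the request is queued, e.g.
 *
 *	struct qib_pd *pd = to_ipd(qp->ibqp.pd);
 *	int i;
 *
 *	for (i = 0; i < wqe->wr.num_sge; i++) {
 *		if (!qib_lkey_ok(rkt, pd, &wqe->sg_list[i],
 *				 &wqe->wr.sg_list[i], acc))
 *			goto bail_unref;	// drop refs already taken
 *	}
 *
 * On failure the caller must release the references taken for the SGEs
 * that did validate.
 */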

/**
 * qib_rkey_ok - check the IB virtual address, length, and RKEY
 * @qp: qp for validation
 * @sge: SGE state
 * @len: length of data
 * @vaddr: virtual address to place data
 * @rkey: rkey to check
 * @acc: access flags
 *
 * Return 1 if successful, otherwise 0.
 *
 * Increments the reference count upon success.
 */
int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc)
{
	struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
	struct qib_mregion *mr;
	unsigned n, m;
	size_t off;
	/*
	 * We use RKEY == zero for kernel virtual addresses
	 * (see qib_get_dma_mr and qib_dma.c).
	 */
	rcu_read_lock();
	if (rkey == 0) {
		struct qib_pd *pd = to_ipd(qp->ibqp.pd);
		struct qib_ibdev *dev = to_idev(pd->ibpd.device);

		if (pd->user)
			goto bail;
		mr = rcu_dereference(dev->dma_mr);
		if (!mr)
			goto bail;
		if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
			goto bail;
		rcu_read_unlock();

		sge->mr = mr;
		sge->vaddr = (void *) vaddr;
		sge->length = len;
		sge->sge_length = len;
		sge->m = 0;
		sge->n = 0;
		goto ok;
	}
	mr = rcu_dereference(
		rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))]);
	if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
		goto bail;

	off = vaddr - mr->iova;
	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
		     (mr->access_flags & acc) == 0))
		goto bail;
	if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
		goto bail;
	rcu_read_unlock();

	off += mr->offset;
	if (mr->page_shift) {
		/*
		 * Page sizes are a uniform power of 2, so no loop is
		 * necessary; entries_spanned_by_off is the number of
		 * times the loop below would have executed.
		 */
		size_t entries_spanned_by_off;

		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off / QIB_SEGSZ;
		n = entries_spanned_by_off % QIB_SEGSZ;
	} else {
		m = 0;
		n = 0;
		while (off >= mr->map[m]->segs[n].length) {
			off -= mr->map[m]->segs[n].length;
			n++;
			if (n >= QIB_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	sge->mr = mr;
	sge->vaddr = mr->map[m]->segs[n].vaddr + off;
	sge->length = mr->map[m]->segs[n].length - off;
	sge->sge_length = len;
	sge->m = m;
	sge->n = n;
ok:
	return 1;
bail:
	rcu_read_unlock();
	return 0;
}
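
/*
 * Sketch of a caller (editorial; the exact variable names are assumptions
 * based on the RC receive path): when the first packet of an RDMA WRITE
 * arrives, the RETH carries the remote address, length, and rkey, which
 * are checked before any payload is placed:
 *
 *	u64 vaddr = be64_to_cpu(reth->vaddr);
 *	u32 rkey = be32_to_cpu(reth->rkey);
 *
 *	if (!qib_rkey_ok(qp, &qp->r_sge.sge, be32_to_cpu(reth->length),
 *			 vaddr, rkey, IB_ACCESS_REMOTE_WRITE))
 *		goto nack_acc;	// respond with a NAK (access error)
 */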

/*
 * Initialize the memory region specified by the work request.
 */
int qib_reg_mr(struct qib_qp *qp, struct ib_reg_wr *wr)
{
	struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
	struct qib_pd *pd = to_ipd(qp->ibqp.pd);
	struct qib_mr *mr = to_imr(wr->mr);
	struct qib_mregion *mrg;
	u32 key = wr->key;
	unsigned i, n, m;
	int ret = -EINVAL;
	unsigned long flags;
	u64 *page_list;
	size_t ps;
	spin_lock_irqsave(&rkt->lock, flags);
	if (pd->user || key == 0)
		goto bail;

	mrg = rcu_dereference_protected(
		rkt->table[(key >> (32 - ib_qib_lkey_table_size))],
		lockdep_is_held(&rkt->lock));
	if (unlikely(mrg == NULL || qp->ibqp.pd != mrg->pd))
		goto bail;

	if (mr->npages > mrg->max_segs)
		goto bail;
	ps = mr->ibmr.page_size;
	if (mr->ibmr.length > ps * mr->npages)
		goto bail;

	mrg->user_base = mr->ibmr.iova;
	mrg->iova = mr->ibmr.iova;
	mrg->lkey = key;
	mrg->length = mr->ibmr.length;
	mrg->access_flags = wr->access;
	page_list = mr->pages;
	m = 0;
	n = 0;
	for (i = 0; i < mr->npages; i++) {
		mrg->map[m]->segs[n].vaddr = (void *) page_list[i];
		mrg->map[m]->segs[n].length = ps;
		if (++n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}

	ret = 0;
bail:
	spin_unlock_irqrestore(&rkt->lock, flags);
	return ret;
}
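
/*
 * Editorial note, not in the original file: qib_reg_mr() is assumed to be
 * invoked from the post-send path when an IB_WR_REG_MR work request is
 * processed. That is why it can rewrite the already-published region in
 * place under rkt->lock rather than allocating a new lkey: the key's table
 * slot was reserved when the MR was created, and only its translation,
 * bounds, and access flags change here.
 */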