/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <rdma/ib_umem.h>
#include <linux/atomic.h>

#include "iw_cxgb4.h"
int use_dsgl = 0;
module_param(use_dsgl, int, 0644);
MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=0)");
#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96
#define T4_ULPTX_MAX_DMA 1024
#define C4IW_INLINE_THRESHOLD 128
static int inline_threshold = C4IW_INLINE_THRESHOLD;
module_param(inline_threshold, int, 0644);
MODULE_PARM_DESC(inline_threshold, "inline vs dsgl threshold (default=128)");
static int mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
{
	return (is_t4(dev->rdev.lldi.adapter_type) ||
		is_t5(dev->rdev.lldi.adapter_type)) &&
		(length >= 8*1024*1024*1024ULL);
}
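
/*
 * Example: a 4GB registration passes this check on T4/T5, while 8GB
 * (8*1024*1024*1024ULL) and above is rejected; adapters that are
 * neither T4 nor T5 are not subject to this cap.
 */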
static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
				       u32 len, dma_addr_t data, int wait)
{
	struct sk_buff *skb;
	struct ulp_mem_io *req;
	struct ulptx_sgl *sgl;
	u8 wr_len;
	int ret = 0;
	struct c4iw_wr_wait wr_wait;

	addr &= 0x7FFFFFF;

	if (wait)
		c4iw_init_wr_wait(&wr_wait);
	wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
	memset(req, 0, wr_len);
	INIT_ULPTX_WR(req, wr_len, 0, 0);
	req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
			(wait ? FW_WR_COMPL_F : 0));
	req->wr.wr_lo = wait ? (__force __be64)(unsigned long)&wr_wait : 0L;
	req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
	req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
	req->cmd |= cpu_to_be32(T5_ULP_MEMIO_ORDER_V(1));
	req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len >> 5));
	req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
	req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr));

	sgl = (struct ulptx_sgl *)(req + 1);
	sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
				    ULPTX_NSGE_V(1));
	sgl->len0 = cpu_to_be32(len);
	sgl->addr0 = cpu_to_be64(data);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		return ret;
	if (wait)
		ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	return ret;
}
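
/*
 * Note on units: adapter memory is addressed in 32-byte words here, so
 * dlen encodes len >> 5. For example, a 1024-byte aligned write is
 * described as 32 units and carried by a single one-entry DSGL.
 */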
static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
				  void *data)
{
	struct sk_buff *skb;
	struct ulp_mem_io *req;
	struct ulptx_idata *sc;
	u8 wr_len, *to_dp, *from_dp;
	int copy_len, num_wqe, i, ret = 0;
	struct c4iw_wr_wait wr_wait;
	__be32 cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));

	if (is_t4(rdev->lldi.adapter_type))
		cmd |= cpu_to_be32(ULP_MEMIO_ORDER_F);
	else
		cmd |= cpu_to_be32(T5_ULP_MEMIO_IMM_F);

	addr &= 0x7FFFFFF;
	PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
	num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
	c4iw_init_wr_wait(&wr_wait);
	for (i = 0; i < num_wqe; i++) {

		copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
			   len;
		wr_len = roundup(sizeof *req + sizeof *sc +
				 roundup(copy_len, T4_ULPTX_MIN_IO), 16);

		skb = alloc_skb(wr_len, GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

		req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
		memset(req, 0, wr_len);
		INIT_ULPTX_WR(req, wr_len, 0, 0);

		if (i == (num_wqe-1)) {
			req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
						    FW_WR_COMPL_F);
			req->wr.wr_lo = (__force __be64)(unsigned long)&wr_wait;
		} else
			req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR));
		req->wr.wr_mid = cpu_to_be32(
				       FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));

		req->cmd = cmd;
		req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(
				DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
		req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len - sizeof(req->wr),
						      16));
		req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr + i * 3));

		sc = (struct ulptx_idata *)(req + 1);
		sc->cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM));
		sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));

		to_dp = (u8 *)(sc + 1);
		from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
		if (data)
			memcpy(to_dp, from_dp, copy_len);
		else
			memset(to_dp, 0, copy_len);
		if (copy_len % T4_ULPTX_MIN_IO)
			memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
			       (copy_len % T4_ULPTX_MIN_IO));
		ret = c4iw_ofld_send(rdev, skb);
		if (ret)
			return ret;
		len -= C4IW_MAX_INLINE_SIZE;
	}

	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	return ret;
}
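
/*
 * Chunking example: each work request above carries at most
 * C4IW_MAX_INLINE_SIZE (96) bytes of payload. Since adapter memory is
 * addressed in 32-byte words, each successive chunk advances lock_addr
 * by 96/32 = 3 words, which is the "addr + i * 3" term.
 */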
static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
{
	u32 remain = len;
	u32 dmalen;
	int ret = 0;
	dma_addr_t daddr;
	dma_addr_t save;

	daddr = dma_map_single(&rdev->lldi.pdev->dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&rdev->lldi.pdev->dev, daddr))
		return -1;
	save = daddr;

	while (remain > inline_threshold) {
		if (remain < T4_ULPTX_MAX_DMA) {
			if (remain & ~T4_ULPTX_MIN_IO)
				dmalen = remain & ~(T4_ULPTX_MIN_IO-1);
			else
				dmalen = remain;
		} else
			dmalen = T4_ULPTX_MAX_DMA;
		remain -= dmalen;
		ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr,
						  !remain);
		if (ret)
			goto out;
		addr += dmalen >> 5;
		data += dmalen;
		daddr += dmalen;
	}
	if (remain)
		ret = _c4iw_write_mem_inline(rdev, addr, remain, data);
out:
	dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);
	return ret;
}
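
/*
 * Sizing example: with the default inline_threshold of 128, a
 * remainder of 1000 bytes is below T4_ULPTX_MAX_DMA (1024), so it is
 * rounded down to a 32-byte multiple (992) for the DSGL write and the
 * final 8 bytes fall through to _c4iw_write_mem_inline().
 */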
/*
 * write len bytes of data into addr (32B aligned address)
 * If data is NULL, clear len bytes of memory to zero.
 */
static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
			     void *data)
{
	if (is_t5(rdev->lldi.adapter_type) && use_dsgl) {
		if (len > inline_threshold) {
			if (_c4iw_write_mem_dma(rdev, addr, len, data)) {
				printk_ratelimited(KERN_WARNING
						   "%s: dma map"
						   " failure (non fatal)\n",
						   pci_name(rdev->lldi.pdev));
				return _c4iw_write_mem_inline(rdev, addr, len,
							      data);
			} else
				return 0;
		} else
			return _c4iw_write_mem_inline(rdev, addr, len, data);
	} else
		return _c4iw_write_mem_inline(rdev, addr, len, data);
}
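
/*
 * Path selection, in short: only T5 with the use_dsgl module parameter
 * set and len above inline_threshold takes the DMA/DSGL path, and even
 * then a DMA mapping failure falls back to inline writes. Everything
 * else (T4, small writes, use_dsgl=0) goes inline.
 */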
/*
 * Build and write a TPT entry.
 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
 *     pbl_size and pbl_addr
 * OUT: stag index
 */
static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
			   u32 *stag, u8 stag_state, u32 pdid,
			   enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
			   int bind_enabled, u32 zbva, u64 to,
			   u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
{
	int err;
	struct fw_ri_tpte *tpt;
	u32 stag_idx;
	static atomic_t key;

	if (c4iw_fatal_error(rdev))
		return -EIO;

	tpt = kmalloc(sizeof(*tpt), GFP_KERNEL);
	if (!tpt)
		return -ENOMEM;

	stag_state = stag_state > 0;
	stag_idx = (*stag) >> 8;

	if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
		stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
		if (!stag_idx) {
			mutex_lock(&rdev->stats.lock);
			rdev->stats.stag.fail++;
			mutex_unlock(&rdev->stats.lock);
			kfree(tpt);
			return -ENOMEM;
		}
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur += 32;
		if (rdev->stats.stag.cur > rdev->stats.stag.max)
			rdev->stats.stag.max = rdev->stats.stag.cur;
		mutex_unlock(&rdev->stats.lock);
		*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
	}
	PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
	     __func__, stag_state, type, pdid, stag_idx);

	/* write TPT entry */
	if (reset_tpt_entry)
		memset(tpt, 0, sizeof(*tpt));
	else {
		tpt->valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
			FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) |
			FW_RI_TPTE_STAGSTATE_V(stag_state) |
			FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid));
		tpt->locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
			(bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) |
			FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO :
						      FW_RI_VA_BASED_TO)) |
			FW_RI_TPTE_PS_V(page_size));
		tpt->nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
			FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr)>>3));
		tpt->len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
		tpt->va_hi = cpu_to_be32((u32)(to >> 32));
		tpt->va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
		tpt->dca_mwbcnt_pstag = cpu_to_be32(0);
		tpt->len_hi = cpu_to_be32((u32)(len >> 32));
	}
	err = write_adapter_mem(rdev, stag_idx +
				(rdev->lldi.vr->stag.start >> 5),
				sizeof(*tpt), tpt);

	if (reset_tpt_entry) {
		c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur -= 32;
		mutex_unlock(&rdev->stats.lock);
	}
	kfree(tpt);
	return err;
}
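
/*
 * Stag layout example: the 32-bit stag packs an 8-bit rotating key in
 * the low byte and the TPT index above it, i.e.
 * stag = (stag_idx << 8) | key. A stag of 0x00001234 therefore names
 * TPT index 0x12 with key 0x34, and "stag >> 8" recovers the index.
 */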
static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
		     u32 pbl_addr, u32 pbl_size)
{
	int err;

	PDBG("%s *pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
	     __func__, pbl_addr, rdev->lldi.vr->pbl.start,
	     pbl_size);

	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
	return err;
}
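
/*
 * Unit conversion: pbl_addr is a byte address and write_adapter_mem()
 * takes a 32-byte-word address, hence pbl_addr >> 5. pbl_size counts
 * 64-bit entries, so pbl_size << 3 converts it to a byte length; e.g.
 * a 16-entry PBL writes 128 bytes.
 */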
static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
		     u32 pbl_addr)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
			       pbl_size, pbl_addr);
}

static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
			       0UL, 0, 0, 0, 0);
}

static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
			       0);
}

static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
			 u32 pbl_size, u32 pbl_addr)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
			       0UL, 0, 0, pbl_size, pbl_addr);
}
static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
{
	u32 mmid;

	mhp->attr.state = 1;
	mhp->attr.stag = stag;
	mmid = stag >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
	return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}
static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
			struct c4iw_mr *mhp, int shift)
{
	u32 stag = T4_STAG_UNSET;
	int ret;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
			      FW_RI_STAG_NSMR, mhp->attr.len ?
			      mhp->attr.perms : 0,
			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
			      mhp->attr.va_fbo, mhp->attr.len ?
			      mhp->attr.len : -1, shift - 12,
			      mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
			  mhp->attr.pbl_addr);
	return ret;
}
static int reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
			  struct c4iw_mr *mhp, int shift, int npages)
{
	u32 stag;
	int ret;

	if (npages > mhp->attr.pbl_size)
		return -ENOMEM;

	stag = mhp->attr.stag;
	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
			      mhp->attr.va_fbo, mhp->attr.len, shift - 12,
			      mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
			  mhp->attr.pbl_addr);

	return ret;
}
static int alloc_pbl(struct c4iw_mr *mhp, int npages)
{
	mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
						npages << 3);

	if (!mhp->attr.pbl_addr)
		return -ENOMEM;

	mhp->attr.pbl_size = npages;

	return 0;
}
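
/*
 * Each PBL entry is an 8-byte DMA address, so the pool allocation is
 * npages << 3 bytes; e.g. a 512-page MR needs a 4KB PBL.
 */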
static int build_phys_page_list(struct ib_phys_buf *buffer_list,
				int num_phys_buf, u64 *iova_start,
				u64 *total_size, int *npages,
				int *shift, __be64 **page_list)
{
	u64 mask;
	int i, j, n;

	mask = 0;
	*total_size = 0;
	for (i = 0; i < num_phys_buf; ++i) {
		if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
			return -EINVAL;
		if (i != 0 && i != num_phys_buf - 1 &&
		    (buffer_list[i].size & ~PAGE_MASK))
			return -EINVAL;
		*total_size += buffer_list[i].size;
		if (i > 0)
			mask |= buffer_list[i].addr;
		else
			mask |= buffer_list[i].addr & PAGE_MASK;
		if (i != num_phys_buf - 1)
			mask |= buffer_list[i].addr + buffer_list[i].size;
		else
			mask |= (buffer_list[i].addr + buffer_list[i].size +
				PAGE_SIZE - 1) & PAGE_MASK;
	}

	if (*total_size > 0xFFFFFFFFULL)
		return -ENOMEM;

	/* Find largest page shift we can use to cover buffers */
	for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
		if ((1ULL << *shift) & mask)
			break;

	buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
	buffer_list[0].addr &= ~0ull << *shift;

	*npages = 0;
	for (i = 0; i < num_phys_buf; ++i)
		*npages += (buffer_list[i].size +
			(1ULL << *shift) - 1) >> *shift;

	if (!*npages)
		return -EINVAL;

	*page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
	if (!*page_list)
		return -ENOMEM;

	n = 0;
	for (i = 0; i < num_phys_buf; ++i)
		for (j = 0;
		     j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
		     ++j)
			(*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
			    ((u64) j << *shift));

	PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n",
	     __func__, (unsigned long long)*iova_start,
	     (unsigned long long)mask, *shift, (unsigned long long)*total_size,
	     *npages);

	return 0;
}
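
/*
 * Worked example (4KB pages, PAGE_SHIFT = 12): a single 2MB buffer at
 * physical address 0x200000 yields mask = 0x200000 | 0x400000 =
 * 0x600000, whose lowest set bit is bit 21, so *shift becomes 21 and
 * the region is covered by one 2MB page (*npages == 1).
 */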
int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask,
			     struct ib_pd *pd, struct ib_phys_buf *buffer_list,
			     int num_phys_buf, int acc, u64 *iova_start)
{
	struct c4iw_mr mh, *mhp;
	struct c4iw_pd *php;
	struct c4iw_dev *rhp;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size;
	int npages;
	int ret;

	PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);

	/* There can be no memory windows */
	if (atomic_read(&mr->usecnt))
		return -EINVAL;

	mhp = to_c4iw_mr(mr);
	rhp = mhp->rhp;
	php = to_c4iw_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return -EINVAL;

	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_c4iw_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS) {
		mh.attr.perms = c4iw_ib_to_tpt_access(acc);
		mh.attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) ==
					 IB_ACCESS_MW_BIND;
	}
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
					   iova_start,
					   &total_size, &npages,
					   &shift, &page_list);
		if (ret)
			return ret;
	}

	if (mr_exceeds_hw_limits(rhp, total_size)) {
		kfree(page_list);
		return -EINVAL;
	}

	ret = reregister_mem(rhp, php, &mh, shift, npages);
	kfree(page_list);
	if (ret)
		return ret;
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = (u32) total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}
struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
				     struct ib_phys_buf *buffer_list,
				     int num_phys_buf, int acc, u64 *iova_start)
{
	__be64 *page_list;
	int shift;
	u64 total_size;
	int npages;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;

	PDBG("%s ib_pd %p\n", __func__, pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
				   &total_size, &npages, &shift,
				   &page_list);
	if (ret)
		goto err;

	if (mr_exceeds_hw_limits(rhp, total_size)) {
		kfree(page_list);
		ret = -EINVAL;
		goto err;
	}

	ret = alloc_pbl(mhp, npages);
	if (ret) {
		kfree(page_list);
		goto err;
	}

	ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr,
			npages);
	kfree(page_list);
	if (ret)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = *iova_start;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);

err:
	kfree(mhp);
	return ERR_PTR(ret);
}
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;
	u32 stag = T4_STAG_UNSET;

	PDBG("%s ib_pd %p\n", __func__, pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
	mhp->attr.zbva = 0;
	mhp->attr.va_fbo = 0;
	mhp->attr.page_size = 0;
	mhp->attr.len = ~0ULL;
	mhp->attr.pbl_size = 0;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0);
	if (ret)
		goto err1;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		goto err2;
	return &mhp->ibmr;
err2:
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr);
err1:
	kfree(mhp);
	return ERR_PTR(ret);
}
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			       u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, len;
	int i, k, entry;
	int err = 0;
	struct scatterlist *sg;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;

	PDBG("%s ib_pd %p\n", __func__, pd);

	if (length == ~0ULL)
		return ERR_PTR(-EINVAL);

	if ((length + start) < start)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	if (mr_exceeds_hw_limits(rhp, length))
		return ERR_PTR(-EINVAL);

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = mhp->umem->nmap;
	err = alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;

	for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
		len = sg_dma_len(sg) >> shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
						 mhp->umem->page_size * k);
			if (i == PAGE_SIZE / sizeof *pages) {
				err = write_pbl(&mhp->rhp->rdev,
						pages,
						mhp->attr.pbl_addr + (n << 3), i);
				if (err)
					goto pbl_done;
				n += i;
				i = 0;
			}
		}
	}

	if (i)
		err = write_pbl(&mhp->rhp->rdev, pages,
				mhp->attr.pbl_addr + (n << 3), i);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = length;

	err = register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}
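
/*
 * The PBL is staged through one scratch page: with 4KB pages each
 * batch holds PAGE_SIZE / sizeof(__be64) = 512 entries, and
 * write_pbl() is called every time the batch fills (and once more for
 * any tail entries).
 */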
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	if (type != IB_MW_TYPE_1)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		deallocate_window(&rhp->rdev, mhp->attr.stag);
		kfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmw);
}
int c4iw_dealloc_mw(struct ib_mw *mw)
{
	struct c4iw_dev *rhp;
	struct c4iw_mw *mhp;
	u32 mmid;

	mhp = to_c4iw_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	remove_handle(rhp, &rhp->mmidr, mmid);
	deallocate_window(&rhp->rdev, mhp->attr.stag);
	kfree(mhp);
	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
	return 0;
}
struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
			    enum ib_mr_type mr_type,
			    u32 max_num_sg)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = 0;
	int length = roundup(max_num_sg * sizeof(u64), 32);

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > t4_max_fr_depth(use_dsgl))
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp) {
		ret = -ENOMEM;
		goto err;
	}

	mhp->mpl = dma_alloc_coherent(&rhp->rdev.lldi.pdev->dev,
				      length, &mhp->mpl_addr, GFP_KERNEL);
	if (!mhp->mpl) {
		ret = -ENOMEM;
		goto err_mpl;
	}
	mhp->max_mpl_len = length;

	mhp->rhp = rhp;
	ret = alloc_pbl(mhp, max_num_sg);
	if (ret)
		goto err1;
	mhp->attr.pbl_size = max_num_sg;
	ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
			    mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_NSMR;
	mhp->attr.stag = stag;
	mhp->attr.state = 1;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		ret = -ENOMEM;
		goto err3;
	}

	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmr);
err3:
	dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr);
err2:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);
err1:
	dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
			  mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
err_mpl:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}
static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

	if (unlikely(mhp->mpl_len == mhp->attr.pbl_size))
		return -ENOMEM;

	mhp->mpl[mhp->mpl_len++] = addr;

	return 0;
}

int c4iw_map_mr_sg(struct ib_mr *ibmr,
		   struct scatterlist *sg,
		   int sg_nents)
{
	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

	mhp->mpl_len = 0;

	return ib_sg_to_pages(ibmr, sg, sg_nents, c4iw_set_page);
}
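
/*
 * ib_sg_to_pages() walks the scatterlist and invokes c4iw_set_page()
 * once per page-sized block, so after a successful c4iw_map_mr_sg()
 * mhp->mpl[] holds the page DMA addresses and mhp->mpl_len their
 * count, bounded by the pbl_size reserved in c4iw_alloc_mr().
 */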
int c4iw_dereg_mr(struct ib_mr *ib_mr)
{
	struct c4iw_dev *rhp;
	struct c4iw_mr *mhp;
	u32 mmid;

	PDBG("%s ib_mr %p\n", __func__, ib_mr);
	/* There can be no memory windows */
	if (atomic_read(&ib_mr->usecnt))
		return -EINVAL;

	mhp = to_c4iw_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	remove_handle(rhp, &rhp->mmidr, mmid);
	if (mhp->mpl)
		dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
				  mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr);
	if (mhp->attr.pbl_size)
		c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
				  mhp->attr.pbl_size << 3);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
	kfree(mhp);
	return 0;
}