2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
33 #include <linux/module.h>
37 static int db_delay_usecs = 1;
38 module_param(db_delay_usecs, int, 0644);
39 MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay awaiting db fifo to drain");
41 static int ocqp_support = 1;
42 module_param(ocqp_support, int, 0644);
43 MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");
45 int db_fc_threshold = 1000;
46 module_param(db_fc_threshold, int, 0644);
47 MODULE_PARM_DESC(db_fc_threshold,
48 "QP count/threshold that triggers"
49 " automatic db flow control mode (default = 1000)");
51 int db_coalescing_threshold;
52 module_param(db_coalescing_threshold, int, 0644);
53 MODULE_PARM_DESC(db_coalescing_threshold,
54 "QP count/threshold that triggers"
55 " disabling db coalescing (default = 0)");
57 static int max_fr_immd = T4_MAX_FR_IMMD;
58 module_param(max_fr_immd, int, 0644);
59 MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");
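/*
 * max_fr_immd: fast-register PBLs up to this many bytes are copied into the
 * WQE as immediate data; larger PBLs are described with a DSGL (when the
 * adapter reports ulptx_memwrite_dsgl support) so the hardware fetches the
 * page list by DMA instead. See build_memreg().
 */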
61 static int alloc_ird(struct c4iw_dev *dev, u32 ird)
65 spin_lock_irq(&dev->lock);
66 if (ird <= dev->avail_ird)
67 dev->avail_ird -= ird;
70 spin_unlock_irq(&dev->lock);
73 dev_warn(&dev->rdev.lldi.pdev->dev,
74 "device IRD resources exhausted\n");
79 static void free_ird(struct c4iw_dev *dev, int ird)
81 spin_lock_irq(&dev->lock);
82 dev->avail_ird += ird;
83 spin_unlock_irq(&dev->lock);
86 static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
89 spin_lock_irqsave(&qhp->lock, flag);
90 qhp->attr.state = state;
91 spin_unlock_irqrestore(&qhp->lock, flag);
94 static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
96 c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);
99 static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
101 dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
102 pci_unmap_addr(sq, mapping));
105 static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
107 if (t4_sq_onchip(sq))
108 dealloc_oc_sq(rdev, sq);
109 else
110 dealloc_host_sq(rdev, sq);
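/*
 * SQ allocation: when the ocqp_support parameter is set and the adapter
 * exposes on-chip queue memory, the SQ is carved out of the OCQP pool and
 * accessed through the adapter's memory window (oc_mw_kva/oc_mw_pa).
 * Otherwise the SQ falls back to host memory obtained with
 * dma_alloc_coherent() in alloc_host_sq().
 */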
113 static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
115 if (!ocqp_support || !ocqp_supported(&rdev->lldi))
117 sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
120 sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr -
121 rdev->lldi.vr->ocq.start;
122 sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr -
123 rdev->lldi.vr->ocq.start);
124 sq->flags |= T4_SQ_ONCHIP;
128 static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
130 sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,
131 &(sq->dma_addr), GFP_KERNEL);
134 sq->phys_addr = virt_to_phys(sq->queue);
135 pci_unmap_addr_set(sq, mapping, sq->dma_addr);
139 static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
143 ret = alloc_oc_sq(rdev, sq);
144 if (ret)
145 ret = alloc_host_sq(rdev, sq);
149 static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
150 struct c4iw_dev_ucontext *uctx)
153 * uP clears EQ contexts when the connection exits rdma mode,
154 * so no need to post a RESET WR for these EQs.
156 dma_free_coherent(&(rdev->lldi.pdev->dev),
157 wq->rq.memsize, wq->rq.queue,
158 dma_unmap_addr(&wq->rq, mapping));
159 dealloc_sq(rdev, &wq->sq);
160 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
163 c4iw_put_qpid(rdev, wq->rq.qid, uctx);
164 c4iw_put_qpid(rdev, wq->sq.qid, uctx);
169 * Determine the BAR2 virtual address and qid. If pbar2_pa is not NULL,
170 * then this is a user mapping so compute the page-aligned physical address
171 * as well.
172 */
173 void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
174 enum cxgb4_bar2_qtype qtype,
175 unsigned int *pbar2_qid, u64 *pbar2_pa)
180 ret = cxgb4_bar2_sge_qregs(rdev->lldi.ports[0], qid, qtype,
182 &bar2_qoffset, pbar2_qid);
187 *pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;
189 if (is_t4(rdev->lldi.adapter_type))
190 return NULL;
192 return rdev->bar2_kva + bar2_qoffset;
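/*
 * create_qp() allocates the hardware and software resources for one QP:
 * the SQ/RQ qids, the kernel shadow rings (sw_sq/sw_rq), the RQT entry and
 * the queue memory itself, then maps the BAR2 doorbell/GTS addresses and
 * posts a single FW_RI_RES_WR carrying two resource records (SQ, then RQ)
 * so the firmware writes both egress queue contexts. A c4iw_wr_wait is
 * used to block until the firmware reply arrives.
 */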
195 static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
196 struct t4_cq *rcq, struct t4_cq *scq,
197 struct c4iw_dev_ucontext *uctx)
199 int user = (uctx != &rdev->uctx);
200 struct fw_ri_res_wr *res_wr;
201 struct fw_ri_res *res;
203 struct c4iw_wr_wait wr_wait;
208 wq->sq.qid = c4iw_get_qpid(rdev, uctx);
212 wq->rq.qid = c4iw_get_qpid(rdev, uctx);
219 wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
226 wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
235 * RQT must be a power of 2 and at least 16 deep.
237 wq->rq.rqt_size = roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
238 wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
239 if (!wq->rq.rqt_hwaddr) {
244 ret = alloc_sq(rdev, &wq->sq, user);
247 memset(wq->sq.queue, 0, wq->sq.memsize);
248 dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
250 wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
251 wq->rq.memsize, &(wq->rq.dma_addr),
257 PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
258 __func__, wq->sq.queue,
259 (unsigned long long)virt_to_phys(wq->sq.queue),
261 (unsigned long long)virt_to_phys(wq->rq.queue));
262 memset(wq->rq.queue, 0, wq->rq.memsize);
263 dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
265 wq->db = rdev->lldi.db_reg;
267 wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid, T4_BAR2_QTYPE_EGRESS,
269 user ? &wq->sq.bar2_pa : NULL);
270 wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid, T4_BAR2_QTYPE_EGRESS,
272 user ? &wq->rq.bar2_pa : NULL);
275 * User mode must have bar2 access.
277 if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) {
278 pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n",
279 pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
287 /* build fw_ri_res_wr */
288 wr_len = sizeof *res_wr + 2 * sizeof *res;
290 skb = alloc_skb(wr_len, GFP_KERNEL);
295 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
297 res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
298 memset(res_wr, 0, wr_len);
299 res_wr->op_nres = cpu_to_be32(
300 FW_WR_OP_V(FW_RI_RES_WR) |
301 FW_RI_RES_WR_NRES_V(2) |
303 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
304 res_wr->cookie = (uintptr_t)&wr_wait;
306 res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
307 res->u.sqrq.op = FW_RI_RES_OP_WRITE;
310 * eqsize is the number of 64B entries plus the status page size.
312 eqsize = wq->sq.size * T4_SQ_NUM_SLOTS +
313 rdev->hw_queue.t4_eq_status_entries;
315 res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
316 FW_RI_RES_WR_HOSTFCMODE_V(0) | /* no host cidx updates */
317 FW_RI_RES_WR_CPRIO_V(0) | /* don't keep in chip cache */
318 FW_RI_RES_WR_PCIECHN_V(0) | /* set by uP at ri_init time */
319 (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_ONCHIP_F : 0) |
320 FW_RI_RES_WR_IQID_V(scq->cqid));
321 res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
322 FW_RI_RES_WR_DCAEN_V(0) |
323 FW_RI_RES_WR_DCACPU_V(0) |
324 FW_RI_RES_WR_FBMIN_V(2) |
325 (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_FBMAX_V(2) :
326 FW_RI_RES_WR_FBMAX_V(3)) |
327 FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
328 FW_RI_RES_WR_CIDXFTHRESH_V(0) |
329 FW_RI_RES_WR_EQSIZE_V(eqsize));
330 res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
331 res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
333 res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
334 res->u.sqrq.op = FW_RI_RES_OP_WRITE;
337 * eqsize is the number of 64B entries plus the status page size.
339 eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
340 rdev->hw_queue.t4_eq_status_entries;
341 res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
342 FW_RI_RES_WR_HOSTFCMODE_V(0) | /* no host cidx updates */
343 FW_RI_RES_WR_CPRIO_V(0) | /* don't keep in chip cache */
344 FW_RI_RES_WR_PCIECHN_V(0) | /* set by uP at ri_init time */
345 FW_RI_RES_WR_IQID_V(rcq->cqid));
346 res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
347 FW_RI_RES_WR_DCAEN_V(0) |
348 FW_RI_RES_WR_DCACPU_V(0) |
349 FW_RI_RES_WR_FBMIN_V(2) |
350 FW_RI_RES_WR_FBMAX_V(3) |
351 FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
352 FW_RI_RES_WR_CIDXFTHRESH_V(0) |
353 FW_RI_RES_WR_EQSIZE_V(eqsize));
354 res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
355 res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
357 c4iw_init_wr_wait(&wr_wait);
359 ret = c4iw_ofld_send(rdev, skb);
362 ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
366 PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
367 __func__, wq->sq.qid, wq->rq.qid, wq->db,
368 wq->sq.bar2_va, wq->rq.bar2_va);
372 dma_free_coherent(&(rdev->lldi.pdev->dev),
373 wq->rq.memsize, wq->rq.queue,
374 dma_unmap_addr(&wq->rq, mapping));
376 dealloc_sq(rdev, &wq->sq);
378 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
384 c4iw_put_qpid(rdev, wq->rq.qid, uctx);
386 c4iw_put_qpid(rdev, wq->sq.qid, uctx);
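/*
 * build_immd() copies the send WR's scatter/gather entries inline into the
 * WQE as a FW_RI_DATA_IMMD chunk. The destination pointer wraps at the end
 * of the SQ memory, the request is rejected if the total exceeds the
 * caller's "max" inline limit, and the result is padded to a 16-byte
 * multiple before immdlen is set.
 */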
390 static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
391 struct ib_send_wr *wr, int max, u32 *plenp)
398 dstp = (u8 *)immdp->data;
399 for (i = 0; i < wr->num_sge; i++) {
400 if ((plen + wr->sg_list[i].length) > max)
402 srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
403 plen += wr->sg_list[i].length;
404 rem = wr->sg_list[i].length;
406 if (dstp == (u8 *)&sq->queue[sq->size])
407 dstp = (u8 *)sq->queue;
408 if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
411 len = (u8 *)&sq->queue[sq->size] - dstp;
412 memcpy(dstp, srcp, len);
418 len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
420 memset(dstp, 0, len);
421 immdp->op = FW_RI_DATA_IMMD;
424 immdp->immdlen = cpu_to_be32(plen);
429 static int build_isgl(__be64 *queue_start, __be64 *queue_end,
430 struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
431 int num_sge, u32 *plenp)
436 __be64 *flitp = (__be64 *)isglp->sge;
438 for (i = 0; i < num_sge; i++) {
439 if ((plen + sg_list[i].length) < plen)
441 plen += sg_list[i].length;
442 *flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
444 if (++flitp == queue_end)
446 *flitp = cpu_to_be64(sg_list[i].addr);
447 if (++flitp == queue_end)
450 *flitp = (__force __be64)0;
451 isglp->op = FW_RI_DATA_ISGL;
453 isglp->nsge = cpu_to_be16(num_sge);
460 static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
461 struct ib_send_wr *wr, u8 *len16)
467 if (wr->num_sge > T4_MAX_SEND_SGE)
469 switch (wr->opcode) {
470 case IB_WR_SEND:
471 if (wr->send_flags & IB_SEND_SOLICITED)
472 wqe->send.sendop_pkd = cpu_to_be32(
473 FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE));
475 wqe->send.sendop_pkd = cpu_to_be32(
476 FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND));
477 wqe->send.stag_inv = 0;
479 case IB_WR_SEND_WITH_INV:
480 if (wr->send_flags & IB_SEND_SOLICITED)
481 wqe->send.sendop_pkd = cpu_to_be32(
482 FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE_INV));
484 wqe->send.sendop_pkd = cpu_to_be32(
485 FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_INV));
486 wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
497 if (wr->send_flags & IB_SEND_INLINE) {
498 ret = build_immd(sq, wqe->send.u.immd_src, wr,
499 T4_MAX_SEND_INLINE, &plen);
502 size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
505 ret = build_isgl((__be64 *)sq->queue,
506 (__be64 *)&sq->queue[sq->size],
507 wqe->send.u.isgl_src,
508 wr->sg_list, wr->num_sge, &plen);
511 size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
512 wr->num_sge * sizeof(struct fw_ri_sge);
515 wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
516 wqe->send.u.immd_src[0].r1 = 0;
517 wqe->send.u.immd_src[0].r2 = 0;
518 wqe->send.u.immd_src[0].immdlen = 0;
519 size = sizeof wqe->send + sizeof(struct fw_ri_immd);
522 *len16 = DIV_ROUND_UP(size, 16);
523 wqe->send.plen = cpu_to_be32(plen);
527 static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
528 struct ib_send_wr *wr, u8 *len16)
534 if (wr->num_sge > T4_MAX_SEND_SGE)
537 wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
538 wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
540 if (wr->send_flags & IB_SEND_INLINE) {
541 ret = build_immd(sq, wqe->write.u.immd_src, wr,
542 T4_MAX_WRITE_INLINE, &plen);
545 size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
548 ret = build_isgl((__be64 *)sq->queue,
549 (__be64 *)&sq->queue[sq->size],
550 wqe->write.u.isgl_src,
551 wr->sg_list, wr->num_sge, &plen);
554 size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
555 wr->num_sge * sizeof(struct fw_ri_sge);
558 wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
559 wqe->write.u.immd_src[0].r1 = 0;
560 wqe->write.u.immd_src[0].r2 = 0;
561 wqe->write.u.immd_src[0].immdlen = 0;
562 size = sizeof wqe->write + sizeof(struct fw_ri_immd);
565 *len16 = DIV_ROUND_UP(size, 16);
566 wqe->write.plen = cpu_to_be32(plen);
570 static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
575 wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey);
576 wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr
578 wqe->read.to_src_lo = cpu_to_be32((u32)rdma_wr(wr)->remote_addr);
579 wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
580 wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
581 wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
583 wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
585 wqe->read.stag_src = cpu_to_be32(2);
586 wqe->read.to_src_hi = 0;
587 wqe->read.to_src_lo = 0;
588 wqe->read.stag_sink = cpu_to_be32(2);
590 wqe->read.to_sink_hi = 0;
591 wqe->read.to_sink_lo = 0;
595 *len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
599 static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
600 struct ib_recv_wr *wr, u8 *len16)
604 ret = build_isgl((__be64 *)qhp->wq.rq.queue,
605 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
606 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
609 *len16 = DIV_ROUND_UP(sizeof wqe->recv +
610 wr->num_sge * sizeof(struct fw_ri_sge), 16);
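/*
 * build_tpte_memreg() handles the small fast-register case on adapters that
 * support FW_RI_FR_NSMR_TPTE_WR: the TPT entry (STAG key/state/type, PD,
 * permissions, page size, PBL address, length and VA) plus up to two PBL
 * entries are written directly into the work request, so the whole MR
 * update fits in the WR itself.
 */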
614 static void build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr,
615 struct ib_reg_wr *wr, struct c4iw_mr *mhp,
618 __be64 *p = (__be64 *)fr->pbl;
620 fr->r2 = cpu_to_be32(0);
621 fr->stag = cpu_to_be32(mhp->ibmr.rkey);
623 fr->tpte.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
624 FW_RI_TPTE_STAGKEY_V((mhp->ibmr.rkey & FW_RI_TPTE_STAGKEY_M)) |
625 FW_RI_TPTE_STAGSTATE_V(1) |
626 FW_RI_TPTE_STAGTYPE_V(FW_RI_STAG_NSMR) |
627 FW_RI_TPTE_PDID_V(mhp->attr.pdid));
628 fr->tpte.locread_to_qpid = cpu_to_be32(
629 FW_RI_TPTE_PERM_V(c4iw_ib_to_tpt_access(wr->access)) |
630 FW_RI_TPTE_ADDRTYPE_V(FW_RI_VA_BASED_TO) |
631 FW_RI_TPTE_PS_V(ilog2(wr->mr->page_size) - 12));
632 fr->tpte.nosnoop_pbladdr = cpu_to_be32(FW_RI_TPTE_PBLADDR_V(
633 PBL_OFF(&mhp->rhp->rdev, mhp->attr.pbl_addr)>>3));
634 fr->tpte.dca_mwbcnt_pstag = cpu_to_be32(0);
635 fr->tpte.len_hi = cpu_to_be32(0);
636 fr->tpte.len_lo = cpu_to_be32(mhp->ibmr.length);
637 fr->tpte.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
638 fr->tpte.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & 0xffffffff);
640 p[0] = cpu_to_be64((u64)mhp->mpl[0]);
641 p[1] = cpu_to_be64((u64)mhp->mpl[1]);
643 *len16 = DIV_ROUND_UP(sizeof(*fr), 16);
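/*
 * build_memreg() is the general fast-register path: it fills in the
 * FW_RI_FR_NSMR_WR fields from the ib_reg_wr and then appends the PBL
 * either as a DSGL (when pbllen exceeds max_fr_immd and DSGL is supported,
 * letting the adapter DMA the page list from mhp->mpl_addr) or as immediate
 * data copied into the SQ, wrapping at the end of the queue.
 */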
646 static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
647 struct ib_reg_wr *wr, struct c4iw_mr *mhp, u8 *len16,
650 struct fw_ri_immd *imdp;
653 int pbllen = roundup(mhp->mpl_len * sizeof(u64), 32);
656 if (mhp->mpl_len > t4_max_fr_depth(dsgl_supported && use_dsgl))
659 wqe->fr.qpbinde_to_dcacpu = 0;
660 wqe->fr.pgsz_shift = ilog2(wr->mr->page_size) - 12;
661 wqe->fr.addr_type = FW_RI_VA_BASED_TO;
662 wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access);
664 wqe->fr.len_lo = cpu_to_be32(mhp->ibmr.length);
665 wqe->fr.stag = cpu_to_be32(wr->key);
666 wqe->fr.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
667 wqe->fr.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova &
670 if (dsgl_supported && use_dsgl && (pbllen > max_fr_immd)) {
671 struct fw_ri_dsgl *sglp;
673 for (i = 0; i < mhp->mpl_len; i++)
674 mhp->mpl[i] = (__force u64)cpu_to_be64((u64)mhp->mpl[i]);
676 sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
677 sglp->op = FW_RI_DATA_DSGL;
679 sglp->nsge = cpu_to_be16(1);
680 sglp->addr0 = cpu_to_be64(mhp->mpl_addr);
681 sglp->len0 = cpu_to_be32(pbllen);
683 *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
685 imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
686 imdp->op = FW_RI_DATA_IMMD;
689 imdp->immdlen = cpu_to_be32(pbllen);
690 p = (__be64 *)(imdp + 1);
692 for (i = 0; i < mhp->mpl_len; i++) {
693 *p = cpu_to_be64((u64)mhp->mpl[i]);
695 if (++p == (__be64 *)&sq->queue[sq->size])
696 p = (__be64 *)sq->queue;
702 if (++p == (__be64 *)&sq->queue[sq->size])
703 p = (__be64 *)sq->queue;
705 *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
711 static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
713 wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
715 *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
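/*
 * QP teardown is deferred to a work item: queue_qp_free() runs from the
 * kref release and only queues free_work on the dedicated free_workq;
 * free_qp_work() then destroys the hardware QP and drops the ucontext
 * reference from workqueue context, where it is safe to sleep.
 */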
719 static void free_qp_work(struct work_struct *work)
721 struct c4iw_ucontext *ucontext;
723 struct c4iw_dev *rhp;
725 qhp = container_of(work, struct c4iw_qp, free_work);
726 ucontext = qhp->ucontext;
729 PDBG("%s qhp %p ucontext %p\n", __func__, qhp, ucontext);
730 destroy_qp(&rhp->rdev, &qhp->wq,
731 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
734 c4iw_put_ucontext(ucontext);
738 static void queue_qp_free(struct kref *kref)
742 qhp = container_of(kref, struct c4iw_qp, kref);
743 PDBG("%s qhp %p\n", __func__, qhp);
744 queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
747 void c4iw_qp_add_ref(struct ib_qp *qp)
749 PDBG("%s ib_qp %p\n", __func__, qp);
750 kref_get(&to_c4iw_qp(qp)->kref);
753 void c4iw_qp_rem_ref(struct ib_qp *qp)
755 PDBG("%s ib_qp %p\n", __func__, qp);
756 kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
759 static void add_to_fc_list(struct list_head *head, struct list_head *entry)
761 if (list_empty(entry))
762 list_add_tail(entry, head);
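/*
 * Doorbell flow control: while the device db_state is NORMAL the kernel
 * rings the queue doorbell immediately; otherwise the pidx increment is
 * accumulated in wq_pidx_inc and the QP is put on the device's db_fc_list
 * so it can be rung later, once doorbells are re-enabled.
 */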
765 static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
769 spin_lock_irqsave(&qhp->rhp->lock, flags);
770 spin_lock(&qhp->lock);
771 if (qhp->rhp->db_state == NORMAL)
772 t4_ring_sq_db(&qhp->wq, inc, NULL);
774 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
775 qhp->wq.sq.wq_pidx_inc += inc;
777 spin_unlock(&qhp->lock);
778 spin_unlock_irqrestore(&qhp->rhp->lock, flags);
782 static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
786 spin_lock_irqsave(&qhp->rhp->lock, flags);
787 spin_lock(&qhp->lock);
788 if (qhp->rhp->db_state == NORMAL)
789 t4_ring_rq_db(&qhp->wq, inc, NULL);
791 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
792 qhp->wq.rq.wq_pidx_inc += inc;
794 spin_unlock(&qhp->lock);
795 spin_unlock_irqrestore(&qhp->rhp->lock, flags);
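/*
 * c4iw_post_send() builds one WQE per work request directly in the SQ while
 * holding qhp->lock, records bookkeeping in the software SQ entry (swsqe),
 * and finally rings the SQ doorbell: directly when the shared status page
 * says doorbells are on, or via ring_kernel_sq_db() above when they are
 * off.
 */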
799 int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
800 struct ib_send_wr **bad_wr)
804 enum fw_wr_opcodes fw_opcode = 0;
805 enum fw_ri_wr_flags fw_flags;
807 union t4_wr *wqe = NULL;
809 struct t4_swsqe *swsqe;
813 qhp = to_c4iw_qp(ibqp);
814 spin_lock_irqsave(&qhp->lock, flag);
815 if (t4_wq_in_error(&qhp->wq)) {
816 spin_unlock_irqrestore(&qhp->lock, flag);
820 num_wrs = t4_sq_avail(&qhp->wq);
822 spin_unlock_irqrestore(&qhp->lock, flag);
832 wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
833 qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
836 if (wr->send_flags & IB_SEND_SOLICITED)
837 fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
838 if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all)
839 fw_flags |= FW_RI_COMPLETION_FLAG;
840 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
841 switch (wr->opcode) {
842 case IB_WR_SEND_WITH_INV:
844 if (wr->send_flags & IB_SEND_FENCE)
845 fw_flags |= FW_RI_READ_FENCE_FLAG;
846 fw_opcode = FW_RI_SEND_WR;
847 if (wr->opcode == IB_WR_SEND)
848 swsqe->opcode = FW_RI_SEND;
850 swsqe->opcode = FW_RI_SEND_WITH_INV;
851 err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
853 case IB_WR_RDMA_WRITE:
854 fw_opcode = FW_RI_RDMA_WRITE_WR;
855 swsqe->opcode = FW_RI_RDMA_WRITE;
856 err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
858 case IB_WR_RDMA_READ:
859 case IB_WR_RDMA_READ_WITH_INV:
860 fw_opcode = FW_RI_RDMA_READ_WR;
861 swsqe->opcode = FW_RI_READ_REQ;
862 if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) {
863 c4iw_invalidate_mr(qhp->rhp,
864 wr->sg_list[0].lkey);
865 fw_flags = FW_RI_RDMA_READ_INVALIDATE;
869 err = build_rdma_read(wqe, wr, &len16);
872 swsqe->read_len = wr->sg_list[0].length;
873 if (!qhp->wq.sq.oldest_read)
874 qhp->wq.sq.oldest_read = swsqe;
877 struct c4iw_mr *mhp = to_c4iw_mr(reg_wr(wr)->mr);
879 swsqe->opcode = FW_RI_FAST_REGISTER;
880 if (qhp->rhp->rdev.lldi.fr_nsmr_tpte_wr_support &&
881 !mhp->attr.state && mhp->mpl_len <= 2) {
882 fw_opcode = FW_RI_FR_NSMR_TPTE_WR;
883 build_tpte_memreg(&wqe->fr_tpte, reg_wr(wr),
886 fw_opcode = FW_RI_FR_NSMR_WR;
887 err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr),
889 qhp->rhp->rdev.lldi.ulptx_memwrite_dsgl);
896 case IB_WR_LOCAL_INV:
897 if (wr->send_flags & IB_SEND_FENCE)
898 fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
899 fw_opcode = FW_RI_INV_LSTAG_WR;
900 swsqe->opcode = FW_RI_LOCAL_INV;
901 err = build_inv_stag(wqe, wr, &len16);
902 c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey);
905 PDBG("%s post of type=%d TBD!\n", __func__,
913 swsqe->idx = qhp->wq.sq.pidx;
915 swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
918 swsqe->wr_id = wr->wr_id;
920 swsqe->sge_ts = cxgb4_read_sge_timestamp(
921 qhp->rhp->rdev.lldi.ports[0]);
922 getnstimeofday(&swsqe->host_ts);
925 init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
927 PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
928 __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
929 swsqe->opcode, swsqe->read_len);
932 t4_sq_produce(&qhp->wq, len16);
933 idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
935 if (!qhp->rhp->rdev.status_page->db_off) {
936 t4_ring_sq_db(&qhp->wq, idx, wqe);
937 spin_unlock_irqrestore(&qhp->lock, flag);
939 spin_unlock_irqrestore(&qhp->lock, flag);
940 ring_kernel_sq_db(qhp, idx);
945 int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
946 struct ib_recv_wr **bad_wr)
950 union t4_recv_wr *wqe = NULL;
956 qhp = to_c4iw_qp(ibqp);
957 spin_lock_irqsave(&qhp->lock, flag);
958 if (t4_wq_in_error(&qhp->wq)) {
959 spin_unlock_irqrestore(&qhp->lock, flag);
963 num_wrs = t4_rq_avail(&qhp->wq);
965 spin_unlock_irqrestore(&qhp->lock, flag);
970 if (wr->num_sge > T4_MAX_RECV_SGE) {
975 wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
979 err = build_rdma_recv(qhp, wqe, wr, &len16);
987 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
989 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts =
990 cxgb4_read_sge_timestamp(
991 qhp->rhp->rdev.lldi.ports[0]);
993 &qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_ts);
996 wqe->recv.opcode = FW_RI_RECV_WR;
998 wqe->recv.wrid = qhp->wq.rq.pidx;
1000 wqe->recv.r2[1] = 0;
1001 wqe->recv.r2[2] = 0;
1002 wqe->recv.len16 = len16;
1003 PDBG("%s cookie 0x%llx pidx %u\n", __func__,
1004 (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
1005 t4_rq_produce(&qhp->wq, len16);
1006 idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
1010 if (!qhp->rhp->rdev.status_page->db_off) {
1011 t4_ring_rq_db(&qhp->wq, idx, wqe);
1012 spin_unlock_irqrestore(&qhp->lock, flag);
1014 spin_unlock_irqrestore(&qhp->lock, flag);
1015 ring_kernel_rq_db(qhp, idx);
1020 static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
1030 *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
1035 status = CQE_STATUS(err_cqe);
1036 opcode = CQE_OPCODE(err_cqe);
1037 rqtype = RQ_TYPE(err_cqe);
1038 send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
1039 (opcode == FW_RI_SEND_WITH_SE_INV);
1040 tagged = (opcode == FW_RI_RDMA_WRITE) ||
1041 (rqtype && (opcode == FW_RI_READ_RESP));
1046 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1047 *ecode = RDMAP_CANT_INV_STAG;
1049 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1050 *ecode = RDMAP_INV_STAG;
1054 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1055 if ((opcode == FW_RI_SEND_WITH_INV) ||
1056 (opcode == FW_RI_SEND_WITH_SE_INV))
1057 *ecode = RDMAP_CANT_INV_STAG;
1059 *ecode = RDMAP_STAG_NOT_ASSOC;
1062 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1063 *ecode = RDMAP_STAG_NOT_ASSOC;
1066 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1067 *ecode = RDMAP_ACC_VIOL;
1070 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1071 *ecode = RDMAP_TO_WRAP;
1075 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
1076 *ecode = DDPT_BASE_BOUNDS;
1078 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1079 *ecode = RDMAP_BASE_BOUNDS;
1082 case T4_ERR_INVALIDATE_SHARED_MR:
1083 case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
1084 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1085 *ecode = RDMAP_CANT_INV_STAG;
1088 case T4_ERR_ECC_PSTAG:
1089 case T4_ERR_INTERNAL_ERR:
1090 *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
1093 case T4_ERR_OUT_OF_RQE:
1094 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1095 *ecode = DDPU_INV_MSN_NOBUF;
1097 case T4_ERR_PBL_ADDR_BOUND:
1098 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
1099 *ecode = DDPT_BASE_BOUNDS;
1102 *layer_type = LAYER_MPA|DDP_LLP;
1103 *ecode = MPA_CRC_ERR;
1106 *layer_type = LAYER_MPA|DDP_LLP;
1107 *ecode = MPA_MARKER_ERR;
1109 case T4_ERR_PDU_LEN_ERR:
1110 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1111 *ecode = DDPU_MSG_TOOBIG;
1113 case T4_ERR_DDP_VERSION:
1115 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
1116 *ecode = DDPT_INV_VERS;
1118 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1119 *ecode = DDPU_INV_VERS;
1122 case T4_ERR_RDMA_VERSION:
1123 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1124 *ecode = RDMAP_INV_VERS;
1127 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1128 *ecode = RDMAP_INV_OPCODE;
1130 case T4_ERR_DDP_QUEUE_NUM:
1131 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1132 *ecode = DDPU_INV_QN;
1135 case T4_ERR_MSN_GAP:
1136 case T4_ERR_MSN_RANGE:
1137 case T4_ERR_IRD_OVERFLOW:
1138 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1139 *ecode = DDPU_INV_MSN_RANGE;
1142 *layer_type = LAYER_DDP|DDP_LOCAL_CATA;
1146 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1147 *ecode = DDPU_INV_MO;
1150 *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
1156 static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
1159 struct fw_ri_wr *wqe;
1160 struct sk_buff *skb;
1161 struct terminate_message *term;
1163 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
1166 skb = skb_dequeue(&qhp->ep->com.ep_skb_list);
1170 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
1172 wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
1173 memset(wqe, 0, sizeof *wqe);
1174 wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR));
1175 wqe->flowid_len16 = cpu_to_be32(
1176 FW_WR_FLOWID_V(qhp->ep->hwtid) |
1177 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
1179 wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
1180 wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
1181 term = (struct terminate_message *)wqe->u.terminate.termmsg;
1182 if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
1183 term->layer_etype = qhp->attr.layer_etype;
1184 term->ecode = qhp->attr.ecode;
1186 build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
1187 c4iw_ofld_send(&qhp->rhp->rdev, skb);
1191 * Assumes qhp lock is held.
1193 static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
1194 struct c4iw_cq *schp)
1197 int rq_flushed, sq_flushed;
1200 PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
1202 /* locking hierarchy: cq lock first, then qp lock. */
1203 spin_lock_irqsave(&rchp->lock, flag);
1204 spin_lock(&qhp->lock);
1206 if (qhp->wq.flushed) {
1207 spin_unlock(&qhp->lock);
1208 spin_unlock_irqrestore(&rchp->lock, flag);
1211 qhp->wq.flushed = 1;
1213 c4iw_flush_hw_cq(rchp);
1214 c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
1215 rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
1216 spin_unlock(&qhp->lock);
1217 spin_unlock_irqrestore(&rchp->lock, flag);
1219 /* locking hierarchy: cq lock first, then qp lock. */
1220 spin_lock_irqsave(&schp->lock, flag);
1221 spin_lock(&qhp->lock);
1223 c4iw_flush_hw_cq(schp);
1224 sq_flushed = c4iw_flush_sq(qhp);
1225 spin_unlock(&qhp->lock);
1226 spin_unlock_irqrestore(&schp->lock, flag);
1229 if (t4_clear_cq_armed(&rchp->cq) &&
1230 (rq_flushed || sq_flushed)) {
1231 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1232 (*rchp->ibcq.comp_handler)(&rchp->ibcq,
1233 rchp->ibcq.cq_context);
1234 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1237 if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) {
1238 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1239 (*rchp->ibcq.comp_handler)(&rchp->ibcq,
1240 rchp->ibcq.cq_context);
1241 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1243 if (t4_clear_cq_armed(&schp->cq) && sq_flushed) {
1244 spin_lock_irqsave(&schp->comp_handler_lock, flag);
1245 (*schp->ibcq.comp_handler)(&schp->ibcq,
1246 schp->ibcq.cq_context);
1247 spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
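/*
 * flush_qp() marks the work queue in error and then either flushes the
 * hardware CQs into the software queues via __flush_qp() (kernel QPs) or,
 * for user QPs, simply marks both CQs in error and invokes their completion
 * handlers so the application sees the error on its next poll.
 */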
1252 static void flush_qp(struct c4iw_qp *qhp)
1254 struct c4iw_cq *rchp, *schp;
1257 rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
1258 schp = to_c4iw_cq(qhp->ibqp.send_cq);
1260 t4_set_wq_in_error(&qhp->wq);
1261 if (qhp->ibqp.uobject) {
1263 /* for user qps, qhp->wq.flushed is protected by qhp->mutex */
1264 if (qhp->wq.flushed)
1267 qhp->wq.flushed = 1;
1268 t4_set_cq_in_error(&rchp->cq);
1269 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1270 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
1271 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1273 t4_set_cq_in_error(&schp->cq);
1274 spin_lock_irqsave(&schp->comp_handler_lock, flag);
1275 (*schp->ibcq.comp_handler)(&schp->ibcq,
1276 schp->ibcq.cq_context);
1277 spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
1281 __flush_qp(qhp, rchp, schp);
1284 static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1287 struct fw_ri_wr *wqe;
1289 struct sk_buff *skb;
1291 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
1294 skb = skb_dequeue(&ep->com.ep_skb_list);
1298 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
1300 wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
1301 memset(wqe, 0, sizeof *wqe);
1302 wqe->op_compl = cpu_to_be32(
1303 FW_WR_OP_V(FW_RI_INIT_WR) |
1305 wqe->flowid_len16 = cpu_to_be32(
1306 FW_WR_FLOWID_V(ep->hwtid) |
1307 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
1308 wqe->cookie = (uintptr_t)&ep->com.wr_wait;
1310 wqe->u.fini.type = FW_RI_TYPE_FINI;
1311 ret = c4iw_ofld_send(&rhp->rdev, skb);
1315 ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid,
1316 qhp->wq.sq.qid, __func__);
1318 PDBG("%s ret %d\n", __func__, ret);
1322 static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
1324 PDBG("%s p2p_type = %d\n", __func__, p2p_type);
1325 memset(&init->u, 0, sizeof init->u);
1327 case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
1328 init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
1329 init->u.write.stag_sink = cpu_to_be32(1);
1330 init->u.write.to_sink = cpu_to_be64(1);
1331 init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
1332 init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
1333 sizeof(struct fw_ri_immd),
1336 case FW_RI_INIT_P2PTYPE_READ_REQ:
1337 init->u.write.opcode = FW_RI_RDMA_READ_WR;
1338 init->u.read.stag_src = cpu_to_be32(1);
1339 init->u.read.to_src_lo = cpu_to_be32(1);
1340 init->u.read.stag_sink = cpu_to_be32(1);
1341 init->u.read.to_sink_lo = cpu_to_be32(1);
1342 init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
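/*
 * rdma_init() posts a FW_RI_INIT_WR of type INIT to move the connection
 * into RDMA mode. The WR carries the negotiated MPA attributes, QP
 * capabilities, queue and CQ ids, ORD/IRD limits, initial send/receive
 * sequence numbers and the RQT location; when this side is the MPA
 * initiator, build_rtr_msg() above supplies the 0B RDMA WRITE or READ
 * request used as the peer-to-peer RTR message. On failure the IRD
 * reserved by alloc_ird() is returned to the pool.
 */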
1347 static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
1349 struct fw_ri_wr *wqe;
1351 struct sk_buff *skb;
1353 PDBG("%s qhp %p qid 0x%x tid %u ird %u ord %u\n", __func__, qhp,
1354 qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
1356 skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
1361 ret = alloc_ird(rhp, qhp->attr.max_ird);
1363 qhp->attr.max_ird = 0;
1367 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
1369 wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
1370 memset(wqe, 0, sizeof *wqe);
1371 wqe->op_compl = cpu_to_be32(
1372 FW_WR_OP_V(FW_RI_INIT_WR) |
1374 wqe->flowid_len16 = cpu_to_be32(
1375 FW_WR_FLOWID_V(qhp->ep->hwtid) |
1376 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
1378 wqe->cookie = (uintptr_t)&qhp->ep->com.wr_wait;
1380 wqe->u.init.type = FW_RI_TYPE_INIT;
1381 wqe->u.init.mpareqbit_p2ptype =
1382 FW_RI_WR_MPAREQBIT_V(qhp->attr.mpa_attr.initiator) |
1383 FW_RI_WR_P2PTYPE_V(qhp->attr.mpa_attr.p2p_type);
1384 wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
1385 if (qhp->attr.mpa_attr.recv_marker_enabled)
1386 wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
1387 if (qhp->attr.mpa_attr.xmit_marker_enabled)
1388 wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
1389 if (qhp->attr.mpa_attr.crc_enabled)
1390 wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;
1392 wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
1393 FW_RI_QP_RDMA_WRITE_ENABLE |
1394 FW_RI_QP_BIND_ENABLE;
1395 if (!qhp->ibqp.uobject)
1396 wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
1397 FW_RI_QP_STAG0_ENABLE;
1398 wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
1399 wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
1400 wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
1401 wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
1402 wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
1403 wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
1404 wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
1405 wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
1406 wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
1407 wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
1408 wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
1409 wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
1410 wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
1411 rhp->rdev.lldi.vr->rq.start);
1412 if (qhp->attr.mpa_attr.initiator)
1413 build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
1415 ret = c4iw_ofld_send(&rhp->rdev, skb);
1419 ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait,
1420 qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
1424 free_ird(rhp, qhp->attr.max_ird);
1426 PDBG("%s ret %d\n", __func__, ret);
1430 int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1431 enum c4iw_qp_attr_mask mask,
1432 struct c4iw_qp_attributes *attrs,
1436 struct c4iw_qp_attributes newattr = qhp->attr;
1441 struct c4iw_ep *ep = NULL;
1443 PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__,
1444 qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
1445 (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
1447 mutex_lock(&qhp->mutex);
1449 /* Process attr changes if in IDLE */
1450 if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
1451 if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
1455 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
1456 newattr.enable_rdma_read = attrs->enable_rdma_read;
1457 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
1458 newattr.enable_rdma_write = attrs->enable_rdma_write;
1459 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
1460 newattr.enable_bind = attrs->enable_bind;
1461 if (mask & C4IW_QP_ATTR_MAX_ORD) {
1462 if (attrs->max_ord > c4iw_max_read_depth) {
1466 newattr.max_ord = attrs->max_ord;
1468 if (mask & C4IW_QP_ATTR_MAX_IRD) {
1469 if (attrs->max_ird > cur_max_read_depth(rhp)) {
1473 newattr.max_ird = attrs->max_ird;
1475 qhp->attr = newattr;
1478 if (mask & C4IW_QP_ATTR_SQ_DB) {
1479 ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc);
1482 if (mask & C4IW_QP_ATTR_RQ_DB) {
1483 ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc);
1487 if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
1489 if (qhp->attr.state == attrs->next_state)
1492 switch (qhp->attr.state) {
1493 case C4IW_QP_STATE_IDLE:
1494 switch (attrs->next_state) {
1495 case C4IW_QP_STATE_RTS:
1496 if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
1500 if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
1504 qhp->attr.mpa_attr = attrs->mpa_attr;
1505 qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
1506 qhp->ep = qhp->attr.llp_stream_handle;
1507 set_state(qhp, C4IW_QP_STATE_RTS);
1510 * Ref the endpoint here and deref when we
1511 * disassociate the endpoint from the QP. This
1512 * happens in CLOSING->IDLE transition or *->ERROR
1515 c4iw_get_ep(&qhp->ep->com);
1516 ret = rdma_init(rhp, qhp);
1520 case C4IW_QP_STATE_ERROR:
1521 set_state(qhp, C4IW_QP_STATE_ERROR);
1529 case C4IW_QP_STATE_RTS:
1530 switch (attrs->next_state) {
1531 case C4IW_QP_STATE_CLOSING:
1532 BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
1533 t4_set_wq_in_error(&qhp->wq);
1534 set_state(qhp, C4IW_QP_STATE_CLOSING);
1539 c4iw_get_ep(&qhp->ep->com);
1541 ret = rdma_fini(rhp, qhp, ep);
1545 case C4IW_QP_STATE_TERMINATE:
1546 t4_set_wq_in_error(&qhp->wq);
1547 set_state(qhp, C4IW_QP_STATE_TERMINATE);
1548 qhp->attr.layer_etype = attrs->layer_etype;
1549 qhp->attr.ecode = attrs->ecode;
1552 c4iw_get_ep(&qhp->ep->com);
1556 terminate = qhp->attr.send_term;
1557 ret = rdma_fini(rhp, qhp, ep);
1562 case C4IW_QP_STATE_ERROR:
1563 t4_set_wq_in_error(&qhp->wq);
1564 set_state(qhp, C4IW_QP_STATE_ERROR);
1569 c4iw_get_ep(&qhp->ep->com);
1578 case C4IW_QP_STATE_CLOSING:
1583 switch (attrs->next_state) {
1584 case C4IW_QP_STATE_IDLE:
1586 set_state(qhp, C4IW_QP_STATE_IDLE);
1587 qhp->attr.llp_stream_handle = NULL;
1588 c4iw_put_ep(&qhp->ep->com);
1590 wake_up(&qhp->wait);
1592 case C4IW_QP_STATE_ERROR:
1599 case C4IW_QP_STATE_ERROR:
1600 if (attrs->next_state != C4IW_QP_STATE_IDLE) {
1604 if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
1608 set_state(qhp, C4IW_QP_STATE_IDLE);
1610 case C4IW_QP_STATE_TERMINATE:
1618 printk(KERN_ERR "%s in a bad state %d\n",
1619 __func__, qhp->attr.state);
1626 PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
1629 /* disassociate the LLP connection */
1630 qhp->attr.llp_stream_handle = NULL;
1634 set_state(qhp, C4IW_QP_STATE_ERROR);
1639 wake_up(&qhp->wait);
1641 mutex_unlock(&qhp->mutex);
1644 post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
1647 * If disconnect is 1, then we need to initiate a disconnect
1648 * on the EP. This can be a normal close (RTS->CLOSING) or
1649 * an abnormal close (RTS/CLOSING->ERROR).
1652 c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
1654 c4iw_put_ep(&ep->com);
1658 * If free is 1, then we've disassociated the EP from the QP
1659 * and we need to dereference the EP.
1662 c4iw_put_ep(&ep->com);
1663 PDBG("%s exit state %d\n", __func__, qhp->attr.state);
1667 int c4iw_destroy_qp(struct ib_qp *ib_qp)
1669 struct c4iw_dev *rhp;
1670 struct c4iw_qp *qhp;
1671 struct c4iw_qp_attributes attrs;
1673 qhp = to_c4iw_qp(ib_qp);
1676 attrs.next_state = C4IW_QP_STATE_ERROR;
1677 if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
1678 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1680 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
1681 wait_event(qhp->wait, !qhp->ep);
1683 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
1685 spin_lock_irq(&rhp->lock);
1686 if (!list_empty(&qhp->db_fc_entry))
1687 list_del_init(&qhp->db_fc_entry);
1688 spin_unlock_irq(&rhp->lock);
1689 free_ird(rhp, qhp->attr.max_ird);
1691 c4iw_qp_rem_ref(ib_qp);
1693 PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
1697 struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
1698 struct ib_udata *udata)
1700 struct c4iw_dev *rhp;
1701 struct c4iw_qp *qhp;
1702 struct c4iw_pd *php;
1703 struct c4iw_cq *schp;
1704 struct c4iw_cq *rchp;
1705 struct c4iw_create_qp_resp uresp;
1706 unsigned int sqsize, rqsize;
1707 struct c4iw_ucontext *ucontext;
1709 struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
1710 struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL;
1712 PDBG("%s ib_pd %p\n", __func__, pd);
1714 if (attrs->qp_type != IB_QPT_RC)
1715 return ERR_PTR(-EINVAL);
1717 php = to_c4iw_pd(pd);
1719 schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
1720 rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
1722 return ERR_PTR(-EINVAL);
1724 if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
1725 return ERR_PTR(-EINVAL);
1727 if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
1728 return ERR_PTR(-E2BIG);
1729 rqsize = attrs->cap.max_recv_wr + 1;
1733 if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size)
1734 return ERR_PTR(-E2BIG);
1735 sqsize = attrs->cap.max_send_wr + 1;
1739 ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
1741 qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
1743 return ERR_PTR(-ENOMEM);
1744 qhp->wq.sq.size = sqsize;
1745 qhp->wq.sq.memsize =
1746 (sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
1747 sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64);
1748 qhp->wq.sq.flush_cidx = -1;
1749 qhp->wq.rq.size = rqsize;
1750 qhp->wq.rq.memsize =
1751 (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
1752 sizeof(*qhp->wq.rq.queue);
1755 qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
1756 qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
1759 ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
1760 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1764 attrs->cap.max_recv_wr = rqsize - 1;
1765 attrs->cap.max_send_wr = sqsize - 1;
1766 attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;
1769 qhp->attr.pd = php->pdid;
1770 qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
1771 qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
1772 qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
1773 qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
1774 qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
1775 qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
1776 qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
1777 qhp->attr.state = C4IW_QP_STATE_IDLE;
1778 qhp->attr.next_state = C4IW_QP_STATE_IDLE;
1779 qhp->attr.enable_rdma_read = 1;
1780 qhp->attr.enable_rdma_write = 1;
1781 qhp->attr.enable_bind = 1;
1782 qhp->attr.max_ord = 0;
1783 qhp->attr.max_ird = 0;
1784 qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
1785 spin_lock_init(&qhp->lock);
1786 init_completion(&qhp->sq_drained);
1787 init_completion(&qhp->rq_drained);
1788 mutex_init(&qhp->mutex);
1789 init_waitqueue_head(&qhp->wait);
1790 kref_init(&qhp->kref);
1791 INIT_WORK(&qhp->free_work, free_qp_work);
1793 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
1798 sq_key_mm = kmalloc(sizeof(*sq_key_mm), GFP_KERNEL);
1803 rq_key_mm = kmalloc(sizeof(*rq_key_mm), GFP_KERNEL);
1808 sq_db_key_mm = kmalloc(sizeof(*sq_db_key_mm), GFP_KERNEL);
1809 if (!sq_db_key_mm) {
1813 rq_db_key_mm = kmalloc(sizeof(*rq_db_key_mm), GFP_KERNEL);
1814 if (!rq_db_key_mm) {
1818 if (t4_sq_onchip(&qhp->wq.sq)) {
1819 ma_sync_key_mm = kmalloc(sizeof(*ma_sync_key_mm),
1821 if (!ma_sync_key_mm) {
1825 uresp.flags = C4IW_QPF_ONCHIP;
1828 uresp.qid_mask = rhp->rdev.qpmask;
1829 uresp.sqid = qhp->wq.sq.qid;
1830 uresp.sq_size = qhp->wq.sq.size;
1831 uresp.sq_memsize = qhp->wq.sq.memsize;
1832 uresp.rqid = qhp->wq.rq.qid;
1833 uresp.rq_size = qhp->wq.rq.size;
1834 uresp.rq_memsize = qhp->wq.rq.memsize;
1835 spin_lock(&ucontext->mmap_lock);
1836 if (ma_sync_key_mm) {
1837 uresp.ma_sync_key = ucontext->key;
1838 ucontext->key += PAGE_SIZE;
1840 uresp.ma_sync_key = 0;
1842 uresp.sq_key = ucontext->key;
1843 ucontext->key += PAGE_SIZE;
1844 uresp.rq_key = ucontext->key;
1845 ucontext->key += PAGE_SIZE;
1846 uresp.sq_db_gts_key = ucontext->key;
1847 ucontext->key += PAGE_SIZE;
1848 uresp.rq_db_gts_key = ucontext->key;
1849 ucontext->key += PAGE_SIZE;
1850 spin_unlock(&ucontext->mmap_lock);
1851 ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
1854 sq_key_mm->key = uresp.sq_key;
1855 sq_key_mm->addr = qhp->wq.sq.phys_addr;
1856 sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
1857 insert_mmap(ucontext, sq_key_mm);
1858 rq_key_mm->key = uresp.rq_key;
1859 rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue);
1860 rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
1861 insert_mmap(ucontext, rq_key_mm);
1862 sq_db_key_mm->key = uresp.sq_db_gts_key;
1863 sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa;
1864 sq_db_key_mm->len = PAGE_SIZE;
1865 insert_mmap(ucontext, sq_db_key_mm);
1866 rq_db_key_mm->key = uresp.rq_db_gts_key;
1867 rq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.rq.bar2_pa;
1868 rq_db_key_mm->len = PAGE_SIZE;
1869 insert_mmap(ucontext, rq_db_key_mm);
1870 if (ma_sync_key_mm) {
1871 ma_sync_key_mm->key = uresp.ma_sync_key;
1872 ma_sync_key_mm->addr =
1873 (pci_resource_start(rhp->rdev.lldi.pdev, 0) +
1874 PCIE_MA_SYNC_A) & PAGE_MASK;
1875 ma_sync_key_mm->len = PAGE_SIZE;
1876 insert_mmap(ucontext, ma_sync_key_mm);
1879 c4iw_get_ucontext(ucontext);
1880 qhp->ucontext = ucontext;
1882 qhp->ibqp.qp_num = qhp->wq.sq.qid;
1883 init_timer(&(qhp->timer));
1884 INIT_LIST_HEAD(&qhp->db_fc_entry);
1885 PDBG("%s sq id %u size %u memsize %zu num_entries %u "
1886 "rq id %u size %u memsize %zu num_entries %u\n", __func__,
1887 qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
1888 attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
1889 qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
1892 kfree(ma_sync_key_mm);
1894 kfree(rq_db_key_mm);
1896 kfree(sq_db_key_mm);
1902 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
1904 destroy_qp(&rhp->rdev, &qhp->wq,
1905 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1908 return ERR_PTR(ret);
1911 int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1912 int attr_mask, struct ib_udata *udata)
1914 struct c4iw_dev *rhp;
1915 struct c4iw_qp *qhp;
1916 enum c4iw_qp_attr_mask mask = 0;
1917 struct c4iw_qp_attributes attrs;
1919 PDBG("%s ib_qp %p\n", __func__, ibqp);
1921 /* iwarp does not support the RTR state */
1922 if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
1923 attr_mask &= ~IB_QP_STATE;
1925 /* Make sure we still have something left to do */
1929 memset(&attrs, 0, sizeof attrs);
1930 qhp = to_c4iw_qp(ibqp);
1933 attrs.next_state = c4iw_convert_state(attr->qp_state);
1934 attrs.enable_rdma_read = (attr->qp_access_flags &
1935 IB_ACCESS_REMOTE_READ) ? 1 : 0;
1936 attrs.enable_rdma_write = (attr->qp_access_flags &
1937 IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
1938 attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;
1941 mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
1942 mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
1943 (C4IW_QP_ATTR_ENABLE_RDMA_READ |
1944 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
1945 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;
1948 * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
1949 * ringing the queue db when we're in DB_FULL mode.
1950 * Only allow this on T4 devices.
1952 attrs.sq_db_inc = attr->sq_psn;
1953 attrs.rq_db_inc = attr->rq_psn;
1954 mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
1955 mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
1956 if (!is_t4(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
1957 (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB)))
1960 return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
1963 struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
1965 PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
1966 return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
1969 int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1970 int attr_mask, struct ib_qp_init_attr *init_attr)
1972 struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
1974 memset(attr, 0, sizeof *attr);
1975 memset(init_attr, 0, sizeof *init_attr);
1976 attr->qp_state = to_ib_qp_state(qhp->attr.state);
1977 attr->cur_qp_state = to_ib_qp_state(qhp->attr.state);
1978 init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
1979 init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
1980 init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
1981 init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
1982 init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
1983 init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
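/*
 * Drain support: move_qp_to_err() forces the QP into ERROR so outstanding
 * work requests get flushed, and c4iw_drain_sq()/c4iw_drain_rq() then wait
 * on the sq_drained/rq_drained completions, which the CQ code signals once
 * the last SQ/RQ work request has completed or been flushed.
 */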
1987 static void move_qp_to_err(struct c4iw_qp *qp)
1989 struct c4iw_qp_attributes attrs = { .next_state = C4IW_QP_STATE_ERROR };
1991 (void)c4iw_modify_qp(qp->rhp, qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1994 void c4iw_drain_sq(struct ib_qp *ibqp)
1996 struct c4iw_qp *qp = to_c4iw_qp(ibqp);
2001 spin_lock_irqsave(&qp->lock, flag);
2002 need_to_wait = !t4_sq_empty(&qp->wq);
2003 spin_unlock_irqrestore(&qp->lock, flag);
2006 wait_for_completion(&qp->sq_drained);
2009 void c4iw_drain_rq(struct ib_qp *ibqp)
2011 struct c4iw_qp *qp = to_c4iw_qp(ibqp);
2016 spin_lock_irqsave(&qp->lock, flag);
2017 need_to_wait = !t4_rq_empty(&qp->wq);
2018 spin_unlock_irqrestore(&qp->lock, flag);
2021 wait_for_completion(&qp->rq_drained);