/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/crc32.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode);
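
/* For a partially-acknowledged send or write WQE being retried, replay
 * the WQE's DMA bookkeeping for the npsn packets the peer has already
 * acknowledged, so retransmission resumes at the first unacknowledged
 * packet rather than at the start of the message.
 */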
static inline void retry_first_write_send(struct rxe_qp *qp,
					  struct rxe_send_wqe *wqe,
					  unsigned int mask, int npsn)
{
	int i;

	for (i = 0; i < npsn; i++) {
		int to_send = (wqe->dma.resid > qp->mtu) ?
				qp->mtu : wqe->dma.resid;

		qp->req.opcode = next_opcode(qp, wqe,
					     wqe->wr.opcode);

		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			wqe->dma.resid -= to_send;
			wqe->dma.sge_offset += to_send;
		} else {
			advance_dma_data(&wqe->dma, to_send);
		}

		if (mask & WR_WRITE_MASK)
			wqe->iova += qp->mtu;
	}
}
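
/* Rewind the send queue to the first unacknowledged WQE and reset the
 * requester PSN back to the completer PSN; every WQE from the consumer
 * index onward is returned to the posted state so its packets are
 * generated again.
 */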
static void req_retry(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	unsigned int wqe_index;
	unsigned int mask;
	int npsn;
	int first = 1;

	qp->req.wqe_index	= consumer_index(qp->sq.queue);
	qp->req.psn		= qp->comp.psn;
	qp->req.opcode		= -1;

	for (wqe_index = consumer_index(qp->sq.queue);
		wqe_index != producer_index(qp->sq.queue);
		wqe_index = next_index(qp->sq.queue, wqe_index)) {
		wqe = addr_from_index(qp->sq.queue, wqe_index);
		mask = wr_opcode_mask(wqe->wr.opcode, qp);

		if (wqe->state == wqe_state_posted)
			break;

		if (wqe->state == wqe_state_done)
			continue;

		wqe->iova = (mask & WR_ATOMIC_MASK) ?
			     wqe->wr.wr.atomic.remote_addr :
			     (mask & WR_READ_OR_WRITE_MASK) ?
			     wqe->wr.wr.rdma.remote_addr :
			     0;

		if (!first || (mask & WR_READ_MASK) == 0) {
			wqe->dma.resid = wqe->dma.length;
			wqe->dma.cur_sge = 0;
			wqe->dma.sge_offset = 0;
		}

		if (first) {
			first = 0;

			if (mask & WR_WRITE_OR_SEND_MASK) {
				npsn = (qp->comp.psn - wqe->first_psn) &
					BTH_PSN_MASK;
				retry_first_write_send(qp, wqe, mask, npsn);
			}

			if (mask & WR_READ_MASK) {
				npsn = (wqe->dma.length - wqe->dma.resid) /
					qp->mtu;
				wqe->iova += npsn * qp->mtu;
			}
		}

		wqe->state = wqe_state_posted;
	}
}
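
/* RNR NAK timer callback: the remote QP had no receive buffer
 * available and asked us to back off; now that the delay has expired,
 * kick the requester task to retry.
 */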
void rnr_nak_timer(unsigned long data)
{
	struct rxe_qp *qp = (struct rxe_qp *)data;

	pr_debug("qp#%d rnr nak timer fired\n", qp_num(qp));
	rxe_run_task(&qp->req.task, 1);
}
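
/* Return the next WQE for the requester to work on, or NULL if the
 * send queue is empty, the QP is draining (or drained), or a fenced
 * WQE must wait for earlier WQEs to complete. Also detects the
 * drain-complete transition and raises IB_EVENT_SQ_DRAINED.
 */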
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe = queue_head(qp->sq.queue);
	unsigned long flags;

	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
		/* check to see if we are drained;
		 * state_lock used by requester and completer
		 */
		spin_lock_irqsave(&qp->state_lock, flags);
		do {
			if (qp->req.state != QP_STATE_DRAIN) {
				/* comp just finished */
				spin_unlock_irqrestore(&qp->state_lock,
						       flags);
				break;
			}

			if (wqe && ((qp->req.wqe_index !=
				consumer_index(qp->sq.queue)) ||
				(wqe->state != wqe_state_posted))) {
				/* comp not done yet */
				spin_unlock_irqrestore(&qp->state_lock,
						       flags);
				break;
			}

			qp->req.state = QP_STATE_DRAINED;
			spin_unlock_irqrestore(&qp->state_lock, flags);

			if (qp->ibqp.event_handler) {
				struct ib_event ev;

				ev.device = qp->ibqp.device;
				ev.element.qp = &qp->ibqp;
				ev.event = IB_EVENT_SQ_DRAINED;
				qp->ibqp.event_handler(&ev,
					qp->ibqp.qp_context);
			}
		} while (0);
	}

	if (qp->req.wqe_index == producer_index(qp->sq.queue))
		return NULL;

	wqe = addr_from_index(qp->sq.queue, qp->req.wqe_index);

	if (unlikely((qp->req.state == QP_STATE_DRAIN ||
		      qp->req.state == QP_STATE_DRAINED) &&
		     (wqe->state != wqe_state_processing)))
		return NULL;

	if (unlikely((wqe->wr.send_flags & IB_SEND_FENCE) &&
		     (qp->req.wqe_index != consumer_index(qp->sq.queue)))) {
		qp->req.wait_fence = 1;
		return NULL;
	}

	wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
	return wqe;
}
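
/* Choose the next RC wire opcode for this WQE. If the previous packet
 * started or continued a multi-packet message, continue with MIDDLE or
 * finish with LAST; otherwise begin with FIRST, or use ONLY when the
 * remaining payload fits in a single MTU (fits != 0).
 */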
static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_RDMA_READ:
		return IB_OPCODE_RC_RDMA_READ_REQUEST;

	case IB_WR_ATOMIC_CMP_AND_SWP:
		return IB_OPCODE_RC_COMPARE_SWAP;

	case IB_WR_ATOMIC_FETCH_AND_ADD:
		return IB_OPCODE_RC_FETCH_ADD;

	case IB_WR_SEND_WITH_INV:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_REG_MR:
	case IB_WR_LOCAL_INV:
		return opcode;
	}

	return -EINVAL;
}
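
/* Same FIRST/MIDDLE/LAST/ONLY selection as next_opcode_rc(), limited
 * to the work request types that UC QPs support.
 */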
static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY :
				IB_OPCODE_UC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_FIRST;
	}

	return -EINVAL;
}
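
/* Map a work request opcode to the wire opcode of the next packet.
 * RC and UC segment messages across packets; UD, SMI and GSI sends are
 * always a single SEND_ONLY packet.
 */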
static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode)
{
	int fits = (wqe->dma.resid <= qp->mtu);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return next_opcode_rc(qp, opcode, fits);

	case IB_QPT_UC:
		return next_opcode_uc(qp, opcode, fits);

	case IB_QPT_SMI:
	case IB_QPT_UD:
	case IB_QPT_GSI:
		switch (opcode) {
		case IB_WR_SEND:
			return IB_OPCODE_UD_SEND_ONLY;

		case IB_WR_SEND_WITH_IMM:
			return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		}
		break;
	}

	return -EINVAL;
}
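
/* Claim a slot of the initiator depth (the maximum number of
 * outstanding RDMA read/atomic operations). Returns 0 if the WQE
 * already holds or just obtained a slot, -EAGAIN if it must wait.
 */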
static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	int depth;

	if (wqe->has_rd_atomic)
		return 0;

	qp->req.need_rd_atomic = 1;
	depth = atomic_dec_return(&qp->req.rd_atomic);

	if (depth >= 0) {
		qp->req.need_rd_atomic = 0;
		wqe->has_rd_atomic = 1;
		return 0;
	}

	atomic_inc(&qp->req.rd_atomic);
	return -EAGAIN;
}
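
/* Connected QPs (RC/UC) use the path MTU negotiated for the
 * connection; datagram QPs are limited only by the port's MTU cap.
 */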
static inline int get_mtu(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_port *port;

	if ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))
		return qp->mtu;

	port = &rxe->port;

	return port->mtu_cap;
}
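
/* Allocate the skb for one request packet and build its headers: the
 * BTH plus whichever optional headers (RETH, IMMDT, IETH, ATMETH,
 * DETH) the opcode mask requires. The payload itself is copied in
 * later by fill_packet().
 */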
static struct sk_buff *init_req_packet(struct rxe_qp *qp,
				       struct rxe_send_wqe *wqe,
				       int opcode, int payload,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_dev		*rxe = to_rdev(qp->ibqp.device);
	struct rxe_port		*port = &rxe->port;
	struct sk_buff		*skb;
	struct rxe_send_wr	*ibwr = &wqe->wr;
	struct rxe_av		*av;
	int			pad = (-payload) & 0x3;
	int			paylen;
	int			solicited;
	u16			pkey;
	u32			qp_num;
	int			ack_req;

	/* length from start of bth to end of icrc */
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

	/* pkt->hdr, rxe, port_num and mask are initialized in ifc
	 * layer
	 */
	pkt->opcode	= opcode;
	pkt->qp		= qp;
	pkt->psn	= qp->req.psn;
	pkt->mask	= rxe_opcode[opcode].mask;
	pkt->paylen	= paylen;
	pkt->offset	= 0;
	pkt->wqe	= wqe;

	/* init skb */
	av = rxe_get_av(pkt);
	skb = rxe_init_packet(rxe, av, paylen, pkt);
	if (unlikely(!skb))
		return NULL;

	/* init bth */
	solicited = (ibwr->send_flags & IB_SEND_SOLICITED) &&
			(pkt->mask & RXE_END_MASK) &&
			((pkt->mask & (RXE_SEND_MASK)) ||
			(pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
			(RXE_WRITE_MASK | RXE_IMMDT_MASK));

	pkey = (qp_type(qp) == IB_QPT_GSI) ?
		 port->pkey_tbl[ibwr->wr.ud.pkey_index] :
		 port->pkey_tbl[qp->attr.pkey_index];

	qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
					 qp->attr.dest_qp_num;

	ack_req = ((pkt->mask & RXE_END_MASK) ||
		(qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
	if (ack_req)
		qp->req.noack_pkts = 0;

	bth_init(pkt, pkt->opcode, solicited, 0, pad, pkey, qp_num,
		 ack_req, pkt->psn);

	/* init optional headers */
	if (pkt->mask & RXE_RETH_MASK) {
		reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
		reth_set_va(pkt, wqe->iova);
		reth_set_len(pkt, wqe->dma.resid);
	}

	if (pkt->mask & RXE_IMMDT_MASK)
		immdt_set_imm(pkt, ibwr->ex.imm_data);

	if (pkt->mask & RXE_IETH_MASK)
		ieth_set_rkey(pkt, ibwr->ex.invalidate_rkey);

	if (pkt->mask & RXE_ATMETH_MASK) {
		atmeth_set_va(pkt, wqe->iova);
		if (opcode == IB_OPCODE_RC_COMPARE_SWAP ||
		    opcode == IB_OPCODE_RD_COMPARE_SWAP) {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
			atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
		} else {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
		}
		atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
	}

	if (pkt->mask & RXE_DETH_MASK) {
		if (qp->ibqp.qp_num == 1)
			deth_set_qkey(pkt, GSI_QKEY);
		else
			deth_set_qkey(pkt, ibwr->wr.ud.remote_qkey);
		deth_set_sqp(pkt, qp->ibqp.qp_num);
	}

	return skb;
}
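
/* Copy the payload into the packet, either from the WQE's inline data
 * or from its SGE list via copy_data(), folding the bytes into the
 * running CRC and storing the inverted ICRC after the payload and pad.
 */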
static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       struct rxe_pkt_info *pkt, struct sk_buff *skb,
		       int paylen)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	u32 crc = 0;
	u32 *p;
	int err;

	err = rxe_prepare(rxe, pkt, skb, &crc);
	if (err)
		return err;

	if (pkt->mask & RXE_WRITE_OR_SEND) {
		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];

			crc = crc32_le(crc, tmp, paylen);

			memcpy(payload_addr(pkt), tmp, paylen);

			wqe->dma.resid -= paylen;
			wqe->dma.sge_offset += paylen;
		} else {
			err = copy_data(rxe, qp->pd, 0, &wqe->dma,
					payload_addr(pkt), paylen,
					from_mem_obj, &crc);
			if (err)
				return err;
		}
	}
	p = payload_addr(pkt) + paylen + bth_pad(pkt);

	*p = ~crc;

	return 0;
}
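
/* After the last packet of an RC message the WQE must wait for an
 * acknowledgment (pending); any packet short of the last leaves the
 * WQE in the processing state.
 */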
static void update_wqe_state(struct rxe_qp *qp,
			     struct rxe_send_wqe *wqe,
			     struct rxe_pkt_info *pkt)
{
	if (pkt->mask & RXE_END_MASK) {
		if (qp_type(qp) == IB_QPT_RC)
			wqe->state = wqe_state_pending;
	} else {
		wqe->state = wqe_state_processing;
	}
}
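
/* Advance the PSN accounting for the packet just built. A read
 * request reserves one PSN per expected response packet, so the
 * requester PSN jumps past the whole range; all other packets consume
 * a single PSN.
 */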
static void update_wqe_psn(struct rxe_qp *qp,
			   struct rxe_send_wqe *wqe,
			   struct rxe_pkt_info *pkt,
			   int payload)
{
	/* number of packets left to send including current one */
	int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;

	/* handle zero length packet case */
	if (num_pkt == 0)
		num_pkt = 1;

	if (pkt->mask & RXE_START_MASK) {
		wqe->first_psn = qp->req.psn;
		wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
	}

	if (pkt->mask & RXE_READ_MASK)
		qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
	else
		qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
}
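
/* Snapshot (and below, restore) the WQE fields and requester PSN that
 * update_wqe_state() and update_wqe_psn() modify, so a failed
 * transmit can be rolled back as if the packet had never been built.
 */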
static void save_state(struct rxe_send_wqe *wqe,
		       struct rxe_qp *qp,
		       struct rxe_send_wqe *rollback_wqe,
		       u32 *rollback_psn)
{
	rollback_wqe->state	= wqe->state;
	rollback_wqe->first_psn = wqe->first_psn;
	rollback_wqe->last_psn	= wqe->last_psn;
	*rollback_psn		= qp->req.psn;
}

static void rollback_state(struct rxe_send_wqe *wqe,
			   struct rxe_qp *qp,
			   struct rxe_send_wqe *rollback_wqe,
			   u32 rollback_psn)
{
	wqe->state	= rollback_wqe->state;
	wqe->first_psn	= rollback_wqe->first_psn;
	wqe->last_psn	= rollback_wqe->last_psn;
	qp->req.psn	= rollback_psn;
}
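
/* Commit per-QP requester state after a successful transmit and make
 * sure the retransmit timer is running.
 */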
static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			 struct rxe_pkt_info *pkt, int payload)
{
	qp->req.opcode = pkt->opcode;

	if (pkt->mask & RXE_END_MASK)
		qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);

	qp->need_req_skb = 0;

	if (qp->qp_timeout_jiffies && !timer_pending(&qp->retrans_timer))
		mod_timer(&qp->retrans_timer,
			  jiffies + qp->qp_timeout_jiffies);
}
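
/* Main requester work function, run from the QP's request task. Emits
 * one packet per pass and loops via next_wqe until the send queue is
 * exhausted or progress must stop (reset, fence, PSN window, skb flow
 * control). Returns 0 to be called again immediately by the task loop,
 * or -EAGAIN to stop until the task is rescheduled.
 */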
int rxe_requester(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_pkt_info pkt;
	struct sk_buff *skb;
	struct rxe_send_wqe *wqe;
	enum rxe_hdr_mask mask;
	int payload;
	int mtu;
	int opcode;
	int ret;
	struct rxe_send_wqe rollback_wqe;
	u32 rollback_psn;

next_wqe:
	if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
		goto exit;

	if (unlikely(qp->req.state == QP_STATE_RESET)) {
		qp->req.wqe_index = consumer_index(qp->sq.queue);
		qp->req.opcode = -1;
		qp->req.need_rd_atomic = 0;
		qp->req.wait_psn = 0;
		qp->req.need_retry = 0;
		goto exit;
	}

	if (unlikely(qp->req.need_retry)) {
		req_retry(qp);
		qp->req.need_retry = 0;
	}

	wqe = req_next_wqe(qp);
	if (unlikely(!wqe))
		goto exit;

	if (wqe->mask & WR_REG_MASK) {
		if (wqe->wr.opcode == IB_WR_LOCAL_INV) {
			struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
			struct rxe_mem *rmr;

			rmr = rxe_pool_get_index(&rxe->mr_pool,
						 wqe->wr.ex.invalidate_rkey >> 8);
			if (!rmr) {
				pr_err("No mr for key %#x\n",
				       wqe->wr.ex.invalidate_rkey);
				wqe->state = wqe_state_error;
				wqe->status = IB_WC_MW_BIND_ERR;
				goto exit;
			}
			rmr->state = RXE_MEM_STATE_FREE;
			rxe_drop_ref(rmr);
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
		} else if (wqe->wr.opcode == IB_WR_REG_MR) {
			struct rxe_mem *rmr = to_rmr(wqe->wr.wr.reg.mr);

			rmr->state = RXE_MEM_STATE_VALID;
			rmr->access = wqe->wr.wr.reg.access;
			rmr->lkey = wqe->wr.wr.reg.key;
			rmr->rkey = wqe->wr.wr.reg.key;
			rmr->iova = wqe->wr.wr.reg.mr->iova;
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
		} else {
			goto exit;
		}
		if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
		    qp->sq_sig_type == IB_SIGNAL_ALL_WR)
			rxe_run_task(&qp->comp.task, 1);
		qp->req.wqe_index = next_index(qp->sq.queue,
					       qp->req.wqe_index);
		goto next_wqe;
	}

	if (unlikely(qp_type(qp) == IB_QPT_RC &&
		     psn_compare(qp->req.psn, (qp->comp.psn +
				 RXE_MAX_UNACKED_PSNS)) > 0)) {
		qp->req.wait_psn = 1;
		goto exit;
	}

	/* Limit the number of inflight SKBs per QP */
	if (unlikely(atomic_read(&qp->skb_out) >
		     RXE_INFLIGHT_SKBS_PER_QP_HIGH)) {
		qp->need_req_skb = 1;
		goto exit;
	}

	opcode = next_opcode(qp, wqe, wqe->wr.opcode);
	if (unlikely(opcode < 0)) {
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto exit;
	}

	mask = rxe_opcode[opcode].mask;
	if (unlikely(mask & RXE_READ_OR_ATOMIC)) {
		if (check_init_depth(qp, wqe))
			goto exit;
	}

	mtu = get_mtu(qp, wqe);
	payload = (mask & RXE_WRITE_OR_SEND) ? wqe->dma.resid : 0;
	if (payload > mtu) {
		if (qp_type(qp) == IB_QPT_UD) {
			/* C10-93.1.1: If the total sum of all the buffer lengths specified for a
			 * UD message exceeds the MTU of the port as returned by QueryHCA, the CI
			 * shall not emit any packets for this message. Further, the CI shall not
			 * generate an error due to this condition.
			 */

			/* fake a successful UD send */
			wqe->first_psn = qp->req.psn;
			wqe->last_psn = qp->req.psn;
			qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
			qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
			qp->req.wqe_index = next_index(qp->sq.queue,
						       qp->req.wqe_index);
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
			__rxe_do_task(&qp->comp.task);
			return 0;
		}
		payload = mtu;
	}

	skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
	if (unlikely(!skb)) {
		pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
		goto err;
	}

	if (fill_packet(qp, wqe, &pkt, skb, payload)) {
		pr_debug("qp#%d Error during fill packet\n", qp_num(qp));
		kfree_skb(skb);
		goto err;
	}

	/*
	 * To prevent a race on wqe access between requester and completer,
	 * wqe members state and psn need to be set before calling
	 * rxe_xmit_packet().
	 * Otherwise, completer might initiate an unjustified retry flow.
	 */
	save_state(wqe, qp, &rollback_wqe, &rollback_psn);
	update_wqe_state(qp, wqe, &pkt);
	update_wqe_psn(qp, wqe, &pkt, payload);
	ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
	if (ret) {
		qp->need_req_skb = 1;

		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);

		if (ret == -EAGAIN) {
			rxe_run_task(&qp->req.task, 1);
			goto exit;
		}

		goto err;
	}

	update_state(qp, wqe, &pkt, payload);

	goto next_wqe;

err:
	wqe->status = IB_WC_LOC_PROT_ERR;
	wqe->state = wqe_state_error;

	/*
	 * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS
	 * ---------8<---------8<-------------
	 * ...Note that if a completion error occurs, a Work Completion
	 * will always be generated, even if the signaling
	 * indicator requests an Unsignaled Completion.
	 * ---------8<---------8<-------------
	 */
	wqe->wr.send_flags |= IB_SEND_SIGNALED;
	__rxe_do_task(&qp->comp.task);
	return -EAGAIN;

exit:
	return -EAGAIN;
}