// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

enum comp_state {
	COMPST_GET_ACK,
	COMPST_GET_WQE,
	COMPST_COMP_WQE,
	COMPST_COMP_ACK,
	COMPST_CHECK_PSN,
	COMPST_CHECK_ACK,
	COMPST_READ,
	COMPST_ATOMIC,
	COMPST_WRITE_SEND,
	COMPST_UPDATE_COMP,
	COMPST_ERROR_RETRY,
	COMPST_RNR_RETRY,
	COMPST_ERROR,
	COMPST_EXIT, /* we are done for now; the task should exit its loop */
	COMPST_DONE, /* the completer finished successfully */
};

static char *comp_state_name[] = {
	[COMPST_GET_ACK]	= "GET ACK",
	[COMPST_GET_WQE]	= "GET WQE",
	[COMPST_COMP_WQE]	= "COMP WQE",
	[COMPST_COMP_ACK]	= "COMP ACK",
	[COMPST_CHECK_PSN]	= "CHECK PSN",
	[COMPST_CHECK_ACK]	= "CHECK ACK",
	[COMPST_READ]		= "READ",
	[COMPST_ATOMIC]		= "ATOMIC",
	[COMPST_WRITE_SEND]	= "WRITE/SEND",
	[COMPST_UPDATE_COMP]	= "UPDATE COMP",
	[COMPST_ERROR_RETRY]	= "ERROR RETRY",
	[COMPST_RNR_RETRY]	= "RNR RETRY",
	[COMPST_ERROR]		= "ERROR",
	[COMPST_EXIT]		= "EXIT",
	[COMPST_DONE]		= "DONE",
};

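/* map the 5-bit RNR NAK timer codes carried in the AETH syndrome to
 * delays in microseconds; IB_RNR_TIMER_xxx_yy encodes a delay of
 * xxx.yy msec
 */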
static unsigned long rnrnak_usec[32] = {
	[IB_RNR_TIMER_655_36] = 655360,
	[IB_RNR_TIMER_000_01] = 10,
	[IB_RNR_TIMER_000_02] = 20,
	[IB_RNR_TIMER_000_03] = 30,
	[IB_RNR_TIMER_000_04] = 40,
	[IB_RNR_TIMER_000_06] = 60,
	[IB_RNR_TIMER_000_08] = 80,
	[IB_RNR_TIMER_000_12] = 120,
	[IB_RNR_TIMER_000_16] = 160,
	[IB_RNR_TIMER_000_24] = 240,
	[IB_RNR_TIMER_000_32] = 320,
	[IB_RNR_TIMER_000_48] = 480,
	[IB_RNR_TIMER_000_64] = 640,
	[IB_RNR_TIMER_000_96] = 960,
	[IB_RNR_TIMER_001_28] = 1280,
	[IB_RNR_TIMER_001_92] = 1920,
	[IB_RNR_TIMER_002_56] = 2560,
	[IB_RNR_TIMER_003_84] = 3840,
	[IB_RNR_TIMER_005_12] = 5120,
	[IB_RNR_TIMER_007_68] = 7680,
	[IB_RNR_TIMER_010_24] = 10240,
	[IB_RNR_TIMER_015_36] = 15360,
	[IB_RNR_TIMER_020_48] = 20480,
	[IB_RNR_TIMER_030_72] = 30720,
	[IB_RNR_TIMER_040_96] = 40960,
	[IB_RNR_TIMER_061_44] = 61440,
	[IB_RNR_TIMER_081_92] = 81920,
	[IB_RNR_TIMER_122_88] = 122880,
	[IB_RNR_TIMER_163_84] = 163840,
	[IB_RNR_TIMER_245_76] = 245760,
	[IB_RNR_TIMER_327_68] = 327680,
	[IB_RNR_TIMER_491_52] = 491520,
};

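/* convert an RNR NAK timer code to jiffies, rounding up to at least one
 * jiffy so that a short delay is not lost to integer truncation
 */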
static inline unsigned long rnrnak_jiffies(u8 timeout)
{
	return max_t(unsigned long,
		     usecs_to_jiffies(rnrnak_usec[timeout]), 1);
}

static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:			return IB_WC_RDMA_WRITE;
	case IB_WR_RDMA_WRITE_WITH_IMM:		return IB_WC_RDMA_WRITE;
	case IB_WR_SEND:			return IB_WC_SEND;
	case IB_WR_SEND_WITH_IMM:		return IB_WC_SEND;
	case IB_WR_RDMA_READ:			return IB_WC_RDMA_READ;
	case IB_WR_ATOMIC_CMP_AND_SWP:		return IB_WC_COMP_SWAP;
	case IB_WR_ATOMIC_FETCH_AND_ADD:	return IB_WC_FETCH_ADD;
	case IB_WR_LSO:				return IB_WC_LSO;
	case IB_WR_SEND_WITH_INV:		return IB_WC_SEND;
	case IB_WR_RDMA_READ_WITH_INV:		return IB_WC_RDMA_READ;
	case IB_WR_LOCAL_INV:			return IB_WC_LOCAL_INV;
	case IB_WR_REG_MR:			return IB_WC_REG_MR;

	default:
		return 0xff;
	}
}

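/* retransmit timer callback; flag a completer timeout and reschedule
 * the completer task to handle it
 */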
void retransmit_timer(struct timer_list *t)
{
	struct rxe_qp *qp = from_timer(qp, t, retrans_timer);

	if (qp->valid) {
		qp->comp.timeout = 1;
		rxe_run_task(&qp->comp.task, 1);
	}
}

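/* hand a response packet to the completer and run the completer task,
 * deferring to the tasklet when more than one packet is already pending
 */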
void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{
	int must_sched;

	skb_queue_tail(&qp->resp_pkts, skb);

	must_sched = skb_queue_len(&qp->resp_pkts) > 1;
	if (must_sched != 0)
		rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_COMPLETER_SCHED);

	rxe_run_task(&qp->comp.task, must_sched);
}

static inline enum comp_state get_wqe(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt,
				      struct rxe_send_wqe **wqe_p)
{
	struct rxe_send_wqe *wqe;

	/* we come here whether or not we found a response packet to see if
	 * there are any posted WQEs
	 */
	wqe = queue_head(qp->sq.queue);
	*wqe_p = wqe;

	/* no WQE or requester has not started it yet */
	if (!wqe || wqe->state == wqe_state_posted)
		return pkt ? COMPST_DONE : COMPST_EXIT;

	/* WQE does not require an ack */
	if (wqe->state == wqe_state_done)
		return COMPST_COMP_WQE;

	/* WQE caused an error */
	if (wqe->state == wqe_state_error)
		return COMPST_ERROR;

	/* we have a WQE, if we also have an ack check its PSN */
	return pkt ? COMPST_CHECK_PSN : COMPST_EXIT;
}

static inline void reset_retry_counters(struct rxe_qp *qp)
{
	qp->comp.retry_cnt = qp->attr.retry_cnt;
	qp->comp.rnr_retry = qp->attr.rnr_retry;
	qp->comp.started_retry = 0;
}

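/* decide what to do with a response packet based on its psn: complete
 * the oldest WQE if the response is past it, ignore stale duplicates,
 * or go on to check the ack itself
 */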
static inline enum comp_state check_psn(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					struct rxe_send_wqe *wqe)
{
	s32 diff;

	/* check to see if response is past the oldest WQE. if it is, complete
	 * send/write or error read/atomic
	 */
	diff = psn_compare(pkt->psn, wqe->last_psn);
	if (diff > 0) {
		if (wqe->state == wqe_state_pending) {
			if (wqe->mask & WR_ATOMIC_OR_READ_MASK)
				return COMPST_ERROR_RETRY;

			reset_retry_counters(qp);
			return COMPST_COMP_WQE;
		} else {
			return COMPST_DONE;
		}
	}

	/* compare response packet to expected response */
	diff = psn_compare(pkt->psn, qp->comp.psn);
	if (diff < 0) {
		/* response is most likely a retried packet; if it matches an
		 * uncompleted WQE go complete it, else ignore it
		 */
		if (pkt->psn == wqe->last_psn)
			return COMPST_COMP_ACK;
		else
			return COMPST_DONE;
	} else if ((diff > 0) && (wqe->mask & WR_ATOMIC_OR_READ_MASK)) {
		return COMPST_DONE;
	} else {
		return COMPST_CHECK_ACK;
	}
}

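/* check that a response packet is valid for the current WQE: first that
 * it is in sequence with the last opcode seen, then that its opcode and
 * AETH syndrome are legal for the outstanding operation
 */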
static inline enum comp_state check_ack(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					struct rxe_send_wqe *wqe)
{
	unsigned int mask = pkt->mask;
	u8 syn;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	/* Check the sequence only */
	switch (qp->comp.opcode) {
	case -1:
		/* Will catch all *_ONLY cases. */
		if (!(mask & RXE_START_MASK))
			return COMPST_ERROR;

		break;

	case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
		if (pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE &&
		    pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST) {
			/* read retries of partial data may restart from
			 * read response first or response only.
			 */
			if ((pkt->psn == wqe->first_psn &&
			     pkt->opcode ==
			     IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) ||
			    (wqe->first_psn == wqe->last_psn &&
			     pkt->opcode ==
			     IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY))
				break;

			return COMPST_ERROR;
		}
		break;
	default:
		break;
	}

	/* Check operation validity. */
	switch (pkt->opcode) {
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST:
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY:
		syn = aeth_syn(pkt);

		if ((syn & AETH_TYPE_MASK) != AETH_ACK)
			return COMPST_ERROR;

		/* fall through */
		/* (IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE doesn't have an
		 * AETH)
		 */
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
		if (wqe->wr.opcode != IB_WR_RDMA_READ &&
		    wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV) {
			wqe->status = IB_WC_FATAL_ERR;
			return COMPST_ERROR;
		}
		reset_retry_counters(qp);
		return COMPST_READ;

	case IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE:
		syn = aeth_syn(pkt);

		if ((syn & AETH_TYPE_MASK) != AETH_ACK)
			return COMPST_ERROR;

		if (wqe->wr.opcode != IB_WR_ATOMIC_CMP_AND_SWP &&
		    wqe->wr.opcode != IB_WR_ATOMIC_FETCH_AND_ADD)
			return COMPST_ERROR;
		reset_retry_counters(qp);
		return COMPST_ATOMIC;

	case IB_OPCODE_RC_ACKNOWLEDGE:
		syn = aeth_syn(pkt);
		switch (syn & AETH_TYPE_MASK) {
		case AETH_ACK:
			reset_retry_counters(qp);
			return COMPST_WRITE_SEND;

		case AETH_RNR_NAK:
			rxe_counter_inc(rxe, RXE_CNT_RCV_RNR);
			return COMPST_RNR_RETRY;

		case AETH_NAK:
			switch (syn) {
			case AETH_NAK_PSN_SEQ_ERROR:
				/* a nak implicitly acks all packets with psns
				 * before
				 */
				if (psn_compare(pkt->psn, qp->comp.psn) > 0) {
					rxe_counter_inc(rxe,
							RXE_CNT_RCV_SEQ_ERR);
					qp->comp.psn = pkt->psn;
					if (qp->req.wait_psn) {
						qp->req.wait_psn = 0;
						rxe_run_task(&qp->req.task, 0);
					}
				}
				return COMPST_ERROR_RETRY;

			case AETH_NAK_INVALID_REQ:
				wqe->status = IB_WC_REM_INV_REQ_ERR;
				return COMPST_ERROR;

			case AETH_NAK_REM_ACC_ERR:
				wqe->status = IB_WC_REM_ACCESS_ERR;
				return COMPST_ERROR;

			case AETH_NAK_REM_OP_ERR:
				wqe->status = IB_WC_REM_OP_ERR;
				return COMPST_ERROR;

			default:
				pr_warn("unexpected nak %x\n", syn);
				wqe->status = IB_WC_REM_OP_ERR;
				return COMPST_ERROR;
			}

		default:
			return COMPST_ERROR;
		}
		break;

	default:
		pr_warn("unexpected opcode\n");
	}

	return COMPST_ERROR;
}

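/* copy a read response payload into the sge list of the read WQE */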
static inline enum comp_state do_read(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt,
				      struct rxe_send_wqe *wqe)
{
	int ret;

	ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
			&wqe->dma, payload_addr(pkt),
			payload_size(pkt), to_mem_obj, NULL);
	if (ret)
		return COMPST_ERROR;

	if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
		return COMPST_COMP_ACK;
	else
		return COMPST_UPDATE_COMP;
}

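/* copy the original value returned in an atomic ack into the sge list
 * of the atomic WQE
 */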
static inline enum comp_state do_atomic(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					struct rxe_send_wqe *wqe)
{
	int ret;

	u64 atomic_orig = atmack_orig(pkt);

	ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
			&wqe->dma, &atomic_orig,
			sizeof(u64), to_mem_obj, NULL);
	if (ret)
		return COMPST_ERROR;
	else
		return COMPST_COMP_ACK;
}

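/* fill in a send completion for wqe in either the kernel (ib_wc) or
 * user space (ib_uverbs_wc) layout of the cqe
 */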
static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			  struct rxe_cqe *cqe)
{
	memset(cqe, 0, sizeof(*cqe));

	if (!qp->is_user) {
		struct ib_wc *wc = &cqe->ibwc;

		wc->wr_id = wqe->wr.wr_id;
		wc->status = wqe->status;
		wc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
		    wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
			wc->wc_flags = IB_WC_WITH_IMM;
		wc->byte_len = wqe->dma.length;
		wc->qp = &qp->ibqp;
	} else {
		struct ib_uverbs_wc *uwc = &cqe->uibwc;

		uwc->wr_id = wqe->wr.wr_id;
		uwc->status = wqe->status;
		uwc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
		    wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
			uwc->wc_flags = IB_WC_WITH_IMM;
		uwc->byte_len = wqe->dma.length;
		uwc->qp_num = qp->ibqp.qp_num;
	}
}

/*
 * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS
 * ---------8<---------8<-------------
 * ...Note that if a completion error occurs, a Work Completion
 * will always be generated, even if the signaling
 * indicator requests an Unsignaled Completion.
 * ---------8<---------8<-------------
 */
static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_cqe cqe;

	if ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	    wqe->status != IB_WC_SUCCESS) {
		make_send_cqe(qp, wqe, &cqe);
		advance_consumer(qp->sq.queue);
		rxe_cq_post(qp->scq, &cqe, 0);
	} else {
		advance_consumer(qp->sq.queue);
	}

	if (wqe->wr.opcode == IB_WR_SEND ||
	    wqe->wr.opcode == IB_WR_SEND_WITH_IMM ||
	    wqe->wr.opcode == IB_WR_SEND_WITH_INV)
		rxe_counter_inc(rxe, RXE_CNT_RDMA_SEND);

	/*
	 * we completed something so let req run again
	 * if it is trying to fence
	 */
	if (qp->req.wait_fence) {
		qp->req.wait_fence = 0;
		rxe_run_task(&qp->req.task, 0);
	}
}

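/* complete an acked WQE: return its rd_atomic credit, finish a send
 * queue drain if one is in progress, then post the work completion
 */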
static inline enum comp_state complete_ack(struct rxe_qp *qp,
					   struct rxe_pkt_info *pkt,
					   struct rxe_send_wqe *wqe)
{
	unsigned long flags;

	if (wqe->has_rd_atomic) {
		wqe->has_rd_atomic = 0;
		atomic_inc(&qp->req.rd_atomic);
		if (qp->req.need_rd_atomic) {
			qp->comp.timeout_retry = 0;
			qp->req.need_rd_atomic = 0;
			rxe_run_task(&qp->req.task, 0);
		}
	}

	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
		/* state_lock used by requester & completer */
		spin_lock_irqsave(&qp->state_lock, flags);
		if ((qp->req.state == QP_STATE_DRAIN) &&
		    (qp->comp.psn == qp->req.psn)) {
			qp->req.state = QP_STATE_DRAINED;
			spin_unlock_irqrestore(&qp->state_lock, flags);

			if (qp->ibqp.event_handler) {
				struct ib_event ev;

				ev.device = qp->ibqp.device;
				ev.element.qp = &qp->ibqp;
				ev.event = IB_EVENT_SQ_DRAINED;
				qp->ibqp.event_handler(&ev,
					qp->ibqp.qp_context);
			}
		} else {
			spin_unlock_irqrestore(&qp->state_lock, flags);
		}
	}

	do_complete(qp, wqe);

	if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
		return COMPST_UPDATE_COMP;
	else
		return COMPST_DONE;
}

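/* complete a WQE that does not need an ack, advancing comp.psn past it
 * if it is still pending
 */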
static inline enum comp_state complete_wqe(struct rxe_qp *qp,
					   struct rxe_pkt_info *pkt,
					   struct rxe_send_wqe *wqe)
{
	if (pkt && wqe->state == wqe_state_pending) {
		if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) {
			qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK;
			qp->comp.opcode = -1;
		}

		if (qp->req.wait_psn) {
			qp->req.wait_psn = 0;
			rxe_run_task(&qp->req.task, 1);
		}
	}

	do_complete(qp, wqe);

	return COMPST_GET_WQE;
}

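/* free all queued response packets and flush the send queue, generating
 * flush-error completions if notify is set
 */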
static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
{
	struct sk_buff *skb;
	struct rxe_send_wqe *wqe;

	while ((skb = skb_dequeue(&qp->resp_pkts))) {
		rxe_drop_ref(qp);
		kfree_skb(skb);
	}

	while ((wqe = queue_head(qp->sq.queue))) {
		if (notify) {
			wqe->status = IB_WC_WR_FLUSH_ERR;
			do_complete(qp, wqe);
		} else {
			advance_consumer(qp->sq.queue);
		}
	}
}

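/* the completer task: a state machine that consumes response packets
 * and completes send WQEs; returns 0 to be called again or -EAGAIN to
 * stop until rescheduled
 */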
int rxe_completer(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_send_wqe *wqe = NULL;
	struct sk_buff *skb = NULL;
	struct rxe_pkt_info *pkt = NULL;
	enum comp_state state;

	rxe_add_ref(qp);

	if (!qp->valid || qp->req.state == QP_STATE_ERROR ||
	    qp->req.state == QP_STATE_RESET) {
		rxe_drain_resp_pkts(qp, qp->valid &&
				    qp->req.state == QP_STATE_ERROR);
		goto exit;
	}

	if (qp->comp.timeout) {
		qp->comp.timeout_retry = 1;
		qp->comp.timeout = 0;
	} else {
		qp->comp.timeout_retry = 0;
	}

	if (qp->req.need_retry)
		goto exit;

	state = COMPST_GET_ACK;
	while (1) {
		pr_debug("qp#%d state = %s\n", qp_num(qp),
			 comp_state_name[state]);
		switch (state) {
		case COMPST_GET_ACK:
			skb = skb_dequeue(&qp->resp_pkts);
			if (skb) {
				pkt = SKB_TO_PKT(skb);
				qp->comp.timeout_retry = 0;
			}
			state = COMPST_GET_WQE;
			break;

		case COMPST_GET_WQE:
			state = get_wqe(qp, pkt, &wqe);
			break;

		case COMPST_CHECK_PSN:
			state = check_psn(qp, pkt, wqe);
			break;

		case COMPST_CHECK_ACK:
			state = check_ack(qp, pkt, wqe);
			break;

		case COMPST_READ:
			state = do_read(qp, pkt, wqe);
			break;

		case COMPST_ATOMIC:
			state = do_atomic(qp, pkt, wqe);
			break;

		case COMPST_WRITE_SEND:
			if (wqe->state == wqe_state_pending &&
			    wqe->last_psn == pkt->psn)
				state = COMPST_COMP_ACK;
			else
				state = COMPST_UPDATE_COMP;
			break;

		case COMPST_COMP_ACK:
			state = complete_ack(qp, pkt, wqe);
			break;

		case COMPST_COMP_WQE:
			state = complete_wqe(qp, pkt, wqe);
			break;

		case COMPST_UPDATE_COMP:
			if (pkt->mask & RXE_END_MASK)
				qp->comp.opcode = -1;
			else
				qp->comp.opcode = pkt->opcode;

			if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
				qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;

			if (qp->req.wait_psn) {
				qp->req.wait_psn = 0;
				rxe_run_task(&qp->req.task, 1);
			}

			state = COMPST_DONE;
			break;
		case COMPST_DONE:
			if (pkt) {
				rxe_drop_ref(pkt->qp);
				kfree_skb(skb);
				skb = NULL;
			}
			goto done;

		case COMPST_EXIT:
			if (qp->comp.timeout_retry && wqe) {
				state = COMPST_ERROR_RETRY;
				break;
			}

			/* re-arm the retransmit timer if
			 * (1) the QP is of type RC
			 * (2) the QP is alive
			 * (3) there is a packet sent by the requester that
			 *     might be acked (we still might get spurious
			 *     timeouts but try to keep them as few as
			 *     possible)
			 * (4) the timeout parameter is set
			 */
			if ((qp_type(qp) == IB_QPT_RC) &&
			    (qp->req.state == QP_STATE_READY) &&
			    (psn_compare(qp->req.psn, qp->comp.psn) > 0) &&
			    qp->qp_timeout_jiffies)
				mod_timer(&qp->retrans_timer,
					  jiffies + qp->qp_timeout_jiffies);
			goto exit;
		case COMPST_ERROR_RETRY:
			/* we come here if the retry timer fired and we did
			 * not receive a response packet. try to retry the send
			 * queue if that makes sense and the limits have not
			 * been exceeded. remember that some timeouts are
			 * spurious since we do not reset the timer but kick
			 * it down the road or let it expire
			 */

			/* there is nothing to retry in this case */
			if (!wqe || (wqe->state == wqe_state_posted))
				goto exit;

			/* if we've started a retry, don't start another
			 * retry sequence, unless this is a timeout.
			 */
			if (qp->comp.started_retry &&
			    !qp->comp.timeout_retry) {
				if (pkt) {
					rxe_drop_ref(pkt->qp);
					kfree_skb(skb);
					skb = NULL;
				}

				goto done;
			}

			if (qp->comp.retry_cnt > 0) {
				if (qp->comp.retry_cnt != 7)
					qp->comp.retry_cnt--;

				/* no point in retrying if we have already
				 * seen the last ack that the requester could
				 * have caused
				 */
				if (psn_compare(qp->req.psn,
						qp->comp.psn) > 0) {
					/* tell the requester to retry the
					 * send queue next time around
					 */
					rxe_counter_inc(rxe,
							RXE_CNT_COMP_RETRY);
					qp->req.need_retry = 1;
					qp->comp.started_retry = 1;
					rxe_run_task(&qp->req.task, 0);
				}

				if (pkt) {
					rxe_drop_ref(pkt->qp);
					kfree_skb(skb);
					skb = NULL;
				}

				goto done;
			} else {
				rxe_counter_inc(rxe, RXE_CNT_RETRY_EXCEEDED);
				wqe->status = IB_WC_RETRY_EXC_ERR;
				state = COMPST_ERROR;
			}
			break;
		case COMPST_RNR_RETRY:
			if (qp->comp.rnr_retry > 0) {
				if (qp->comp.rnr_retry != 7)
					qp->comp.rnr_retry--;

				qp->req.need_retry = 1;
				pr_debug("qp#%d set rnr nak timer\n",
					 qp_num(qp));
				mod_timer(&qp->rnr_nak_timer,
					  jiffies + rnrnak_jiffies(aeth_syn(pkt)
						& ~AETH_TYPE_MASK));
				rxe_drop_ref(pkt->qp);
				kfree_skb(skb);
				skb = NULL;
				goto exit;
			} else {
				rxe_counter_inc(rxe,
						RXE_CNT_RNR_RETRY_EXCEEDED);
				wqe->status = IB_WC_RNR_RETRY_EXC_ERR;
				state = COMPST_ERROR;
			}
			break;
		case COMPST_ERROR:
			WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS);
			do_complete(qp, wqe);
			rxe_qp_error(qp);

			if (pkt) {
				rxe_drop_ref(pkt->qp);
				kfree_skb(skb);
				skb = NULL;
			}

			goto exit;
		}
	}

exit:
	/* we come here if we are done with processing and want the task to
	 * exit from the loop calling us
	 */
	WARN_ON_ONCE(skb);
	rxe_drop_ref(qp);
	return -EAGAIN;

done:
	/* we come here if we have processed a packet and want the task to
	 * call us again to see if there is anything else to do
	 */
	WARN_ON_ONCE(skb);
	rxe_drop_ref(qp);
	return 0;
}