/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"
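/* The completer runs as a per-QP task.  Each pass pulls response packets
 * off qp->resp_pkts and walks the send queue, driving one WQE at a time
 * through the states below until it can be reported to the send CQ.
 */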
enum comp_state {
	COMPST_GET_ACK,
	COMPST_GET_WQE,
	COMPST_COMP_WQE,
	COMPST_COMP_ACK,
	COMPST_CHECK_PSN,
	COMPST_CHECK_ACK,
	COMPST_READ,
	COMPST_ATOMIC,
	COMPST_WRITE_SEND,
	COMPST_UPDATE_COMP,
	COMPST_ERROR_RETRY,
	COMPST_RNR_RETRY,
	COMPST_ERROR,
	COMPST_EXIT, /* We have an issue, and we want to rerun the completer */
	COMPST_DONE, /* The completer finished successfully */
};
static char *comp_state_name[] = {
	[COMPST_GET_ACK]	= "GET ACK",
	[COMPST_GET_WQE]	= "GET WQE",
	[COMPST_COMP_WQE]	= "COMP WQE",
	[COMPST_COMP_ACK]	= "COMP ACK",
	[COMPST_CHECK_PSN]	= "CHECK PSN",
	[COMPST_CHECK_ACK]	= "CHECK ACK",
	[COMPST_READ]		= "READ",
	[COMPST_ATOMIC]		= "ATOMIC",
	[COMPST_WRITE_SEND]	= "WRITE/SEND",
	[COMPST_UPDATE_COMP]	= "UPDATE COMP",
	[COMPST_ERROR_RETRY]	= "ERROR RETRY",
	[COMPST_RNR_RETRY]	= "RNR RETRY",
	[COMPST_ERROR]		= "ERROR",
	[COMPST_EXIT]		= "EXIT",
	[COMPST_DONE]		= "DONE",
};
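/* RNR NAK timer field encodings from the IB spec, converted to
 * microseconds.  The index is the 5-bit timer code carried in the AETH
 * syndrome of an RNR NAK; note the encoding is not monotonic (code 0
 * means 655.36 ms).
 */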
static unsigned long rnrnak_usec[32] = {
	[IB_RNR_TIMER_655_36] = 655360,
	[IB_RNR_TIMER_000_01] = 10,
	[IB_RNR_TIMER_000_02] = 20,
	[IB_RNR_TIMER_000_03] = 30,
	[IB_RNR_TIMER_000_04] = 40,
	[IB_RNR_TIMER_000_06] = 60,
	[IB_RNR_TIMER_000_08] = 80,
	[IB_RNR_TIMER_000_12] = 120,
	[IB_RNR_TIMER_000_16] = 160,
	[IB_RNR_TIMER_000_24] = 240,
	[IB_RNR_TIMER_000_32] = 320,
	[IB_RNR_TIMER_000_48] = 480,
	[IB_RNR_TIMER_000_64] = 640,
	[IB_RNR_TIMER_000_96] = 960,
	[IB_RNR_TIMER_001_28] = 1280,
	[IB_RNR_TIMER_001_92] = 1920,
	[IB_RNR_TIMER_002_56] = 2560,
	[IB_RNR_TIMER_003_84] = 3840,
	[IB_RNR_TIMER_005_12] = 5120,
	[IB_RNR_TIMER_007_68] = 7680,
	[IB_RNR_TIMER_010_24] = 10240,
	[IB_RNR_TIMER_015_36] = 15360,
	[IB_RNR_TIMER_020_48] = 20480,
	[IB_RNR_TIMER_030_72] = 30720,
	[IB_RNR_TIMER_040_96] = 40960,
	[IB_RNR_TIMER_061_44] = 61440,
	[IB_RNR_TIMER_081_92] = 81920,
	[IB_RNR_TIMER_122_88] = 122880,
	[IB_RNR_TIMER_163_84] = 163840,
	[IB_RNR_TIMER_245_76] = 245760,
	[IB_RNR_TIMER_327_68] = 327680,
	[IB_RNR_TIMER_491_52] = 491520,
};
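/* Convert an RNR timer code to jiffies, clamping to at least one jiffy
 * so that mod_timer() always gets a delay in the future even when the
 * encoded delay is shorter than the tick period.
 */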
static inline unsigned long rnrnak_jiffies(u8 timeout)
{
	return max_t(unsigned long,
		usecs_to_jiffies(rnrnak_usec[timeout]), 1);
}
static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:			return IB_WC_RDMA_WRITE;
	case IB_WR_RDMA_WRITE_WITH_IMM:		return IB_WC_RDMA_WRITE;
	case IB_WR_SEND:			return IB_WC_SEND;
	case IB_WR_SEND_WITH_IMM:		return IB_WC_SEND;
	case IB_WR_RDMA_READ:			return IB_WC_RDMA_READ;
	case IB_WR_ATOMIC_CMP_AND_SWP:		return IB_WC_COMP_SWAP;
	case IB_WR_ATOMIC_FETCH_AND_ADD:	return IB_WC_FETCH_ADD;
	case IB_WR_LSO:				return IB_WC_LSO;
	case IB_WR_SEND_WITH_INV:		return IB_WC_SEND;
	case IB_WR_RDMA_READ_WITH_INV:		return IB_WC_RDMA_READ;
	case IB_WR_LOCAL_INV:			return IB_WC_LOCAL_INV;
	case IB_WR_REG_MR:			return IB_WC_REG_MR;

	default:
		return 0xff;
	}
}
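/* Callback for the retransmit timer armed in the COMPST_EXIT state.  It
 * only flags the timeout and reschedules the completer task; the actual
 * retry decision is made in the COMPST_ERROR_RETRY state.
 */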
void retransmit_timer(unsigned long data)
{
	struct rxe_qp *qp = (struct rxe_qp *)data;

	if (qp->valid) {
		qp->comp.timeout = 1;
		rxe_run_task(&qp->comp.task, 1);
	}
}
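/* Called from the receive path to hand a response packet to the completer.
 * If more than one packet is now pending, the completer is falling behind,
 * so force the task to be scheduled rather than run inline (see must_sched
 * below).
 */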
void rxe_comp_queue_pkt(struct rxe_dev *rxe, struct rxe_qp *qp,
			struct sk_buff *skb)
{
	int must_sched;

	skb_queue_tail(&qp->resp_pkts, skb);

	must_sched = skb_queue_len(&qp->resp_pkts) > 1;
	if (must_sched != 0)
		rxe_counter_inc(rxe, RXE_CNT_COMPLETER_SCHED);
	rxe_run_task(&qp->comp.task, must_sched);
}
static inline enum comp_state get_wqe(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt,
				      struct rxe_send_wqe **wqe_p)
{
	struct rxe_send_wqe *wqe;

	/* we come here whether or not we found a response packet to see if
	 * there are any posted WQEs
	 */
	wqe = queue_head(qp->sq.queue);
	*wqe_p = wqe;

	/* no WQE or requester has not started it yet */
	if (!wqe || wqe->state == wqe_state_posted)
		return pkt ? COMPST_DONE : COMPST_EXIT;

	/* WQE does not require an ack */
	if (wqe->state == wqe_state_done)
		return COMPST_COMP_WQE;

	/* WQE caused an error */
	if (wqe->state == wqe_state_error)
		return COMPST_ERROR;

	/* we have a WQE, if we also have an ack check its PSN */
	return pkt ? COMPST_CHECK_PSN : COMPST_EXIT;
}
static inline void reset_retry_counters(struct rxe_qp *qp)
{
	qp->comp.retry_cnt = qp->attr.retry_cnt;
	qp->comp.rnr_retry = qp->attr.rnr_retry;
}
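/* psn_compare() effectively does serial-number arithmetic on 24-bit PSNs,
 * so "diff > 0" below means "pkt->psn is later than wqe->last_psn" modulo
 * wraparound.  For example, psn_compare(0x000001, 0xfffffe) is positive
 * even though the first value is numerically smaller.
 */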
static inline enum comp_state check_psn(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					struct rxe_send_wqe *wqe)
{
	s32 diff;

	/* check to see if response is past the oldest WQE. if it is,
	 * complete send/write or error read/atomic
	 */
	diff = psn_compare(pkt->psn, wqe->last_psn);
	if (diff > 0) {
		if (wqe->state == wqe_state_pending) {
			if (wqe->mask & WR_ATOMIC_OR_READ_MASK)
				return COMPST_ERROR_RETRY;

			reset_retry_counters(qp);
			return COMPST_COMP_WQE;
		} else {
			return COMPST_DONE;
		}
	}

	/* compare response packet to expected response */
	diff = psn_compare(pkt->psn, qp->comp.psn);
	if (diff < 0) {
		/* response is most likely a retried packet: if it matches an
		 * uncompleted WQE, complete it, else ignore it
		 */
		if (pkt->psn == wqe->last_psn)
			return COMPST_COMP_ACK;
		else
			return COMPST_DONE;
	} else if ((diff > 0) && (wqe->mask & WR_ATOMIC_OR_READ_MASK)) {
		return COMPST_DONE;
	} else {
		return COMPST_CHECK_ACK;
	}
}
static inline enum comp_state check_ack(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					struct rxe_send_wqe *wqe)
{
	unsigned int mask = pkt->mask;
	u8 syn;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	/* Check the sequence only */
	switch (qp->comp.opcode) {
	case -1:
		/* Will catch all *_ONLY cases. */
		if (!(mask & RXE_START_MASK))
			return COMPST_ERROR;

		break;

	case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
		if (pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE &&
		    pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST) {
			/* read retries of partial data may restart from
			 * read response first or response only.
			 */
			if ((pkt->psn == wqe->first_psn &&
			     pkt->opcode ==
			     IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) ||
			    (wqe->first_psn == wqe->last_psn &&
			     pkt->opcode ==
			     IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY))
				break;

			return COMPST_ERROR;
		}
		break;
	default:
		WARN_ON_ONCE(1);
	}
	/* Check operation validity. */
	switch (pkt->opcode) {
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST:
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY:
		syn = aeth_syn(pkt);

		if ((syn & AETH_TYPE_MASK) != AETH_ACK)
			return COMPST_ERROR;

		/* Fall through (IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE
		 * doesn't have an AETH)
		 */
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
		if (wqe->wr.opcode != IB_WR_RDMA_READ &&
		    wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV) {
			wqe->status = IB_WC_FATAL_ERR;
			return COMPST_ERROR;
		}
		reset_retry_counters(qp);
		return COMPST_READ;

	case IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE:
		syn = aeth_syn(pkt);

		if ((syn & AETH_TYPE_MASK) != AETH_ACK)
			return COMPST_ERROR;

		if (wqe->wr.opcode != IB_WR_ATOMIC_CMP_AND_SWP &&
		    wqe->wr.opcode != IB_WR_ATOMIC_FETCH_AND_ADD)
			return COMPST_ERROR;
		reset_retry_counters(qp);
		return COMPST_ATOMIC;

	case IB_OPCODE_RC_ACKNOWLEDGE:
		syn = aeth_syn(pkt);
		switch (syn & AETH_TYPE_MASK) {
		case AETH_ACK:
			reset_retry_counters(qp);
			return COMPST_WRITE_SEND;

		case AETH_RNR_NAK:
			rxe_counter_inc(rxe, RXE_CNT_RCV_RNR);
			return COMPST_RNR_RETRY;

		case AETH_NAK:
			switch (syn) {
			case AETH_NAK_PSN_SEQ_ERROR:
				/* a nak implicitly acks all packets with psns
				 * before
				 */
				if (psn_compare(pkt->psn, qp->comp.psn) > 0) {
					rxe_counter_inc(rxe,
							RXE_CNT_RCV_SEQ_ERR);
					qp->comp.psn = pkt->psn;
					if (qp->req.wait_psn) {
						qp->req.wait_psn = 0;
						rxe_run_task(&qp->req.task, 1);
					}
				}
				return COMPST_ERROR_RETRY;

			case AETH_NAK_INVALID_REQ:
				wqe->status = IB_WC_REM_INV_REQ_ERR;
				return COMPST_ERROR;

			case AETH_NAK_REM_ACC_ERR:
				wqe->status = IB_WC_REM_ACCESS_ERR;
				return COMPST_ERROR;

			case AETH_NAK_REM_OP_ERR:
				wqe->status = IB_WC_REM_OP_ERR;
				return COMPST_ERROR;

			default:
				pr_warn("unexpected nak %x\n", syn);
				wqe->status = IB_WC_REM_OP_ERR;
				return COMPST_ERROR;
			}

		default:
			return COMPST_ERROR;
		}
		break;

	default:
		pr_warn("unexpected opcode\n");
	}

	return COMPST_ERROR;
}
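/* Copy the payload of a read response into the sge list of the read WQE.
 * wqe->dma tracks progress across multi-packet responses, so completion
 * happens only once the residual byte count reaches zero on the last
 * packet.
 */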
static inline enum comp_state do_read(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt,
				      struct rxe_send_wqe *wqe)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	int ret;

	ret = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE,
			&wqe->dma, payload_addr(pkt),
			payload_size(pkt), to_mem_obj, NULL);
	if (ret)
		return COMPST_ERROR;

	if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
		return COMPST_COMP_ACK;
	else
		return COMPST_UPDATE_COMP;
}
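/* An atomic ack carries the original value of the remote memory in its
 * ATMACK header; copy it into the local buffer described by the WQE.
 */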
static inline enum comp_state do_atomic(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					struct rxe_send_wqe *wqe)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	int ret;

	u64 atomic_orig = atmack_orig(pkt);

	ret = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE,
			&wqe->dma, &atomic_orig,
			sizeof(u64), to_mem_obj, NULL);
	if (ret)
		return COMPST_ERROR;
	else
		return COMPST_COMP_ACK;
}
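/* Fill in a completion entry in the format the consumer expects: kernel
 * ULPs get a struct ib_wc, while userspace QPs get the ABI-stable
 * struct ib_uverbs_wc that the user library reads from the completion
 * queue buffer.
 */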
static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			  struct rxe_cqe *cqe)
{
	memset(cqe, 0, sizeof(*cqe));

	if (!qp->is_user) {
		struct ib_wc *wc = &cqe->ibwc;

		wc->wr_id = wqe->wr.wr_id;
		wc->status = wqe->status;
		wc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
		    wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
			wc->wc_flags = IB_WC_WITH_IMM;
		wc->byte_len = wqe->dma.length;
		wc->qp = &qp->ibqp;
	} else {
		struct ib_uverbs_wc *uwc = &cqe->uibwc;

		uwc->wr_id = wqe->wr.wr_id;
		uwc->status = wqe->status;
		uwc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
		    wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
			uwc->wc_flags = IB_WC_WITH_IMM;
		uwc->byte_len = wqe->dma.length;
		uwc->qp_num = qp->ibqp.qp_num;
	}
}
/*
 * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS
 * ---------8<---------8<-------------
 * ...Note that if a completion error occurs, a Work Completion
 * will always be generated, even if the signaling
 * indicator requests an Unsignaled Completion.
 * ---------8<---------8<-------------
 */
static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	struct rxe_cqe cqe;

	if ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	    wqe->status != IB_WC_SUCCESS) {
		make_send_cqe(qp, wqe, &cqe);
		advance_consumer(qp->sq.queue);
		rxe_cq_post(qp->scq, &cqe, 0);
	} else {
		advance_consumer(qp->sq.queue);
	}

	/*
	 * we completed something so let req run again
	 * if it is trying to fence
	 */
	if (qp->req.wait_fence) {
		qp->req.wait_fence = 0;
		rxe_run_task(&qp->req.task, 1);
	}
}
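/* In addition to generating the completion, an acked read or atomic WQE
 * returns its rd_atomic credit, potentially unblocking a requester that
 * stalled because all outstanding RDMA read/atomic slots were in use.
 */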
static inline enum comp_state complete_ack(struct rxe_qp *qp,
					   struct rxe_pkt_info *pkt,
					   struct rxe_send_wqe *wqe)
{
	unsigned long flags;

	if (wqe->has_rd_atomic) {
		wqe->has_rd_atomic = 0;
		atomic_inc(&qp->req.rd_atomic);
		if (qp->req.need_rd_atomic) {
			qp->comp.timeout_retry = 0;
			qp->req.need_rd_atomic = 0;
			rxe_run_task(&qp->req.task, 1);
		}
	}

	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
		/* state_lock used by requester & completer */
		spin_lock_irqsave(&qp->state_lock, flags);
		if ((qp->req.state == QP_STATE_DRAIN) &&
		    (qp->comp.psn == qp->req.psn)) {
			qp->req.state = QP_STATE_DRAINED;
			spin_unlock_irqrestore(&qp->state_lock, flags);

			if (qp->ibqp.event_handler) {
				struct ib_event ev;

				ev.device = qp->ibqp.device;
				ev.element.qp = &qp->ibqp;
				ev.event = IB_EVENT_SQ_DRAINED;
				qp->ibqp.event_handler(&ev,
					qp->ibqp.qp_context);
			}
		} else {
			spin_unlock_irqrestore(&qp->state_lock, flags);
		}
	}

	do_complete(qp, wqe);

	if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
		return COMPST_UPDATE_COMP;
	else
		return COMPST_DONE;
}
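/* Complete a WQE that will not get (or no longer needs) an ack, e.g. one
 * the requester already marked done.  If it was still pending, advance
 * the completion PSN past the WQE first.
 */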
static inline enum comp_state complete_wqe(struct rxe_qp *qp,
					   struct rxe_pkt_info *pkt,
					   struct rxe_send_wqe *wqe)
{
	if (pkt && wqe->state == wqe_state_pending) {
		if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) {
			qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK;
			qp->comp.opcode = -1;
		}

		if (qp->req.wait_psn) {
			qp->req.wait_psn = 0;
			rxe_run_task(&qp->req.task, 1);
		}
	}

	do_complete(qp, wqe);

	return COMPST_GET_WQE;
}
static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
{
	struct sk_buff *skb;
	struct rxe_send_wqe *wqe;

	while ((skb = skb_dequeue(&qp->resp_pkts))) {
		rxe_drop_ref(qp);
		kfree_skb(skb);
	}

	while ((wqe = queue_head(qp->sq.queue))) {
		if (notify) {
			wqe->status = IB_WC_WR_FLUSH_ERR;
			do_complete(qp, wqe);
		} else {
			advance_consumer(qp->sq.queue);
		}
	}
}
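/* Main entry point, run by the completer task.  The return value is the
 * contract with the task machinery: returning 0 ("done") asks to be
 * called again in case more packets are queued, while a non-zero return
 * ("exit") stops the loop until someone reschedules the task.
 */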
int rxe_completer(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_send_wqe *wqe = NULL;
	struct sk_buff *skb = NULL;
	struct rxe_pkt_info *pkt = NULL;
	enum comp_state state;

	rxe_add_ref(qp);

	if (!qp->valid || qp->req.state == QP_STATE_ERROR ||
	    qp->req.state == QP_STATE_RESET) {
		rxe_drain_resp_pkts(qp, qp->valid &&
				    qp->req.state == QP_STATE_ERROR);
		goto exit;
	}

	if (qp->comp.timeout) {
		qp->comp.timeout_retry = 1;
		qp->comp.timeout = 0;
	} else {
		qp->comp.timeout_retry = 0;
	}

	if (qp->req.need_retry)
		goto exit;

	state = COMPST_GET_ACK;
	while (1) {
		pr_debug("qp#%d state = %s\n", qp_num(qp),
			 comp_state_name[state]);
		switch (state) {
		case COMPST_GET_ACK:
			skb = skb_dequeue(&qp->resp_pkts);
			if (skb) {
				pkt = SKB_TO_PKT(skb);
				qp->comp.timeout_retry = 0;
			}
			state = COMPST_GET_WQE;
			break;

		case COMPST_GET_WQE:
			state = get_wqe(qp, pkt, &wqe);
			break;

		case COMPST_CHECK_PSN:
			state = check_psn(qp, pkt, wqe);
			break;

		case COMPST_CHECK_ACK:
			state = check_ack(qp, pkt, wqe);
			break;

		case COMPST_READ:
			state = do_read(qp, pkt, wqe);
			break;

		case COMPST_ATOMIC:
			state = do_atomic(qp, pkt, wqe);
			break;

		case COMPST_WRITE_SEND:
			if (wqe->state == wqe_state_pending &&
			    wqe->last_psn == pkt->psn)
				state = COMPST_COMP_ACK;
			else
				state = COMPST_UPDATE_COMP;
			break;

		case COMPST_COMP_ACK:
			state = complete_ack(qp, pkt, wqe);
			break;

		case COMPST_COMP_WQE:
			state = complete_wqe(qp, pkt, wqe);
			break;

		case COMPST_UPDATE_COMP:
			if (pkt->mask & RXE_END_MASK)
				qp->comp.opcode = -1;
			else
				qp->comp.opcode = pkt->opcode;

			if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
				qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;

			if (qp->req.wait_psn) {
				qp->req.wait_psn = 0;
				rxe_run_task(&qp->req.task, 1);
			}

			state = COMPST_DONE;
			break;

		case COMPST_DONE:
			if (pkt) {
				rxe_drop_ref(pkt->qp);
				kfree_skb(skb);
				skb = NULL;
			}
			goto done;

		case COMPST_EXIT:
			if (qp->comp.timeout_retry && wqe) {
				state = COMPST_ERROR_RETRY;
				break;
			}

			/* re-arm the retransmit timer if
			 * (1) the QP is an RC QP
			 * (2) the QP is alive
			 * (3) there is a packet sent by the requester that
			 *     might be acked (we still might get spurious
			 *     timeouts but try to keep them as few as possible)
			 * (4) the timeout parameter is set
			 */
			if ((qp_type(qp) == IB_QPT_RC) &&
			    (qp->req.state == QP_STATE_READY) &&
			    (psn_compare(qp->req.psn, qp->comp.psn) > 0) &&
			    qp->qp_timeout_jiffies)
				mod_timer(&qp->retrans_timer,
					  jiffies + qp->qp_timeout_jiffies);
			goto exit;

		case COMPST_ERROR_RETRY:
			/* we come here if the retry timer fired and we did
			 * not receive a response packet. try to retry the send
			 * queue if that makes sense and the limits have not
			 * been exceeded. remember that some timeouts are
			 * spurious since we do not reset the timer but kick
			 * it down the road or let it expire
			 */

			/* there is nothing to retry in this case */
			if (!wqe || (wqe->state == wqe_state_posted))
				goto exit;

			if (qp->comp.retry_cnt > 0) {
				if (qp->comp.retry_cnt != 7)
					qp->comp.retry_cnt--;

				/* no point in retrying if we have already
				 * seen the last ack that the requester could
				 * have caused
				 */
				if (psn_compare(qp->req.psn,
						qp->comp.psn) > 0) {
					/* tell the requester to retry the
					 * send queue next time around
					 */
					rxe_counter_inc(rxe,
							RXE_CNT_COMP_RETRY);
					qp->req.need_retry = 1;
					rxe_run_task(&qp->req.task, 1);
				}

				if (pkt) {
					rxe_drop_ref(pkt->qp);
					kfree_skb(skb);
					skb = NULL;
				}

				goto exit;
			} else {
				rxe_counter_inc(rxe, RXE_CNT_RETRY_EXCEEDED);
				wqe->status = IB_WC_RETRY_EXC_ERR;
				state = COMPST_ERROR;
			}
			break;

		case COMPST_RNR_RETRY:
			if (qp->comp.rnr_retry > 0) {
				if (qp->comp.rnr_retry != 7)
					qp->comp.rnr_retry--;

				qp->req.need_retry = 1;
				pr_debug("qp#%d set rnr nak timer\n",
					 qp_num(qp));
				mod_timer(&qp->rnr_nak_timer,
					  jiffies + rnrnak_jiffies(aeth_syn(pkt)
						& ~AETH_TYPE_MASK));
				rxe_drop_ref(pkt->qp);
				kfree_skb(skb);
				skb = NULL;
				goto exit;
			} else {
				rxe_counter_inc(rxe,
						RXE_CNT_RNR_RETRY_EXCEEDED);
				wqe->status = IB_WC_RNR_RETRY_EXC_ERR;
				state = COMPST_ERROR;
			}
			break;

		case COMPST_ERROR:
			WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS);
			do_complete(qp, wqe);
			rxe_qp_error(qp);

			if (pkt) {
				rxe_drop_ref(pkt->qp);
				kfree_skb(skb);
				skb = NULL;
			}

			goto exit;
		}
	}

exit:
	/* we come here if we are done with processing and want the task to
	 * exit from the loop calling us
	 */
	WARN_ON_ONCE(skb);
	rxe_drop_ref(qp);
	return -EAGAIN;

done:
	/* we come here if we have processed a packet and want the task to
	 * call us again to see if there is anything else to do
	 */
	WARN_ON_ONCE(skb);
	rxe_drop_ref(qp);
	return 0;
}