// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <crypto/hash.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode);
static inline void retry_first_write_send(struct rxe_qp *qp,
					  struct rxe_send_wqe *wqe,
					  unsigned int mask, int npsn)
{
	int i;

	for (i = 0; i < npsn; i++) {
		int to_send = (wqe->dma.resid > qp->mtu) ?
				qp->mtu : wqe->dma.resid;

		qp->req.opcode = next_opcode(qp, wqe,
					     wqe->wr.opcode);

		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			wqe->dma.resid -= to_send;
			wqe->dma.sge_offset += to_send;
		} else {
			advance_dma_data(&wqe->dma, to_send);
		}

		if (mask & WR_WRITE_MASK)
			wqe->iova += qp->mtu;
	}
}
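/* Illustrative walk-through (values assumed for this sketch, not taken
 * from the source): if a retried write WQE has dma.length = 3000 and
 * qp->mtu = 1024, and npsn = 2 packets were already acked, the loop above
 * consumes two full-MTU chunks (to_send = 1024 each), leaving resid = 952
 * and iova advanced by 2048, so retransmission resumes at the first
 * unacked packet rather than the start of the message.
 */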
static void req_retry(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	unsigned int wqe_index;
	unsigned int mask;
	int npsn;
	int first = 1;

	qp->req.wqe_index	= consumer_index(qp->sq.queue);
	qp->req.psn		= qp->comp.psn;
	qp->req.opcode		= -1;

	for (wqe_index = consumer_index(qp->sq.queue);
		wqe_index != producer_index(qp->sq.queue);
		wqe_index = next_index(qp->sq.queue, wqe_index)) {
		wqe = addr_from_index(qp->sq.queue, wqe_index);
		mask = wr_opcode_mask(wqe->wr.opcode, qp);

		if (wqe->state == wqe_state_posted)
			break;
		if (wqe->state == wqe_state_done)
			continue;

		wqe->iova = (mask & WR_ATOMIC_MASK) ?
			     wqe->wr.wr.atomic.remote_addr :
			     (mask & WR_READ_OR_WRITE_MASK) ?
			     wqe->wr.wr.rdma.remote_addr :
			     0;

		if (!first || (mask & WR_READ_MASK) == 0) {
			wqe->dma.resid = wqe->dma.length;
			wqe->dma.cur_sge = 0;
			wqe->dma.sge_offset = 0;
		}

		if (first) {
			first = 0;

			if (mask & WR_WRITE_OR_SEND_MASK) {
				npsn = (qp->comp.psn - wqe->first_psn) &
					BTH_PSN_MASK;
				retry_first_write_send(qp, wqe, mask, npsn);
			}

			if (mask & WR_READ_MASK) {
				npsn = (wqe->dma.length - wqe->dma.resid) /
					qp->mtu;
				wqe->iova += npsn * qp->mtu;
			}
		}

		wqe->state = wqe_state_posted;
	}
}
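/* Retry flow in brief: req_retry() rewinds the requester to the
 * completer's view (qp->comp.psn) and re-marks unacked WQEs as posted.
 * Only the first unacked WQE keeps partial progress: for sends and
 * writes the already-acked prefix is skipped via
 * retry_first_write_send(), and for reads the iova is advanced past the
 * bytes already received, so a retried read requests only the missing
 * tail.
 */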
void rnr_nak_timer(struct timer_list *t)
{
	struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);

	pr_debug("qp#%d rnr nak timer fired\n", qp_num(qp));
	rxe_run_task(&qp->req.task, 1);
}
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe = queue_head(qp->sq.queue);
	unsigned long flags;

	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
		/* check to see if we are drained;
		 * state_lock used by requester and completer
		 */
		spin_lock_irqsave(&qp->state_lock, flags);
		do {
			if (qp->req.state != QP_STATE_DRAIN) {
				/* comp just finished */
				spin_unlock_irqrestore(&qp->state_lock,
						       flags);
				break;
			}

			if (wqe && ((qp->req.wqe_index !=
				consumer_index(qp->sq.queue)) ||
				(wqe->state != wqe_state_posted))) {
				/* comp not done yet */
				spin_unlock_irqrestore(&qp->state_lock,
						       flags);
				break;
			}

			qp->req.state = QP_STATE_DRAINED;
			spin_unlock_irqrestore(&qp->state_lock, flags);

			if (qp->ibqp.event_handler) {
				struct ib_event ev;

				ev.device = qp->ibqp.device;
				ev.element.qp = &qp->ibqp;
				ev.event = IB_EVENT_SQ_DRAINED;
				qp->ibqp.event_handler(&ev,
					qp->ibqp.qp_context);
			}
		} while (0);
	}

	if (qp->req.wqe_index == producer_index(qp->sq.queue))
		return NULL;

	wqe = addr_from_index(qp->sq.queue, qp->req.wqe_index);

	if (unlikely((qp->req.state == QP_STATE_DRAIN ||
		      qp->req.state == QP_STATE_DRAINED) &&
		     (wqe->state != wqe_state_processing)))
		return NULL;

	if (unlikely((wqe->wr.send_flags & IB_SEND_FENCE) &&
		     (qp->req.wqe_index != consumer_index(qp->sq.queue)))) {
		qp->req.wait_fence = 1;
		return NULL;
	}

	wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
	return wqe;
}
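/* Two gating conditions above are easy to miss: in the drain states only
 * WQEs already in flight (wqe_state_processing) may continue, and a WQE
 * posted with IB_SEND_FENCE is held back (qp->req.wait_fence) until all
 * earlier WQEs have completed, i.e. until the requester index has caught
 * up with the consumer index.
 */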
static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_RDMA_READ:
		return IB_OPCODE_RC_RDMA_READ_REQUEST;

	case IB_WR_ATOMIC_CMP_AND_SWP:
		return IB_OPCODE_RC_COMPARE_SWAP;

	case IB_WR_ATOMIC_FETCH_AND_ADD:
		return IB_OPCODE_RC_FETCH_ADD;

	case IB_WR_SEND_WITH_INV:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_REG_MR:
	case IB_WR_LOCAL_INV:
		return opcode;
	}

	return -EINVAL;
}
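/* Segmentation example (illustrative values): an RC send of 2500 bytes
 * with mtu = 1024 is emitted as SEND_FIRST (1024 bytes), SEND_MIDDLE
 * (1024) and SEND_LAST (452). "fits" is true once the remaining resid is
 * at most one MTU, which is what promotes FIRST to ONLY and MIDDLE to
 * LAST in the logic above.
 */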
static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY :
				IB_OPCODE_UC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_FIRST;
	}

	return -EINVAL;
}
static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode)
{
	int fits = (wqe->dma.resid <= qp->mtu);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return next_opcode_rc(qp, opcode, fits);

	case IB_QPT_UC:
		return next_opcode_uc(qp, opcode, fits);

	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		switch (opcode) {
		case IB_WR_SEND:
			return IB_OPCODE_UD_SEND_ONLY;

		case IB_WR_SEND_WITH_IMM:
			return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		}
		break;

	default:
		break;
	}

	return -EINVAL;
}
static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	int depth;

	if (wqe->has_rd_atomic)
		return 0;

	qp->req.need_rd_atomic = 1;
	depth = atomic_dec_return(&qp->req.rd_atomic);

	if (depth >= 0) {
		qp->req.need_rd_atomic = 0;
		wqe->has_rd_atomic = 1;
		return 0;
	}

	atomic_inc(&qp->req.rd_atomic);
	return -EAGAIN;
}
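/* Credit-accounting sketch: qp->req.rd_atomic counts the read/atomic
 * slots still available out of max_rd_atomic. A WQE that wins a slot
 * keeps it (has_rd_atomic) across retries; a loser undoes the decrement
 * and leaves need_rd_atomic set so the requester can be rescheduled once
 * a slot is returned.
 */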
static inline int get_mtu(struct rxe_qp *qp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))
		return qp->mtu;

	return rxe->port.mtu_cap;
}
static struct sk_buff *init_req_packet(struct rxe_qp *qp,
				       struct rxe_send_wqe *wqe,
				       int opcode, int payload,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_dev		*rxe = to_rdev(qp->ibqp.device);
	struct sk_buff		*skb;
	struct rxe_send_wr	*ibwr = &wqe->wr;
	struct rxe_av		*av;
	int			pad = (-payload) & 0x3;
	int			paylen;
	int			solicited;
	u16			pkey;
	u32			qp_num;
	int			ack_req;

	/* length from start of bth to end of icrc */
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

	/* pkt->hdr, rxe, port_num and mask are initialized in ifc
	 * function
	 */
	pkt->opcode	= opcode;
	pkt->qp		= qp;
	pkt->psn	= qp->req.psn;
	pkt->mask	= rxe_opcode[opcode].mask;
	pkt->paylen	= paylen;
	pkt->offset	= 0;
	pkt->wqe	= wqe;

	/* init skb */
	av = rxe_get_av(pkt);
	skb = rxe_init_packet(rxe, av, paylen, pkt);
	if (unlikely(!skb))
		return NULL;

	/* init bth */
	solicited = (ibwr->send_flags & IB_SEND_SOLICITED) &&
			(pkt->mask & RXE_END_MASK) &&
			((pkt->mask & (RXE_SEND_MASK)) ||
			(pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
			(RXE_WRITE_MASK | RXE_IMMDT_MASK));

	pkey = IB_DEFAULT_PKEY_FULL;

	qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
					 qp->attr.dest_qp_num;

	ack_req = ((pkt->mask & RXE_END_MASK) ||
		   (qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
	if (ack_req)
		qp->req.noack_pkts = 0;

	bth_init(pkt, pkt->opcode, solicited, 0, pad, pkey, qp_num,
		 ack_req, pkt->psn);

	/* init optional headers */
	if (pkt->mask & RXE_RETH_MASK) {
		reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
		reth_set_va(pkt, wqe->iova);
		reth_set_len(pkt, wqe->dma.resid);
	}

	if (pkt->mask & RXE_IMMDT_MASK)
		immdt_set_imm(pkt, ibwr->ex.imm_data);

	if (pkt->mask & RXE_IETH_MASK)
		ieth_set_rkey(pkt, ibwr->ex.invalidate_rkey);

	if (pkt->mask & RXE_ATMETH_MASK) {
		atmeth_set_va(pkt, wqe->iova);
		if (opcode == IB_OPCODE_RC_COMPARE_SWAP ||
		    opcode == IB_OPCODE_RD_COMPARE_SWAP) {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
			atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
		} else {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
		}
		atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
	}

	if (pkt->mask & RXE_DETH_MASK) {
		if (qp->ibqp.qp_num == 1)
			deth_set_qkey(pkt, GSI_QKEY);
		else
			deth_set_qkey(pkt, ibwr->wr.ud.remote_qkey);
		deth_set_sqp(pkt, qp->ibqp.qp_num);
	}

	return skb;
}
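/* Header math example (illustrative numbers): for a 1000-byte payload,
 * pad = (-1000) & 0x3 = 0; for 1001 bytes, pad = 3, rounding the payload
 * up to the 4-byte boundary the wire format requires. paylen then covers
 * the BTH plus any extension headers (rxe_opcode[opcode].length), the
 * padded payload, and the 4-byte ICRC.
 */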
static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       struct rxe_pkt_info *pkt, struct sk_buff *skb,
		       int paylen)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	u32 crc = 0;
	u32 *p;
	int err;

	err = rxe_prepare(pkt, skb, &crc);
	if (err)
		return err;

	if (pkt->mask & RXE_WRITE_OR_SEND) {
		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];

			crc = rxe_crc32(rxe, crc, tmp, paylen);
			memcpy(payload_addr(pkt), tmp, paylen);

			wqe->dma.resid -= paylen;
			wqe->dma.sge_offset += paylen;
		} else {
			err = copy_data(qp->pd, 0, &wqe->dma,
					payload_addr(pkt), paylen,
					from_mem_obj, &crc);
			if (err)
				return err;
		}
		if (bth_pad(pkt)) {
			u8 *pad = payload_addr(pkt) + paylen;

			memset(pad, 0, bth_pad(pkt));
			crc = rxe_crc32(rxe, crc, pad, bth_pad(pkt));
		}
	}
	p = payload_addr(pkt) + paylen + bth_pad(pkt);

	*p = ~crc;

	return 0;
}
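/* ICRC note: the CRC is accumulated in three stages - headers in
 * rxe_prepare(), then the payload (inline copy or copy_data()), then the
 * zeroed pad bytes - and the bit-inverted result is stored in the four
 * bytes directly after the padded payload.
 */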
static void update_wqe_state(struct rxe_qp *qp,
			     struct rxe_send_wqe *wqe,
			     struct rxe_pkt_info *pkt)
{
	if (pkt->mask & RXE_END_MASK) {
		if (qp_type(qp) == IB_QPT_RC)
			wqe->state = wqe_state_pending;
	} else {
		wqe->state = wqe_state_processing;
	}
}
static void update_wqe_psn(struct rxe_qp *qp,
			   struct rxe_send_wqe *wqe,
			   struct rxe_pkt_info *pkt,
			   int payload)
{
	/* number of packets left to send including current one */
	int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;

	/* handle zero length packet case */
	if (num_pkt == 0)
		num_pkt = 1;

	if (pkt->mask & RXE_START_MASK) {
		wqe->first_psn = qp->req.psn;
		wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
	}

	if (pkt->mask & RXE_READ_MASK)
		qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
	else
		qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
}
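/* PSN example (illustrative values): a 2500-byte RC read with mtu = 1024
 * and req.psn = 100 occupies num_pkt = 3 response packets, so
 * first_psn = 100, last_psn = 102, and the requester jumps its PSN
 * straight to 103, because the responder consumes one PSN per
 * read-response packet even though only one request packet is sent.
 */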
static void save_state(struct rxe_send_wqe *wqe,
		       struct rxe_qp *qp,
		       struct rxe_send_wqe *rollback_wqe,
		       u32 *rollback_psn)
{
	rollback_wqe->state     = wqe->state;
	rollback_wqe->first_psn = wqe->first_psn;
	rollback_wqe->last_psn  = wqe->last_psn;
	*rollback_psn           = qp->req.psn;
}
static void rollback_state(struct rxe_send_wqe *wqe,
			   struct rxe_qp *qp,
			   struct rxe_send_wqe *rollback_wqe,
			   u32 rollback_psn)
{
	wqe->state     = rollback_wqe->state;
	wqe->first_psn = rollback_wqe->first_psn;
	wqe->last_psn  = rollback_wqe->last_psn;
	qp->req.psn    = rollback_psn;
}
static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			 struct rxe_pkt_info *pkt, int payload)
{
	qp->req.opcode = pkt->opcode;

	if (pkt->mask & RXE_END_MASK)
		qp->req.wqe_index = next_index(qp->sq.queue,
					       qp->req.wqe_index);

	qp->need_req_skb = 0;

	if (qp->qp_timeout_jiffies && !timer_pending(&qp->retrans_timer))
		mod_timer(&qp->retrans_timer,
			  jiffies + qp->qp_timeout_jiffies);
}
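/* The retransmit timer is armed here only when it is not already
 * pending, so successfully sending request packets keeps one timeout
 * window outstanding rather than rearming the timer on every packet.
 */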
int rxe_requester(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_pkt_info pkt;
	struct sk_buff *skb;
	struct rxe_send_wqe *wqe;
	enum rxe_hdr_mask mask;
	int payload;
	int mtu;
	int opcode;
	int ret;
	struct rxe_send_wqe rollback_wqe;
	u32 rollback_psn;

	rxe_add_ref(qp);

next_wqe:
	if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
		goto exit;

	if (unlikely(qp->req.state == QP_STATE_RESET)) {
		qp->req.wqe_index = consumer_index(qp->sq.queue);
		qp->req.opcode = -1;
		qp->req.need_rd_atomic = 0;
		qp->req.wait_psn = 0;
		qp->req.need_retry = 0;
		goto exit;
	}

	if (unlikely(qp->req.need_retry)) {
		req_retry(qp);
		qp->req.need_retry = 0;
	}

	wqe = req_next_wqe(qp);
	if (unlikely(!wqe))
		goto exit;
	if (wqe->mask & WR_REG_MASK) {
		if (wqe->wr.opcode == IB_WR_LOCAL_INV) {
			struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
			struct rxe_mem *rmr;

			rmr = rxe_pool_get_index(&rxe->mr_pool,
						 wqe->wr.ex.invalidate_rkey >> 8);
			if (!rmr) {
				pr_err("No mr for key %#x\n",
				       wqe->wr.ex.invalidate_rkey);
				wqe->state = wqe_state_error;
				wqe->status = IB_WC_MW_BIND_ERR;
				goto exit;
			}
			rmr->state = RXE_MEM_STATE_FREE;
			rxe_drop_ref(rmr);
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
		} else if (wqe->wr.opcode == IB_WR_REG_MR) {
			struct rxe_mem *rmr = to_rmr(wqe->wr.wr.reg.mr);

			rmr->state = RXE_MEM_STATE_VALID;
			rmr->access = wqe->wr.wr.reg.access;
			rmr->ibmr.lkey = wqe->wr.wr.reg.key;
			rmr->ibmr.rkey = wqe->wr.wr.reg.key;
			rmr->iova = wqe->wr.wr.reg.mr->iova;
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
		} else {
			goto exit;
		}
		if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
		    qp->sq_sig_type == IB_SIGNAL_ALL_WR)
			rxe_run_task(&qp->comp.task, 1);
		qp->req.wqe_index = next_index(qp->sq.queue,
					       qp->req.wqe_index);
		goto next_wqe;
	}
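	/* LOCAL_INV and REG_MR are "local" work requests: they mutate MR
	 * state and complete without emitting a packet, which is why the
	 * block above kicks the completer task and loops straight to the
	 * next WQE instead of falling through to packet construction.
	 */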
	if (unlikely(qp_type(qp) == IB_QPT_RC &&
		     psn_compare(qp->req.psn, (qp->comp.psn +
				 RXE_MAX_UNACKED_PSNS)) > 0)) {
		qp->req.wait_psn = 1;
		goto exit;
	}

	/* Limit the number of inflight SKBs per QP */
	if (unlikely(atomic_read(&qp->skb_out) >
		     RXE_INFLIGHT_SKBS_PER_QP_HIGH)) {
		qp->need_req_skb = 1;
		goto exit;
	}

	opcode = next_opcode(qp, wqe, wqe->wr.opcode);
	if (unlikely(opcode < 0)) {
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto exit;
	}

	mask = rxe_opcode[opcode].mask;
	if (unlikely(mask & RXE_READ_OR_ATOMIC)) {
		if (check_init_depth(qp, wqe))
			goto exit;
	}

	mtu = get_mtu(qp);
	payload = (mask & RXE_WRITE_OR_SEND) ? wqe->dma.resid : 0;
	if (payload > mtu) {
		if (qp_type(qp) == IB_QPT_UD) {
			/* C10-93.1.1: If the total sum of all the buffer lengths specified for a
			 * UD message exceeds the MTU of the port as returned by QueryHCA, the CI
			 * shall not emit any packets for this message. Further, the CI shall not
			 * generate an error due to this condition.
			 */

			/* fake a successful UD send */
			wqe->first_psn = qp->req.psn;
			wqe->last_psn = qp->req.psn;
			qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
			qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
			qp->req.wqe_index = next_index(qp->sq.queue,
						       qp->req.wqe_index);
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
			__rxe_do_task(&qp->comp.task);
			rxe_drop_ref(qp);
			return 0;
		}
		payload = mtu;
	}
	skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
	if (unlikely(!skb)) {
		pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
		goto err;
	}

	if (fill_packet(qp, wqe, &pkt, skb, payload)) {
		pr_debug("qp#%d Error during fill packet\n", qp_num(qp));
		kfree_skb(skb);
		goto err;
	}

	/*
	 * To prevent a race on wqe access between requester and completer,
	 * wqe members state and psn need to be set before calling
	 * rxe_xmit_packet().
	 * Otherwise, completer might initiate an unjustified retry flow.
	 */
	save_state(wqe, qp, &rollback_wqe, &rollback_psn);
	update_wqe_state(qp, wqe, &pkt);
	update_wqe_psn(qp, wqe, &pkt, payload);
	ret = rxe_xmit_packet(qp, &pkt, skb);
	if (ret) {
		qp->need_req_skb = 1;

		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);

		if (ret == -EAGAIN) {
			rxe_run_task(&qp->req.task, 1);
			goto exit;
		}

		goto err;
	}

	update_state(qp, wqe, &pkt, payload);

	goto next_wqe;
err:
	wqe->status = IB_WC_LOC_PROT_ERR;
	wqe->state = wqe_state_error;
	__rxe_do_task(&qp->comp.task);

exit:
	rxe_drop_ref(qp);
	return -EAGAIN;
}
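/* Overall flow (summary, hedged): rxe_requester() runs as the QP's
 * requester task; each pass pulls the next send WQE, segments it into
 * MTU-sized request packets, and transmits until it must wait - on the
 * PSN window, a fence, read/atomic credits, or skb backpressure - at
 * which point it returns nonzero so the task loop stops until it is
 * rescheduled by the completer, a timer, or a new posted WQE.
 */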