// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/net.h>
#include <linux/scatterlist.h>
#include <linux/llist.h>
#include <asm/barrier.h>
#include <net/tcp.h>

#include "siw.h"
#include "siw_verbs.h"
#include "siw_mem.h"
static char siw_qp_state_to_string[SIW_QP_STATE_COUNT][sizeof "TERMINATE"] = {
	[SIW_QP_STATE_IDLE] = "IDLE",
	[SIW_QP_STATE_RTR] = "RTR",
	[SIW_QP_STATE_RTS] = "RTS",
	[SIW_QP_STATE_CLOSING] = "CLOSING",
	[SIW_QP_STATE_TERMINATE] = "TERMINATE",
	[SIW_QP_STATE_ERROR] = "ERROR"
};

/*
 * iWARP (RDMAP, DDP and MPA) parameters as well as Softiwarp settings on a
 * per-RDMAP message basis. Please keep the order of initializers. All MPA
 * lengths are initialized to the minimum packet size.
 */
struct iwarp_msg_info iwarp_pktinfo[RDMAP_TERMINATE + 1] = {
	{ /* RDMAP_RDMA_WRITE */
	  .hdr_len = sizeof(struct iwarp_rdma_write),
	  .ctrl.mpa_len = htons(sizeof(struct iwarp_rdma_write) - 2),
	  .ctrl.ddp_rdmap_ctrl = DDP_FLAG_TAGGED | DDP_FLAG_LAST |
				 cpu_to_be16(DDP_VERSION << 8) |
				 cpu_to_be16(RDMAP_VERSION << 6) |
				 cpu_to_be16(RDMAP_RDMA_WRITE),
	  .rx_data = siw_proc_write },
	{ /* RDMAP_RDMA_READ_REQ */
	  .hdr_len = sizeof(struct iwarp_rdma_rreq),
	  .ctrl.mpa_len = htons(sizeof(struct iwarp_rdma_rreq) - 2),
	  .ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) |
				 cpu_to_be16(RDMAP_VERSION << 6) |
				 cpu_to_be16(RDMAP_RDMA_READ_REQ),
	  .rx_data = siw_proc_rreq },
	{ /* RDMAP_RDMA_READ_RESP */
	  .hdr_len = sizeof(struct iwarp_rdma_rresp),
	  .ctrl.mpa_len = htons(sizeof(struct iwarp_rdma_rresp) - 2),
	  .ctrl.ddp_rdmap_ctrl = DDP_FLAG_TAGGED | DDP_FLAG_LAST |
				 cpu_to_be16(DDP_VERSION << 8) |
				 cpu_to_be16(RDMAP_VERSION << 6) |
				 cpu_to_be16(RDMAP_RDMA_READ_RESP),
	  .rx_data = siw_proc_rresp },
	{ /* RDMAP_SEND */
	  .hdr_len = sizeof(struct iwarp_send),
	  .ctrl.mpa_len = htons(sizeof(struct iwarp_send) - 2),
	  .ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) |
				 cpu_to_be16(RDMAP_VERSION << 6) |
				 cpu_to_be16(RDMAP_SEND),
	  .rx_data = siw_proc_send },
	{ /* RDMAP_SEND_INVAL */
	  .hdr_len = sizeof(struct iwarp_send_inv),
	  .ctrl.mpa_len = htons(sizeof(struct iwarp_send_inv) - 2),
	  .ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) |
				 cpu_to_be16(RDMAP_VERSION << 6) |
				 cpu_to_be16(RDMAP_SEND_INVAL),
	  .rx_data = siw_proc_send },
	{ /* RDMAP_SEND_SE */
	  .hdr_len = sizeof(struct iwarp_send),
	  .ctrl.mpa_len = htons(sizeof(struct iwarp_send) - 2),
	  .ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) |
				 cpu_to_be16(RDMAP_VERSION << 6) |
				 cpu_to_be16(RDMAP_SEND_SE),
	  .rx_data = siw_proc_send },
	{ /* RDMAP_SEND_SE_INVAL */
	  .hdr_len = sizeof(struct iwarp_send_inv),
	  .ctrl.mpa_len = htons(sizeof(struct iwarp_send_inv) - 2),
	  .ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) |
				 cpu_to_be16(RDMAP_VERSION << 6) |
				 cpu_to_be16(RDMAP_SEND_SE_INVAL),
	  .rx_data = siw_proc_send },
	{ /* RDMAP_TERMINATE */
	  .hdr_len = sizeof(struct iwarp_terminate),
	  .ctrl.mpa_len = htons(sizeof(struct iwarp_terminate) - 2),
	  .ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) |
				 cpu_to_be16(RDMAP_VERSION << 6) |
				 cpu_to_be16(RDMAP_TERMINATE),
	  .rx_data = siw_proc_terminate }
};
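/*
 * Usage note (added commentary, not part of the original source): the
 * receive path indexes this table by the just-parsed RDMAP opcode to find
 * the expected header length and the payload handler, e.g.
 *
 *	hdr_len = iwarp_pktinfo[__rdmap_get_opcode(&hdr->ctrl)].hdr_len;
 *
 * The .ctrl word is pre-computed here so the transmit path can copy it
 * into outgoing headers without re-assembling DDP/RDMAP version bits.
 */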
void siw_qp_llp_data_ready(struct sock *sk)
{
	struct siw_qp *qp;

	read_lock(&sk->sk_callback_lock);

	if (unlikely(!sk->sk_user_data || !sk_to_qp(sk)))
		goto done;

	qp = sk_to_qp(sk);

	if (likely(!qp->rx_stream.rx_suspend &&
		   down_read_trylock(&qp->state_lock))) {
		read_descriptor_t rd_desc = { .arg.data = qp, .count = 1 };

		if (likely(qp->attrs.state == SIW_QP_STATE_RTS))
			/*
			 * Implements the data receive operation during the
			 * socket callback. TCP gracefully handles the case
			 * where there is nothing to receive (siw_tcp_rx_data()
			 * is not called then).
			 */
			tcp_read_sock(sk, &rd_desc, siw_tcp_rx_data);

		up_read(&qp->state_lock);
	} else {
		siw_dbg_qp(qp, "unable to process RX, suspend: %d\n",
			   qp->rx_stream.rx_suspend);
	}
done:
	read_unlock(&sk->sk_callback_lock);
}
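/*
 * Note (added commentary): siw_qp_llp_close() handles a close signalled by
 * the lower layer protocol (the TCP socket). It suspends RX and TX
 * processing and moves the QP out of any active state, as coded below.
 */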
void siw_qp_llp_close(struct siw_qp *qp)
{
	siw_dbg_qp(qp, "enter llp close, state = %s\n",
		   siw_qp_state_to_string[qp->attrs.state]);

	down_write(&qp->state_lock);

	qp->rx_stream.rx_suspend = 1;
	qp->tx_ctx.tx_suspend = 1;

	switch (qp->attrs.state) {
	case SIW_QP_STATE_RTS:
	case SIW_QP_STATE_RTR:
	case SIW_QP_STATE_IDLE:
	case SIW_QP_STATE_TERMINATE:
		qp->attrs.state = SIW_QP_STATE_ERROR;
		break;
	/*
	 * SIW_QP_STATE_CLOSING:
	 *
	 * This is a forced close. Shall the QP be moved to
	 * SIW_QP_STATE_ERROR?
	 */
	case SIW_QP_STATE_CLOSING:
		if (tx_wqe(qp)->wr_status == SIW_WR_IDLE)
			qp->attrs.state = SIW_QP_STATE_ERROR;
		else
			qp->attrs.state = SIW_QP_STATE_IDLE;
		break;

	default:
		siw_dbg_qp(qp, "llp close: no state transition needed: %s\n",
			   siw_qp_state_to_string[qp->attrs.state]);
		break;
	}

	/*
	 * Dereference closing CEP
	 */
	siw_cep_put(qp->cep);

	up_write(&qp->state_lock);

	siw_dbg_qp(qp, "llp close exit: state %s\n",
		   siw_qp_state_to_string[qp->attrs.state]);
}
/*
 * Socket callback routine informing about newly available send space.
 * Schedules SQ work for processing pending SQ items.
 */
void siw_qp_llp_write_space(struct sock *sk)
{
	struct siw_cep *cep;

	read_lock(&sk->sk_callback_lock);

	cep = sk_to_cep(sk);
	if (cep) {
		cep->sk_write_space(sk);

		if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
			(void)siw_sq_start(cep->qp);
	}

	read_unlock(&sk->sk_callback_lock);
}
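/*
 * Note (added commentary): siw_qp_readq_init() below allocates the
 * inbound READ queue (IRQ) and the outbound READ queue (ORQ). Both sizes
 * are rounded up to a power of two; on allocation failure the
 * corresponding attrs size is reset to 0.
 */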
static int siw_qp_readq_init(struct siw_qp *qp, int irq_size, int orq_size)
{
	irq_size = roundup_pow_of_two(irq_size);
	qp->irq = vzalloc(irq_size * sizeof(struct siw_sqe));
	if (!qp->irq) {
		qp->attrs.irq_size = 0;
		return -ENOMEM;
	}
	orq_size = roundup_pow_of_two(orq_size);
	qp->orq = vzalloc(orq_size * sizeof(struct siw_sqe));
	if (!qp->orq) {
		qp->attrs.orq_size = 0;
		qp->attrs.irq_size = 0;
		vfree(qp->irq);
		return -ENOMEM;
	}
	qp->attrs.irq_size = irq_size;
	qp->attrs.orq_size = orq_size;
	siw_dbg_qp(qp, "ORD %d, IRD %d\n", orq_size, irq_size);
	return 0;
}
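/*
 * Note (added commentary): siw_qp_enable_crc() below sets up one MPA CRC
 * context per direction. Both descriptors share the global
 * siw_crypto_shash transform but keep private shash state, so RX and TX
 * can compute checksums independently.
 */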
static int siw_qp_enable_crc(struct siw_qp *qp)
{
	struct siw_rx_stream *c_rx = &qp->rx_stream;
	struct siw_iwarp_tx *c_tx = &qp->tx_ctx;
	int size;

	if (siw_crypto_shash == NULL)
		return -ENOENT;

	size = crypto_shash_descsize(siw_crypto_shash) +
		sizeof(struct shash_desc);

	c_tx->mpa_crc_hd = kzalloc(size, GFP_KERNEL);
	c_rx->mpa_crc_hd = kzalloc(size, GFP_KERNEL);
	if (!c_tx->mpa_crc_hd || !c_rx->mpa_crc_hd) {
		kfree(c_tx->mpa_crc_hd);
		kfree(c_rx->mpa_crc_hd);
		c_tx->mpa_crc_hd = NULL;
		c_rx->mpa_crc_hd = NULL;
		return -ENOMEM;
	}
	c_tx->mpa_crc_hd->tfm = siw_crypto_shash;
	c_rx->mpa_crc_hd->tfm = siw_crypto_shash;

	return 0;
}
/*
 * Send a non-signalled READ or WRITE to the peer side as negotiated
 * with the MPAv2 P2P setup protocol. The work request is only created
 * as a current active WR and does not consume Send Queue space.
 *
 * Caller must hold the QP state lock.
 */
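/*
 * Note (added commentary): for MPA_V2_RDMA_READ_RTR the zero-length READ
 * additionally occupies an ORQ slot (see orq_get_free()/siw_read_to_orq()
 * below), so the peer's READ RESPONSE can be matched like any other
 * outbound READ.
 */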
int siw_qp_mpa_rts(struct siw_qp *qp, enum mpa_v2_ctrl ctrl)
{
	struct siw_wqe *wqe = tx_wqe(qp);
	unsigned long flags;
	int rv = 0;

	spin_lock_irqsave(&qp->sq_lock, flags);

	if (unlikely(wqe->wr_status != SIW_WR_IDLE)) {
		spin_unlock_irqrestore(&qp->sq_lock, flags);
		return -EIO;
	}
	memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);

	wqe->wr_status = SIW_WR_QUEUED;

	wqe->sqe.num_sge = 1;
	wqe->sqe.sge[0].length = 0;
	wqe->sqe.sge[0].laddr = 0;
	wqe->sqe.sge[0].lkey = 0;
	/*
	 * While it must not be checked for inbound zero length
	 * READ/WRITE, some HW may treat STag 0 specially.
	 */

	if (ctrl & MPA_V2_RDMA_WRITE_RTR)
		wqe->sqe.opcode = SIW_OP_WRITE;
	else if (ctrl & MPA_V2_RDMA_READ_RTR) {
		struct siw_sqe *rreq = NULL;

		wqe->sqe.opcode = SIW_OP_READ;

		spin_lock(&qp->orq_lock);

		if (qp->attrs.orq_size)
			rreq = orq_get_free(qp);

		siw_read_to_orq(rreq, &wqe->sqe);

		spin_unlock(&qp->orq_lock);
	}

	wqe->wr_status = SIW_WR_IDLE;

	spin_unlock_irqrestore(&qp->sq_lock, flags);

	rv = siw_sq_start(qp);
/*
 * Map memory access error to DDP tagged error
 */
enum ddp_ecode siw_tagged_error(enum siw_access_state state)
{
	switch (state) {
	case E_STAG_INVALID:
		return DDP_ECODE_T_INVALID_STAG;
	case E_BASE_BOUNDS:
		return DDP_ECODE_T_BASE_BOUNDS;
	case E_PD_MISMATCH:
		return DDP_ECODE_T_STAG_NOT_ASSOC;
	case E_ACCESS_PERM:
		/*
		 * RFC 5041 (DDP) lacks an ecode for insufficient access
		 * permissions. 'Invalid STag' seems to be the closest
		 * match though.
		 */
		return DDP_ECODE_T_INVALID_STAG;
	default:
		return DDP_ECODE_T_INVALID_STAG;
	}
}
/*
 * Map memory access error to RDMAP protection error
 */
enum rdmap_ecode siw_rdmap_error(enum siw_access_state state)
{
	switch (state) {
	case E_STAG_INVALID:
		return RDMAP_ECODE_INVALID_STAG;
	case E_BASE_BOUNDS:
		return RDMAP_ECODE_BASE_BOUNDS;
	case E_PD_MISMATCH:
		return RDMAP_ECODE_STAG_NOT_ASSOC;
	case E_ACCESS_PERM:
		return RDMAP_ECODE_ACCESS_RIGHTS;
	default:
		return RDMAP_ECODE_UNSPECIFIED;
	}
}
void siw_init_terminate(struct siw_qp *qp, enum term_elayer layer, u8 etype,
			u8 ecode, int in_tx)
{
	if (!qp->term_info.valid) {
		memset(&qp->term_info, 0, sizeof(qp->term_info));
		qp->term_info.layer = layer;
		qp->term_info.etype = etype;
		qp->term_info.ecode = ecode;
		qp->term_info.in_tx = in_tx;
		qp->term_info.valid = 1;
	}
	siw_dbg_qp(qp, "init TERM: layer %d, type %d, code %d, in tx %s\n",
		   layer, etype, ecode, in_tx ? "yes" : "no");
}
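/*
 * Note (added commentary): only the first error is recorded here; while
 * qp->term_info.valid is set, later calls do not overwrite the pending
 * TERMINATE information until siw_send_terminate() consumes it.
 */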
/*
 * Send a TERMINATE message, as defined in RFCs 5040/5041/5044/6581.
 * Sending TERMINATE messages is best effort - such messages
 * can only be sent if the QP is still connected and it does
 * not have another outbound message in progress, i.e. the
 * TERMINATE message must not interfere with an incomplete current
 * transmit operation.
 */
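/*
 * Note (added commentary): the message is assembled as an iovec of up to
 * three parts: iov[0] carries the TERMINATE header itself, iov[1]
 * optionally carries a copy of the DDP/RDMAP header of the offending
 * message, and the last element carries the MPA CRC field (computed only
 * when CRC is enabled on this connection).
 */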
void siw_send_terminate(struct siw_qp *qp)
{
	struct kvec iov[3];
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };
	struct iwarp_terminate *term = NULL;
	union iwarp_hdr *err_hdr = NULL;
	struct socket *s = qp->attrs.sk;
	struct siw_rx_stream *srx = &qp->rx_stream;
	union iwarp_hdr *rx_hdr = &srx->hdr;
	u32 crc = 0;
	int num_frags, len_terminate, rv;

	if (!qp->term_info.valid)
		return;

	qp->term_info.valid = 0;

	if (tx_wqe(qp)->wr_status == SIW_WR_INPROGRESS) {
		siw_dbg_qp(qp, "cannot send TERMINATE: op %d in progress\n",
			   tx_type(tx_wqe(qp)));
		return;
	}
	if (!s && qp->cep)
		/* QP not yet in RTS. Take socket from connection end point */
		s = qp->cep->sock;

	if (!s) {
		siw_dbg_qp(qp, "cannot send TERMINATE: not connected\n");
		return;
	}

	term = kzalloc(sizeof(*term), GFP_KERNEL);
	if (!term)
		return;

	term->ddp_qn = cpu_to_be32(RDMAP_UNTAGGED_QN_TERMINATE);
	term->ddp_msn = cpu_to_be32(1);

	iov[0].iov_base = term;
	iov[0].iov_len = sizeof(*term);

	if ((qp->term_info.layer == TERM_ERROR_LAYER_DDP) ||
	    ((qp->term_info.layer == TERM_ERROR_LAYER_RDMAP) &&
	     (qp->term_info.etype != RDMAP_ETYPE_CATASTROPHIC))) {
		err_hdr = kzalloc(sizeof(*err_hdr), GFP_KERNEL);
		if (!err_hdr) {
			kfree(term);
			return;
		}
	}
	memcpy(&term->ctrl, &iwarp_pktinfo[RDMAP_TERMINATE].ctrl,
	       sizeof(struct iwarp_ctrl));

	__rdmap_term_set_layer(term, qp->term_info.layer);
	__rdmap_term_set_etype(term, qp->term_info.etype);
	__rdmap_term_set_ecode(term, qp->term_info.ecode);
	switch (qp->term_info.layer) {
	case TERM_ERROR_LAYER_RDMAP:
		if (qp->term_info.etype == RDMAP_ETYPE_CATASTROPHIC)
			/* No additional DDP/RDMAP header to be included */
			break;

		if (qp->term_info.etype == RDMAP_ETYPE_REMOTE_PROTECTION) {
			/*
			 * Complete RDMAP frame will get attached, and
			 * DDP segment length is valid
			 */
			if (qp->term_info.in_tx) {
				struct iwarp_rdma_rreq *rreq;
				struct siw_wqe *wqe = tx_wqe(qp);

				/* Inbound RREQ error, detected during
				 * RRESP creation. Take state from
				 * current TX work queue element to
				 * reconstruct peer's RREQ.
				 */
				rreq = (struct iwarp_rdma_rreq *)err_hdr;

				memcpy(&rreq->ctrl,
				       &iwarp_pktinfo[RDMAP_RDMA_READ_REQ].ctrl,
				       sizeof(struct iwarp_ctrl));

				rreq->ddp_qn =
					htonl(RDMAP_UNTAGGED_QN_RDMA_READ);

				/* Provide RREQ's MSN as kept aside */
				rreq->ddp_msn = htonl(wqe->sqe.sge[0].length);

				rreq->ddp_mo = htonl(wqe->processed);
				rreq->sink_stag = htonl(wqe->sqe.rkey);
				rreq->sink_to = cpu_to_be64(wqe->sqe.raddr);
				rreq->read_size = htonl(wqe->sqe.sge[0].length);
				rreq->source_stag = htonl(wqe->sqe.sge[0].lkey);
				rreq->source_to =
					cpu_to_be64(wqe->sqe.sge[0].laddr);

				iov[1].iov_base = rreq;
				iov[1].iov_len = sizeof(*rreq);

				rx_hdr = (union iwarp_hdr *)rreq;
			} else {
				/* Take RDMAP/DDP information from
				 * current (failed) inbound frame.
				 */
				iov[1].iov_base = rx_hdr;

				if (__rdmap_get_opcode(&rx_hdr->ctrl) ==
				    RDMAP_RDMA_READ_REQ)
					iov[1].iov_len =
						sizeof(struct iwarp_rdma_rreq);
				else
					iov[1].iov_len =
						sizeof(struct iwarp_send);
			}
		} else {
			/* Do not report DDP hdr information if packet
			 * layout is not known
			 */
			if ((qp->term_info.ecode == RDMAP_ECODE_VERSION) ||
			    (qp->term_info.ecode == RDMAP_ECODE_OPCODE))
				break;

			iov[1].iov_base = rx_hdr;

			/* Only DDP frame will get attached */
			if (rx_hdr->ctrl.ddp_rdmap_ctrl & DDP_FLAG_TAGGED)
				iov[1].iov_len =
					sizeof(struct iwarp_rdma_write);
			else
				iov[1].iov_len = sizeof(struct iwarp_send);
		}
		term->ctrl.mpa_len = cpu_to_be16(iov[1].iov_len);
		break;
	case TERM_ERROR_LAYER_DDP:
		/* Report error encountered during DDP processing.
		 * This can only happen as a result of inbound
		 * DDP processing.
		 */

		/* Do not report DDP hdr information if packet
		 * layout is not known
		 */
		if (((qp->term_info.etype == DDP_ETYPE_TAGGED_BUF) &&
		     (qp->term_info.ecode == DDP_ECODE_T_VERSION)) ||
		    ((qp->term_info.etype == DDP_ETYPE_UNTAGGED_BUF) &&
		     (qp->term_info.ecode == DDP_ECODE_UT_VERSION)))
			break;

		iov[1].iov_base = rx_hdr;

		if (rx_hdr->ctrl.ddp_rdmap_ctrl & DDP_FLAG_TAGGED)
			iov[1].iov_len = sizeof(struct iwarp_ctrl_tagged);
		else
			iov[1].iov_len = sizeof(struct iwarp_ctrl_untagged);
	if (term->flag_m || term->flag_d || term->flag_r) {
		iov[2].iov_base = &crc;
		iov[2].iov_len = sizeof(crc);
		len_terminate = sizeof(*term) + iov[1].iov_len + MPA_CRC_SIZE;
		num_frags = 3;
	} else {
		iov[1].iov_base = &crc;
		iov[1].iov_len = sizeof(crc);
		len_terminate = sizeof(*term) + MPA_CRC_SIZE;
		num_frags = 2;
	}

	/* Adjust DDP Segment Length parameter, if valid */
	if (term->flag_m) {
		u32 real_ddp_len = be16_to_cpu(rx_hdr->ctrl.mpa_len);
		enum rdma_opcode op = __rdmap_get_opcode(&rx_hdr->ctrl);

		real_ddp_len -= iwarp_pktinfo[op].hdr_len - MPA_HDR_SIZE;
		rx_hdr->ctrl.mpa_len = cpu_to_be16(real_ddp_len);
	}

	term->ctrl.mpa_len =
		cpu_to_be16(len_terminate - (MPA_HDR_SIZE + MPA_CRC_SIZE));
	if (qp->tx_ctx.mpa_crc_hd) {
		crypto_shash_init(qp->tx_ctx.mpa_crc_hd);
		if (crypto_shash_update(qp->tx_ctx.mpa_crc_hd,
					(u8 *)iov[0].iov_base,
					iov[0].iov_len))
			goto out;

		if (num_frags == 3) {
			if (crypto_shash_update(qp->tx_ctx.mpa_crc_hd,
						(u8 *)iov[1].iov_base,
						iov[1].iov_len))
				goto out;
		}
		crypto_shash_final(qp->tx_ctx.mpa_crc_hd, (u8 *)&crc);
	}

	rv = kernel_sendmsg(s, &msg, iov, num_frags, len_terminate);
	siw_dbg_qp(qp, "sent TERM: %s, layer %d, type %d, code %d (%d bytes)\n",
		   rv == len_terminate ? "success" : "failure",
		   __rdmap_term_layer(term), __rdmap_term_etype(term),
		   __rdmap_term_ecode(term), rv);
out:
	kfree(term);
	kfree(err_hdr);
}
/*
 * Handle all attrs other than state
 */
static void siw_qp_modify_nonstate(struct siw_qp *qp,
				   struct siw_qp_attrs *attrs,
				   enum siw_qp_attr_mask mask)
{
	if (mask & SIW_QP_ATTR_ACCESS_FLAGS) {
		if (attrs->flags & SIW_RDMA_BIND_ENABLED)
			qp->attrs.flags |= SIW_RDMA_BIND_ENABLED;
		else
			qp->attrs.flags &= ~SIW_RDMA_BIND_ENABLED;

		if (attrs->flags & SIW_RDMA_WRITE_ENABLED)
			qp->attrs.flags |= SIW_RDMA_WRITE_ENABLED;
		else
			qp->attrs.flags &= ~SIW_RDMA_WRITE_ENABLED;

		if (attrs->flags & SIW_RDMA_READ_ENABLED)
			qp->attrs.flags |= SIW_RDMA_READ_ENABLED;
		else
			qp->attrs.flags &= ~SIW_RDMA_READ_ENABLED;
	}
}
static int siw_qp_nextstate_from_idle(struct siw_qp *qp,
				      struct siw_qp_attrs *attrs,
				      enum siw_qp_attr_mask mask)
{
	int rv = 0;

	switch (attrs->state) {
	case SIW_QP_STATE_RTS:
		if (attrs->flags & SIW_MPA_CRC) {
			rv = siw_qp_enable_crc(qp);
			if (rv)
				break;
		}
		if (!(mask & SIW_QP_ATTR_LLP_HANDLE)) {
			siw_dbg_qp(qp, "no socket\n");
			rv = -EINVAL;
			break;
		}
		if (!(mask & SIW_QP_ATTR_MPA)) {
			siw_dbg_qp(qp, "no MPA\n");
			rv = -EINVAL;
			break;
		}
		/*
		 * Initialize iWARP TX state
		 */
		qp->tx_ctx.ddp_msn[RDMAP_UNTAGGED_QN_SEND] = 0;
		qp->tx_ctx.ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ] = 0;
		qp->tx_ctx.ddp_msn[RDMAP_UNTAGGED_QN_TERMINATE] = 0;

		/*
		 * Initialize iWARP RX state
		 */
		qp->rx_stream.ddp_msn[RDMAP_UNTAGGED_QN_SEND] = 1;
		qp->rx_stream.ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ] = 1;
		qp->rx_stream.ddp_msn[RDMAP_UNTAGGED_QN_TERMINATE] = 1;

		/*
		 * Init IRD free queue, caller has already checked
		 * limits.
		 */
		rv = siw_qp_readq_init(qp, attrs->irq_size,
				       attrs->orq_size);
		if (rv)
			break;

		qp->attrs.sk = attrs->sk;
		qp->attrs.state = SIW_QP_STATE_RTS;

		siw_dbg_qp(qp, "enter RTS: crc=%s, ord=%u, ird=%u\n",
			   attrs->flags & SIW_MPA_CRC ? "y" : "n",
			   qp->attrs.orq_size, qp->attrs.irq_size);
		break;

	case SIW_QP_STATE_ERROR:
		qp->attrs.state = SIW_QP_STATE_ERROR;

		siw_cep_put(qp->cep);
static int siw_qp_nextstate_from_rts(struct siw_qp *qp,
				     struct siw_qp_attrs *attrs)
{
	int drop_conn = 0;

	switch (attrs->state) {
	case SIW_QP_STATE_CLOSING:
		/*
		 * Verbs: move to IDLE if SQ and ORQ are empty.
		 * Move to ERROR otherwise. But first of all we must
		 * close the connection. So we keep CLOSING or ERROR
		 * as a transient state, schedule connection drop work
		 * and wait for the socket state change upcall to
		 * come back closed.
		 */
		if (tx_wqe(qp)->wr_status == SIW_WR_IDLE)
			qp->attrs.state = SIW_QP_STATE_CLOSING;
		else
			qp->attrs.state = SIW_QP_STATE_ERROR;

		drop_conn = 1;
		break;

	case SIW_QP_STATE_TERMINATE:
		qp->attrs.state = SIW_QP_STATE_TERMINATE;

		siw_init_terminate(qp, TERM_ERROR_LAYER_RDMAP,
				   RDMAP_ETYPE_CATASTROPHIC,
				   RDMAP_ECODE_UNSPECIFIED, 1);
		drop_conn = 1;
		break;

	case SIW_QP_STATE_ERROR:
		/*
		 * This is an emergency close.
		 *
		 * Any in progress transmit operation will get
		 * cancelled.
		 * This will likely result in a protocol failure,
		 * if a TX operation is in transit. The caller
		 * could unconditionally wait to give the current
		 * operation a chance to complete.
		 * Esp., how to handle the non-empty IRQ case?
		 * The peer was asking for data transfer at a valid
		 * point in time.
		 */
		qp->attrs.state = SIW_QP_STATE_ERROR;
static void siw_qp_nextstate_from_term(struct siw_qp *qp,
				       struct siw_qp_attrs *attrs)
{
	switch (attrs->state) {
	case SIW_QP_STATE_ERROR:
		qp->attrs.state = SIW_QP_STATE_ERROR;

		if (tx_wqe(qp)->wr_status != SIW_WR_IDLE)
			siw_sq_flush(qp);
static int siw_qp_nextstate_from_close(struct siw_qp *qp,
				       struct siw_qp_attrs *attrs)
{
	switch (attrs->state) {
	case SIW_QP_STATE_IDLE:
		WARN_ON(tx_wqe(qp)->wr_status != SIW_WR_IDLE);
		qp->attrs.state = SIW_QP_STATE_IDLE;
		break;

	case SIW_QP_STATE_CLOSING:
		/*
		 * The LLP may already have moved the QP to CLOSING
		 * due to a graceful peer close init.
		 */
		break;

	case SIW_QP_STATE_ERROR:
		/*
		 * QP was moved to CLOSING by LLP event
		 * not yet seen by user.
		 */
		qp->attrs.state = SIW_QP_STATE_ERROR;

		if (tx_wqe(qp)->wr_status != SIW_WR_IDLE)
			siw_sq_flush(qp);
		break;

	default:
		siw_dbg_qp(qp, "state transition undefined: %s => %s\n",
			   siw_qp_state_to_string[qp->attrs.state],
			   siw_qp_state_to_string[attrs->state]);
/*
 * Caller must hold qp->state_lock
 */
int siw_qp_modify(struct siw_qp *qp, struct siw_qp_attrs *attrs,
		  enum siw_qp_attr_mask mask)
{
	int drop_conn = 0, rv = 0;

	siw_dbg_qp(qp, "state: %s => %s\n",
		   siw_qp_state_to_string[qp->attrs.state],
		   siw_qp_state_to_string[attrs->state]);

	if (mask != SIW_QP_ATTR_STATE)
		siw_qp_modify_nonstate(qp, attrs, mask);

	if (!(mask & SIW_QP_ATTR_STATE))
		return 0;

	switch (qp->attrs.state) {
	case SIW_QP_STATE_IDLE:
	case SIW_QP_STATE_RTR:
		rv = siw_qp_nextstate_from_idle(qp, attrs, mask);
		break;

	case SIW_QP_STATE_RTS:
		drop_conn = siw_qp_nextstate_from_rts(qp, attrs);
		break;

	case SIW_QP_STATE_TERMINATE:
		siw_qp_nextstate_from_term(qp, attrs);
		break;

	case SIW_QP_STATE_CLOSING:
		siw_qp_nextstate_from_close(qp, attrs);
		break;
	}
	if (drop_conn)
		siw_qp_cm_drop(qp, 0);

	return rv;
}
void siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe)
{
	rreq->opcode = sqe->opcode;
	rreq->sge[0].laddr = sqe->sge[0].laddr;
	rreq->sge[0].length = sqe->sge[0].length;
	rreq->sge[0].lkey = sqe->sge[0].lkey;
	rreq->sge[1].lkey = sqe->sge[1].lkey;
	rreq->flags = sqe->flags | SIW_WQE_VALID;
}
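/*
 * Note (added commentary): siw_activate_tx_from_sq() below moves the next
 * Send Queue element into the QP's single active TX WQE. It validates the
 * opcode, handles inline data and READ fencing, and reserves an ORQ slot
 * for READ requests before the SQE is handed back to the application.
 */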
static int siw_activate_tx_from_sq(struct siw_qp *qp)
{
	struct siw_sqe *sqe;
	struct siw_wqe *wqe = tx_wqe(qp);
	int rv = 1;

	sqe = sq_get_next(qp);
	if (!sqe)
		return 0;

	memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
	wqe->wr_status = SIW_WR_QUEUED;

	/* First copy SQE to kernel private memory */
	memcpy(&wqe->sqe, sqe, sizeof(*sqe));

	if (wqe->sqe.opcode >= SIW_NUM_OPCODES) {
		rv = -EINVAL;
		goto out;
	}
	if (wqe->sqe.flags & SIW_WQE_INLINE) {
		if (wqe->sqe.opcode != SIW_OP_SEND &&
		    wqe->sqe.opcode != SIW_OP_WRITE) {
			rv = -EINVAL;
			goto out;
		}
		if (wqe->sqe.sge[0].length > SIW_MAX_INLINE) {
			rv = -EINVAL;
			goto out;
		}
		wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1];
		wqe->sqe.sge[0].lkey = 0;
		wqe->sqe.num_sge = 1;
	}
	if (wqe->sqe.flags & SIW_WQE_READ_FENCE) {
		/* A READ cannot be fenced */
		if (unlikely(wqe->sqe.opcode == SIW_OP_READ ||
			     wqe->sqe.opcode ==
				     SIW_OP_READ_LOCAL_INV)) {
			siw_dbg_qp(qp, "cannot fence read\n");
			rv = -EINVAL;
			goto out;
		}
		spin_lock(&qp->orq_lock);

		if (qp->attrs.orq_size && !siw_orq_empty(qp)) {
			qp->tx_ctx.orq_fence = 1;
			rv = 0;
		}
		spin_unlock(&qp->orq_lock);

	} else if (wqe->sqe.opcode == SIW_OP_READ ||
		   wqe->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
		struct siw_sqe *rreq;

		if (unlikely(!qp->attrs.orq_size)) {
			/* We negotiated not to send READ requests */
			rv = -EINVAL;
			goto out;
		}
		wqe->sqe.num_sge = 1;

		spin_lock(&qp->orq_lock);

		rreq = orq_get_free(qp);
		if (rreq) {
			/*
			 * Make an immediate copy in ORQ to be ready
			 * to process loopback READ reply
			 */
			siw_read_to_orq(rreq, &wqe->sqe);
		} else {
			qp->tx_ctx.orq_fence = 1;
			rv = 0;
		}
		spin_unlock(&qp->orq_lock);
	}

	/* Clear SQE, can be reused by application */
	smp_store_mb(sqe->flags, 0);
out:
	if (unlikely(rv < 0)) {
		siw_dbg_qp(qp, "error %d\n", rv);
		wqe->wr_status = SIW_WR_IDLE;
	}
	return rv;
}
/*
 * Must be called with SQ locked.
 * To avoid complete SQ starvation by constant inbound READ requests,
 * the active IRQ will not be served after qp->irq_burst, if the
 * SQ has pending work.
 */
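/*
 * Note (added commentary): if the inbound READ queue holds a valid entry,
 * a READ RESPONSE WQE is fabricated from it; otherwise the SQ is served.
 * SIW_IRQ_MAXBURST_SQ_ACTIVE bounds how many IRQ entries are served in a
 * row while the SQ has pending work.
 */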
int siw_activate_tx(struct siw_qp *qp)
{
	struct siw_sqe *irqe;
	struct siw_wqe *wqe = tx_wqe(qp);

	if (!qp->attrs.irq_size)
		return siw_activate_tx_from_sq(qp);

	irqe = &qp->irq[qp->irq_get % qp->attrs.irq_size];

	if (!(irqe->flags & SIW_WQE_VALID))
		return siw_activate_tx_from_sq(qp);

	/*
	 * Avoid local WQE processing starvation in case
	 * of constant inbound READ request stream
	 */
	if (sq_get_next(qp) && ++qp->irq_burst >= SIW_IRQ_MAXBURST_SQ_ACTIVE) {
		qp->irq_burst = 0;
		return siw_activate_tx_from_sq(qp);
	}
	memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
	wqe->wr_status = SIW_WR_QUEUED;

	/* start READ RESPONSE */
	wqe->sqe.opcode = SIW_OP_READ_RESPONSE;

	if (irqe->num_sge) {
		wqe->sqe.num_sge = 1;
		wqe->sqe.sge[0].length = irqe->sge[0].length;
		wqe->sqe.sge[0].laddr = irqe->sge[0].laddr;
		wqe->sqe.sge[0].lkey = irqe->sge[0].lkey;
	} else {
		wqe->sqe.num_sge = 0;
	}

	/* Retain original RREQ's message sequence number for
	 * potential error reporting cases.
	 */
	wqe->sqe.sge[1].length = irqe->sge[1].length;

	wqe->sqe.rkey = irqe->rkey;
	wqe->sqe.raddr = irqe->raddr;

	/* mark current IRQ entry free */
	smp_store_mb(irqe->flags, 0);
/*
 * Check if current CQ state qualifies for calling CQ completion
 * handler. Must be called with CQ lock held.
 */
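/*
 * Note (added commentary): returns true when the CQE being posted should
 * trigger the completion handler, i.e. the CQ is armed for the next
 * completion, or it is armed for solicited events and the work request
 * carries SIW_WQE_SOLICITED. Arming is one-shot and cleared here.
 */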
static bool siw_cq_notify_now(struct siw_cq *cq, u32 flags)
{
	u32 cq_notify;

	if (!cq->base_cq.comp_handler)
		return false;

	/* Read application shared notification state */
	cq_notify = READ_ONCE(cq->notify->flags);

	if ((cq_notify & SIW_NOTIFY_NEXT_COMPLETION) ||
	    ((cq_notify & SIW_NOTIFY_SOLICITED) &&
	     (flags & SIW_WQE_SOLICITED))) {
		/*
		 * CQ notification is one-shot: Since the
		 * current CQE causes user notification,
		 * the CQ gets disarmed and must be re-armed
		 * by the user for a new notification.
		 */
		WRITE_ONCE(cq->notify->flags, SIW_NOTIFY_NOT);

		return true;
	}
	return false;
}
int siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes,
		     enum siw_wc_status status)
{
	struct siw_cq *cq = qp->scq;
	u32 sqe_flags = sqe->flags;
	struct siw_cqe *cqe;
	u32 idx;
	bool notify;
	unsigned long flags;

	spin_lock_irqsave(&cq->lock, flags);

	idx = cq->cq_put % cq->num_cqe;
	cqe = &cq->queue[idx];

	if (!READ_ONCE(cqe->flags)) {
		cqe->opcode = sqe->opcode;
		cqe->status = status;

		if (rdma_is_kernel_res(&cq->base_cq.res))
			cqe->base_qp = &qp->base_qp;
		else
			cqe->qp_id = qp_id(qp);

		/* mark CQE valid for application */
		WRITE_ONCE(cqe->flags, SIW_WQE_VALID);

		smp_store_mb(sqe->flags, 0);

		notify = siw_cq_notify_now(cq, sqe_flags);

		spin_unlock_irqrestore(&cq->lock, flags);

		if (notify) {
			siw_dbg_cq(cq, "Call completion handler\n");
			cq->base_cq.comp_handler(&cq->base_cq,
						 cq->base_cq.cq_context);
		}
	} else {
		spin_unlock_irqrestore(&cq->lock, flags);
		siw_cq_event(cq, IB_EVENT_CQ_ERR);
	}

	smp_store_mb(sqe->flags, 0);
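/*
 * Note (added commentary): siw_rqe_complete() below is the receive side
 * counterpart. If the receive carried a remote invalidate request, the
 * invalidated STag is reported via cqe->inval_stag together with the
 * SIW_WQE_REM_INVAL flag.
 */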
int siw_rqe_complete(struct siw_qp *qp, struct siw_rqe *rqe, u32 bytes,
		     u32 inval_stag, enum siw_wc_status status)
{
	struct siw_cq *cq = qp->rcq;
	struct siw_cqe *cqe;
	u32 idx;
	bool notify;
	unsigned long flags;

	spin_lock_irqsave(&cq->lock, flags);

	idx = cq->cq_put % cq->num_cqe;
	cqe = &cq->queue[idx];

	if (!READ_ONCE(cqe->flags)) {
		u8 cqe_flags = SIW_WQE_VALID;

		cqe->opcode = SIW_OP_RECEIVE;
		cqe->status = status;

		if (rdma_is_kernel_res(&cq->base_cq.res)) {
			cqe->base_qp = &qp->base_qp;
			if (inval_stag) {
				cqe_flags |= SIW_WQE_REM_INVAL;
				cqe->inval_stag = inval_stag;
			}
		} else {
			cqe->qp_id = qp_id(qp);
		}
		/* mark CQE valid for application */
		WRITE_ONCE(cqe->flags, cqe_flags);

		smp_store_mb(rqe->flags, 0);

		notify = siw_cq_notify_now(cq, SIW_WQE_SIGNALLED);

		spin_unlock_irqrestore(&cq->lock, flags);

		if (notify) {
			siw_dbg_cq(cq, "Call completion handler\n");
			cq->base_cq.comp_handler(&cq->base_cq,
						 cq->base_cq.cq_context);
		}
	} else {
		spin_unlock_irqrestore(&cq->lock, flags);
		siw_cq_event(cq, IB_EVENT_CQ_ERR);
	}

	smp_store_mb(rqe->flags, 0);
/*
 * Flush SQ and ORQ entries to CQ.
 *
 * Must be called with QP state write lock held.
 * Therefore, SQ and ORQ lock must not be taken.
 */
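/*
 * Note (added commentary): flush order below is ORQ first (outstanding
 * READs), then any in-progress TX WQE, then the remaining SQ entries.
 * Flushed work requests complete with SIW_WC_WR_FLUSH_ERR status.
 */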
void siw_sq_flush(struct siw_qp *qp)
{
	struct siw_sqe *sqe;
	struct siw_wqe *wqe = tx_wqe(qp);
	int async_event = 0;

	/*
	 * Start with completing any work currently on the ORQ
	 */
	while (qp->attrs.orq_size) {
		sqe = &qp->orq[qp->orq_get % qp->attrs.orq_size];
		if (!READ_ONCE(sqe->flags))
			break;

		if (siw_sqe_complete(qp, sqe, 0, SIW_WC_WR_FLUSH_ERR) != 0)
			break;

		WRITE_ONCE(sqe->flags, 0);
		qp->orq_get++;
	}
	/*
	 * Flush an in-progress WQE if present
	 */
	if (wqe->wr_status != SIW_WR_IDLE) {
		siw_dbg_qp(qp, "flush current SQE, type %d, status %d\n",
			   tx_type(wqe), wqe->wr_status);

		siw_wqe_put_mem(wqe, tx_type(wqe));

		if (tx_type(wqe) != SIW_OP_READ_RESPONSE &&
		    ((tx_type(wqe) != SIW_OP_READ &&
		      tx_type(wqe) != SIW_OP_READ_LOCAL_INV) ||
		     wqe->wr_status == SIW_WR_QUEUED))
			/*
			 * An in-progress Read Request is already in
			 * the ORQ
			 */
			siw_sqe_complete(qp, &wqe->sqe, wqe->bytes,
					 SIW_WC_WR_FLUSH_ERR);

		wqe->wr_status = SIW_WR_IDLE;
	}
	/*
	 * Flush the Send Queue
	 */
	while (qp->attrs.sq_size) {
		sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];
		if (!READ_ONCE(sqe->flags))
			break;

		async_event = 1;
		if (siw_sqe_complete(qp, sqe, 0, SIW_WC_WR_FLUSH_ERR) != 0)
			/*
			 * Shall IB_EVENT_SQ_DRAINED be suppressed if work
			 * completion fails?
			 */
			break;

		WRITE_ONCE(sqe->flags, 0);
		qp->sq_get++;
	}
	if (async_event)
		siw_qp_event(qp, IB_EVENT_SQ_DRAINED);
}
/*
 * Flush recv queue entries to CQ. Also
 * takes care of pending active tagged and untagged
 * inbound transfers, which have target memory
 * referenced.
 *
 * Must be called with QP state write lock held.
 * Therefore, RQ lock must not be taken.
 */
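/*
 * Note (added commentary): two active inbound WQEs may exist, one for
 * untagged operations (rx_untagged) and one for tagged operations
 * (rx_tagged); both are drained below before the receive queue itself
 * is flushed.
 */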
void siw_rq_flush(struct siw_qp *qp)
{
	struct siw_wqe *wqe = &qp->rx_untagged.wqe_active;

	/*
	 * Flush an in-progress untagged operation if present
	 */
	if (wqe->wr_status != SIW_WR_IDLE) {
		siw_dbg_qp(qp, "flush current rqe, type %d, status %d\n",
			   rx_type(wqe), wqe->wr_status);

		siw_wqe_put_mem(wqe, rx_type(wqe));

		if (rx_type(wqe) == SIW_OP_RECEIVE) {
			siw_rqe_complete(qp, &wqe->rqe, wqe->bytes,
					 0, SIW_WC_WR_FLUSH_ERR);
		} else if (rx_type(wqe) != SIW_OP_READ &&
			   rx_type(wqe) != SIW_OP_READ_RESPONSE &&
			   rx_type(wqe) != SIW_OP_WRITE) {
			siw_sqe_complete(qp, &wqe->sqe, 0, SIW_WC_WR_FLUSH_ERR);
		}
		wqe->wr_status = SIW_WR_IDLE;
	}
	wqe = &qp->rx_tagged.wqe_active;

	if (wqe->wr_status != SIW_WR_IDLE) {
		siw_wqe_put_mem(wqe, rx_type(wqe));
		wqe->wr_status = SIW_WR_IDLE;
	}
	/*
	 * Flush the Receive Queue
	 */
	while (qp->attrs.rq_size) {
		struct siw_rqe *rqe =
			&qp->recvq[qp->rq_get % qp->attrs.rq_size];

		if (!READ_ONCE(rqe->flags))
			break;

		if (siw_rqe_complete(qp, rqe, 0, 0, SIW_WC_WR_FLUSH_ERR) != 0)
			break;

		WRITE_ONCE(rqe->flags, 0);
		qp->rq_get++;
	}
}
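/*
 * Note (added commentary): siw_qp_add() below registers the QP in the
 * device's XArray; the index allocated by xa_alloc() becomes the QP
 * number reported to the RDMA core.
 */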
int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp)
{
	int rv = xa_alloc(&sdev->qp_xa, &qp->base_qp.qp_num, qp, xa_limit_32b,
			  GFP_KERNEL);

	if (!rv) {
		kref_init(&qp->ref);
		siw_dbg_qp(qp, "new QP\n");
	}
	return rv;
}

void siw_free_qp(struct kref *ref)
{
	struct siw_qp *found, *qp = container_of(ref, struct siw_qp, ref);
	struct siw_device *sdev = qp->sdev;
	unsigned long flags;

	if (qp->cep)
		siw_cep_put(qp->cep);

	found = xa_erase(&sdev->qp_xa, qp_id(qp));
	WARN_ON(found != qp);
	spin_lock_irqsave(&sdev->lock, flags);
	list_del(&qp->devq);
	spin_unlock_irqrestore(&sdev->lock, flags);

	siw_put_tx_cpu(qp->tx_cpu);

	atomic_dec(&sdev->num_qp);