/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "hfi.h"
#include "verbs_txreq.h"
#include "qp.h"

/* cut down ridiculously long IB macro names */
#define OP(x) UC_OP(x)
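/*
 * Note: UC_OP(x) is expected to expand to the matching IB_OPCODE_UC_*
 * constant, so OP(SEND_FIRST) below reads as IB_OPCODE_UC_SEND_FIRST,
 * and similarly for the other UC opcodes.
 */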
/**
 * hfi1_make_uc_req - construct a request packet (SEND, RDMA write)
 * @qp: a pointer to the QP
 * @ps: the current packet state
 *
 * Assume s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
        struct hfi1_qp_priv *priv = qp->priv;
        struct ib_other_headers *ohdr;
        struct rvt_swqe *wqe;
        u32 hwords;
        u32 bth0 = 0;
        u32 len;
        u32 pmtu = qp->pmtu;
        int middle = 0;

        ps->s_txreq = get_txreq(ps->dev, qp);
        if (IS_ERR(ps->s_txreq))
                goto bail_no_tx;

        if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
                if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
                        goto bail;
                /* We are in the error state, flush the work request. */
                smp_read_barrier_depends(); /* see post_one_send() */
                if (qp->s_last == ACCESS_ONCE(qp->s_head))
                        goto bail;
                /* If DMAs are in progress, we can't flush immediately. */
                if (iowait_sdma_pending(&priv->s_iowait)) {
                        qp->s_flags |= RVT_S_WAIT_DMA;
                        goto bail;
                }
                clear_ahg(qp);
                wqe = rvt_get_swqe_ptr(qp, qp->s_last);
                hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
                goto done_free_tx;
        }
        ps->s_txreq->phdr.hdr.hdr_type = priv->hdr_type;
        if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
                /* header size in 32-bit words LRH+BTH = (8+12)/4. */
                hwords = 5;
                if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
                        ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth;
                else
                        ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
        } else {
                /* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
                hwords = 7;
                if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
                    (hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))))
                        ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth;
                else
                        ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth;
        }
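        /*
         * Note the asymmetry above: a 9B packet carries a GRH whenever
         * the AH asks for one, while a 16B packet only uses the long
         * (GRH) header when the destination LID is a multicast LID.
         */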
        /* Get the next send request. */
        wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
        qp->s_wqe = NULL;
        switch (qp->s_state) {
        default:
                if (!(ib_rvt_state_ops[qp->state] &
                    RVT_PROCESS_NEXT_SEND_OK))
                        goto bail;
                /* Check if send work queue is empty. */
                smp_read_barrier_depends(); /* see post_one_send() */
                if (qp->s_cur == ACCESS_ONCE(qp->s_head)) {
                        clear_ahg(qp);
                        goto bail;
                }
                /*
                 * Local operations are processed immediately
                 * after all prior requests have completed.
                 */
                if (wqe->wr.opcode == IB_WR_REG_MR ||
                    wqe->wr.opcode == IB_WR_LOCAL_INV) {
                        int local_ops = 0;
                        int err = 0;

                        if (qp->s_last != qp->s_cur)
                                goto bail;
                        if (++qp->s_cur == qp->s_size)
                                qp->s_cur = 0;
                        if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
                                err = rvt_invalidate_rkey(
                                        qp, wqe->wr.ex.invalidate_rkey);
                                local_ops = 1;
                        }
                        hfi1_send_complete(qp, wqe, err ? IB_WC_LOC_PROT_ERR
                                                        : IB_WC_SUCCESS);
                        if (local_ops)
                                atomic_dec(&qp->local_ops_pending);
                        qp->s_hdrwords = 0;
                        goto done_free_tx;
                }
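                /*
                 * Local operations such as IB_WR_REG_MR and
                 * IB_WR_LOCAL_INV never put a packet on the wire: the
                 * txreq allocated above is returned via done_free_tx
                 * once the completion has been generated.
                 */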
                /*
                 * Start a new request.
                 */
                qp->s_psn = wqe->psn;
                qp->s_sge.sge = wqe->sg_list[0];
                qp->s_sge.sg_list = wqe->sg_list + 1;
                qp->s_sge.num_sge = wqe->wr.num_sge;
                qp->s_sge.total_len = wqe->length;
                len = wqe->length;
                qp->s_len = len;
                switch (wqe->wr.opcode) {
                case IB_WR_SEND:
                case IB_WR_SEND_WITH_IMM:
                        if (len > pmtu) {
                                qp->s_state = OP(SEND_FIRST);
                                len = pmtu;
                                break;
                        }
                        if (wqe->wr.opcode == IB_WR_SEND) {
                                qp->s_state = OP(SEND_ONLY);
                        } else {
                                qp->s_state =
                                        OP(SEND_ONLY_WITH_IMMEDIATE);
                                /* Immediate data comes after the BTH */
                                ohdr->u.imm_data = wqe->wr.ex.imm_data;
                                hwords += 1;
                        }
                        if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                                bth0 |= IB_BTH_SOLICITED;
                        qp->s_wqe = wqe;
                        if (++qp->s_cur >= qp->s_size)
                                qp->s_cur = 0;
                        break;
                case IB_WR_RDMA_WRITE:
                case IB_WR_RDMA_WRITE_WITH_IMM:
                        ohdr->u.rc.reth.vaddr =
                                cpu_to_be64(wqe->rdma_wr.remote_addr);
                        ohdr->u.rc.reth.rkey =
                                cpu_to_be32(wqe->rdma_wr.rkey);
                        ohdr->u.rc.reth.length = cpu_to_be32(len);
                        hwords += sizeof(struct ib_reth) / 4;
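                        /*
                         * struct ib_reth is an 8-byte vaddr, a 4-byte
                         * rkey, and a 4-byte length, so the RETH adds
                         * 16 / 4 = 4 dwords to the header.
                         */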
                        if (len > pmtu) {
                                qp->s_state = OP(RDMA_WRITE_FIRST);
                                len = pmtu;
                                break;
                        }
                        if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
                                qp->s_state = OP(RDMA_WRITE_ONLY);
                        } else {
                                qp->s_state =
                                        OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
                                /* Immediate data comes after the RETH */
                                ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
                                hwords += 1;
                                if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                                        bth0 |= IB_BTH_SOLICITED;
                        }
                        qp->s_wqe = wqe;
                        if (++qp->s_cur >= qp->s_size)
                                qp->s_cur = 0;
                        break;

                default:
                        goto bail;
                }
                break;
        case OP(SEND_FIRST):
                qp->s_state = OP(SEND_MIDDLE);
                /* FALLTHROUGH */
        case OP(SEND_MIDDLE):
                len = qp->s_len;
                if (len > pmtu) {
                        len = pmtu;
                        middle = HFI1_CAP_IS_KSET(SDMA_AHG);
                        break;
                }
                if (wqe->wr.opcode == IB_WR_SEND) {
                        qp->s_state = OP(SEND_LAST);
                } else {
                        qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
                        /* Immediate data comes after the BTH */
                        ohdr->u.imm_data = wqe->wr.ex.imm_data;
                        hwords += 1;
                }
                if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                        bth0 |= IB_BTH_SOLICITED;
                qp->s_wqe = wqe;
                if (++qp->s_cur >= qp->s_size)
                        qp->s_cur = 0;
                break;
        case OP(RDMA_WRITE_FIRST):
                qp->s_state = OP(RDMA_WRITE_MIDDLE);
                /* FALLTHROUGH */
        case OP(RDMA_WRITE_MIDDLE):
                len = qp->s_len;
                if (len > pmtu) {
                        len = pmtu;
                        middle = HFI1_CAP_IS_KSET(SDMA_AHG);
                        break;
                }
                if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
                        qp->s_state = OP(RDMA_WRITE_LAST);
                } else {
                        qp->s_state =
                                OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
                        /* Immediate data comes after the BTH */
                        ohdr->u.imm_data = wqe->wr.ex.imm_data;
                        hwords += 1;
                        if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                                bth0 |= IB_BTH_SOLICITED;
                }
                qp->s_wqe = wqe;
                if (++qp->s_cur >= qp->s_size)
                        qp->s_cur = 0;
                break;
        }
        qp->s_len -= len;
        qp->s_hdrwords = hwords;
        ps->s_txreq->sde = priv->s_sde;
        ps->s_txreq->ss = &qp->s_sge;
        ps->s_txreq->s_cur_size = len;
        hfi1_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
                             mask_psn(qp->s_psn++), middle, ps);
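        /*
         * The extra 2 dwords added to hdr_dwords below account for the
         * 8-byte PBC (Per Buffer Control) word that precedes the
         * headers on the send side.
         */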
        ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
        return 1;

done_free_tx:
        hfi1_put_txreq(ps->s_txreq);
        ps->s_txreq = NULL;
        return 1;

bail:
        hfi1_put_txreq(ps->s_txreq);

bail_no_tx:
        ps->s_txreq = NULL;
        qp->s_flags &= ~RVT_S_BUSY;
        qp->s_hdrwords = 0;
        return 0;
}
/**
 * hfi1_uc_rcv - handle an incoming UC packet
 * @packet: the packet structure containing the header, payload,
 *          length, and QP for this packet
 *
 * This is called from qp_rcv() to process an incoming UC packet
 * for the given QP.
 * Called at interrupt level.
 */
void hfi1_uc_rcv(struct hfi1_packet *packet)
{
        struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
        void *data = packet->payload;
        u32 tlen = packet->tlen;
        struct rvt_qp *qp = packet->qp;
        struct ib_other_headers *ohdr = packet->ohdr;
        u32 opcode = packet->opcode;
        u32 hdrsize = packet->hlen;
        u32 psn;
        u32 pad = packet->pad;
        struct ib_wc wc;
        u32 pmtu = qp->pmtu;
        struct ib_reth *reth;
        int ret;
        u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);
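        /*
         * extra_bytes covers everything in the packet beyond the
         * payload: the pad bytes, any additional tail byte reported in
         * packet->extra_byte (16B packets), and the ICRC; SIZE_OF_CRC
         * is in dwords, hence the << 2 to convert it to bytes.
         */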
        if (hfi1_ruc_check_hdr(ibp, packet))
                return;

        process_ecn(qp, packet, true);

        psn = ib_bth_get_psn(ohdr);
        /* Compare the PSN versus the expected PSN. */
        if (unlikely(cmp_psn(psn, qp->r_psn) != 0)) {
                /*
                 * Handle a sequence error.
                 * Silently drop any current message.
                 */
                qp->r_psn = psn;
inv:
                if (qp->r_state == OP(SEND_FIRST) ||
                    qp->r_state == OP(SEND_MIDDLE)) {
                        set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
                        qp->r_sge.num_sge = 0;
                } else {
                        rvt_put_ss(&qp->r_sge);
                }
                qp->r_state = OP(SEND_LAST);
                switch (opcode) {
                case OP(SEND_FIRST):
                case OP(SEND_ONLY):
                case OP(SEND_ONLY_WITH_IMMEDIATE):
                        goto send_first;

                case OP(RDMA_WRITE_FIRST):
                case OP(RDMA_WRITE_ONLY):
                case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
                        goto rdma_first;

                default:
                        goto drop;
                }
        }
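        /*
         * Unlike RC, UC has no ACK/NAK protocol: on a PSN mismatch the
         * current message is silently abandoned, and reception resyncs
         * on the next FIRST or ONLY packet, which is why only those
         * opcodes are redispatched from the sequence-error path above.
         */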
        /* Check for opcode sequence errors. */
        switch (qp->r_state) {
        case OP(SEND_FIRST):
        case OP(SEND_MIDDLE):
                if (opcode == OP(SEND_MIDDLE) ||
                    opcode == OP(SEND_LAST) ||
                    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
                        break;
                goto inv;

        case OP(RDMA_WRITE_FIRST):
        case OP(RDMA_WRITE_MIDDLE):
                if (opcode == OP(RDMA_WRITE_MIDDLE) ||
                    opcode == OP(RDMA_WRITE_LAST) ||
                    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
                        break;
                goto inv;

        default:
                if (opcode == OP(SEND_FIRST) ||
                    opcode == OP(SEND_ONLY) ||
                    opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
                    opcode == OP(RDMA_WRITE_FIRST) ||
                    opcode == OP(RDMA_WRITE_ONLY) ||
                    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
                        break;
                goto inv;
        }
        if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
                rvt_comm_est(qp);

        /* OK, process the packet. */
        switch (opcode) {
        case OP(SEND_FIRST):
        case OP(SEND_ONLY):
        case OP(SEND_ONLY_WITH_IMMEDIATE):
send_first:
                if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) {
                        qp->r_sge = qp->s_rdma_read_sge;
                } else {
                        ret = hfi1_rvt_get_rwqe(qp, 0);
                        if (ret < 0)
                                goto op_err;
                        if (!ret)
                                goto drop;
                        /*
                         * qp->s_rdma_read_sge will be the owner
                         * of the mr references.
                         */
                        qp->s_rdma_read_sge = qp->r_sge;
                }
                qp->r_rcv_len = 0;
                if (opcode == OP(SEND_ONLY))
                        goto no_immediate_data;
                else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
                        goto send_last_imm;
                /* FALLTHROUGH */
        case OP(SEND_MIDDLE):
                /* Check for invalid length PMTU or posted rwqe len. */
                /*
                 * There is no padding for 9B packets, but 16B packets
                 * arrive with some padding, since the CRC and LT bytes
                 * we always append must be flit-aligned.
                 */
                if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
                        goto rewind;
                qp->r_rcv_len += pmtu;
                if (unlikely(qp->r_rcv_len > qp->r_len))
                        goto rewind;
                hfi1_copy_sge(&qp->r_sge, data, pmtu, false, false);
                break;
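        /*
         * A MIDDLE packet must carry exactly one PMTU of payload;
         * anything else indicates a malformed message, so the SGE
         * state is rewound and the packet dropped.
         */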
        case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
                wc.ex.imm_data = ohdr->u.imm_data;
                wc.wc_flags = IB_WC_WITH_IMM;
                goto send_last;
        case OP(SEND_LAST):
no_immediate_data:
                wc.ex.imm_data = 0;
                wc.wc_flags = 0;
send_last:
                /* Check for invalid length. */
                /* LAST len should be >= 1 */
                if (unlikely(tlen < (hdrsize + extra_bytes)))
                        goto rewind;
                /* Don't count the CRC. */
                tlen -= (hdrsize + extra_bytes);
                wc.byte_len = tlen + qp->r_rcv_len;
                if (unlikely(wc.byte_len > qp->r_len))
                        goto rewind;
                wc.opcode = IB_WC_RECV;
                hfi1_copy_sge(&qp->r_sge, data, tlen, false, false);
                rvt_put_ss(&qp->s_rdma_read_sge);
last_imm:
                wc.wr_id = qp->r_wr_id;
                wc.status = IB_WC_SUCCESS;
                wc.qp = &qp->ibqp;
                wc.src_qp = qp->remote_qpn;
                wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
                /*
                 * It seems that IB mandates the presence of an SL in a
                 * work completion only for the UD transport (see section
                 * 11.4.2 of IBTA Vol. 1).
                 *
                 * However, the way the SL is chosen below is consistent
                 * with the way that IB/qib works and is trying to avoid
                 * introducing incompatibilities.
                 *
                 * See also OPA Vol. 1, section 9.7.6, and table 9-17.
                 */
                wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
                /* zero fields that are N/A */
                wc.vendor_err = 0;
                wc.pkey_index = 0;
                wc.dlid_path_bits = 0;
                wc.port_num = 0;
                /* Signal completion event if the solicited bit is set. */
                rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
                             (ohdr->bth[0] &
                              cpu_to_be32(IB_BTH_SOLICITED)) != 0);
                break;
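        /*
         * The third argument to rvt_cq_enter() requests a completion
         * event; it is only set when the sender marked the BTH with
         * the solicited-event bit.
         */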
        case OP(RDMA_WRITE_FIRST):
        case OP(RDMA_WRITE_ONLY):
        case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
rdma_first:
                if (unlikely(!(qp->qp_access_flags &
                               IB_ACCESS_REMOTE_WRITE))) {
                        goto drop;
                }
                reth = &ohdr->u.rc.reth;
                qp->r_len = be32_to_cpu(reth->length);
                qp->r_rcv_len = 0;
                qp->r_sge.sg_list = NULL;
                if (qp->r_len != 0) {
                        u32 rkey = be32_to_cpu(reth->rkey);
                        u64 vaddr = be64_to_cpu(reth->vaddr);
                        int ok;

                        /* Check rkey */
                        ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
                                         vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
                        if (unlikely(!ok))
                                goto drop;
                        qp->r_sge.num_sge = 1;
                } else {
                        qp->r_sge.num_sge = 0;
                        qp->r_sge.sge.mr = NULL;
                        qp->r_sge.sge.vaddr = NULL;
                        qp->r_sge.sge.length = 0;
                        qp->r_sge.sge.sge_length = 0;
                }
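                /*
                 * A zero-length RDMA write is legal: there is no buffer
                 * to validate, so the rkey check is skipped and the SGE
                 * is simply cleared above.
                 */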
                if (opcode == OP(RDMA_WRITE_ONLY)) {
                        goto rdma_last;
                } else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) {
                        wc.ex.imm_data = ohdr->u.rc.imm_data;
                        goto rdma_last_imm;
                }
                /* FALLTHROUGH */
        case OP(RDMA_WRITE_MIDDLE):
                /* Check for invalid length PMTU or posted rwqe len. */
                if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
                        goto drop;
                qp->r_rcv_len += pmtu;
                if (unlikely(qp->r_rcv_len > qp->r_len))
                        goto drop;
                hfi1_copy_sge(&qp->r_sge, data, pmtu, true, false);
                break;
        case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
                wc.ex.imm_data = ohdr->u.imm_data;
rdma_last_imm:
                wc.wc_flags = IB_WC_WITH_IMM;

                /* Check for invalid length. */
                /* LAST len should be >= 1 */
                if (unlikely(tlen < (hdrsize + extra_bytes)))
                        goto drop;
                /* Don't count the CRC. */
                tlen -= (hdrsize + extra_bytes);
                if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
                        goto drop;
                if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) {
                        rvt_put_ss(&qp->s_rdma_read_sge);
                } else {
                        ret = hfi1_rvt_get_rwqe(qp, 1);
                        if (ret < 0)
                                goto op_err;
                        if (!ret)
                                goto drop;
                }
                wc.byte_len = qp->r_len;
                wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
                hfi1_copy_sge(&qp->r_sge, data, tlen, true, false);
                rvt_put_ss(&qp->r_sge);
                goto last_imm;
        case OP(RDMA_WRITE_LAST):
rdma_last:
                /* Check for invalid length. */
                /* LAST len should be >= 1 */
                if (unlikely(tlen < (hdrsize + extra_bytes)))
                        goto drop;
                /* Don't count the CRC. */
                tlen -= (hdrsize + extra_bytes);
                if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
                        goto drop;
                hfi1_copy_sge(&qp->r_sge, data, tlen, true, false);
                rvt_put_ss(&qp->r_sge);
                break;
        default:
                /* Drop packet for unknown opcodes. */
                goto drop;
        }
        qp->r_psn++;
        qp->r_state = opcode;
        return;

rewind:
        set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
        qp->r_sge.num_sge = 0;
drop:
        ibp->rvp.n_pkt_drops++;
        return;

op_err:
        rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
}