/*
 * Copyright(c) 2015 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/spinlock.h>

#include "hfi.h"
#include "mad.h"
#include "qp.h"
#include "verbs_txreq.h"
#include "trace.h"

/*
 * Validate a RWQE and fill in the SGE state.
 * Return 1 if OK.
 */
static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
{
	int i, j, ret;
	struct ib_wc wc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_sge_state *ss;

	rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
	ss = &qp->r_sge;
	ss->sg_list = qp->r_sg_list;
	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
				  NULL, &wqe->sg_list[i],
				  IB_ACCESS_LOCAL_WRITE);
		if (unlikely(ret <= 0))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ss->total_len = qp->r_len;
	ret = 1;
	goto bail;
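
	/*
	 * Drop the MR references already taken for this WQE and report a
	 * local protection error to the receive CQ.
	 */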
bad_lkey:
	while (j) {
		struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

		rvt_put_mr(sge->mr);
	}
	ss->num_sge = 0;
	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	/* Signal solicited completion event. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}

/**
 * hfi1_rvt_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int hfi1_rvt_get_rwqe(struct rvt_qp *qp, int wr_id_only)
{
	unsigned long flags;
	struct rvt_rq *rq;
	struct rvt_rwq *wq;
	struct rvt_srq *srq;
	struct rvt_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	int ret;

	if (qp->ibqp.srq) {
		srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}

	spin_lock_irqsave(&rq->lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ret = 0;
		goto unlock;
	}

	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
	if (unlikely(tail == wq->head)) {
		ret = 0;
		goto unlock;
	}
	/* Make sure entry is read after head index is read. */
	smp_rmb();
	wqe = rvt_get_rwqe_ptr(rq, tail);
	/*
	 * Even though we update the tail index in memory, the verbs
	 * consumer is not supposed to post more entries until a
	 * completion is generated.
	 */
	if (++tail >= rq->size)
		tail = 0;
	wq->tail = tail;
	if (!wr_id_only && !init_sge(qp, wqe)) {
		ret = -1;
		goto unlock;
	}
	qp->r_wr_id = wqe->wr_id;

	ret = 1;
	set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
	if (handler) {
		u32 n;

		/*
		 * Validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
			goto bail;
		}
	}
unlock:
	spin_unlock_irqrestore(&rq->lock, flags);
bail:
	return ret;
}
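
/*
 * A GID matches when the interface ID is equal and the subnet prefix is
 * either the port's configured prefix or the default prefix, so packets
 * addressed with the default GID prefix are still accepted.
 */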
static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
	return (gid->global.interface_id == id &&
		(gid->global.subnet_prefix == gid_prefix ||
		 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}

/*
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the hfi1_migrate_qp() call.
 */
int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet)
{
	__be64 guid;
	unsigned long flags;
	struct rvt_qp *qp = packet->qp;
	u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
	u32 dlid = packet->dlid;
	u32 slid = packet->slid;
	u32 sl = packet->sl;
	bool migrated;
	u32 bth0, bth1;
	u16 pkey;

	bth0 = be32_to_cpu(packet->ohdr->bth[0]);
	bth1 = be32_to_cpu(packet->ohdr->bth[1]);
	if (packet->etype == RHF_RCV_TYPE_BYPASS) {
		pkey = hfi1_16B_get_pkey(packet->hdr);
		migrated = bth1 & OPA_BTH_MIG_REQ;
	} else {
		pkey = ib_bth_get_pkey(packet->ohdr);
		migrated = bth0 & IB_BTH_MIG_REQ;
	}

	if (qp->s_mig_state == IB_MIG_ARMED && migrated) {
		if (!packet->grh) {
			if ((rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
			     IB_AH_GRH) &&
			    (packet->etype != RHF_RCV_TYPE_BYPASS))
				return 1;
		} else {
			const struct ib_global_route *grh;

			if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
			      IB_AH_GRH))
				return 1;
			grh = rdma_ah_read_grh(&qp->alt_ah_attr);
			guid = get_sguid(ibp, grh->sgid_index);
			if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
				    guid))
				return 1;
			if (!gid_ok(&packet->grh->sgid,
				    grh->dgid.global.subnet_prefix,
				    grh->dgid.global.interface_id))
				return 1;
		}
		if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), pkey,
					    sc5, slid))) {
			hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
				      slid, dlid);
			return 1;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
		if (slid != rdma_ah_get_dlid(&qp->alt_ah_attr) ||
		    ppd_from_ibp(ibp)->port !=
		    rdma_ah_get_port_num(&qp->alt_ah_attr))
			return 1;
		spin_lock_irqsave(&qp->s_lock, flags);
		hfi1_migrate_qp(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else {
		if (!packet->grh) {
			if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
			     IB_AH_GRH) &&
			    (packet->etype != RHF_RCV_TYPE_BYPASS))
				return 1;
		} else {
			const struct ib_global_route *grh;

			if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
			      IB_AH_GRH))
				return 1;
			grh = rdma_ah_read_grh(&qp->remote_ah_attr);
			guid = get_sguid(ibp, grh->sgid_index);
			if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
				    guid))
				return 1;
			if (!gid_ok(&packet->grh->sgid,
				    grh->dgid.global.subnet_prefix,
				    grh->dgid.global.interface_id))
				return 1;
		}
		if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), pkey,
					    sc5, slid))) {
			hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
				      slid, dlid);
			return 1;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 */
		if ((slid != rdma_ah_get_dlid(&qp->remote_ah_attr)) ||
		    ppd_from_ibp(ibp)->port != qp->port_num)
			return 1;
		if (qp->s_mig_state == IB_MIG_REARM && !migrated)
			qp->s_mig_state = IB_MIG_ARMED;
	}

	return 0;
}

/**
 * ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from hfi1_do_send() to forward a WQE addressed to the
 * same HFI.
 * Note that although we are single threaded due to the send engine, we
 * still have to protect against post_send().  We don't have to worry
 * about receive interrupts since this is a connected protocol and all
 * packets will pass through here.
 */
static void ruc_loopback(struct rvt_qp *sqp)
{
	struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct rvt_qp *qp;
	struct rvt_swqe *wqe;
	struct rvt_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;
	bool release;
	int ret;
	bool copy_last = false;
	int local_ops = 0;

	rcu_read_lock();

	/*
	 * Note that we check the responder QP state after
	 * checking the requester's state.
	 */
	qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
			    sqp->remote_qpn);

	spin_lock_irqsave(&sqp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
	    !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= RVT_S_BUSY;

again:
	smp_read_barrier_depends(); /* see post_one_send() */
	if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
		goto clr_busy;
	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);

	/* Return if it is not OK to start a new work request. */
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
			goto clr_busy;
		/* We are in the error state, flush the work request. */
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 * We increment s_cur to indicate s_last is in progress.
	 */
	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
		ibp->rvp.n_pkt_drops++;
		/*
		 * For RC, the requester would timeout and retry so
		 * shortcut the timeouts and just signal too many retries.
		 */
		if (sqp->ibqp.qp_type == IB_QPT_RC)
			send_status = IB_WC_RETRY_EXC_ERR;
		else
			send_status = IB_WC_SUCCESS;
		goto serr;
	}

	memset(&wc, 0, sizeof(wc));
	send_status = IB_WC_SUCCESS;

	release = true;
	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_REG_MR:
		goto send_comp;

	case IB_WR_LOCAL_INV:
		if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
			if (rvt_invalidate_rkey(sqp,
						wqe->wr.ex.invalidate_rkey))
				send_status = IB_WC_LOC_PROT_ERR;
			local_ops = 1;
		}
		goto send_comp;

	case IB_WR_SEND_WITH_INV:
		if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) {
			wc.wc_flags = IB_WC_WITH_INVALIDATE;
			wc.ex.invalidate_rkey = wqe->wr.ex.invalidate_rkey;
		}
		goto send;

	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
send:
		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		if (wqe->length > qp->r_len)
			goto inv_err;
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		ret = hfi1_rvt_get_rwqe(qp, 1);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		/* skip copy_last set and qp_access_flags recheck */
		goto do_write;
	case IB_WR_RDMA_WRITE:
		copy_last = rvt_is_user_qp(qp);
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
do_write:
		if (wqe->length == 0)
			break;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		qp->r_sge.sg_list = NULL;
		qp->r_sge.num_sge = 1;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		release = false;
		sqp->s_sge.sg_list = NULL;
		sqp->s_sge.num_sge = 1;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  wqe->atomic_wr.remote_addr,
					  wqe->atomic_wr.rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
		sdata = wqe->atomic_wr.compare_add;
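		/*
		 * Both atomics must return the value the target held before
		 * the operation: atomic64_add_return() yields the new value,
		 * so the added operand is subtracted back out, while
		 * cmpxchg() already returns the prior value.
		 */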
		*(u64 *)sqp->s_sge.sge.vaddr =
			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64)atomic64_add_return(sdata, maddr) - sdata :
			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
				     sdata, wqe->atomic_wr.swap);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}

	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		WARN_ON_ONCE(len == 0);
		hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, release, copy_last);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (!release)
				rvt_put_mr(sge->mr);
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}
	if (release)
		rvt_put_ss(&qp->r_sge);

	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
	wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
	wc.port_num = 1;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_lock_irqsave(&sqp->s_lock, flags);
	ibp->rvp.n_loop_pkts++;
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	hfi1_send_complete(sqp, wqe, send_status);
	if (local_ops) {
		atomic_dec(&sqp->local_ops_pending);
		local_ops = 0;
	}
	goto again;

rnr_nak:
	/* Handle RNR NAK */
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	ibp->rvp.n_rnr_naks++;
	/*
	 * Note: we don't need the s_lock held since the BUSY flag
	 * makes this single threaded.
	 */
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
		goto clr_busy;
	/*
	 * rvt_add_rnr_timer() takes an AETH-style value; shift the 5-bit
	 * RNR timeout code into the syndrome (credit) field.
	 */
	rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
				IB_AETH_CREDIT_SHIFT);
	goto clr_busy;
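
	/*
	 * Error exits: each label below selects the requester's completion
	 * status (send_status) and the responder's WC status before falling
	 * into the common error handling at err/serr.
	 */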
op_err:
	send_status = IB_WC_REM_OP_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

inv_err:
	send_status =
		sqp->ibqp.qp_type == IB_QPT_RC ?
			IB_WC_REM_INV_REQ_ERR :
			IB_WC_SUCCESS;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	/* responder goes to error state */
	rvt_rc_error(qp, wc.status);

serr:
	spin_lock_irqsave(&sqp->s_lock, flags);
	hfi1_send_complete(sqp, wqe, send_status);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

		sqp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~RVT_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	rcu_read_unlock();
}

/**
 * hfi1_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the size of the header after the GRH, in 32-bit words
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
		  const struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
			    (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
			    (grh->flow_label << IB_GRH_FLOW_SHIFT));
	hdr->paylen = cpu_to_be16((hwords + nwords) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = IB_GRH_NEXT_HDR;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
	hdr->sgid.global.interface_id =
		grh->sgid_index < HFI1_GUIDS_PER_PORT ?
		get_sguid(ibp, grh->sgid_index) :
		get_sguid(ibp, HFI1_PORT_GUID_INDEX);
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words: 40 bytes / 4 = 10 words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}

#define BTH2_OFFSET (offsetof(struct hfi1_sdma_header, \
			      hdr.ibh.u.oth.bth[2]) / 4)
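
/*
 * BTH2_OFFSET is the index, in 32-bit words, of bth[2] (the word carrying
 * the PSN) within the SDMA header template; the AHG descriptors built
 * below use it as the edit target when patching the PSN in a copied
 * header.
 */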

/**
 * build_ahg - create ahg in s_ahg
 * @qp: a pointer to QP
 * @npsn: the next PSN for the request/response
 *
 * This routine handles the AHG by allocating an ahg entry and causing the
 * copy of the first middle packet's header.
 *
 * Subsequent middles use the copied entry, editing the
 * PSN with 1 or 2 edits.
 */
static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
{
	struct hfi1_qp_priv *priv = qp->priv;

	if (unlikely(qp->s_flags & RVT_S_AHG_CLEAR))
		clear_ahg(qp);
	if (!(qp->s_flags & RVT_S_AHG_VALID)) {
		/* first middle that needs copy */
		if (qp->s_ahgidx < 0)
			qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde);
		if (qp->s_ahgidx >= 0) {
			qp->s_ahgpsn = npsn;
			priv->s_ahg->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
			/* save to protect a change in another thread */
			priv->s_ahg->ahgidx = qp->s_ahgidx;
			qp->s_flags |= RVT_S_AHG_VALID;
		}
	} else {
		/* subsequent middle after valid */
		if (qp->s_ahgidx >= 0) {
			priv->s_ahg->tx_flags |= SDMA_TXREQ_F_USE_AHG;
			priv->s_ahg->ahgidx = qp->s_ahgidx;
			priv->s_ahg->ahgcount++;
			priv->s_ahg->ahgdesc[0] =
				sdma_build_ahg_descriptor(
					(__force u16)cpu_to_be16((u16)npsn),
					BTH2_OFFSET,
					16,
					16);
			if ((npsn & 0xffff0000) !=
			    (qp->s_ahgpsn & 0xffff0000)) {
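				/*
				 * The upper 16 bits of the PSN changed since
				 * the header copy (e.g. 0x0000ffff ->
				 * 0x00010000), so a second edit is needed to
				 * rewrite the upper half of the PSN in
				 * bth[2] as well.
				 */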
				priv->s_ahg->ahgcount++;
				priv->s_ahg->ahgdesc[1] =
					sdma_build_ahg_descriptor(
						(__force u16)cpu_to_be16(
							(u16)(npsn >> 16)),
						BTH2_OFFSET,
						0,
						16);
			}
		}
	}
}

static inline void hfi1_make_ruc_bth(struct rvt_qp *qp,
				     struct ib_other_headers *ohdr,
				     u32 bth0, u32 bth1, u32 bth2)
{
	bth1 |= qp->remote_qpn;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(bth1);
	ohdr->bth[2] = cpu_to_be32(bth2);
}

/**
 * hfi1_make_ruc_header_16B - build a 16B header
 * @qp: the queue pair
 * @ohdr: a pointer to the destination header memory
 * @bth0: bth0 passed in from the RC/UC builder
 * @bth2: bth2 passed in from the RC/UC builder
 * @middle: non-zero indicates ahg "could" be used
 * @ps: the current packet state
 *
 * This routine may disarm ahg under these situations:
 * - packet needs a GRH
 * - BECN needed
 * - migration state not IB_MIG_MIGRATED
 */
static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
					    struct ib_other_headers *ohdr,
					    u32 bth0, u32 bth2, int middle,
					    struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp = ps->ibp;
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u32 bth1 = 0;
	u32 slid;
	u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
	u8 l4 = OPA_16B_L4_IB_LOCAL;
	u8 extra_bytes = hfi1_get_16b_padding((qp->s_hdrwords << 2),
					      ps->s_txreq->s_cur_size);
	u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size +
				     extra_bytes + SIZE_OF_LT) >> 2);
	u8 becn = 0;

	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
	    hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) {
		struct ib_grh *grh;
		struct ib_global_route *grd =
			rdma_ah_retrieve_grh(&qp->remote_ah_attr);
		int hdrwords;

		/*
		 * Ensure OPA GIDs are transformed to IB gids
		 * before creating the GRH.
		 */
		if (grd->sgid_index == OPA_GID_INDEX)
			grd->sgid_index = 0;
		grh = &ps->s_txreq->phdr.hdr.opah.u.l.grh;
		l4 = OPA_16B_L4_IB_GLOBAL;
		hdrwords = qp->s_hdrwords - 4;
		qp->s_hdrwords += hfi1_make_grh(ibp, grh, grd,
						hdrwords, nwords);
		middle = 0;
	}

	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth1 |= OPA_BTH_MIG_REQ;
	else
		middle = 0;

	if (qp->s_flags & RVT_S_ECN) {
		qp->s_flags &= ~RVT_S_ECN;
		/* we recently received a FECN, so return a BECN */
		becn = 1;
		middle = 0;
	}
	if (middle)
		build_ahg(qp, bth2);
	else
		qp->s_flags &= ~RVT_S_AHG_VALID;

	bth0 |= pkey;
	bth0 |= extra_bytes << 20;
	hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);

	if (!ppd->lid)
		slid = be32_to_cpu(OPA_LID_PERMISSIVE);
	else
		slid = ppd->lid |
			(rdma_ah_get_path_bits(&qp->remote_ah_attr) &
			 ((1 << ppd->lmc) - 1));

	hfi1_make_16b_hdr(&ps->s_txreq->phdr.hdr.opah,
			  slid,
			  opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
				      16B),
			  (qp->s_hdrwords + nwords) >> 1,
			  pkey, becn, 0, l4, priv->s_sc);
}

/**
 * hfi1_make_ruc_header_9B - build a 9B header
 * @qp: the queue pair
 * @ohdr: a pointer to the destination header memory
 * @bth0: bth0 passed in from the RC/UC builder
 * @bth2: bth2 passed in from the RC/UC builder
 * @middle: non-zero indicates ahg "could" be used
 * @ps: the current packet state
 *
 * This routine may disarm ahg under these situations:
 * - packet needs a GRH
 * - BECN needed
 * - migration state not IB_MIG_MIGRATED
 */
static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
					   struct ib_other_headers *ohdr,
					   u32 bth0, u32 bth2, int middle,
					   struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp = ps->ibp;
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u32 bth1 = 0;
	u32 slid;
	u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
	u16 lrh0 = HFI1_LRH_BTH;
	u8 extra_bytes = -ps->s_txreq->s_cur_size & 3;
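	/*
	 * -size & 3 is the number of pad bytes needed to round the payload
	 * up to a 4-byte boundary, e.g. a 13-byte payload needs 3.
	 */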
	u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size +
				     extra_bytes) >> 2);

	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
		struct ib_grh *grh = &ps->s_txreq->phdr.hdr.ibh.u.l.grh;
		int hdrwords = qp->s_hdrwords - 2;

		lrh0 = HFI1_LRH_GRH;
		qp->s_hdrwords +=
			hfi1_make_grh(ibp, grh,
				      rdma_ah_read_grh(&qp->remote_ah_attr),
				      hdrwords, nwords);
		middle = 0;
	}
	lrh0 |= (priv->s_sc & 0xf) << 12 |
		(rdma_ah_get_sl(&qp->remote_ah_attr) & 0xf) << 4;

	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	else
		middle = 0;

	if (qp->s_flags & RVT_S_ECN) {
		qp->s_flags &= ~RVT_S_ECN;
		/* we recently received a FECN, so return a BECN */
		bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT);
		middle = 0;
	}
	if (middle)
		build_ahg(qp, bth2);
	else
		qp->s_flags &= ~RVT_S_AHG_VALID;

	bth0 |= pkey;
	bth0 |= extra_bytes << 20;
	hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);

	if (!ppd->lid)
		slid = be16_to_cpu(IB_LID_PERMISSIVE);
	else
		slid = ppd->lid |
			(rdma_ah_get_path_bits(&qp->remote_ah_attr) &
			 ((1 << ppd->lmc) - 1));
	hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh,
			 lrh0,
			 qp->s_hdrwords + nwords,
			 opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B),
			 slid);
}

typedef void (*hfi1_make_ruc_hdr)(struct rvt_qp *qp,
				  struct ib_other_headers *ohdr,
				  u32 bth0, u32 bth2, int middle,
				  struct hfi1_pkt_state *ps);

/* We support only two types - 9B and 16B for now */
static const hfi1_make_ruc_hdr hfi1_ruc_header_tbl[2] = {
	[HFI1_PKT_TYPE_9B] = &hfi1_make_ruc_header_9B,
	[HFI1_PKT_TYPE_16B] = &hfi1_make_ruc_header_16B
};

void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
			  u32 bth0, u32 bth2, int middle,
			  struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;

	/*
	 * reset s_ahg/AHG fields
	 *
	 * This ensures that the ahgentry/ahgcount
	 * are at a non-AHG default to protect
	 * build_verbs_tx_desc() from using
	 * an included ahg.
	 *
	 * build_ahg() will modify as appropriate
	 * to use the AHG feature.
	 */
	priv->s_ahg->tx_flags = 0;
	priv->s_ahg->ahgcount = 0;
	priv->s_ahg->ahgidx = 0;

	/* Make the appropriate header */
	hfi1_ruc_header_tbl[priv->hdr_type](qp, ohdr, bth0, bth2, middle, ps);
}

/* when sending, force a reschedule every one of these periods */
#define SEND_RESCHED_TIMEOUT (5 * HZ)  /* 5s in jiffies */
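
/*
 * Note that hfi1_do_send() divides the slice by 8 before use, so the
 * effective RC slice is timeout_jiffies / 8 and the UC/UD slice is
 * SEND_RESCHED_TIMEOUT / 8.
 */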

/**
 * schedule_send_yield - test for a yield required for QP send engine
 * @qp: a pointer to QP
 * @ps: a pointer to a structure with commonly looked-up values for
 *      the send engine progress
 *
 * This routine checks if the time slice for the QP has expired
 * for RC QPs, if so an additional work entry is queued. At this
 * point, other QPs have an opportunity to be scheduled. It
 * returns true if a yield is required, otherwise, false.
 */
static bool schedule_send_yield(struct rvt_qp *qp,
				struct hfi1_pkt_state *ps)
{
	ps->pkts_sent = true;

	if (unlikely(time_after(jiffies, ps->timeout))) {
		if (!ps->in_thread ||
		    workqueue_congested(ps->cpu, ps->ppd->hfi1_wq)) {
			spin_lock_irqsave(&qp->s_lock, ps->flags);
			qp->s_flags &= ~RVT_S_BUSY;
			hfi1_schedule_send(qp);
			spin_unlock_irqrestore(&qp->s_lock, ps->flags);
			this_cpu_inc(*ps->ppd->dd->send_schedule);
			trace_hfi1_rc_expired_time_slice(qp, true);
			return true;
		}

		cond_resched();
		this_cpu_inc(*ps->ppd->dd->send_schedule);
		ps->timeout = jiffies + ps->timeout_int;
	}

	trace_hfi1_rc_expired_time_slice(qp, false);
	return false;
}

void hfi1_do_send_from_rvt(struct rvt_qp *qp)
{
	hfi1_do_send(qp, false);
}

void _hfi1_do_send(struct work_struct *work)
{
	struct iowait *wait = container_of(work, struct iowait, iowork);
	struct rvt_qp *qp = iowait_to_qp(wait);

	hfi1_do_send(qp, true);
}

/**
 * hfi1_do_send - perform a send on a QP
 * @qp: a pointer to the QP
 * @in_thread: true if in a workqueue thread
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP.
 * Otherwise, two threads could send packets out of order.
 */
void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
{
	struct hfi1_pkt_state ps;
	struct hfi1_qp_priv *priv = qp->priv;
	int (*make_req)(struct rvt_qp *qp, struct hfi1_pkt_state *ps);

	ps.dev = to_idev(qp->ibqp.device);
	ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
	ps.ppd = ppd_from_ibp(ps.ibp);
	ps.in_thread = in_thread;

	trace_hfi1_rc_do_send(qp, in_thread);

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
		if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
				   ~((1 << ps.ppd->lmc) - 1)) ==
				  ps.ppd->lid)) {
			ruc_loopback(qp);
			return;
		}
		make_req = hfi1_make_rc_req;
		ps.timeout_int = qp->timeout_jiffies;
		break;
	case IB_QPT_UC:
		if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
				   ~((1 << ps.ppd->lmc) - 1)) ==
				  ps.ppd->lid)) {
			ruc_loopback(qp);
			return;
		}
		make_req = hfi1_make_uc_req;
		ps.timeout_int = SEND_RESCHED_TIMEOUT;
		break;
	default:
		make_req = hfi1_make_ud_req;
		ps.timeout_int = SEND_RESCHED_TIMEOUT;
	}

	spin_lock_irqsave(&qp->s_lock, ps.flags);

	/* Return if we are already busy processing a work request. */
	if (!hfi1_send_ok(qp)) {
		spin_unlock_irqrestore(&qp->s_lock, ps.flags);
		return;
	}

	qp->s_flags |= RVT_S_BUSY;

	ps.timeout_int = ps.timeout_int / 8;
	ps.timeout = jiffies + ps.timeout_int;
	ps.cpu = priv->s_sde ? priv->s_sde->cpu :
			cpumask_first(cpumask_of_node(ps.ppd->dd->node));
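	/*
	 * ps.cpu feeds the workqueue_congested() check in
	 * schedule_send_yield(): prefer the CPU of the QP's SDMA engine,
	 * falling back to the first CPU of the device's NUMA node.
	 */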
	ps.pkts_sent = false;

	/* ensure a pre-built packet is handled */
	ps.s_txreq = get_waiting_verbs_txreq(qp);
	do {
		/* Check for a constructed packet to be sent. */
		if (qp->s_hdrwords != 0) {
			spin_unlock_irqrestore(&qp->s_lock, ps.flags);
			/*
			 * If the packet cannot be sent now, return and
			 * the send engine will be woken up later.
			 */
			if (hfi1_verbs_send(qp, &ps))
				return;
			/* Record that s_ahg is empty. */
			qp->s_hdrwords = 0;
			/* allow other tasks to run */
			if (schedule_send_yield(qp, &ps))
				return;

			spin_lock_irqsave(&qp->s_lock, ps.flags);
		}
	} while (make_req(qp, &ps));
	iowait_starve_clear(ps.pkts_sent, &priv->s_iowait);
	spin_unlock_irqrestore(&qp->s_lock, ps.flags);
}

/*
 * This should be called with s_lock held.
 */
void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
			enum ib_wc_status status)
{
	u32 old_last, last;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		return;

	last = qp->s_last;
	old_last = last;
	trace_hfi1_qp_send_completion(qp, wqe, last);
	if (++last >= qp->s_size)
		last = 0;
	trace_hfi1_qp_send_completion(qp, wqe, last);
	qp->s_last = last;
	/* See post_send() */
	barrier();
	rvt_put_swqe(wqe);
	if (qp->ibqp.qp_type == IB_QPT_UD ||
	    qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);

	rvt_qp_swqe_complete(qp,
			     wqe,
			     ib_hfi1_wc_opcode[wqe->wr.opcode],
			     status);

	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;
	if (qp->s_tail == old_last)
		qp->s_tail = last;
	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
		qp->s_draining = 0;
}