/*
 * Copyright(c) 2015-2020 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/prefetch.h>
#include <rdma/ib_verbs.h>
#include <linux/etherdevice.h>

#include "hfi.h"
#include "trace.h"
#include "qp.h"
#include "sdma.h"
#include "debugfs.h"
#include "vnic.h"
#include "fault.h"

#include "ipoib.h"
#include "netdev.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt
/*
 * The size has to be longer than this string, so we can append
 * board/chip information to it in the initialization code.
 */
const char ib_hfi1_version[] = HFI1_DRIVER_VERSION "\n";
DEFINE_MUTEX(hfi1_mutex);	/* general driver use */

unsigned int hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
module_param_named(max_mtu, hfi1_max_mtu, uint, S_IRUGO);
MODULE_PARM_DESC(max_mtu, "Set max MTU bytes, default is " __stringify(
		 HFI1_DEFAULT_MAX_MTU));

unsigned int hfi1_cu = 1;
module_param_named(cu, hfi1_cu, uint, S_IRUGO);
MODULE_PARM_DESC(cu, "Credit return units");

unsigned long hfi1_cap_mask = HFI1_CAP_MASK_DEFAULT;
static int hfi1_caps_set(const char *val, const struct kernel_param *kp);
static int hfi1_caps_get(char *buffer, const struct kernel_param *kp);
static const struct kernel_param_ops cap_ops = {
	.set = hfi1_caps_set,
	.get = hfi1_caps_get
};
module_param_cb(cap_mask, &cap_ops, &hfi1_cap_mask, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(cap_mask, "Bit mask of enabled/disabled HW features");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Intel Omni-Path Architecture driver");
/*
 * MAX_PKT_RECV is the max # of packets processed per receive interrupt.
 */
#define MAX_PKT_RECV 64
/*
 * MAX_PKT_RECV_THREAD is the max # of packets processed before
 * the qp_wait_list queue is flushed.
 */
#define MAX_PKT_RECV_THREAD (MAX_PKT_RECV * 4)
#define EGR_HEAD_UPDATE_THRESHOLD 16

struct hfi1_ib_stats hfi1_stats;
static int hfi1_caps_set(const char *val, const struct kernel_param *kp)
{
	int ret = 0;
	unsigned long *cap_mask_ptr = (unsigned long *)kp->arg,
		cap_mask = *cap_mask_ptr, value, diff,
		write_mask = ((HFI1_CAP_WRITABLE_MASK << HFI1_CAP_USER_SHIFT) |
			      HFI1_CAP_WRITABLE_MASK);

	ret = kstrtoul(val, 0, &value);
	if (ret) {
		pr_warn("Invalid module parameter value for 'cap_mask'\n");
		goto done;
	}

	/* Get the changed bits (except the locked bit) */
	diff = value ^ (cap_mask & ~HFI1_CAP_LOCKED_SMASK);

	/* Remove any bits that are not allowed to change after driver load */
	if (HFI1_CAP_LOCKED() && (diff & ~write_mask)) {
		pr_warn("Ignoring non-writable capability bits %#lx\n",
			diff & ~write_mask);
		diff &= write_mask;
	}

	/* Mask off any reserved bits */
	diff &= ~HFI1_CAP_RESERVED_MASK;
	/* Clear any previously set and changing bits */
	cap_mask &= ~diff;
	/* Update the bits with the new capability */
	cap_mask |= (value & diff);
	/* Check for any kernel/user restrictions */
	diff = (cap_mask & (HFI1_CAP_MUST_HAVE_KERN << HFI1_CAP_USER_SHIFT)) ^
		((cap_mask & HFI1_CAP_MUST_HAVE_KERN) << HFI1_CAP_USER_SHIFT);
	cap_mask &= ~diff;
	/* Set the bitmask to the final set */
	*cap_mask_ptr = cap_mask;
done:
	return ret;
}
static int hfi1_caps_get(char *buffer, const struct kernel_param *kp)
{
	unsigned long cap_mask = *(unsigned long *)kp->arg;

	cap_mask &= ~HFI1_CAP_LOCKED_SMASK;
	cap_mask |= ((cap_mask & HFI1_CAP_K2U) << HFI1_CAP_USER_SHIFT);

	return scnprintf(buffer, PAGE_SIZE, "0x%lx", cap_mask);
}
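/*
 * Note (illustrative): because cap_mask is registered with
 * module_param_cb() above, it is exposed at
 * /sys/module/hfi1/parameters/cap_mask; reads go through
 * hfi1_caps_get() and writes through hfi1_caps_set(), which silently
 * drops locked, reserved, and kernel-must-have violations before
 * committing the new mask.
 */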
struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi)
{
	struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi);
	struct hfi1_devdata *dd = container_of(ibdev,
					       struct hfi1_devdata, verbs_dev);

	return dd->pcidev;
}
/*
 * Return count of units with at least one port ACTIVE.
 */
int hfi1_count_active_units(void)
{
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	unsigned long index, flags;
	int pidx, nunits_active = 0;

	xa_lock_irqsave(&hfi1_dev_table, flags);
	xa_for_each(&hfi1_dev_table, index, dd) {
		if (!(dd->flags & HFI1_PRESENT) || !dd->kregbase1)
			continue;
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			if (ppd->lid && ppd->linkup) {
				nunits_active++;
				break;
			}
		}
	}
	xa_unlock_irqrestore(&hfi1_dev_table, flags);
	return nunits_active;
}
/*
 * Get address of eager buffer from its index (allocated in chunks, not
 * contiguous).
 */
static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf,
			       u8 *update)
{
	u32 idx = rhf_egr_index(rhf), offset = rhf_egr_buf_offset(rhf);

	*update |= !(idx & (rcd->egrbufs.threshold - 1)) && !offset;
	return (void *)(((u64)(rcd->egrbufs.rcvtids[idx].addr)) +
			(offset * RCV_BUF_BLOCK_SIZE));
}
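/*
 * Worked example (illustrative): an RHF decoding to idx = 5, offset = 3
 * yields rcd->egrbufs.rcvtids[5].addr + 3 * RCV_BUF_BLOCK_SIZE. *update
 * is latched when a packet lands at offset 0 of an index on the
 * egrbufs threshold boundary, telling the caller to advance the eager
 * buffer head register.
 */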
static inline void *hfi1_get_header(struct hfi1_ctxtdata *rcd,
				    __le32 *rhf_addr)
{
	u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));

	return (void *)(rhf_addr - rcd->rhf_offset + offset);
}

static inline struct ib_header *hfi1_get_msgheader(struct hfi1_ctxtdata *rcd,
						   __le32 *rhf_addr)
{
	return (struct ib_header *)hfi1_get_header(rcd, rhf_addr);
}

static inline struct hfi1_16b_header
	     *hfi1_get_16B_header(struct hfi1_ctxtdata *rcd,
				  __le32 *rhf_addr)
{
	return (struct hfi1_16b_header *)hfi1_get_header(rcd, rhf_addr);
}
/*
 * Validate and encode a given RcvArray Buffer size.
 * The function will check whether the given size falls within
 * allowed size ranges for the respective type and, optionally,
 * return the proper encoding.
 */
int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encoded)
{
	if (unlikely(!PAGE_ALIGNED(size)))
		return 0;
	if (unlikely(size < MIN_EAGER_BUFFER))
		return 0;
	if (size >
	    (type == PT_EAGER ? MAX_EAGER_BUFFER : MAX_EXPECTED_BUFFER))
		return 0;
	if (encoded)
		*encoded = ilog2(size / PAGE_SIZE) + 1;
	return 1;
}
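/*
 * Worked example (illustrative): with 4 KiB pages, a 64 KiB eager
 * buffer that passes the alignment and range checks encodes as
 * ilog2(65536 / 4096) + 1 = ilog2(16) + 1 = 5.
 */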
static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
		       struct hfi1_packet *packet)
{
	struct ib_header *rhdr = packet->hdr;
	u32 rte = rhf_rcv_type_err(packet->rhf);
	u32 mlid_base;
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ibdev *verbs_dev = &dd->verbs_dev;
	struct rvt_dev_info *rdi = &verbs_dev->rdi;

	if ((packet->rhf & RHF_DC_ERR) &&
	    hfi1_dbg_fault_suppress_err(verbs_dev))
		return;

	if (packet->rhf & RHF_ICRC_ERR)
		return;

	if (packet->etype == RHF_RCV_TYPE_BYPASS) {
		goto drop;
	} else {
		u8 lnh = ib_get_lnh(rhdr);

		mlid_base = be16_to_cpu(IB_MULTICAST_LID_BASE);
		if (lnh == HFI1_LRH_BTH) {
			packet->ohdr = &rhdr->u.oth;
		} else if (lnh == HFI1_LRH_GRH) {
			packet->ohdr = &rhdr->u.l.oth;
			packet->grh = &rhdr->u.l.grh;
		} else {
			goto drop;
		}
	}

	if (packet->rhf & RHF_TID_ERR) {
		/* For TIDERR and RC QPs preemptively schedule a NAK */
		u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */
		u32 dlid = ib_get_dlid(rhdr);
		u32 qp_num;

		/* Sanity check packet */
		if (tlen < 24)
			goto drop;

		/* Check for GRH */
		if (packet->grh) {
			u32 vtf;
			struct ib_grh *grh = packet->grh;

			if (grh->next_hdr != IB_GRH_NEXT_HDR)
				goto drop;
			vtf = be32_to_cpu(grh->version_tclass_flow);
			if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
				goto drop;
		}

		/* Get the destination QP number. */
		qp_num = ib_bth_get_qpn(packet->ohdr);
		if (dlid < mlid_base) {
			struct rvt_qp *qp;
			unsigned long flags;

			rcu_read_lock();
			qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
			if (!qp) {
				rcu_read_unlock();
				goto drop;
			}

			/*
			 * Handle only RC QPs - for other QP types drop error
			 * packet.
			 */
			spin_lock_irqsave(&qp->r_lock, flags);

			/* Check for valid receive state. */
			if (!(ib_rvt_state_ops[qp->state] &
			      RVT_PROCESS_RECV_OK)) {
				ibp->rvp.n_pkt_drops++;
			}

			switch (qp->ibqp.qp_type) {
			case IB_QPT_RC:
				hfi1_rc_hdrerr(rcd, packet, qp);
				break;
			default:
				/* For now don't handle any other QP types */
				break;
			}

			spin_unlock_irqrestore(&qp->r_lock, flags);
			rcu_read_unlock();
		}
	} /* Valid packet with TIDErr */

	/* handle "RcvTypeErr" flags */
	switch (rte) {
	case RHF_RTE_ERROR_OP_CODE_ERR:
	{
		void *ebuf = NULL;
		u8 opcode;

		if (rhf_use_egr_bfr(packet->rhf))
			ebuf = packet->ebuf;

		if (!ebuf)
			goto drop; /* this should never happen */

		opcode = ib_bth_get_opcode(packet->ohdr);
		if (opcode == IB_OPCODE_CNP) {
			/*
			 * Only in pre-B0 h/w is the CNP_OPCODE handled
			 * via this code path.
			 */
			struct rvt_qp *qp = NULL;
			u32 lqpn, rqpn;
			u16 rlid;
			u8 svc_type, sl, sc5;

			sc5 = hfi1_9B_get_sc5(rhdr, packet->rhf);
			sl = ibp->sc_to_sl[sc5];

			lqpn = ib_bth_get_qpn(packet->ohdr);
			rcu_read_lock();
			qp = rvt_lookup_qpn(rdi, &ibp->rvp, lqpn);
			if (!qp) {
				rcu_read_unlock();
				goto drop;
			}

			switch (qp->ibqp.qp_type) {
			case IB_QPT_UD:
				rlid = 0;
				rqpn = 0;
				svc_type = IB_CC_SVCTYPE_UD;
				break;
			case IB_QPT_UC:
				rlid = ib_get_slid(rhdr);
				rqpn = qp->remote_qpn;
				svc_type = IB_CC_SVCTYPE_UC;
				break;
			default:
				rcu_read_unlock();
				goto drop;
			}

			process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
			rcu_read_unlock();
		}

		packet->rhf &= ~RHF_RCV_TYPE_ERR_SMASK;
		break;
	}
	default:
		break;
	}

drop:
	return;
}
static inline void init_packet(struct hfi1_ctxtdata *rcd,
			       struct hfi1_packet *packet)
{
	packet->rsize = get_hdrqentsize(rcd); /* words */
	packet->maxcnt = get_hdrq_cnt(rcd) * packet->rsize; /* words */
	packet->rcd = rcd;
	packet->updegr = 0;
	packet->etail = -1;

	packet->rhf_addr = get_rhf_addr(rcd);
	packet->rhf = rhf_to_cpu(packet->rhf_addr);
	packet->rhqoff = hfi1_rcd_head(rcd);
	packet->numpkt = 0;
}
/* We support only two types - 9B and 16B for now */
static const hfi1_handle_cnp hfi1_handle_cnp_tbl[2] = {
	[HFI1_PKT_TYPE_9B] = &return_cnp,
	[HFI1_PKT_TYPE_16B] = &return_cnp_16B
};
/**
 * hfi1_process_ecn_slowpath - Process FECN or BECN bits
 * @qp: The packet's destination QP
 * @pkt: The packet itself.
 * @prescan: Is the caller the RXQ prescan
 *
 * Process the packet's FECN or BECN bits. By now, the packet
 * has already been evaluated whether processing of those bits should
 * be done.
 * The significance of the @prescan argument is that if the caller
 * is the RXQ prescan, a CNP will be sent out instead of waiting for the
 * normal packet processing to send an ACK with BECN set (or a CNP).
 */
bool hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
			       bool prescan)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct ib_other_headers *ohdr = pkt->ohdr;
	struct ib_grh *grh = pkt->grh;
	u32 rqpn = 0;
	u16 pkey;
	u32 rlid, slid, dlid = 0;
	u8 hdr_type, sc, svc_type, opcode;
	bool is_mcast = false, ignore_fecn = false, do_cnp = false,
		fecn, becn;

	/* can be called from prescan */
	if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
		pkey = hfi1_16B_get_pkey(pkt->hdr);
		sc = hfi1_16B_get_sc(pkt->hdr);
		dlid = hfi1_16B_get_dlid(pkt->hdr);
		slid = hfi1_16B_get_slid(pkt->hdr);
		is_mcast = hfi1_is_16B_mcast(dlid);
		opcode = ib_bth_get_opcode(ohdr);
		hdr_type = HFI1_PKT_TYPE_16B;
		fecn = hfi1_16B_get_fecn(pkt->hdr);
		becn = hfi1_16B_get_becn(pkt->hdr);
	} else {
		pkey = ib_bth_get_pkey(ohdr);
		sc = hfi1_9B_get_sc5(pkt->hdr, pkt->rhf);
		dlid = qp->ibqp.qp_type != IB_QPT_UD ? ib_get_dlid(pkt->hdr) :
			ppd->lid;
		slid = ib_get_slid(pkt->hdr);
		is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
			   (dlid != be16_to_cpu(IB_LID_PERMISSIVE));
		opcode = ib_bth_get_opcode(ohdr);
		hdr_type = HFI1_PKT_TYPE_9B;
		fecn = ib_bth_get_fecn(ohdr);
		becn = ib_bth_get_becn(ohdr);
	}

	switch (qp->ibqp.qp_type) {
	case IB_QPT_UD:
		rlid = slid;
		rqpn = ib_get_sqpn(pkt->ohdr);
		svc_type = IB_CC_SVCTYPE_UD;
		break;
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		rlid = slid;
		rqpn = ib_get_sqpn(pkt->ohdr);
		svc_type = IB_CC_SVCTYPE_UD;
		break;
	case IB_QPT_UC:
		rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
		rqpn = qp->remote_qpn;
		svc_type = IB_CC_SVCTYPE_UC;
		break;
	case IB_QPT_RC:
		rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
		rqpn = qp->remote_qpn;
		svc_type = IB_CC_SVCTYPE_RC;
		break;
	default:
		return false;
	}

	ignore_fecn = is_mcast || (opcode == IB_OPCODE_CNP) ||
		(opcode == IB_OPCODE_RC_ACKNOWLEDGE);
	/*
	 * ACKNOWLEDGE packets do not get a CNP but this will be
	 * guarded by ignore_fecn above.
	 */
	do_cnp = prescan ||
		(opcode >= IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST &&
		 opcode <= IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE) ||
		opcode == TID_OP(READ_RESP) ||
		opcode == TID_OP(ACK);

	/* Call appropriate CNP handler */
	if (!ignore_fecn && do_cnp && fecn)
		hfi1_handle_cnp_tbl[hdr_type](ibp, qp, rqpn, pkey,
					      dlid, rlid, sc, grh);

	if (becn) {
		u32 lqpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
		u8 sl = ibp->sc_to_sl[sc];

		process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
	}
	return !ignore_fecn && fecn;
}
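/*
 * Typical call site (illustrative): per-QP receive code first gates on
 * hfi1_may_ecn(packet) and only then takes this slow path, e.g.
 *
 *	if (unlikely(hfi1_may_ecn(packet)))
 *		fecn = hfi1_process_ecn_slowpath(qp, packet, false);
 *
 * where @prescan is false so a BECN-marked ACK is preferred over an
 * immediate CNP.
 */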
struct ps_mdata {
	struct hfi1_ctxtdata *rcd;
	u32 rsize;
	u32 maxcnt;
	u32 ps_head;
	u32 ps_tail;
	u32 ps_seq;
};

static inline void init_ps_mdata(struct ps_mdata *mdata,
				 struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;

	mdata->rcd = rcd;
	mdata->rsize = packet->rsize;
	mdata->maxcnt = packet->maxcnt;
	mdata->ps_head = packet->rhqoff;

	if (get_dma_rtail_setting(rcd)) {
		mdata->ps_tail = get_rcvhdrtail(rcd);
		if (rcd->ctxt == HFI1_CTRL_CTXT)
			mdata->ps_seq = hfi1_seq_cnt(rcd);
		else
			mdata->ps_seq = 0; /* not used with DMA_RTAIL */
	} else {
		mdata->ps_tail = 0; /* used only with DMA_RTAIL*/
		mdata->ps_seq = hfi1_seq_cnt(rcd);
	}
}
static inline int ps_done(struct ps_mdata *mdata, u64 rhf,
			  struct hfi1_ctxtdata *rcd)
{
	if (get_dma_rtail_setting(rcd))
		return mdata->ps_head == mdata->ps_tail;
	return mdata->ps_seq != rhf_rcv_seq(rhf);
}
static inline int ps_skip(struct ps_mdata *mdata, u64 rhf,
			  struct hfi1_ctxtdata *rcd)
{
	/*
	 * Control context can potentially receive an invalid rhf.
	 * Drop such packets.
	 */
	if ((rcd->ctxt == HFI1_CTRL_CTXT) && (mdata->ps_head != mdata->ps_tail))
		return mdata->ps_seq != rhf_rcv_seq(rhf);

	return 0;
}
static inline void update_ps_mdata(struct ps_mdata *mdata,
				   struct hfi1_ctxtdata *rcd)
{
	mdata->ps_head += mdata->rsize;
	if (mdata->ps_head >= mdata->maxcnt)
		mdata->ps_head = 0;

	/* Control context must do seq counting */
	if (!get_dma_rtail_setting(rcd) ||
	    rcd->ctxt == HFI1_CTRL_CTXT)
		mdata->ps_seq = hfi1_seq_incr_wrap(mdata->ps_seq);
}
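/*
 * Usage sketch (illustrative): the ps_* helpers implement a read-only
 * cursor over the receive header queue, used by __prescan_rxq() below
 * and by seqfile_dump_rcd():
 *
 *	init_ps_mdata(&mdata, packet);
 *	while (1) {
 *		u64 rhf = rhf_to_cpu(rhf_addr_at(mdata.ps_head));
 *
 *		if (ps_done(&mdata, rhf, rcd))
 *			break;
 *		if (!ps_skip(&mdata, rhf, rcd))
 *			inspect_entry();
 *		update_ps_mdata(&mdata, rcd);
 *	}
 *
 * rhf_addr_at() and inspect_entry() are hypothetical placeholders for
 * the per-caller address computation and entry handling.
 */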
/*
 * prescan_rxq - search through the receive queue looking for packets
 * containing Explicit Congestion Notifications (FECNs, or BECNs).
 * When an ECN is found, process the Congestion Notification, and toggle
 * it off.
 * This is declared as a macro to allow quick checking of the port to avoid
 * the overhead of a function call if not enabled.
 */
#define prescan_rxq(rcd, packet) \
	do { \
		if (rcd->ppd->cc_prescan) \
			__prescan_rxq(packet); \
	} while (0)
static void __prescan_rxq(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct ps_mdata mdata;

	init_ps_mdata(&mdata, packet);

	while (1) {
		struct hfi1_ibport *ibp = rcd_to_iport(rcd);
		__le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
					 packet->rcd->rhf_offset;
		struct rvt_qp *qp;
		struct ib_header *hdr;
		struct rvt_dev_info *rdi = &rcd->dd->verbs_dev.rdi;
		u64 rhf = rhf_to_cpu(rhf_addr);
		u32 etype = rhf_rcv_type(rhf), qpn, bth1;
		u8 lnh;

		if (ps_done(&mdata, rhf, rcd))
			break;

		if (ps_skip(&mdata, rhf, rcd))
			goto next;

		if (etype != RHF_RCV_TYPE_IB)
			goto next;

		packet->hdr = hfi1_get_msgheader(packet->rcd, rhf_addr);
		hdr = packet->hdr;
		lnh = ib_get_lnh(hdr);

		if (lnh == HFI1_LRH_BTH) {
			packet->ohdr = &hdr->u.oth;
			packet->grh = NULL;
		} else if (lnh == HFI1_LRH_GRH) {
			packet->ohdr = &hdr->u.l.oth;
			packet->grh = &hdr->u.l.grh;
		} else {
			goto next; /* just in case */
		}

		if (!hfi1_may_ecn(packet))
			goto next;

		bth1 = be32_to_cpu(packet->ohdr->bth[1]);
		qpn = bth1 & RVT_QPN_MASK;
		rcu_read_lock();
		qp = rvt_lookup_qpn(rdi, &ibp->rvp, qpn);

		if (!qp) {
			rcu_read_unlock();
			goto next;
		}

		hfi1_process_ecn_slowpath(qp, packet, true);
		rcu_read_unlock();

		/* turn off BECN, FECN */
		bth1 &= ~(IB_FECN_SMASK | IB_BECN_SMASK);
		packet->ohdr->bth[1] = cpu_to_be32(bth1);
next:
		update_ps_mdata(&mdata, rcd);
	}
}
static void process_rcv_qp_work(struct hfi1_packet *packet)
{
	struct rvt_qp *qp, *nqp;
	struct hfi1_ctxtdata *rcd = packet->rcd;

	/*
	 * Iterate over all QPs waiting to respond.
	 * The list won't change since the IRQ is only run on one CPU.
	 */
	list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
		list_del_init(&qp->rspwait);
		if (qp->r_flags & RVT_R_RSP_NAK) {
			qp->r_flags &= ~RVT_R_RSP_NAK;
			packet->qp = qp;
			hfi1_send_rc_ack(packet, 0);
		}
		if (qp->r_flags & RVT_R_RSP_SEND) {
			unsigned long flags;

			qp->r_flags &= ~RVT_R_RSP_SEND;
			spin_lock_irqsave(&qp->s_lock, flags);
			if (ib_rvt_state_ops[qp->state] &
					RVT_PROCESS_OR_FLUSH_SEND)
				hfi1_schedule_send(qp);
			spin_unlock_irqrestore(&qp->s_lock, flags);
		}
		rvt_put_qp(qp);
	}
}
static noinline int max_packet_exceeded(struct hfi1_packet *packet, int thread)
{
	if (thread) {
		if ((packet->numpkt & (MAX_PKT_RECV_THREAD - 1)) == 0)
			/* allow deferred processing */
			process_rcv_qp_work(packet);
		cond_resched();
		return RCV_PKT_OK;
	} else {
		this_cpu_inc(*packet->rcd->dd->rcv_limit);
		return RCV_PKT_LIMIT;
	}
}
static inline int check_max_packet(struct hfi1_packet *packet, int thread)
{
	int ret = RCV_PKT_OK;

	if (unlikely((packet->numpkt & (MAX_PKT_RECV - 1)) == 0))
		ret = max_packet_exceeded(packet, thread);
	return ret;
}
static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
{
	int ret;

	packet->rcd->dd->ctx0_seq_drop++;
	/* Set up for the next packet */
	packet->rhqoff += packet->rsize;
	if (packet->rhqoff >= packet->maxcnt)
		packet->rhqoff = 0;

	packet->numpkt++;
	ret = check_max_packet(packet, thread);

	packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
				     packet->rcd->rhf_offset;
	packet->rhf = rhf_to_cpu(packet->rhf_addr);

	return ret;
}
static void process_rcv_packet_napi(struct hfi1_packet *packet)
{
	packet->etype = rhf_rcv_type(packet->rhf);

	/* total length */
	packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
	/* retrieve eager buffer details */
	packet->etail = rhf_egr_index(packet->rhf);
	packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
				  &packet->updegr);
	/*
	 * Prefetch the contents of the eager buffer.  It is
	 * OK to send a negative length to prefetch_range().
	 * The +2 is the size of the RHF.
	 */
	prefetch_range(packet->ebuf,
		       packet->tlen - ((packet->rcd->rcvhdrqentsize -
				       (rhf_hdrq_offset(packet->rhf)
					+ 2)) * 4));

	packet->rcd->rhf_rcv_function_map[packet->etype](packet);
	packet->numpkt++;

	/* Set up for the next packet */
	packet->rhqoff += packet->rsize;
	if (packet->rhqoff >= packet->maxcnt)
		packet->rhqoff = 0;

	packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
				      packet->rcd->rhf_offset;
	packet->rhf = rhf_to_cpu(packet->rhf_addr);
}
static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
{
	int ret;

	packet->etype = rhf_rcv_type(packet->rhf);

	/* total length */
	packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
	/* retrieve eager buffer details */
	packet->ebuf = NULL;
	if (rhf_use_egr_bfr(packet->rhf)) {
		packet->etail = rhf_egr_index(packet->rhf);
		packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
					  &packet->updegr);
		/*
		 * Prefetch the contents of the eager buffer.  It is
		 * OK to send a negative length to prefetch_range().
		 * The +2 is the size of the RHF.
		 */
		prefetch_range(packet->ebuf,
			       packet->tlen - ((get_hdrqentsize(packet->rcd) -
					       (rhf_hdrq_offset(packet->rhf)
						+ 2)) * 4));
	}

	/*
	 * Call a type specific handler for the packet. We
	 * should be able to trust that etype won't be beyond
	 * the range of valid indexes. If so something is really
	 * wrong and we can probably just let things come
	 * crashing down. There is no need to eat another
	 * comparison in this performance critical code.
	 */
	packet->rcd->rhf_rcv_function_map[packet->etype](packet);
	packet->numpkt++;

	/* Set up for the next packet */
	packet->rhqoff += packet->rsize;
	if (packet->rhqoff >= packet->maxcnt)
		packet->rhqoff = 0;

	ret = check_max_packet(packet, thread);

	packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
				      packet->rcd->rhf_offset;
	packet->rhf = rhf_to_cpu(packet->rhf_addr);

	return ret;
}
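/*
 * Note (illustrative summary): the RCV_PKT_* codes form the loop
 * contract for the handlers below - RCV_PKT_OK keeps the receive loop
 * running, RCV_PKT_LIMIT ends an interrupt-context loop that has hit
 * its packet budget, and RCV_PKT_DONE means the queue has been drained.
 */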
static inline void process_rcv_update(int last, struct hfi1_packet *packet)
{
	/*
	 * Update head regs etc., every 16 packets, if not last pkt,
	 * to help prevent rcvhdrq overflows, when many packets
	 * are processed and queue is nearly full.
	 * Don't request an interrupt for intermediate updates.
	 */
	if (!last && !(packet->numpkt & 0xf)) {
		update_usrhead(packet->rcd, packet->rhqoff, packet->updegr,
			       packet->etail, 0, 0);
		packet->updegr = 0;
	}
	packet->grh = NULL;
}
static inline void finish_packet(struct hfi1_packet *packet)
{
	/*
	 * Nothing we need to free for the packet.
	 *
	 * The only thing we need to do is a final update and call for an
	 * interrupt
	 */
	update_usrhead(packet->rcd, hfi1_rcd_head(packet->rcd), packet->updegr,
		       packet->etail, rcv_intr_dynamic, packet->numpkt);
}
/**
 * handle_receive_interrupt_napi_fp - receive a packet
 * @rcd: the context
 * @budget: polling budget
 *
 * Called from interrupt handler for receive interrupt.
 * This is the fast path interrupt handler
 * when executing napi soft irq environment.
 */
int handle_receive_interrupt_napi_fp(struct hfi1_ctxtdata *rcd, int budget)
{
	struct hfi1_packet packet;

	init_packet(rcd, &packet);
	if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
		goto bail;

	while (packet.numpkt < budget) {
		process_rcv_packet_napi(&packet);
		if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
			break;

		process_rcv_update(0, &packet);
	}
	hfi1_set_rcd_head(rcd, packet.rhqoff);
bail:
	finish_packet(&packet);
	return packet.numpkt;
}
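/*
 * Usage sketch (illustrative, simplified): a NAPI poll callback would
 * drive this fast path with its budget and complete polling when the
 * budget is not exhausted, e.g.
 *
 *	work = handle_receive_interrupt_napi_fp(rxq->rcd, budget);
 *	if (work < budget)
 *		napi_complete_done(napi, work);
 *
 * napi_complete_done() is the standard kernel NAPI completion helper;
 * rxq here is a hypothetical per-queue container.
 */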
/*
 * Handle receive interrupts when using the no dma rtail option.
 */
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread)
{
	int last = RCV_PKT_OK;
	struct hfi1_packet packet;

	init_packet(rcd, &packet);
	if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) {
		last = RCV_PKT_DONE;
		goto bail;
	}

	prescan_rxq(rcd, &packet);

	while (last == RCV_PKT_OK) {
		last = process_rcv_packet(&packet, thread);
		if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
			last = RCV_PKT_DONE;
		process_rcv_update(last, &packet);
	}
	process_rcv_qp_work(&packet);
	hfi1_set_rcd_head(rcd, packet.rhqoff);
bail:
	finish_packet(&packet);
	return last;
}
int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread)
{
	u32 hdrqtail;
	int last = RCV_PKT_OK;
	struct hfi1_packet packet;

	init_packet(rcd, &packet);
	hdrqtail = get_rcvhdrtail(rcd);
	if (packet.rhqoff == hdrqtail) {
		last = RCV_PKT_DONE;
		goto bail;
	}
	smp_rmb();  /* prevent speculative reads of dma'ed hdrq */

	prescan_rxq(rcd, &packet);

	while (last == RCV_PKT_OK) {
		last = process_rcv_packet(&packet, thread);
		if (packet.rhqoff == hdrqtail)
			last = RCV_PKT_DONE;
		process_rcv_update(last, &packet);
	}
	process_rcv_qp_work(&packet);
	hfi1_set_rcd_head(rcd, packet.rhqoff);
bail:
	finish_packet(&packet);
	return last;
}
static void set_all_fastpath(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	u16 i;

	/*
	 * For dynamically allocated kernel contexts (like vnic) switch
	 * interrupt handler only for that context. Otherwise, switch
	 * interrupt handler for all statically allocated kernel contexts.
	 */
	if (rcd->ctxt >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic) {
		rcd->do_interrupt = rcd->fast_handler;
		return;
	}

	for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (rcd && (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic))
			rcd->do_interrupt = rcd->fast_handler;
		hfi1_rcd_put(rcd);
	}
}
void set_all_slowpath(struct hfi1_devdata *dd)
{
	struct hfi1_ctxtdata *rcd;
	u16 i;

	/* HFI1_CTRL_CTXT must always use the slow path interrupt handler */
	for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;
		if (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
			rcd->do_interrupt = rcd->slow_handler;
		hfi1_rcd_put(rcd);
	}
}
static bool __set_armed_to_active(struct hfi1_packet *packet)
{
	u8 etype = rhf_rcv_type(packet->rhf);
	u8 sc = SC15_PACKET;

	if (etype == RHF_RCV_TYPE_IB) {
		struct ib_header *hdr = hfi1_get_msgheader(packet->rcd,
							   packet->rhf_addr);
		sc = hfi1_9B_get_sc5(hdr, packet->rhf);
	} else if (etype == RHF_RCV_TYPE_BYPASS) {
		struct hfi1_16b_header *hdr = hfi1_get_16B_header(
						packet->rcd,
						packet->rhf_addr);
		sc = hfi1_16B_get_sc(hdr);
	}
	if (sc != SC15_PACKET) {
		int hwstate = driver_lstate(packet->rcd->ppd);
		struct work_struct *lsaw =
				&packet->rcd->ppd->linkstate_active_work;

		if (hwstate != IB_PORT_ACTIVE) {
			dd_dev_info(packet->rcd->dd,
				    "Unexpected link state %s\n",
				    opa_lstate_name(hwstate));
			return false;
		}

		queue_work(packet->rcd->ppd->link_wq, lsaw);
		return true;
	}
	return false;
}
/**
 * set_armed_to_active  - the fast path for armed to active
 * @packet: the packet structure
 *
 * Return true if packet processing needs to bail.
 */
static bool set_armed_to_active(struct hfi1_packet *packet)
{
	if (likely(packet->rcd->ppd->host_link_state != HLS_UP_ARMED))
		return false;
	return __set_armed_to_active(packet);
}
/*
 * handle_receive_interrupt - receive a packet
 * @rcd: the context
 *
 * Called from interrupt handler for errors or receive interrupt.
 * This is the slow path interrupt handler.
 */
int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 hdrqtail;
	int needset, last = RCV_PKT_OK;
	struct hfi1_packet packet;
	int skip_pkt = 0;

	/* Control context will always use the slow path interrupt handler */
	needset = (rcd->ctxt == HFI1_CTRL_CTXT) ? 0 : 1;

	init_packet(rcd, &packet);

	if (!get_dma_rtail_setting(rcd)) {
		if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) {
			last = RCV_PKT_DONE;
			goto bail;
		}
		hdrqtail = 0;
	} else {
		hdrqtail = get_rcvhdrtail(rcd);
		if (packet.rhqoff == hdrqtail) {
			last = RCV_PKT_DONE;
			goto bail;
		}
		smp_rmb();  /* prevent speculative reads of dma'ed hdrq */

		/*
		 * Control context can potentially receive an invalid
		 * rhf. Drop such packets.
		 */
		if (rcd->ctxt == HFI1_CTRL_CTXT)
			if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
				skip_pkt = 1;
	}

	prescan_rxq(rcd, &packet);

	while (last == RCV_PKT_OK) {
		if (hfi1_need_drop(dd)) {
			/* On to the next packet */
			packet.rhqoff += packet.rsize;
			packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
					  packet.rhqoff +
					  rcd->rhf_offset;
			packet.rhf = rhf_to_cpu(packet.rhf_addr);

		} else if (skip_pkt) {
			last = skip_rcv_packet(&packet, thread);
			skip_pkt = 0;
		} else {
			if (set_armed_to_active(&packet))
				goto bail;
			last = process_rcv_packet(&packet, thread);
		}

		if (!get_dma_rtail_setting(rcd)) {
			if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
				last = RCV_PKT_DONE;
		} else {
			if (packet.rhqoff == hdrqtail)
				last = RCV_PKT_DONE;
			/*
			 * Control context can potentially receive an invalid
			 * rhf. Drop such packets.
			 */
			if (rcd->ctxt == HFI1_CTRL_CTXT) {
				bool lseq;

				lseq = hfi1_seq_incr(rcd,
						     rhf_rcv_seq(packet.rhf));
				if (!last && lseq)
					skip_pkt = 1;
			}
		}

		if (needset) {
			needset = 0;
			set_all_fastpath(dd, rcd);
		}

		process_rcv_update(last, &packet);
	}

	process_rcv_qp_work(&packet);
	hfi1_set_rcd_head(rcd, packet.rhqoff);

bail:
	/*
	 * Always write head at end, and setup rcv interrupt, even
	 * if no packets were processed.
	 */
	finish_packet(&packet);
	return last;
}
/**
 * handle_receive_interrupt_napi_sp - receive a packet
 * @rcd: the context
 * @budget: polling budget
 *
 * Called from interrupt handler for errors or receive interrupt.
 * This is the slow path interrupt handler
 * when executing napi soft irq environment.
 */
int handle_receive_interrupt_napi_sp(struct hfi1_ctxtdata *rcd, int budget)
{
	struct hfi1_devdata *dd = rcd->dd;
	int last = RCV_PKT_OK;
	bool needset = true;
	struct hfi1_packet packet;

	init_packet(rcd, &packet);
	if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
		goto bail;

	while (last != RCV_PKT_DONE && packet.numpkt < budget) {
		if (hfi1_need_drop(dd)) {
			/* On to the next packet */
			packet.rhqoff += packet.rsize;
			packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
					  packet.rhqoff +
					  rcd->rhf_offset;
			packet.rhf = rhf_to_cpu(packet.rhf_addr);

		} else {
			if (set_armed_to_active(&packet))
				goto bail;
			process_rcv_packet_napi(&packet);
		}

		if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
			last = RCV_PKT_DONE;

		if (needset) {
			needset = false;
			set_all_fastpath(dd, rcd);
		}

		process_rcv_update(last, &packet);
	}

	hfi1_set_rcd_head(rcd, packet.rhqoff);

bail:
	/*
	 * Always write head at end, and setup rcv interrupt, even
	 * if no packets were processed.
	 */
	finish_packet(&packet);
	return packet.numpkt;
}
/*
 * We may discover in the interrupt that the hardware link state has
 * changed from ARMED to ACTIVE (due to the arrival of a non-SC15 packet),
 * and we need to update the driver's notion of the link state. We cannot
 * run set_link_state from interrupt context, so we queue this function on
 * a workqueue.
 *
 * We delay the regular interrupt processing until after the state changes
 * so that the link will be in the correct state by the time any application
 * we wake up attempts to send a reply to any message it received.
 * (Subsequent receive interrupts may possibly force the wakeup before we
 * update the link state.)
 *
 * The rcd is freed in hfi1_free_ctxtdata after hfi1_postinit_cleanup invokes
 * dd->f_cleanup(dd) to disable the interrupt handler and flush workqueues,
 * so we're safe from use-after-free of the rcd.
 */
void receive_interrupt_work(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  linkstate_active_work);
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ctxtdata *rcd;
	u16 i;

	/* Received non-SC15 packet implies neighbor_normal */
	ppd->neighbor_normal = 1;
	set_link_state(ppd, HLS_UP_ACTIVE);

	/*
	 * Interrupt all statically allocated kernel contexts that could
	 * have had an interrupt during auto activation.
	 */
	for (i = HFI1_CTRL_CTXT; i < dd->first_dyn_alloc_ctxt; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (rcd)
			force_recv_intr(rcd);
		hfi1_rcd_put(rcd);
	}
}
/*
 * Convert a given MTU size to the on-wire MAD packet enumeration.
 * Return @default_if_bad if the size is invalid.
 */
int mtu_to_enum(u32 mtu, int default_if_bad)
{
	switch (mtu) {
	case     0: return OPA_MTU_0;
	case   256: return OPA_MTU_256;
	case   512: return OPA_MTU_512;
	case  1024: return OPA_MTU_1024;
	case  2048: return OPA_MTU_2048;
	case  4096: return OPA_MTU_4096;
	case  8192: return OPA_MTU_8192;
	case 10240: return OPA_MTU_10240;
	}
	return default_if_bad;
}
u16 enum_to_mtu(int mtu)
{
	switch (mtu) {
	case OPA_MTU_0:     return 0;
	case OPA_MTU_256:   return 256;
	case OPA_MTU_512:   return 512;
	case OPA_MTU_1024:  return 1024;
	case OPA_MTU_2048:  return 2048;
	case OPA_MTU_4096:  return 4096;
	case OPA_MTU_8192:  return 8192;
	case OPA_MTU_10240: return 10240;
	default: return 0xffff;
	}
}
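/*
 * Worked example (illustrative): mtu_to_enum(4096, OPA_MTU_2048) yields
 * OPA_MTU_4096, and enum_to_mtu(OPA_MTU_4096) maps it back to 4096; an
 * unlisted size such as 3000 falls through to the supplied default, and
 * an unknown enum decodes to 0xffff.
 */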
/**
 * set_mtu - set the MTU
 * @ppd: the per port data
 *
 * We can handle "any" incoming size, the issue here is whether we
 * need to restrict our outgoing size. We do not deal with what happens
 * to programs that are already running when the size changes.
 */
int set_mtu(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	int i, drain, ret = 0, is_up = 0;

	ppd->ibmtu = 0;
	for (i = 0; i < ppd->vls_supported; i++)
		if (ppd->ibmtu < dd->vld[i].mtu)
			ppd->ibmtu = dd->vld[i].mtu;
	ppd->ibmaxlen = ppd->ibmtu + lrh_max_header_bytes(ppd->dd);

	mutex_lock(&ppd->hls_lock);
	if (ppd->host_link_state == HLS_UP_INIT ||
	    ppd->host_link_state == HLS_UP_ARMED ||
	    ppd->host_link_state == HLS_UP_ACTIVE)
		is_up = 1;

	drain = !is_ax(dd) && is_up;

	if (drain)
		/*
		 * MTU is specified per-VL. To ensure that no packet gets
		 * stuck (due, e.g., to the MTU for the packet's VL being
		 * reduced), empty the per-VL FIFOs before adjusting MTU.
		 */
		ret = stop_drain_data_vls(dd);

	if (ret) {
		dd_dev_err(dd, "%s: cannot stop/drain VLs - refusing to change per-VL MTUs\n",
			   __func__);
		goto err;
	}

	hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_MTU, 0);

	if (drain)
		open_fill_data_vls(dd); /* reopen all VLs */

err:
	mutex_unlock(&ppd->hls_lock);

	return ret;
}
int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc)
{
	struct hfi1_devdata *dd = ppd->dd;

	ppd->lid = lid;
	ppd->lmc = lmc;
	hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LIDLMC, 0);

	dd_dev_info(dd, "port %u: got a lid: 0x%x\n", ppd->port, lid);

	return 0;
}
void shutdown_led_override(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;

	/*
	 * This pairs with the memory barrier in hfi1_start_led_override to
	 * ensure that we read the correct state of LED beaconing represented
	 * by led_override_timer_active
	 */
	smp_rmb();
	if (atomic_read(&ppd->led_override_timer_active)) {
		del_timer_sync(&ppd->led_override_timer);
		atomic_set(&ppd->led_override_timer_active, 0);
		/* Ensure the atomic_set is visible to all CPUs */
		smp_wmb();
	}

	/* Hand control of the LED to the DC for normal operation */
	write_csr(dd, DCC_CFG_LED_CNTRL, 0);
}
static void run_led_override(struct timer_list *t)
{
	struct hfi1_pportdata *ppd = from_timer(ppd, t, led_override_timer);
	struct hfi1_devdata *dd = ppd->dd;
	unsigned long timeout;
	int phase_idx;

	if (!(dd->flags & HFI1_INITTED))
		return;

	phase_idx = ppd->led_override_phase & 1;

	setextled(dd, phase_idx);

	timeout = ppd->led_override_vals[phase_idx];

	/* Set up for next phase */
	ppd->led_override_phase = !ppd->led_override_phase;

	mod_timer(&ppd->led_override_timer, jiffies + timeout);
}
/*
 * To have the LED blink in a particular pattern, provide timeon and timeoff
 * in milliseconds.
 * To turn off custom blinking and return to normal operation, use
 * shutdown_led_override()
 */
void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
			     unsigned int timeoff)
{
	if (!(ppd->dd->flags & HFI1_INITTED))
		return;

	/* Convert to jiffies for direct use in timer */
	ppd->led_override_vals[0] = msecs_to_jiffies(timeoff);
	ppd->led_override_vals[1] = msecs_to_jiffies(timeon);

	/* Arbitrarily start from LED on phase */
	ppd->led_override_phase = 1;

	/*
	 * If the timer has not already been started, do so. Use a "quick"
	 * timeout so the handler will be called soon to look at our request.
	 */
	if (!timer_pending(&ppd->led_override_timer)) {
		timer_setup(&ppd->led_override_timer, run_led_override, 0);
		ppd->led_override_timer.expires = jiffies + 1;
		add_timer(&ppd->led_override_timer);
		atomic_set(&ppd->led_override_timer_active, 1);
		/* Ensure the atomic_set is visible to all CPUs */
		smp_wmb();
	}
}
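/*
 * Usage sketch (illustrative): to beacon a port LED at 500 ms on /
 * 250 ms off until told otherwise:
 *
 *	hfi1_start_led_override(ppd, 500, 250);
 *	...
 *	shutdown_led_override(ppd);
 */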
/**
 * hfi1_reset_device - reset the chip if possible
 * @unit: the device to reset
 *
 * Whether or not reset is successful, we attempt to re-initialize the chip
 * (that is, much like a driver unload/reload). We clear the INITTED flag
 * so that the various entry points will fail until we reinitialize. For
 * now, we only allow this if no user contexts are open that use chip resources.
 */
int hfi1_reset_device(int unit)
{
	int ret;
	struct hfi1_devdata *dd = hfi1_lookup(unit);
	struct hfi1_pportdata *ppd;
	int pidx;

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	dd_dev_info(dd, "Reset on unit %u requested\n", unit);

	if (!dd->kregbase1 || !(dd->flags & HFI1_PRESENT)) {
		dd_dev_info(dd,
			    "Invalid unit number %u or not initialized or not present\n",
			    unit);
		ret = -ENXIO;
		goto bail;
	}

	/* If there are any user/vnic contexts, we cannot reset */
	mutex_lock(&hfi1_mutex);
	if (dd->rcd)
		if (hfi1_stats.sps_ctxts) {
			mutex_unlock(&hfi1_mutex);
			ret = -EBUSY;
			goto bail;
		}
	mutex_unlock(&hfi1_mutex);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		shutdown_led_override(ppd);
	}
	if (dd->flags & HFI1_HAS_SEND_DMA)
		sdma_exit(dd);

	hfi1_reset_cpu_counters(dd);

	ret = hfi1_init(dd, 1);

	if (ret)
		dd_dev_err(dd,
			   "Reinitialize unit %u after reset failed with %d\n",
			   unit, ret);
	else
		dd_dev_info(dd, "Reinitialized unit %u after resetting\n",
			    unit);

bail:
	return ret;
}
static inline void hfi1_setup_ib_header(struct hfi1_packet *packet)
{
	packet->hdr = (struct hfi1_ib_message_header *)
			hfi1_get_msgheader(packet->rcd,
					   packet->rhf_addr);
	packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr;
}
static int hfi1_bypass_ingress_pkt_check(struct hfi1_packet *packet)
{
	struct hfi1_pportdata *ppd = packet->rcd->ppd;

	/* slid and dlid cannot be 0 */
	if ((!packet->slid) || (!packet->dlid))
		return -EINVAL;

	/* Compare port lid with incoming packet dlid */
	if ((!(hfi1_is_16B_mcast(packet->dlid))) &&
	    (packet->dlid !=
		opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B))) {
		if ((packet->dlid & ~((1 << ppd->lmc) - 1)) != ppd->lid)
			return -EINVAL;
	}

	/* No multicast packets with SC15 */
	if ((hfi1_is_16B_mcast(packet->dlid)) && (packet->sc == 0xF))
		return -EINVAL;

	/* Packets with permissive DLID always on SC15 */
	if ((packet->dlid == opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE),
					 16B)) &&
	    (packet->sc != 0xF))
		return -EINVAL;

	return 0;
}
static int hfi1_setup_9B_packet(struct hfi1_packet *packet)
{
	struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
	struct ib_header *hdr;
	u8 lnh;

	hfi1_setup_ib_header(packet);
	hdr = packet->hdr;

	lnh = ib_get_lnh(hdr);
	if (lnh == HFI1_LRH_BTH) {
		packet->ohdr = &hdr->u.oth;
		packet->grh = NULL;
	} else if (lnh == HFI1_LRH_GRH) {
		u32 vtf;

		packet->ohdr = &hdr->u.l.oth;
		packet->grh = &hdr->u.l.grh;
		if (packet->grh->next_hdr != IB_GRH_NEXT_HDR)
			goto drop;
		vtf = be32_to_cpu(packet->grh->version_tclass_flow);
		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
			goto drop;
	} else {
		goto drop;
	}

	/* Query commonly used fields from packet header */
	packet->payload = packet->ebuf;
	packet->opcode = ib_bth_get_opcode(packet->ohdr);
	packet->slid = ib_get_slid(hdr);
	packet->dlid = ib_get_dlid(hdr);
	if (unlikely((packet->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
		     (packet->dlid != be16_to_cpu(IB_LID_PERMISSIVE))))
		packet->dlid += opa_get_mcast_base(OPA_MCAST_NR) -
			be16_to_cpu(IB_MULTICAST_LID_BASE);
	packet->sl = ib_get_sl(hdr);
	packet->sc = hfi1_9B_get_sc5(hdr, packet->rhf);
	packet->pad = ib_bth_get_pad(packet->ohdr);
	packet->extra_byte = 0;
	packet->pkey = ib_bth_get_pkey(packet->ohdr);
	packet->migrated = ib_bth_is_migration(packet->ohdr);

	return 0;
drop:
	ibp->rvp.n_pkt_drops++;
	return -EINVAL;
}
static int hfi1_setup_bypass_packet(struct hfi1_packet *packet)
{
	/*
	 * Bypass packets have a different header/payload split
	 * compared to an IB packet.
	 * Current split is set such that 16 bytes of the actual
	 * header is in the header buffer and the remaining is in
	 * the eager buffer. We chose 16 since hfi1 driver only
	 * supports 16B bypass packets and we will be able to
	 * receive the entire LRH with such a split.
	 */

	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct hfi1_pportdata *ppd = rcd->ppd;
	struct hfi1_ibport *ibp = &ppd->ibport_data;
	u8 l4;

	packet->hdr = (struct hfi1_16b_header *)
			hfi1_get_16B_header(packet->rcd,
					    packet->rhf_addr);
	l4 = hfi1_16B_get_l4(packet->hdr);
	if (l4 == OPA_16B_L4_IB_LOCAL) {
		packet->ohdr = packet->ebuf;
		packet->grh = NULL;
		packet->opcode = ib_bth_get_opcode(packet->ohdr);
		packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
		/* hdr_len_by_opcode already has an IB LRH factored in */
		packet->hlen = hdr_len_by_opcode[packet->opcode] +
			(LRH_16B_BYTES - LRH_9B_BYTES);
		packet->migrated = opa_bth_is_migration(packet->ohdr);
	} else if (l4 == OPA_16B_L4_IB_GLOBAL) {
		u32 vtf;
		u8 grh_len = sizeof(struct ib_grh);

		packet->ohdr = packet->ebuf + grh_len;
		packet->grh = packet->ebuf;
		packet->opcode = ib_bth_get_opcode(packet->ohdr);
		packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
		/* hdr_len_by_opcode already has an IB LRH factored in */
		packet->hlen = hdr_len_by_opcode[packet->opcode] +
			(LRH_16B_BYTES - LRH_9B_BYTES) + grh_len;
		packet->migrated = opa_bth_is_migration(packet->ohdr);

		if (packet->grh->next_hdr != IB_GRH_NEXT_HDR)
			goto drop;
		vtf = be32_to_cpu(packet->grh->version_tclass_flow);
		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
			goto drop;
	} else if (l4 == OPA_16B_L4_FM) {
		packet->mgmt = packet->ebuf;
		packet->ohdr = NULL;
		packet->grh = NULL;
		packet->opcode = IB_OPCODE_UD_SEND_ONLY;
		packet->pad = OPA_16B_L4_FM_PAD;
		packet->hlen = OPA_16B_L4_FM_HLEN;
		packet->migrated = false;
	} else {
		goto drop;
	}

	/* Query commonly used fields from packet header */
	packet->payload = packet->ebuf + packet->hlen - LRH_16B_BYTES;
	packet->slid = hfi1_16B_get_slid(packet->hdr);
	packet->dlid = hfi1_16B_get_dlid(packet->hdr);
	if (unlikely(hfi1_is_16B_mcast(packet->dlid)))
		packet->dlid += opa_get_mcast_base(OPA_MCAST_NR) -
			opa_get_lid(opa_get_mcast_base(OPA_MCAST_NR),
				    16B);
	packet->sc = hfi1_16B_get_sc(packet->hdr);
	packet->sl = ibp->sc_to_sl[packet->sc];
	packet->extra_byte = SIZE_OF_LT;
	packet->pkey = hfi1_16B_get_pkey(packet->hdr);

	if (hfi1_bypass_ingress_pkt_check(packet))
		goto drop;

	return 0;
drop:
	hfi1_cdbg(PKT, "%s: packet dropped\n", __func__);
	ibp->rvp.n_pkt_drops++;
	return -EINVAL;
}
static void show_eflags_errs(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	u32 rte = rhf_rcv_type_err(packet->rhf);

	dd_dev_err(rcd->dd,
		   "receive context %d: rhf 0x%016llx, errs [ %s%s%s%s%s%s%s] rte 0x%x\n",
		   rcd->ctxt, packet->rhf,
		   packet->rhf & RHF_K_HDR_LEN_ERR ? "k_hdr_len " : "",
		   packet->rhf & RHF_DC_UNC_ERR ? "dc_unc " : "",
		   packet->rhf & RHF_DC_ERR ? "dc " : "",
		   packet->rhf & RHF_TID_ERR ? "tid " : "",
		   packet->rhf & RHF_LEN_ERR ? "len " : "",
		   packet->rhf & RHF_ECC_ERR ? "ecc " : "",
		   packet->rhf & RHF_ICRC_ERR ? "icrc " : "",
		   rte);
}
void handle_eflags(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;

	rcv_hdrerr(rcd, rcd->ppd, packet);
	if (rhf_err_flags(packet->rhf))
		show_eflags_errs(packet);
}
static void hfi1_ipoib_ib_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ibport *ibp;
	struct net_device *netdev;
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct napi_struct *napi = rcd->napi;
	struct sk_buff *skb;
	struct hfi1_netdev_rxq *rxq = container_of(napi,
			struct hfi1_netdev_rxq, napi);
	u32 extra_bytes;
	u32 tlen, qpnum;
	bool do_work, do_cnp;
	struct hfi1_ipoib_dev_priv *priv;

	trace_hfi1_rcvhdr(packet);

	hfi1_setup_ib_header(packet);

	packet->ohdr = &((struct ib_header *)packet->hdr)->u.oth;
	packet->grh = NULL;

	if (unlikely(rhf_err_flags(packet->rhf))) {
		handle_eflags(packet);
		return;
	}

	qpnum = ib_bth_get_qpn(packet->ohdr);
	netdev = hfi1_netdev_get_data(rcd->dd, qpnum);
	if (!netdev)
		goto drop_no_nd;

	trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
	trace_ctxt_rsm_hist(rcd->ctxt);

	/* handle congestion notifications */
	do_work = hfi1_may_ecn(packet);
	if (unlikely(do_work)) {
		do_cnp = (packet->opcode != IB_OPCODE_CNP);
		(void)hfi1_process_ecn_slowpath(hfi1_ipoib_priv(netdev)->qp,
						packet, do_cnp);
	}

	/*
	 * We have a split point after the last byte of the DETH, so strip
	 * the padding, CRC, and ICRC. tlen is the whole packet length, so
	 * we need to subtract the header size as well.
	 */
	tlen = packet->tlen;
	extra_bytes = ib_bth_get_pad(packet->ohdr) + (SIZE_OF_CRC << 2) +
		      packet->hlen;
	if (unlikely(tlen < extra_bytes))
		goto drop;

	tlen -= extra_bytes;

	skb = hfi1_ipoib_prepare_skb(rxq, tlen, packet->ebuf);
	if (unlikely(!skb))
		goto drop;

	priv = hfi1_ipoib_priv(netdev);
	hfi1_ipoib_update_rx_netstats(priv, 1, skb->len);

	skb->dev = netdev;
	skb->pkt_type = PACKET_HOST;
	netif_receive_skb(skb);

	return;

drop:
	++netdev->stats.rx_dropped;
drop_no_nd:
	ibp = rcd_to_iport(packet->rcd);
	++ibp->rvp.n_pkt_drops;
}
/*
 * The following functions are called by the interrupt handler. They are type
 * specific handlers for each packet type.
 */
static void process_receive_ib(struct hfi1_packet *packet)
{
	if (hfi1_setup_9B_packet(packet))
		return;

	if (unlikely(hfi1_dbg_should_fault_rx(packet)))
		return;

	trace_hfi1_rcvhdr(packet);

	if (unlikely(rhf_err_flags(packet->rhf))) {
		handle_eflags(packet);
		return;
	}

	hfi1_ib_rcv(packet);
}
static void process_receive_bypass(struct hfi1_packet *packet)
{
	struct hfi1_devdata *dd = packet->rcd->dd;

	if (hfi1_setup_bypass_packet(packet))
		return;

	trace_hfi1_rcvhdr(packet);

	if (unlikely(rhf_err_flags(packet->rhf))) {
		handle_eflags(packet);
		return;
	}

	if (hfi1_16B_get_l2(packet->hdr) == 0x2) {
		hfi1_16B_rcv(packet);
	} else {
		dd_dev_err(dd,
			   "Bypass packets other than 16B are not supported in normal operation. Dropping\n");
		incr_cntr64(&dd->sw_rcv_bypass_packet_errors);
		if (!(dd->err_info_rcvport.status_and_code &
		      OPA_EI_STATUS_SMASK)) {
			u64 *flits = packet->ebuf;

			if (flits && !(packet->rhf & RHF_LEN_ERR)) {
				dd->err_info_rcvport.packet_flit1 = flits[0];
				dd->err_info_rcvport.packet_flit2 =
					packet->tlen > sizeof(flits[0]) ?
					flits[1] : 0;
			}
			dd->err_info_rcvport.status_and_code |=
				(OPA_EI_STATUS_SMASK | BAD_L2_ERR);
		}
	}
}
static void process_receive_error(struct hfi1_packet *packet)
{
	/* KHdrHCRCErr -- KDETH packet with a bad HCRC */
	if (unlikely(
		 hfi1_dbg_fault_suppress_err(&packet->rcd->dd->verbs_dev) &&
		 (rhf_rcv_type_err(packet->rhf) == RHF_RCV_TYPE_ERROR ||
		  packet->rhf & RHF_DC_ERR)))
		return;

	hfi1_setup_ib_header(packet);
	handle_eflags(packet);

	if (unlikely(rhf_err_flags(packet->rhf)))
		dd_dev_err(packet->rcd->dd,
			   "Unhandled error packet received. Dropping.\n");
}
static void kdeth_process_expected(struct hfi1_packet *packet)
{
	hfi1_setup_9B_packet(packet);
	if (unlikely(hfi1_dbg_should_fault_rx(packet)))
		return;

	if (unlikely(rhf_err_flags(packet->rhf))) {
		struct hfi1_ctxtdata *rcd = packet->rcd;

		if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet))
			return;
	}

	hfi1_kdeth_expected_rcv(packet);
}
static void kdeth_process_eager(struct hfi1_packet *packet)
{
	hfi1_setup_9B_packet(packet);
	if (unlikely(hfi1_dbg_should_fault_rx(packet)))
		return;

	trace_hfi1_rcvhdr(packet);
	if (unlikely(rhf_err_flags(packet->rhf))) {
		struct hfi1_ctxtdata *rcd = packet->rcd;

		show_eflags_errs(packet);
		if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet))
			return;
	}

	hfi1_kdeth_eager_rcv(packet);
}
static void process_receive_invalid(struct hfi1_packet *packet)
{
	dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n",
		   rhf_rcv_type(packet->rhf));
}
#define HFI1_RCVHDR_DUMP_MAX	5

void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_packet packet;
	struct ps_mdata mdata;
	int i;

	seq_printf(s, "Rcd %u: RcvHdr cnt %u entsize %u %s ctrl 0x%08llx status 0x%08llx, head %llu tail %llu sw head %u\n",
		   rcd->ctxt, get_hdrq_cnt(rcd), get_hdrqentsize(rcd),
		   get_dma_rtail_setting(rcd) ?
		   "dma_rtail" : "nodma_rtail",
		   read_kctxt_csr(rcd->dd, rcd->ctxt, RCV_CTXT_CTRL),
		   read_kctxt_csr(rcd->dd, rcd->ctxt, RCV_CTXT_STATUS),
		   read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) &
		   RCV_HDR_HEAD_HEAD_MASK,
		   read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL),
		   hfi1_rcd_head(rcd));

	init_packet(rcd, &packet);
	init_ps_mdata(&mdata, &packet);

	for (i = 0; i < HFI1_RCVHDR_DUMP_MAX; i++) {
		__le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
					 rcd->rhf_offset;
		struct ib_header *hdr;
		u64 rhf = rhf_to_cpu(rhf_addr);
		u32 etype = rhf_rcv_type(rhf), qpn;
		u8 opcode;
		u32 psn;
		u8 lnh;

		if (ps_done(&mdata, rhf, rcd))
			break;

		if (ps_skip(&mdata, rhf, rcd))
			goto next;

		if (etype > RHF_RCV_TYPE_IB)
			goto next;

		packet.hdr = hfi1_get_msgheader(rcd, rhf_addr);
		hdr = packet.hdr;

		lnh = be16_to_cpu(hdr->lrh[0]) & 3;

		if (lnh == HFI1_LRH_BTH)
			packet.ohdr = &hdr->u.oth;
		else if (lnh == HFI1_LRH_GRH)
			packet.ohdr = &hdr->u.l.oth;
		else
			goto next; /* just in case */

		opcode = (be32_to_cpu(packet.ohdr->bth[0]) >> 24);
		qpn = be32_to_cpu(packet.ohdr->bth[1]) & RVT_QPN_MASK;
		psn = mask_psn(be32_to_cpu(packet.ohdr->bth[2]));

		seq_printf(s, "\tEnt %u: opcode 0x%x, qpn 0x%x, psn 0x%x\n",
			   mdata.ps_head, opcode, qpn, psn);
next:
		update_ps_mdata(&mdata, rcd);
	}
}
const rhf_rcv_function_ptr normal_rhf_rcv_functions[] = {
	[RHF_RCV_TYPE_EXPECTED] = kdeth_process_expected,
	[RHF_RCV_TYPE_EAGER] = kdeth_process_eager,
	[RHF_RCV_TYPE_IB] = process_receive_ib,
	[RHF_RCV_TYPE_ERROR] = process_receive_error,
	[RHF_RCV_TYPE_BYPASS] = process_receive_bypass,
	[RHF_RCV_TYPE_INVALID5] = process_receive_invalid,
	[RHF_RCV_TYPE_INVALID6] = process_receive_invalid,
	[RHF_RCV_TYPE_INVALID7] = process_receive_invalid,
};

const rhf_rcv_function_ptr netdev_rhf_rcv_functions[] = {
	[RHF_RCV_TYPE_EXPECTED] = process_receive_invalid,
	[RHF_RCV_TYPE_EAGER] = process_receive_invalid,
	[RHF_RCV_TYPE_IB] = hfi1_ipoib_ib_rcv,
	[RHF_RCV_TYPE_ERROR] = process_receive_error,
	[RHF_RCV_TYPE_BYPASS] = hfi1_vnic_bypass_rcv,
	[RHF_RCV_TYPE_INVALID5] = process_receive_invalid,
	[RHF_RCV_TYPE_INVALID6] = process_receive_invalid,
	[RHF_RCV_TYPE_INVALID7] = process_receive_invalid,
};
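/*
 * Dispatch sketch (illustrative): each receive context points
 * rhf_rcv_function_map at one of these tables, and the hot loop indexes
 * it directly with the packet type decoded from the RHF:
 *
 *	packet->etype = rhf_rcv_type(packet->rhf);
 *	packet->rcd->rhf_rcv_function_map[packet->etype](packet);
 *
 * so adding a handler is just a table edit, with the INVALID slots
 * catching any type the hardware should never deliver.
 */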