2 * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
3 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
4 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
35 #include <rdma/ib_mad.h>
36 #include <rdma/ib_user_verbs.h>
38 #include <linux/module.h>
39 #include <linux/utsname.h>
40 #include <linux/rculist.h>
42 #include <linux/random.h>
43 #include <linux/vmalloc.h>
44 #include <rdma/rdma_vt.h>
47 #include "qib_common.h"
49 static unsigned int ib_qib_qp_table_size = 256;
50 module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
51 MODULE_PARM_DESC(qp_table_size, "QP table size");
53 static unsigned int qib_lkey_table_size = 16;
54 module_param_named(lkey_table_size, qib_lkey_table_size, uint,
55 S_IRUGO);
56 MODULE_PARM_DESC(lkey_table_size,
57 "LKEY table size in bits (2^n, 1 <= n <= 23)");
59 static unsigned int ib_qib_max_pds = 0xFFFF;
60 module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO);
61 MODULE_PARM_DESC(max_pds,
62 "Maximum number of protection domains to support");
64 static unsigned int ib_qib_max_ahs = 0xFFFF;
65 module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO);
66 MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
68 unsigned int ib_qib_max_cqes = 0x2FFFF;
69 module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO);
70 MODULE_PARM_DESC(max_cqes,
71 "Maximum number of completion queue entries to support");
73 unsigned int ib_qib_max_cqs = 0x1FFFF;
74 module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO);
75 MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
77 unsigned int ib_qib_max_qp_wrs = 0x3FFF;
78 module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO);
79 MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
81 unsigned int ib_qib_max_qps = 16384;
82 module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO);
83 MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
85 unsigned int ib_qib_max_sges = 0x60;
86 module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO);
87 MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
89 unsigned int ib_qib_max_mcast_grps = 16384;
90 module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO);
91 MODULE_PARM_DESC(max_mcast_grps,
92 "Maximum number of multicast groups to support");
94 unsigned int ib_qib_max_mcast_qp_attached = 16;
95 module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached,
96 uint, S_IRUGO);
97 MODULE_PARM_DESC(max_mcast_qp_attached,
98 "Maximum number of attached QPs to support");
100 unsigned int ib_qib_max_srqs = 1024;
101 module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO);
102 MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
104 unsigned int ib_qib_max_srq_sges = 128;
105 module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO);
106 MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
108 unsigned int ib_qib_max_srq_wrs = 0x1FFFF;
109 module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO);
110 MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
112 static unsigned int ib_qib_disable_sma;
113 module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
114 MODULE_PARM_DESC(disable_sma, "Disable the SMA");
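/*
 * All of the limits above are exposed as read-only (S_IRUGO) module
 * parameters, so they can only be changed when the module is loaded,
 * for example (illustrative values):
 *
 *   modprobe ib_qib lkey_table_size=17 max_qps=32768
 *
 * disable_sma is additionally S_IWUSR, so it can also be toggled at
 * runtime through /sys/module/ib_qib/parameters/disable_sma.
 */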
116 /*
117 * Translate ib_wr_opcode into ib_wc_opcode.
118 */
119 const enum ib_wc_opcode ib_qib_wc_opcode[] = {
120 [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
121 [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
122 [IB_WR_SEND] = IB_WC_SEND,
123 [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
124 [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
125 [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
126 [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
127 };
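/*
 * The table above is used when generating send completions: the opcode
 * of the posted work request indexes the table to produce the
 * ib_wc_opcode reported in the completion entry. For example, a request
 * posted with IB_WR_RDMA_WRITE_WITH_IMM is reported back as
 * IB_WC_RDMA_WRITE.
 */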
132 __be64 ib_qib_sys_image_guid;
134 /**
135 * qib_copy_sge - copy data to SGE memory
136 * @ss: the SGE state
137 * @data: the data to copy
138 * @length: the length of the data
139 */
140 void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length, int release)
141 {
142 struct rvt_sge *sge = &ss->sge;
144 while (length) {
145 u32 len = rvt_get_sge_length(sge, length);
147 WARN_ON_ONCE(len == 0);
148 memcpy(sge->vaddr, data, len);
149 rvt_update_sge(ss, len, release);
150 data += len;
151 length -= len;
152 }
153 }
156 * Count the number of DMA descriptors needed to send length bytes of data.
157 * Don't modify the qib_sge_state to get the count.
158 * Return zero if any of the segments is not aligned.
160 static u32 qib_count_sge(struct rvt_sge_state *ss, u32 length)
162 struct rvt_sge *sg_list = ss->sg_list;
163 struct rvt_sge sge = ss->sge;
164 u8 num_sge = ss->num_sge;
165 u32 ndesc = 1; /* count the header */
168 u32 len = sge.length;
172 if (len > sge.sge_length)
173 len = sge.sge_length;
175 if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
176 (len != length && (len & (sizeof(u32) - 1)))) {
183 sge.sge_length -= len;
184 if (sge.sge_length == 0) {
187 } else if (sge.length == 0 && sge.mr->lkey) {
188 if (++sge.n >= RVT_SEGSZ) {
189 if (++sge.m >= sge.mr->mapsz)
194 sge.mr->map[sge.m]->segs[sge.n].vaddr;
196 sge.mr->map[sge.m]->segs[sge.n].length;
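/*
 * The descriptor count returned by qib_count_sge() above is compared
 * against ppd->sdma_descq_cnt in qib_verbs_send_dma(): a return of zero
 * (an unaligned segment) or a count that does not fit in the descriptor
 * ring forces the slower path that copies the payload into a single
 * aligned bounce buffer before handing it to the SDMA engine.
 */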
204 * Copy from the SGEs to the data buffer.
206 static void qib_copy_from_sge(void *data, struct rvt_sge_state *ss, u32 length)
208 struct rvt_sge *sge = &ss->sge;
211 u32 len = sge->length;
215 if (len > sge->sge_length)
216 len = sge->sge_length;
218 memcpy(data, sge->vaddr, len);
221 sge->sge_length -= len;
222 if (sge->sge_length == 0) {
224 *sge = *ss->sg_list++;
225 } else if (sge->length == 0 && sge->mr->lkey) {
226 if (++sge->n >= RVT_SEGSZ) {
227 if (++sge->m >= sge->mr->mapsz)
232 sge->mr->map[sge->m]->segs[sge->n].vaddr;
234 sge->mr->map[sge->m]->segs[sge->n].length;
242 * qib_qp_rcv - processing an incoming packet on a QP
243 * @rcd: the context pointer
244 * @hdr: the packet header
245 * @has_grh: true if the packet has a GRH
246 * @data: the packet data
247 * @tlen: the packet length
248 * @qp: the QP the packet came on
250 * This is called from qib_ib_rcv() to process an incoming packet
251 * for the given QP.
252 * Called at interrupt level.
254 static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
255 int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
257 struct qib_ibport *ibp = &rcd->ppd->ibport_data;
259 spin_lock(&qp->r_lock);
261 /* Check for valid receive state. */
262 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
263 ibp->rvp.n_pkt_drops++;
267 switch (qp->ibqp.qp_type) {
270 if (ib_qib_disable_sma)
274 qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
278 qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
282 qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);
290 spin_unlock(&qp->r_lock);
294 * qib_ib_rcv - process an incoming packet
295 * @rcd: the context pointer
296 * @rhdr: the header of the packet
297 * @data: the packet payload
298 * @tlen: the packet length
300 * This is called from qib_kreceive() to process an incoming packet at
301 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
303 void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
305 struct qib_pportdata *ppd = rcd->ppd;
306 struct qib_ibport *ibp = &ppd->ibport_data;
307 struct ib_header *hdr = rhdr;
308 struct qib_devdata *dd = ppd->dd;
309 struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
310 struct ib_other_headers *ohdr;
317 /* 24 == LRH+BTH+CRC */
318 if (unlikely(tlen < 24))
321 /* Check for a valid destination LID (see ch. 7.11.1). */
322 lid = be16_to_cpu(hdr->lrh[1]);
323 if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
324 lid &= ~((1 << ppd->lmc) - 1);
325 if (unlikely(lid != ppd->lid))
330 lnh = be16_to_cpu(hdr->lrh[0]) & 3;
331 if (lnh == QIB_LRH_BTH)
333 else if (lnh == QIB_LRH_GRH) {
336 ohdr = &hdr->u.l.oth;
337 if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
339 vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
340 if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
345 opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
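/*
 * At this point ohdr points at the BTH: it directly follows the 8-byte
 * LRH for locally routed packets, or the 40-byte GRH when one is
 * present. The opcode just extracted is taken from the most significant
 * byte of bth[0]; it drives the per-opcode statistics below and,
 * indirectly, which per-QP receive routine handles the packet.
 */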
346 #ifdef CONFIG_DEBUG_FS
347 rcd->opstats->stats[opcode].n_bytes += tlen;
348 rcd->opstats->stats[opcode].n_packets++;
351 /* Get the destination QP number. */
352 qp_num = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
353 if (qp_num == QIB_MULTICAST_QPN) {
354 struct rvt_mcast *mcast;
355 struct rvt_mcast_qp *p;
357 if (lnh != QIB_LRH_GRH)
359 mcast = rvt_mcast_find(&ibp->rvp, &hdr->u.l.grh.dgid, lid);
362 this_cpu_inc(ibp->pmastats->n_multicast_rcv);
364 list_for_each_entry_rcu(p, &mcast->qp_list, list)
365 qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
367 /*
368 * Notify rvt_multicast_detach() if it is waiting for us
369 * to finish.
370 */
371 if (atomic_dec_return(&mcast->refcount) <= 1)
372 wake_up(&mcast->wait);
375 qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
380 this_cpu_inc(ibp->pmastats->n_unicast_rcv);
381 qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
387 ibp->rvp.n_pkt_drops++;
391 * This is called from a timer to check for QPs
392 * which need kernel memory in order to send a packet.
394 static void mem_timer(unsigned long data)
396 struct qib_ibdev *dev = (struct qib_ibdev *) data;
397 struct list_head *list = &dev->memwait;
398 struct rvt_qp *qp = NULL;
399 struct qib_qp_priv *priv = NULL;
402 spin_lock_irqsave(&dev->rdi.pending_lock, flags);
403 if (!list_empty(list)) {
404 priv = list_entry(list->next, struct qib_qp_priv, iowait);
406 list_del_init(&priv->iowait);
408 if (!list_empty(list))
409 mod_timer(&dev->mem_timer, jiffies + 1);
411 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
414 spin_lock_irqsave(&qp->s_lock, flags);
415 if (qp->s_flags & RVT_S_WAIT_KMEM) {
416 qp->s_flags &= ~RVT_S_WAIT_KMEM;
417 qib_schedule_send(qp);
419 spin_unlock_irqrestore(&qp->s_lock, flags);
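/*
 * mem_timer() pairs with wait_kmem() further down: when a QP cannot
 * allocate kernel memory for a send it is parked on dev->memwait with
 * RVT_S_WAIT_KMEM set, and this timer wakes the waiting QPs back up,
 * one per tick, to retry the send.
 */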
424 #ifdef __LITTLE_ENDIAN
425 static inline u32 get_upper_bits(u32 data, u32 shift)
427 return data >> shift;
430 static inline u32 set_upper_bits(u32 data, u32 shift)
432 return data << shift;
435 static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
437 data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
438 data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
442 static inline u32 get_upper_bits(u32 data, u32 shift)
444 return data << shift;
447 static inline u32 set_upper_bits(u32 data, u32 shift)
449 return data >> shift;
452 static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
454 data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
455 data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
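/*
 * The two sets of helpers above are the __LITTLE_ENDIAN and big-endian
 * variants of the same idea: they let copy_io() assemble bytes from an
 * unaligned source into full 32-bit words so that every write to the
 * PIO buffer is a naturally aligned dword. "Upper" here means the bytes
 * that come later in the packet, which is why the shift directions are
 * mirrored between the two builds.
 */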
460 static void copy_io(u32 __iomem *piobuf, struct rvt_sge_state *ss,
461 u32 length, unsigned flush_wc)
468 u32 len = ss->sge.length;
473 if (len > ss->sge.sge_length)
474 len = ss->sge.sge_length;
476 /* If the source address is not aligned, try to align it. */
477 off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
479 u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
481 u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
484 y = sizeof(u32) - off;
487 if (len + extra >= sizeof(u32)) {
488 data |= set_upper_bits(v, extra *
490 len = sizeof(u32) - extra;
495 __raw_writel(data, piobuf);
500 /* Clear unused upper bytes */
501 data |= clear_upper_bytes(v, len, extra);
509 /* Source address is aligned. */
510 u32 *addr = (u32 *) ss->sge.vaddr;
511 int shift = extra * BITS_PER_BYTE;
512 int ushift = 32 - shift;
515 while (l >= sizeof(u32)) {
518 data |= set_upper_bits(v, shift);
519 __raw_writel(data, piobuf);
520 data = get_upper_bits(v, ushift);
526 * We still have 'extra' number of bytes leftover.
531 if (l + extra >= sizeof(u32)) {
532 data |= set_upper_bits(v, shift);
533 len -= l + extra - sizeof(u32);
538 __raw_writel(data, piobuf);
543 /* Clear unused upper bytes */
544 data |= clear_upper_bytes(v, l, extra);
551 } else if (len == length) {
555 } else if (len == length) {
559 * Need to round up for the last dword in the
563 qib_pio_copy(piobuf, ss->sge.vaddr, w - 1);
565 last = ((u32 *) ss->sge.vaddr)[w - 1];
570 qib_pio_copy(piobuf, ss->sge.vaddr, w);
573 extra = len & (sizeof(u32) - 1);
575 u32 v = ((u32 *) ss->sge.vaddr)[w];
577 /* Clear unused upper bytes */
578 data = clear_upper_bytes(v, extra, 0);
581 rvt_update_sge(ss, len, false);
584 /* Update address before sending packet. */
585 rvt_update_sge(ss, length, false);
587 /* must flush early everything before trigger word */
589 __raw_writel(last, piobuf);
590 /* be sure trigger word is written */
593 __raw_writel(last, piobuf);
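/*
 * Note the trigger-word handling at the end of copy_io(): the final
 * dword of the packet is held back in 'last' and written only after
 * everything else. On chips with the QIB_PIO_FLUSH_WC quirk the
 * write-combining buffers are flushed both before and after that final
 * write so the hardware never sees the trigger word ahead of the rest
 * of the packet.
 */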
596 static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
599 struct qib_qp_priv *priv = qp->priv;
600 struct qib_verbs_txreq *tx;
603 spin_lock_irqsave(&qp->s_lock, flags);
604 spin_lock(&dev->rdi.pending_lock);
606 if (!list_empty(&dev->txreq_free)) {
607 struct list_head *l = dev->txreq_free.next;
610 spin_unlock(&dev->rdi.pending_lock);
611 spin_unlock_irqrestore(&qp->s_lock, flags);
612 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
614 if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK &&
615 list_empty(&priv->iowait)) {
617 qp->s_flags |= RVT_S_WAIT_TX;
618 list_add_tail(&priv->iowait, &dev->txwait);
620 qp->s_flags &= ~RVT_S_BUSY;
621 spin_unlock(&dev->rdi.pending_lock);
622 spin_unlock_irqrestore(&qp->s_lock, flags);
623 tx = ERR_PTR(-EBUSY);
628 static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
631 struct qib_verbs_txreq *tx;
634 spin_lock_irqsave(&dev->rdi.pending_lock, flags);
635 /* assume the txreq free list is non-empty (the common case) */
636 if (likely(!list_empty(&dev->txreq_free))) {
637 struct list_head *l = dev->txreq_free.next;
640 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
641 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
643 /* call slow path to get the extra lock */
644 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
645 tx = __get_txreq(dev, qp);
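/*
 * get_txreq() is the fast path: it takes only the pending_lock and
 * expects the free list to be non-empty. When the list is exhausted it
 * falls back to __get_txreq() above, which retakes both the QP's s_lock
 * and the pending_lock so the QP can be queued on dev->txwait with
 * RVT_S_WAIT_TX and woken later from qib_put_txreq().
 */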
650 void qib_put_txreq(struct qib_verbs_txreq *tx)
652 struct qib_ibdev *dev;
654 struct qib_qp_priv *priv;
658 dev = to_idev(qp->ibqp.device);
664 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
665 tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
666 dma_unmap_single(&dd_from_dev(dev)->pcidev->dev,
667 tx->txreq.addr, tx->hdr_dwords << 2,
669 kfree(tx->align_buf);
672 spin_lock_irqsave(&dev->rdi.pending_lock, flags);
674 /* Put struct back on free list */
675 list_add(&tx->txreq.list, &dev->txreq_free);
677 if (!list_empty(&dev->txwait)) {
678 /* Wake up first QP wanting a free struct */
679 priv = list_entry(dev->txwait.next, struct qib_qp_priv,
682 list_del_init(&priv->iowait);
684 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
686 spin_lock_irqsave(&qp->s_lock, flags);
687 if (qp->s_flags & RVT_S_WAIT_TX) {
688 qp->s_flags &= ~RVT_S_WAIT_TX;
689 qib_schedule_send(qp);
691 spin_unlock_irqrestore(&qp->s_lock, flags);
695 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
698 /*
699 * This is called when there are send DMA descriptors that might be
700 * available.
701 *
702 * This is called with ppd->sdma_lock held.
703 */
704 void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
706 struct rvt_qp *qp, *nqp;
707 struct qib_qp_priv *qpp, *nqpp;
708 struct rvt_qp *qps[20];
709 struct qib_ibdev *dev;
713 dev = &ppd->dd->verbs_dev;
714 spin_lock(&dev->rdi.pending_lock);
716 /* Search wait list for first QP wanting DMA descriptors. */
717 list_for_each_entry_safe(qpp, nqpp, &dev->dmawait, iowait) {
720 if (qp->port_num != ppd->port)
722 if (n == ARRAY_SIZE(qps))
724 if (qpp->s_tx->txreq.sg_count > avail)
726 avail -= qpp->s_tx->txreq.sg_count;
727 list_del_init(&qpp->iowait);
732 spin_unlock(&dev->rdi.pending_lock);
734 for (i = 0; i < n; i++) {
736 spin_lock(&qp->s_lock);
737 if (qp->s_flags & RVT_S_WAIT_DMA_DESC) {
738 qp->s_flags &= ~RVT_S_WAIT_DMA_DESC;
739 qib_schedule_send(qp);
741 spin_unlock(&qp->s_lock);
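/*
 * qib_verbs_sdma_desc_avail() batches the woken QPs into the local
 * qps[] array while holding the pending_lock and only takes each QP's
 * s_lock after dropping it; elsewhere in this file the s_lock is always
 * taken before the pending_lock, so acquiring it the other way around
 * here would invert that ordering.
 */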
747 * This is called with ppd->sdma_lock held.
749 static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
751 struct qib_verbs_txreq *tx =
752 container_of(cookie, struct qib_verbs_txreq, txreq);
753 struct rvt_qp *qp = tx->qp;
754 struct qib_qp_priv *priv = qp->priv;
756 spin_lock(&qp->s_lock);
758 qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
759 else if (qp->ibqp.qp_type == IB_QPT_RC) {
760 struct ib_header *hdr;
762 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
763 hdr = &tx->align_buf->hdr;
765 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
767 hdr = &dev->pio_hdrs[tx->hdr_inx].hdr;
769 qib_rc_send_complete(qp, hdr);
771 if (atomic_dec_and_test(&priv->s_dma_busy)) {
772 if (qp->state == IB_QPS_RESET)
773 wake_up(&priv->wait_dma);
774 else if (qp->s_flags & RVT_S_WAIT_DMA) {
775 qp->s_flags &= ~RVT_S_WAIT_DMA;
776 qib_schedule_send(qp);
779 spin_unlock(&qp->s_lock);
784 static int wait_kmem(struct qib_ibdev *dev, struct rvt_qp *qp)
786 struct qib_qp_priv *priv = qp->priv;
790 spin_lock_irqsave(&qp->s_lock, flags);
791 if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
792 spin_lock(&dev->rdi.pending_lock);
793 if (list_empty(&priv->iowait)) {
794 if (list_empty(&dev->memwait))
795 mod_timer(&dev->mem_timer, jiffies + 1);
796 qp->s_flags |= RVT_S_WAIT_KMEM;
797 list_add_tail(&priv->iowait, &dev->memwait);
799 spin_unlock(&dev->rdi.pending_lock);
800 qp->s_flags &= ~RVT_S_BUSY;
803 spin_unlock_irqrestore(&qp->s_lock, flags);
808 static int qib_verbs_send_dma(struct rvt_qp *qp, struct ib_header *hdr,
809 u32 hdrwords, struct rvt_sge_state *ss, u32 len,
810 u32 plen, u32 dwords)
812 struct qib_qp_priv *priv = qp->priv;
813 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
814 struct qib_devdata *dd = dd_from_dev(dev);
815 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
816 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
817 struct qib_verbs_txreq *tx;
818 struct qib_pio_header *phdr;
826 /* resend previously constructed packet */
827 ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
831 tx = get_txreq(dev, qp);
835 control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
836 be16_to_cpu(hdr->lrh[0]) >> 12);
839 tx->mr = qp->s_rdma_mr;
841 qp->s_rdma_mr = NULL;
842 tx->txreq.callback = sdma_complete;
843 if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
844 tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
846 tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
847 if (plen + 1 > dd->piosize2kmax_dwords)
848 tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;
852 * Don't try to DMA if it takes more descriptors than
855 ndesc = qib_count_sge(ss, len);
856 if (ndesc >= ppd->sdma_descq_cnt)
861 phdr = &dev->pio_hdrs[tx->hdr_inx];
862 phdr->pbc[0] = cpu_to_le32(plen);
863 phdr->pbc[1] = cpu_to_le32(control);
864 memcpy(&phdr->hdr, hdr, hdrwords << 2);
865 tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
866 tx->txreq.sg_count = ndesc;
867 tx->txreq.addr = dev->pio_hdrs_phys +
868 tx->hdr_inx * sizeof(struct qib_pio_header);
869 tx->hdr_dwords = hdrwords + 2; /* add PBC length */
870 ret = qib_sdma_verbs_send(ppd, ss, dwords, tx);
874 /* Allocate a buffer and copy the header and payload to it. */
875 tx->hdr_dwords = plen + 1;
876 phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
879 phdr->pbc[0] = cpu_to_le32(plen);
880 phdr->pbc[1] = cpu_to_le32(control);
881 memcpy(&phdr->hdr, hdr, hdrwords << 2);
882 qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);
884 tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
885 tx->hdr_dwords << 2, DMA_TO_DEVICE);
886 if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
888 tx->align_buf = phdr;
889 tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
890 tx->txreq.sg_count = 1;
891 ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
898 ret = wait_kmem(dev, qp);
900 ibp->rvp.n_unaligned++;
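/*
 * Summary of the SDMA path above: when qib_count_sge() reports that the
 * payload is dword aligned and fits in the descriptor ring, the header
 * comes from the preallocated dev->pio_hdrs[] array and the payload is
 * fed to the SDMA engine straight from the SGE list; otherwise the
 * header and payload are copied into a single kmalloc'd bounce buffer
 * (tx->align_buf), DMA-mapped, and sent as one contiguous element.
 */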
908 /*
909 * If we are now in the error state, return zero to flush the
910 * send work request.
911 */
912 static int no_bufs_available(struct rvt_qp *qp)
914 struct qib_qp_priv *priv = qp->priv;
915 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
916 struct qib_devdata *dd;
921 * Note that as soon as want_buffer() is called and
922 * possibly before it returns, qib_ib_piobufavail()
923 * could be called. Therefore, put QP on the I/O wait list before
924 * enabling the PIO avail interrupt.
926 spin_lock_irqsave(&qp->s_lock, flags);
927 if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
928 spin_lock(&dev->rdi.pending_lock);
929 if (list_empty(&priv->iowait)) {
931 qp->s_flags |= RVT_S_WAIT_PIO;
932 list_add_tail(&priv->iowait, &dev->piowait);
933 dd = dd_from_dev(dev);
934 dd->f_wantpiobuf_intr(dd, 1);
936 spin_unlock(&dev->rdi.pending_lock);
937 qp->s_flags &= ~RVT_S_BUSY;
940 spin_unlock_irqrestore(&qp->s_lock, flags);
944 static int qib_verbs_send_pio(struct rvt_qp *qp, struct ib_header *ibhdr,
945 u32 hdrwords, struct rvt_sge_state *ss, u32 len,
946 u32 plen, u32 dwords)
948 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
949 struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
950 u32 *hdr = (u32 *) ibhdr;
951 u32 __iomem *piobuf_orig;
959 control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
960 be16_to_cpu(ibhdr->lrh[0]) >> 12);
961 pbc = ((u64) control << 32) | plen;
962 piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
963 if (unlikely(piobuf == NULL))
964 return no_bufs_available(qp);
968 * We have to flush after the PBC for correctness on some cpus
969 * or WC buffer can be written out of order.
972 piobuf_orig = piobuf;
975 flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
978 * If there is just the header portion, must flush before
979 * writing last word of header for correctness, and after
980 * the last header word (trigger word).
984 qib_pio_copy(piobuf, hdr, hdrwords - 1);
986 __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
989 qib_pio_copy(piobuf, hdr, hdrwords);
995 qib_pio_copy(piobuf, hdr, hdrwords);
998 /* The common case is aligned and contained in one segment. */
999 if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
1000 !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
1001 u32 *addr = (u32 *) ss->sge.vaddr;
1003 /* Update address before sending packet. */
1004 rvt_update_sge(ss, len, false);
1006 qib_pio_copy(piobuf, addr, dwords - 1);
1007 /* must flush early everything before trigger word */
1009 __raw_writel(addr[dwords - 1], piobuf + dwords - 1);
1010 /* be sure trigger word is written */
1013 qib_pio_copy(piobuf, addr, dwords);
1016 copy_io(piobuf, ss, len, flush_wc);
1018 if (dd->flags & QIB_USE_SPCL_TRIG) {
1019 u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
1022 __raw_writel(0xaebecede, piobuf_orig + spcl_off);
1024 qib_sendbuf_done(dd, pbufn);
1025 if (qp->s_rdma_mr) {
1026 rvt_put_mr(qp->s_rdma_mr);
1027 qp->s_rdma_mr = NULL;
1030 spin_lock_irqsave(&qp->s_lock, flags);
1031 qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
1032 spin_unlock_irqrestore(&qp->s_lock, flags);
1033 } else if (qp->ibqp.qp_type == IB_QPT_RC) {
1034 spin_lock_irqsave(&qp->s_lock, flags);
1035 qib_rc_send_complete(qp, ibhdr);
1036 spin_unlock_irqrestore(&qp->s_lock, flags);
1042 * qib_verbs_send - send a packet
1043 * @qp: the QP to send on
1044 * @hdr: the packet header
1045 * @hdrwords: the number of 32-bit words in the header
1046 * @ss: the SGE to send
1047 * @len: the length of the packet in bytes
1049 * Return zero if packet is sent or queued OK.
1050 * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise.
1052 int qib_verbs_send(struct rvt_qp *qp, struct ib_header *hdr,
1053 u32 hdrwords, struct rvt_sge_state *ss, u32 len)
1055 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
1058 u32 dwords = (len + 3) >> 2;
1061 * Calculate the send buffer trigger address.
1062 * The +1 counts for the pbc control dword following the pbc length.
1064 plen = hdrwords + dwords + 1;
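/*
 * plen is the packet length in dwords as seen by the send buffer.
 * For example (illustrative), with hdrwords = 10 and a 256-byte
 * payload, dwords = (256 + 3) >> 2 = 64 and plen = 10 + 64 + 1 = 75,
 * the +1 being the PBC control dword noted in the comment above.
 */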
1067 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
1068 * can defer SDMA restart until link goes ACTIVE without
1069 * worrying about just how we got there.
1071 if (qp->ibqp.qp_type == IB_QPT_SMI ||
1072 !(dd->flags & QIB_HAS_SEND_DMA))
1073 ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
1076 ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
1082 int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
1083 u64 *rwords, u64 *spkts, u64 *rpkts,
1087 struct qib_devdata *dd = ppd->dd;
1089 if (!(dd->flags & QIB_PRESENT)) {
1090 /* no hardware, freeze, etc. */
1094 *swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
1095 *rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
1096 *spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
1097 *rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
1098 *xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);
1107 * qib_get_counters - get various chip counters
1108 * @ppd: the physical port of the qlogic_ib device
1109 * @cntrs: counters are placed here
1111 * Return the counters needed by recv_pma_get_portcounters().
1113 int qib_get_counters(struct qib_pportdata *ppd,
1114 struct qib_verbs_counters *cntrs)
1118 if (!(ppd->dd->flags & QIB_PRESENT)) {
1119 /* no hardware, freeze, etc. */
1123 cntrs->symbol_error_counter =
1124 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
1125 cntrs->link_error_recovery_counter =
1126 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
1128 * The link downed counter counts when the other side downs the
1129 * connection. We add in the number of times we downed the link
1130 * due to local link integrity errors to compensate.
1132 cntrs->link_downed_counter =
1133 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
1134 cntrs->port_rcv_errors =
1135 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
1136 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
1137 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
1138 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
1139 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
1140 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
1141 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
1142 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
1143 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
1144 cntrs->port_rcv_errors +=
1145 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
1146 cntrs->port_rcv_errors +=
1147 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
1148 cntrs->port_rcv_remphys_errors =
1149 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
1150 cntrs->port_xmit_discards =
1151 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
1152 cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd,
1153 QIBPORTCNTR_WORDSEND);
1154 cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd,
1155 QIBPORTCNTR_WORDRCV);
1156 cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd,
1157 QIBPORTCNTR_PKTSEND);
1158 cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd,
1159 QIBPORTCNTR_PKTRCV);
1160 cntrs->local_link_integrity_errors =
1161 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
1162 cntrs->excessive_buffer_overrun_errors =
1163 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
1164 cntrs->vl15_dropped =
1165 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);
1174 * qib_ib_piobufavail - callback when a PIO buffer is available
1175 * @dd: the device pointer
1177 * This is called from qib_intr() at interrupt level when a PIO buffer is
1178 * available after qib_verbs_send() returned an error that no buffers were
1179 * available. Disable the interrupt if there are no more QPs waiting.
1181 void qib_ib_piobufavail(struct qib_devdata *dd)
1183 struct qib_ibdev *dev = &dd->verbs_dev;
1184 struct list_head *list;
1185 struct rvt_qp *qps[5];
1187 unsigned long flags;
1189 struct qib_qp_priv *priv;
1191 list = &dev->piowait;
1195 * Note: checking that the piowait list is empty and clearing
1196 * the buffer available interrupt needs to be atomic or we
1197 * could end up with QPs on the wait list with the interrupt
1200 spin_lock_irqsave(&dev->rdi.pending_lock, flags);
1201 while (!list_empty(list)) {
1202 if (n == ARRAY_SIZE(qps))
1204 priv = list_entry(list->next, struct qib_qp_priv, iowait);
1206 list_del_init(&priv->iowait);
1210 dd->f_wantpiobuf_intr(dd, 0);
1212 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
1214 for (i = 0; i < n; i++) {
1217 spin_lock_irqsave(&qp->s_lock, flags);
1218 if (qp->s_flags & RVT_S_WAIT_PIO) {
1219 qp->s_flags &= ~RVT_S_WAIT_PIO;
1220 qib_schedule_send(qp);
1222 spin_unlock_irqrestore(&qp->s_lock, flags);
1224 /* Notify qib_destroy_qp() if it is waiting. */
1229 static int qib_query_port(struct rvt_dev_info *rdi, u8 port_num,
1230 struct ib_port_attr *props)
1232 struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
1233 struct qib_devdata *dd = dd_from_dev(ibdev);
1234 struct qib_pportdata *ppd = &dd->pport[port_num - 1];
1238 /* props being zeroed by the caller, avoid zeroing it here */
1239 props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
1240 props->lmc = ppd->lmc;
1241 props->state = dd->f_iblink_state(ppd->lastibcstat);
1242 props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
1243 props->gid_tbl_len = QIB_GUIDS_PER_PORT;
1244 props->active_width = ppd->link_width_active;
1245 /* See rate_show() */
1246 props->active_speed = ppd->link_speed_active;
1247 props->max_vl_num = qib_num_vls(ppd->vls_supported);
1249 props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
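/*
 * ppd->ibmtu holds the active MTU in bytes; the switch below translates
 * it into the corresponding enum ib_mtu value (IB_MTU_256 through
 * IB_MTU_4096) reported in props->active_mtu.
 */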
1250 switch (ppd->ibmtu) {
1269 props->active_mtu = mtu;
1274 static int qib_modify_device(struct ib_device *device,
1275 int device_modify_mask,
1276 struct ib_device_modify *device_modify)
1278 struct qib_devdata *dd = dd_from_ibdev(device);
1282 if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
1283 IB_DEVICE_MODIFY_NODE_DESC)) {
1288 if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
1289 memcpy(device->node_desc, device_modify->node_desc,
1290 IB_DEVICE_NODE_DESC_MAX);
1291 for (i = 0; i < dd->num_pports; i++) {
1292 struct qib_ibport *ibp = &dd->pport[i].ibport_data;
1294 qib_node_desc_chg(ibp);
1298 if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
1299 ib_qib_sys_image_guid =
1300 cpu_to_be64(device_modify->sys_image_guid);
1301 for (i = 0; i < dd->num_pports; i++) {
1302 struct qib_ibport *ibp = &dd->pport[i].ibport_data;
1304 qib_sys_guid_chg(ibp);
1314 static int qib_shut_down_port(struct rvt_dev_info *rdi, u8 port_num)
1316 struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
1317 struct qib_devdata *dd = dd_from_dev(ibdev);
1318 struct qib_pportdata *ppd = &dd->pport[port_num - 1];
1320 qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
1325 static int qib_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
1326 int guid_index, __be64 *guid)
1328 struct qib_ibport *ibp = container_of(rvp, struct qib_ibport, rvp);
1329 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1331 if (guid_index == 0)
1333 else if (guid_index < QIB_GUIDS_PER_PORT)
1334 *guid = ibp->guids[guid_index - 1];
1341 int qib_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
1343 if (rdma_ah_get_sl(ah_attr) > 15)
1346 if (rdma_ah_get_dlid(ah_attr) == 0)
1348 if (rdma_ah_get_dlid(ah_attr) >=
1349 be16_to_cpu(IB_MULTICAST_LID_BASE) &&
1350 rdma_ah_get_dlid(ah_attr) !=
1351 be16_to_cpu(IB_LID_PERMISSIVE) &&
1352 !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
1358 static void qib_notify_new_ah(struct ib_device *ibdev,
1359 struct rdma_ah_attr *ah_attr,
1362 struct qib_ibport *ibp;
1363 struct qib_pportdata *ppd;
1366 * Do not trust reading anything from rvt_ah at this point as it is not
1367 * done being set up. We can, however, modify things which we need to set.
1370 ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
1371 ppd = ppd_from_ibp(ibp);
1372 ah->vl = ibp->sl_to_vl[rdma_ah_get_sl(&ah->attr)];
1373 ah->log_pmtu = ilog2(ppd->ibmtu);
1376 struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
1378 struct rdma_ah_attr attr;
1379 struct ib_ah *ah = ERR_PTR(-EINVAL);
1381 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1382 struct qib_devdata *dd = dd_from_ppd(ppd);
1383 u8 port_num = ppd->port;
1385 memset(&attr, 0, sizeof(attr));
1386 attr.type = rdma_ah_find_type(&dd->verbs_dev.rdi.ibdev, port_num);
1387 rdma_ah_set_dlid(&attr, dlid);
1388 rdma_ah_set_port_num(&attr, port_num);
1390 qp0 = rcu_dereference(ibp->rvp.qp[0]);
1392 ah = rdma_create_ah(qp0->ibqp.pd, &attr);
1398 * qib_get_npkeys - return the size of the PKEY table for context 0
1399 * @dd: the qlogic_ib device
1401 unsigned qib_get_npkeys(struct qib_devdata *dd)
1403 return ARRAY_SIZE(dd->rcd[0]->pkeys);
1407 * Return the indexed PKEY from the port PKEY table.
1408 * No need to validate rcd[ctxt]; the port is set up if we are here.
1410 unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
1412 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1413 struct qib_devdata *dd = ppd->dd;
1414 unsigned ctxt = ppd->hw_pidx;
1417 /* dd->rcd null if mini_init or some init failures */
1418 if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
1421 ret = dd->rcd[ctxt]->pkeys[index];
1426 static void init_ibport(struct qib_pportdata *ppd)
1428 struct qib_verbs_counters cntrs;
1429 struct qib_ibport *ibp = &ppd->ibport_data;
1431 spin_lock_init(&ibp->rvp.lock);
1432 /* Set the prefix to the default value (see ch. 4.1.1) */
1433 ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
1434 ibp->rvp.sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
1435 ibp->rvp.port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
1436 IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
1437 IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
1438 IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
1439 IB_PORT_OTHER_LOCAL_CHANGES_SUP;
1440 if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
1441 ibp->rvp.port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
1442 ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
1443 ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
1444 ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
1445 ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
1446 ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
1448 /* Snapshot current HW counters to "clear" them. */
1449 qib_get_counters(ppd, &cntrs);
1450 ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
1451 ibp->z_link_error_recovery_counter =
1452 cntrs.link_error_recovery_counter;
1453 ibp->z_link_downed_counter = cntrs.link_downed_counter;
1454 ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
1455 ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors;
1456 ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
1457 ibp->z_port_xmit_data = cntrs.port_xmit_data;
1458 ibp->z_port_rcv_data = cntrs.port_rcv_data;
1459 ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
1460 ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
1461 ibp->z_local_link_integrity_errors =
1462 cntrs.local_link_integrity_errors;
1463 ibp->z_excessive_buffer_overrun_errors =
1464 cntrs.excessive_buffer_overrun_errors;
1465 ibp->z_vl15_dropped = cntrs.vl15_dropped;
1466 RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
1467 RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
1471 * qib_fill_device_attr - Fill in rvt dev info device attributes.
1472 * @dd: the device data structure
1474 static void qib_fill_device_attr(struct qib_devdata *dd)
1476 struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
1478 memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));
1480 rdi->dparms.props.max_pd = ib_qib_max_pds;
1481 rdi->dparms.props.max_ah = ib_qib_max_ahs;
1482 rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
1483 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
1484 IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
1485 IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
1486 rdi->dparms.props.page_size_cap = PAGE_SIZE;
1487 rdi->dparms.props.vendor_id =
1488 QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
1489 rdi->dparms.props.vendor_part_id = dd->deviceid;
1490 rdi->dparms.props.hw_ver = dd->minrev;
1491 rdi->dparms.props.sys_image_guid = ib_qib_sys_image_guid;
1492 rdi->dparms.props.max_mr_size = ~0ULL;
1493 rdi->dparms.props.max_qp = ib_qib_max_qps;
1494 rdi->dparms.props.max_qp_wr = ib_qib_max_qp_wrs;
1495 rdi->dparms.props.max_sge = ib_qib_max_sges;
1496 rdi->dparms.props.max_sge_rd = ib_qib_max_sges;
1497 rdi->dparms.props.max_cq = ib_qib_max_cqs;
1498 rdi->dparms.props.max_cqe = ib_qib_max_cqes;
1499 rdi->dparms.props.max_ah = ib_qib_max_ahs;
1500 rdi->dparms.props.max_map_per_fmr = 32767;
1501 rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
1502 rdi->dparms.props.max_qp_init_rd_atom = 255;
1503 rdi->dparms.props.max_srq = ib_qib_max_srqs;
1504 rdi->dparms.props.max_srq_wr = ib_qib_max_srq_wrs;
1505 rdi->dparms.props.max_srq_sge = ib_qib_max_srq_sges;
1506 rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB;
1507 rdi->dparms.props.max_pkeys = qib_get_npkeys(dd);
1508 rdi->dparms.props.max_mcast_grp = ib_qib_max_mcast_grps;
1509 rdi->dparms.props.max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
1510 rdi->dparms.props.max_total_mcast_qp_attach =
1511 rdi->dparms.props.max_mcast_qp_attach *
1512 rdi->dparms.props.max_mcast_grp;
1513 /* post send table */
1514 dd->verbs_dev.rdi.post_parms = qib_post_parms;
1518 * qib_register_ib_device - register our device with the infiniband core
1519 * @dd: the device data structure
1520 * Return 0 on success or a negative errno on failure.
1522 int qib_register_ib_device(struct qib_devdata *dd)
1524 struct qib_ibdev *dev = &dd->verbs_dev;
1525 struct ib_device *ibdev = &dev->rdi.ibdev;
1526 struct qib_pportdata *ppd = dd->pport;
1530 get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
1531 for (i = 0; i < dd->num_pports; i++)
1532 init_ibport(ppd + i);
1534 /* Only need to initialize non-zero fields. */
1535 setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev);
1537 INIT_LIST_HEAD(&dev->piowait);
1538 INIT_LIST_HEAD(&dev->dmawait);
1539 INIT_LIST_HEAD(&dev->txwait);
1540 INIT_LIST_HEAD(&dev->memwait);
1541 INIT_LIST_HEAD(&dev->txreq_free);
1543 if (ppd->sdma_descq_cnt) {
1544 dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev,
1545 ppd->sdma_descq_cnt *
1546 sizeof(struct qib_pio_header),
1547 &dev->pio_hdrs_phys,
1549 if (!dev->pio_hdrs) {
1555 for (i = 0; i < ppd->sdma_descq_cnt; i++) {
1556 struct qib_verbs_txreq *tx;
1558 tx = kzalloc(sizeof(*tx), GFP_KERNEL);
1564 list_add(&tx->txreq.list, &dev->txreq_free);
1568 * The system image GUID is supposed to be the same for all
1569 * IB HCAs in a single system but since there can be other
1570 * device types in the system, we can't be sure this is unique.
1572 if (!ib_qib_sys_image_guid)
1573 ib_qib_sys_image_guid = ppd->guid;
1575 strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
1576 ibdev->owner = THIS_MODULE;
1577 ibdev->node_guid = ppd->guid;
1578 ibdev->phys_port_cnt = dd->num_pports;
1579 ibdev->dev.parent = &dd->pcidev->dev;
1580 ibdev->modify_device = qib_modify_device;
1581 ibdev->process_mad = qib_process_mad;
1583 snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
1584 "Intel Infiniband HCA %s", init_utsname()->nodename);
1587 * Fill in rvt info object.
1589 dd->verbs_dev.rdi.driver_f.port_callback = qib_create_port_files;
1590 dd->verbs_dev.rdi.driver_f.get_card_name = qib_get_card_name;
1591 dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev;
1592 dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah;
1593 dd->verbs_dev.rdi.driver_f.check_send_wqe = qib_check_send_wqe;
1594 dd->verbs_dev.rdi.driver_f.notify_new_ah = qib_notify_new_ah;
1595 dd->verbs_dev.rdi.driver_f.alloc_qpn = qib_alloc_qpn;
1596 dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qib_qp_priv_alloc;
1597 dd->verbs_dev.rdi.driver_f.qp_priv_free = qib_qp_priv_free;
1598 dd->verbs_dev.rdi.driver_f.free_all_qps = qib_free_all_qps;
1599 dd->verbs_dev.rdi.driver_f.notify_qp_reset = qib_notify_qp_reset;
1600 dd->verbs_dev.rdi.driver_f.do_send = qib_do_send;
1601 dd->verbs_dev.rdi.driver_f.schedule_send = qib_schedule_send;
1602 dd->verbs_dev.rdi.driver_f.quiesce_qp = qib_quiesce_qp;
1603 dd->verbs_dev.rdi.driver_f.stop_send_queue = qib_stop_send_queue;
1604 dd->verbs_dev.rdi.driver_f.flush_qp_waiters = qib_flush_qp_waiters;
1605 dd->verbs_dev.rdi.driver_f.notify_error_qp = qib_notify_error_qp;
1606 dd->verbs_dev.rdi.driver_f.notify_restart_rc = qib_restart_rc;
1607 dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = qib_mtu_to_path_mtu;
1608 dd->verbs_dev.rdi.driver_f.mtu_from_qp = qib_mtu_from_qp;
1609 dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = qib_get_pmtu_from_attr;
1610 dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _qib_schedule_send;
1611 dd->verbs_dev.rdi.driver_f.query_port_state = qib_query_port;
1612 dd->verbs_dev.rdi.driver_f.shut_down_port = qib_shut_down_port;
1613 dd->verbs_dev.rdi.driver_f.cap_mask_chg = qib_cap_mask_chg;
1614 dd->verbs_dev.rdi.driver_f.notify_create_mad_agent =
1615 qib_notify_create_mad_agent;
1616 dd->verbs_dev.rdi.driver_f.notify_free_mad_agent =
1617 qib_notify_free_mad_agent;
1619 dd->verbs_dev.rdi.dparms.max_rdma_atomic = QIB_MAX_RDMA_ATOMIC;
1620 dd->verbs_dev.rdi.driver_f.get_guid_be = qib_get_guid_be;
1621 dd->verbs_dev.rdi.dparms.lkey_table_size = qib_lkey_table_size;
1622 dd->verbs_dev.rdi.dparms.qp_table_size = ib_qib_qp_table_size;
1623 dd->verbs_dev.rdi.dparms.qpn_start = 1;
1624 dd->verbs_dev.rdi.dparms.qpn_res_start = QIB_KD_QP;
1625 dd->verbs_dev.rdi.dparms.qpn_res_end = QIB_KD_QP; /* Reserve one QP */
1626 dd->verbs_dev.rdi.dparms.qpn_inc = 1;
1627 dd->verbs_dev.rdi.dparms.qos_shift = 1;
1628 dd->verbs_dev.rdi.dparms.psn_mask = QIB_PSN_MASK;
1629 dd->verbs_dev.rdi.dparms.psn_shift = QIB_PSN_SHIFT;
1630 dd->verbs_dev.rdi.dparms.psn_modify_mask = QIB_PSN_MASK;
1631 dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
1632 dd->verbs_dev.rdi.dparms.npkeys = qib_get_npkeys(dd);
1633 dd->verbs_dev.rdi.dparms.node = dd->assigned_node_id;
1634 dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_IBA_IB;
1635 dd->verbs_dev.rdi.dparms.max_mad_size = IB_MGMT_MAD_SIZE;
1637 snprintf(dd->verbs_dev.rdi.dparms.cq_name,
1638 sizeof(dd->verbs_dev.rdi.dparms.cq_name),
1639 "qib_cq%d", dd->unit);
1641 qib_fill_device_attr(dd);
1644 for (i = 0; i < dd->num_pports; i++, ppd++) {
1645 ctxt = ppd->hw_pidx;
1646 rvt_init_port(&dd->verbs_dev.rdi,
1647 &ppd->ibport_data.rvp,
1649 dd->rcd[ctxt]->pkeys);
1652 ret = rvt_register_device(&dd->verbs_dev.rdi);
1656 ret = qib_verbs_register_sysfs(dd);
1663 rvt_unregister_device(&dd->verbs_dev.rdi);
1665 while (!list_empty(&dev->txreq_free)) {
1666 struct list_head *l = dev->txreq_free.next;
1667 struct qib_verbs_txreq *tx;
1670 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
1673 if (ppd->sdma_descq_cnt)
1674 dma_free_coherent(&dd->pcidev->dev,
1675 ppd->sdma_descq_cnt *
1676 sizeof(struct qib_pio_header),
1677 dev->pio_hdrs, dev->pio_hdrs_phys);
1679 qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
1683 void qib_unregister_ib_device(struct qib_devdata *dd)
1685 struct qib_ibdev *dev = &dd->verbs_dev;
1687 qib_verbs_unregister_sysfs(dd);
1689 rvt_unregister_device(&dd->verbs_dev.rdi);
1691 if (!list_empty(&dev->piowait))
1692 qib_dev_err(dd, "piowait list not empty!\n");
1693 if (!list_empty(&dev->dmawait))
1694 qib_dev_err(dd, "dmawait list not empty!\n");
1695 if (!list_empty(&dev->txwait))
1696 qib_dev_err(dd, "txwait list not empty!\n");
1697 if (!list_empty(&dev->memwait))
1698 qib_dev_err(dd, "memwait list not empty!\n");
1700 del_timer_sync(&dev->mem_timer);
1701 while (!list_empty(&dev->txreq_free)) {
1702 struct list_head *l = dev->txreq_free.next;
1703 struct qib_verbs_txreq *tx;
1706 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
1709 if (dd->pport->sdma_descq_cnt)
1710 dma_free_coherent(&dd->pcidev->dev,
1711 dd->pport->sdma_descq_cnt *
1712 sizeof(struct qib_pio_header),
1713 dev->pio_hdrs, dev->pio_hdrs_phys);
1717 * _qib_schedule_send - schedule progress
1720 * This schedules progress w/o regard to the s_flags.
1722 * It is only used in post send, which doesn't hold
1723 * the s_lock.
1724 */
1725 void _qib_schedule_send(struct rvt_qp *qp)
1727 struct qib_ibport *ibp =
1728 to_iport(qp->ibqp.device, qp->port_num);
1729 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1730 struct qib_qp_priv *priv = qp->priv;
1732 queue_work(ppd->qib_wq, &priv->s_work);
1736 * qib_schedule_send - schedule progress
1739 * This schedules qp progress. The s_lock
1740 * should be held.
1741 */
1742 void qib_schedule_send(struct rvt_qp *qp)
1744 if (qib_send_ok(qp))
1745 _qib_schedule_send(qp);