/*
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_cm.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>

#include "ipoib.h"
int ipoib_max_conn_qp = 128;

module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
MODULE_PARM_DESC(max_nonsrq_conn_qp,
"Max number of connected-mode QPs per interface "
"(applied only if shared receive queue is not available)");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
MODULE_PARM_DESC(cm_data_debug_level,
"Enable data path debug tracing for connected mode if > 0");

#define IPOIB_CM_IETF_ID 0x1000000000000000ULL

#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
#define IPOIB_CM_RX_TIMEOUT (2 * 256 * HZ)
#define IPOIB_CM_RX_DELAY (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)
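
/*
 * Stale-connection bookkeeping: only completions whose wr_id has the
 * IPOIB_CM_RX_UPDATE_MASK bits clear (roughly one in four) refresh a
 * passive connection's jiffies stamp, and only once per
 * IPOIB_CM_RX_UPDATE_TIME.  The stale task is scheduled every
 * IPOIB_CM_RX_DELAY and retires connections idle for longer than
 * IPOIB_CM_RX_TIMEOUT.
 */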
#define IPOIB_CM_RX_RESERVE (ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN)

static struct ib_qp_attr ipoib_cm_err_attr = {
.qp_state = IB_QPS_ERR

#define IPOIB_CM_RX_DRAIN_WRID 0xffffffff

static struct ib_send_wr ipoib_cm_rx_drain_wr = {

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
struct ib_cm_event *event);

static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
u64 mapping[IPOIB_CM_RX_SG])
ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
for (i = 0; i < frags; ++i)
ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);

static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_recv_wr *bad_wr;
priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
for (i = 0; i < priv->cm.num_frags; ++i)
priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];
ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
priv->cm.srq_ring[id].mapping);
dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
priv->cm.srq_ring[id].skb = NULL;

static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
struct ipoib_cm_rx *rx,
struct ib_recv_wr *wr,
struct ib_sge *sge, int id)
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_recv_wr *bad_wr;
wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
for (i = 0; i < IPOIB_CM_RX_SG; ++i)
sge[i].addr = rx->rx_ring[id].mapping[i];
ret = ib_post_recv(rx->qp, wr, &bad_wr);
ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
rx->rx_ring[id].mapping);
dev_kfree_skb_any(rx->rx_ring[id].skb);
rx->rx_ring[id].skb = NULL;

static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
struct ipoib_cm_rx_buf *rx_ring,
u64 mapping[IPOIB_CM_RX_SG],
struct ipoib_dev_priv *priv = netdev_priv(dev);
skb = dev_alloc_skb(ALIGN(IPOIB_CM_HEAD_SIZE + IPOIB_PSEUDO_LEN, 16));
* IPoIB adds an IPOIB_ENCAP_LEN byte header; this will align the
* IP header to a multiple of 16.
skb_reserve(skb, IPOIB_CM_RX_RESERVE);
mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
dev_kfree_skb_any(skb);
for (i = 0; i < frags; i++) {
struct page *page = alloc_page(gfp);
skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
mapping[i + 1] = ib_dma_map_page(priv->ca, page,
0, PAGE_SIZE, DMA_FROM_DEVICE);
if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
rx_ring[id].skb = skb;
ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
ib_dma_unmap_page(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);

static void ipoib_cm_free_rx_ring(struct net_device *dev,
struct ipoib_cm_rx_buf *rx_ring)
struct ipoib_dev_priv *priv = netdev_priv(dev);
for (i = 0; i < ipoib_recvq_size; ++i)
if (rx_ring[i].skb) {
ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
dev_kfree_skb_any(rx_ring[i].skb);
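
/*
 * RX drain: QPs that received IB_EVENT_QP_LAST_WQE_REACHED sit on
 * rx_flush_list in the error state.  A single marker send WR
 * (IPOIB_CM_RX_DRAIN_WRID) is posted to the first of them; because the
 * QP is in error it completes with a flush error on priv->recv_cq, and
 * ipoib_cm_handle_rx_wc() treats that completion as the signal that
 * everything on rx_drain_list can be moved to rx_reap_list before the
 * next drain round is started.  Only one drain WR is ever outstanding,
 * matching the single extra CQ slot reserved for it.
 */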
static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
struct ib_send_wr *bad_wr;
struct ipoib_cm_rx *p;
/* We only reserved 1 extra slot in CQ for drain WRs, so
* make sure we have at most 1 outstanding WR. */
if (list_empty(&priv->cm.rx_flush_list) ||
!list_empty(&priv->cm.rx_drain_list))
* QPs on the flush list are in the error state. This way, a "flush
* error" WC will be immediately generated for each WR we post.
p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
ipoib_cm_rx_drain_wr.wr_id = IPOIB_CM_RX_DRAIN_WRID;
if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
ipoib_warn(priv, "failed to post drain wr\n");
list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);

static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
struct ipoib_cm_rx *p = ctx;
struct ipoib_dev_priv *priv = netdev_priv(p->dev);
if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
spin_lock_irqsave(&priv->lock, flags);
list_move(&p->list, &priv->cm.rx_flush_list);
p->state = IPOIB_CM_RX_FLUSH;
ipoib_cm_start_rx_drain(priv);
spin_unlock_irqrestore(&priv->lock, flags);

static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
struct ipoib_cm_rx *p)
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_qp_init_attr attr = {
.event_handler = ipoib_cm_rx_event_handler,
.send_cq = priv->recv_cq, /* For drain WR */
.recv_cq = priv->recv_cq,
.cap.max_send_wr = 1, /* For drain WR */
.cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
.sq_sig_type = IB_SIGNAL_ALL_WR,
.qp_type = IB_QPT_RC,
if (!ipoib_cm_has_srq(dev)) {
attr.cap.max_recv_wr = ipoib_recvq_size;
attr.cap.max_recv_sge = IPOIB_CM_RX_SG;
return ib_create_qp(priv->pd, &attr);

static int ipoib_cm_modify_rx_qp(struct net_device *dev,
struct ib_cm_id *cm_id, struct ib_qp *qp,
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_qp_attr qp_attr;
int qp_attr_mask, ret;
qp_attr.qp_state = IB_QPS_INIT;
ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
qp_attr.qp_state = IB_QPS_RTR;
ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
qp_attr.rq_psn = psn;
ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
/*
 * Current Mellanox HCA firmware won't generate completions
 * with error for drain WRs unless the QP has been moved to
 * RTS first. This work-around leaves a window where a QP has
 * moved to error asynchronously, but this will eventually get
 * fixed in firmware, so let's not error out if modify QP
 * returns an error.
 */
qp_attr.qp_state = IB_QPS_RTS;
ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);

static void ipoib_cm_init_rx_wr(struct net_device *dev,
struct ib_recv_wr *wr,
struct ipoib_dev_priv *priv = netdev_priv(dev);
for (i = 0; i < priv->cm.num_frags; ++i)
sge[i].lkey = priv->pd->local_dma_lkey;
sge[0].length = IPOIB_CM_HEAD_SIZE;
for (i = 1; i < priv->cm.num_frags; ++i)
sge[i].length = PAGE_SIZE;
wr->num_sge = priv->cm.num_frags;

static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,
struct ipoib_cm_rx *rx)
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_recv_wr wr;
struct ib_sge sge[IPOIB_CM_RX_SG];
rx->rx_ring = vzalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
priv->ca->name, ipoib_recvq_size);
t = kmalloc(sizeof *t, GFP_KERNEL);
ipoib_cm_init_rx_wr(dev, &t->wr, t->sge);
spin_lock_irq(&priv->lock);
if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
spin_unlock_irq(&priv->lock);
ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);
++priv->cm.nonsrq_conn_qp;
spin_unlock_irq(&priv->lock);
for (i = 0; i < ipoib_recvq_size; ++i) {
if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
rx->rx_ring[i].mapping,
ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i);
ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
"failed for buf %d\n", i);
rx->recv_count = ipoib_recvq_size;
spin_lock_irq(&priv->lock);
--priv->cm.nonsrq_conn_qp;
spin_unlock_irq(&priv->lock);
ipoib_cm_free_rx_ring(dev, rx->rx_ring);

static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
struct ib_qp *qp, struct ib_cm_req_event_param *req,
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_cm_data data = {};
struct ib_cm_rep_param rep = {};
data.qpn = cpu_to_be32(priv->qp->qp_num);
data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);
rep.private_data = &data;
rep.private_data_len = sizeof data;
rep.flow_control = 0;
rep.rnr_retry_count = req->rnr_retry_count;
rep.srq = ipoib_cm_has_srq(dev);
rep.qp_num = qp->qp_num;
rep.starting_psn = psn;
return ib_send_cm_rep(cm_id, &rep);
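
/*
 * Passive connection setup: a peer's REQ creates an ipoib_cm_rx with its
 * own RC QP, moves the QP through INIT -> RTR -> RTS, allocates a
 * per-connection receive ring when no SRQ is available, and answers with
 * a REP carrying our datagram QPN and the CM buffer size used as the
 * connection MTU.
 */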
static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
struct net_device *dev = cm_id->context;
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_cm_rx *p;
ipoib_dbg(priv, "REQ arrived\n");
p = kzalloc(sizeof *p, GFP_KERNEL);
p->state = IPOIB_CM_RX_LIVE;
p->jiffies = jiffies;
INIT_LIST_HEAD(&p->list);
p->qp = ipoib_cm_create_rx_qp(dev, p);
ret = PTR_ERR(p->qp);
psn = prandom_u32() & 0xffffff;
ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
if (!ipoib_cm_has_srq(dev)) {
ret = ipoib_cm_nonsrq_init_rx(dev, cm_id, p);
spin_lock_irq(&priv->lock);
queue_delayed_work(priv->wq,
&priv->cm.stale_task, IPOIB_CM_RX_DELAY);
/* Add this entry to passive ids list head, but do not re-add it
* if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
p->jiffies = jiffies;
if (p->state == IPOIB_CM_RX_LIVE)
list_move(&p->list, &priv->cm.passive_ids);
spin_unlock_irq(&priv->lock);
ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
ipoib_warn(priv, "failed to send REP: %d\n", ret);
if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
ipoib_warn(priv, "unable to move qp to error state\n");
ib_destroy_qp(p->qp);

static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
struct ib_cm_event *event)
struct ipoib_cm_rx *p;
struct ipoib_dev_priv *priv;
switch (event->event) {
case IB_CM_REQ_RECEIVED:
return ipoib_cm_req_handler(cm_id, event);
case IB_CM_DREQ_RECEIVED:
ib_send_cm_drep(cm_id, NULL, 0);
case IB_CM_REJ_RECEIVED:
priv = netdev_priv(p->dev);
if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
ipoib_warn(priv, "unable to move qp to error state\n");

/* Adjust length of skb with fragments to match received data */
static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
unsigned int length, struct sk_buff *toskb)
/* put header into skb */
size = min(length, hdr_space);
num_frags = skb_shinfo(skb)->nr_frags;
for (i = 0; i < num_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
/* don't need this page */
skb_fill_page_desc(toskb, i, skb_frag_page(frag),
--skb_shinfo(skb)->nr_frags;
size = min(length, (unsigned) PAGE_SIZE);
skb_frag_size_set(frag, size);
skb->data_len += size;
skb->truesize += size;
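
/*
 * Receive completion path: payloads shorter than IPOIB_CM_COPYBREAK are
 * copied into a freshly allocated skb so the posted buffer can simply be
 * reposted; larger packets swap in a newly allocated head+frags buffer
 * (reusing the old one and dropping the packet if allocation fails) and
 * the filled skb is trimmed with skb_put_frags() before going up the
 * stack.
 */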
void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_cm_rx_buf *rx_ring;
unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
struct sk_buff *skb, *newskb;
struct ipoib_cm_rx *p;
u64 mapping[IPOIB_CM_RX_SG];
struct sk_buff *small_skb;
ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
if (unlikely(wr_id >= ipoib_recvq_size)) {
if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {
spin_lock_irqsave(&priv->lock, flags);
list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
ipoib_cm_start_rx_drain(priv);
queue_work(priv->wq, &priv->cm.rx_reap_task);
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
wr_id, ipoib_recvq_size);
p = wc->qp->qp_context;
has_srq = ipoib_cm_has_srq(dev);
rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;
skb = rx_ring[wr_id].skb;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
ipoib_dbg(priv, "cm recv error "
"(status=%d, wrid=%d vend_err %x)\n",
wc->status, wr_id, wc->vendor_err);
++dev->stats.rx_dropped;
if (!--p->recv_count) {
spin_lock_irqsave(&priv->lock, flags);
list_move(&p->list, &priv->cm.rx_reap_list);
spin_unlock_irqrestore(&priv->lock, flags);
queue_work(priv->wq, &priv->cm.rx_reap_task);
if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
spin_lock_irqsave(&priv->lock, flags);
p->jiffies = jiffies;
/* Move this entry to list head, but do not re-add it
* if it has been moved out of list. */
if (p->state == IPOIB_CM_RX_LIVE)
list_move(&p->list, &priv->cm.passive_ids);
spin_unlock_irqrestore(&priv->lock, flags);
if (wc->byte_len < IPOIB_CM_COPYBREAK) {
int dlen = wc->byte_len;
small_skb = dev_alloc_skb(dlen + IPOIB_CM_RX_RESERVE);
skb_reserve(small_skb, IPOIB_CM_RX_RESERVE);
ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
dlen, DMA_FROM_DEVICE);
skb_copy_from_linear_data(skb, small_skb->data, dlen);
ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0],
dlen, DMA_FROM_DEVICE);
skb_put(small_skb, dlen);
frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
(unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;
newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags,
mapping, GFP_ATOMIC);
if (unlikely(!newskb)) {
* If we can't allocate a new RX buffer, dump
* this packet and reuse the old buffer.
ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
++dev->stats.rx_dropped;
ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);
ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
wc->byte_len, wc->slid);
skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
skb_add_pseudo_hdr(skb);
++dev->stats.rx_packets;
dev->stats.rx_bytes += skb->len;
/* XXX get correct PACKET_ type here */
skb->pkt_type = PACKET_HOST;
netif_receive_skb(skb);
if (unlikely(ipoib_cm_post_receive_srq(dev, wr_id)))
ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
"for buf %d\n", wr_id);
if (unlikely(ipoib_cm_post_receive_nonsrq(dev, p,
ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
"for buf %d\n", wr_id);

static inline int post_send(struct ipoib_dev_priv *priv,
struct ipoib_cm_tx *tx,
struct ipoib_tx_buf *tx_req)
struct ib_send_wr *bad_wr;
ipoib_build_sge(priv, tx_req);
priv->tx_wr.wr.wr_id = wr_id | IPOIB_OP_CM;
return ib_post_send(tx->qp, &priv->tx_wr.wr, &bad_wr);
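
/*
 * Transmit path: the skb is recorded in tx_ring before post_send() so a
 * completion that races with us always finds consistent state; once
 * tx_outstanding reaches the ring size the net queue is stopped and the
 * send CQ is re-armed so ipoib_cm_handle_tx_wc() can wake the queue.
 */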
void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_tx_buf *tx_req;
unsigned usable_sge = tx->max_send_sge - !!skb_headlen(skb);
if (unlikely(skb->len > tx->mtu)) {
ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
++dev->stats.tx_dropped;
++dev->stats.tx_errors;
ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
if (skb_shinfo(skb)->nr_frags > usable_sge) {
if (skb_linearize(skb) < 0) {
ipoib_warn(priv, "skb could not be linearized\n");
++dev->stats.tx_dropped;
++dev->stats.tx_errors;
dev_kfree_skb_any(skb);
/* Does skb_linearize return ok without reducing nr_frags? */
if (skb_shinfo(skb)->nr_frags > usable_sge) {
ipoib_warn(priv, "too many frags after skb linearize\n");
++dev->stats.tx_dropped;
++dev->stats.tx_errors;
dev_kfree_skb_any(skb);
ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
tx->tx_head, skb->len, tx->qp->qp_num);
* We put the skb into the tx_ring _before_ we call post_send()
* because it's entirely possible that the completion handler will
* run before we execute anything after the post_send(). That
* means we have to make sure everything is properly recorded and
* our state is consistent before we call post_send().
tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
++dev->stats.tx_errors;
dev_kfree_skb_any(skb);
rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);
ipoib_warn(priv, "post_send failed, error %d\n", rc);
++dev->stats.tx_errors;
ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(skb);
netif_trans_update(dev);
if (++priv->tx_outstanding == ipoib_sendq_size) {
ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
netif_stop_queue(dev);
rc = ib_req_notify_cq(priv->send_cq,
IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
ipoib_warn(priv, "request notify on send CQ failed\n");
ipoib_send_comp_handler(priv->send_cq, dev);
void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_cm_tx *tx = wc->qp->qp_context;
unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
struct ipoib_tx_buf *tx_req;
ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
if (unlikely(wr_id >= ipoib_sendq_size)) {
ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
wr_id, ipoib_sendq_size);
tx_req = &tx->tx_ring[wr_id];
ipoib_dma_unmap_tx(priv, tx_req);
/* FIXME: is this right? Shouldn't we only increment on success? */
++dev->stats.tx_packets;
dev->stats.tx_bytes += tx_req->skb->len;
dev_kfree_skb_any(tx_req->skb);
if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
netif_queue_stopped(dev) &&
test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
netif_wake_queue(dev);
if (wc->status != IB_WC_SUCCESS &&
wc->status != IB_WC_WR_FLUSH_ERR) {
struct ipoib_neigh *neigh;
ipoib_dbg(priv, "failed cm send event "
"(status=%d, wrid=%d vend_err %x)\n",
wc->status, wr_id, wc->vendor_err);
spin_lock_irqsave(&priv->lock, flags);
ipoib_neigh_free(neigh);
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
list_move(&tx->list, &priv->cm.reap_list);
queue_work(priv->wq, &priv->cm.reap_task);
clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock(dev);

int ipoib_cm_dev_open(struct net_device *dev)
struct ipoib_dev_priv *priv = netdev_priv(dev);
if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
if (IS_ERR(priv->cm.id)) {
printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
ret = PTR_ERR(priv->cm.id);
ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
IPOIB_CM_IETF_ID | priv->qp->qp_num);
ib_destroy_cm_id(priv->cm.id);

static void ipoib_cm_free_rx_reap_list(struct net_device *dev)
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_cm_rx *rx, *n;
spin_lock_irq(&priv->lock);
list_splice_init(&priv->cm.rx_reap_list, &list);
spin_unlock_irq(&priv->lock);
list_for_each_entry_safe(rx, n, &list, list) {
ib_destroy_cm_id(rx->id);
ib_destroy_qp(rx->qp);
if (!ipoib_cm_has_srq(dev)) {
ipoib_cm_free_rx_ring(priv->dev, rx->rx_ring);
spin_lock_irq(&priv->lock);
--priv->cm.nonsrq_conn_qp;
spin_unlock_irq(&priv->lock);
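
/*
 * Teardown: destroy the listening CM ID, force every passive QP into the
 * error state, then wait up to five seconds for the error/flush/drain
 * lists to empty before reaping; on timeout the lists are reaped anyway
 * on the assumption that the hardware is wedged.
 */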
void ipoib_cm_dev_stop(struct net_device *dev)
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_cm_rx *p;
if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)
ib_destroy_cm_id(priv->cm.id);
spin_lock_irq(&priv->lock);
while (!list_empty(&priv->cm.passive_ids)) {
p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
list_move(&p->list, &priv->cm.rx_error_list);
p->state = IPOIB_CM_RX_ERROR;
spin_unlock_irq(&priv->lock);
ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
spin_lock_irq(&priv->lock);
/* Wait for all RX to be drained */
while (!list_empty(&priv->cm.rx_error_list) ||
!list_empty(&priv->cm.rx_flush_list) ||
!list_empty(&priv->cm.rx_drain_list)) {
if (time_after(jiffies, begin + 5 * HZ)) {
ipoib_warn(priv, "RX drain timing out\n");
* assume the HW is wedged and just free up everything.
list_splice_init(&priv->cm.rx_flush_list,
&priv->cm.rx_reap_list);
list_splice_init(&priv->cm.rx_error_list,
&priv->cm.rx_reap_list);
list_splice_init(&priv->cm.rx_drain_list,
&priv->cm.rx_reap_list);
spin_unlock_irq(&priv->lock);
spin_lock_irq(&priv->lock);
spin_unlock_irq(&priv->lock);
ipoib_cm_free_rx_reap_list(dev);
cancel_delayed_work(&priv->cm.stale_task);

static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
struct ipoib_cm_tx *p = cm_id->context;
struct ipoib_dev_priv *priv = netdev_priv(p->dev);
struct ipoib_cm_data *data = event->private_data;
struct sk_buff_head skqueue;
struct ib_qp_attr qp_attr;
int qp_attr_mask, ret;
p->mtu = be32_to_cpu(data->mtu);
if (p->mtu <= IPOIB_ENCAP_LEN) {
ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
p->mtu, IPOIB_ENCAP_LEN);
qp_attr.qp_state = IB_QPS_RTR;
ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
qp_attr.rq_psn = 0 /* FIXME */;
ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
qp_attr.qp_state = IB_QPS_RTS;
ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
skb_queue_head_init(&skqueue);
netif_tx_lock_bh(p->dev);
spin_lock_irq(&priv->lock);
set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
while ((skb = __skb_dequeue(&p->neigh->queue)))
__skb_queue_tail(&skqueue, skb);
spin_unlock_irq(&priv->lock);
netif_tx_unlock_bh(p->dev);
while ((skb = __skb_dequeue(&skqueue))) {
if (dev_queue_xmit(skb))
ipoib_warn(priv, "dev_queue_xmit failed "
"to requeue packet\n");
ret = ib_send_cm_rtu(cm_id, NULL, 0);
ipoib_warn(priv, "failed to send RTU: %d\n", ret);
static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_cm_tx *tx)
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_qp_init_attr attr = {
.send_cq = priv->recv_cq,
.recv_cq = priv->recv_cq,
.srq = priv->cm.srq,
.cap.max_send_wr = ipoib_sendq_size,
.cap.max_send_sge = 1,
.sq_sig_type = IB_SIGNAL_ALL_WR,
.qp_type = IB_QPT_RC,
.create_flags = IB_QP_CREATE_USE_GFP_NOIO
struct ib_qp *tx_qp;
if (dev->features & NETIF_F_SG)
attr.cap.max_send_sge =
min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);
tx_qp = ib_create_qp(priv->pd, &attr);
if (PTR_ERR(tx_qp) == -EINVAL) {
attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
tx_qp = ib_create_qp(priv->pd, &attr);
tx->max_send_sge = attr.cap.max_send_sge;

static int ipoib_cm_send_req(struct net_device *dev,
struct ib_cm_id *id, struct ib_qp *qp,
struct ib_sa_path_rec *pathrec)
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_cm_data data = {};
struct ib_cm_req_param req = {};
data.qpn = cpu_to_be32(priv->qp->qp_num);
data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);
req.primary_path = pathrec;
req.alternate_path = NULL;
req.service_id = cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
req.qp_num = qp->qp_num;
req.qp_type = qp->qp_type;
req.private_data = &data;
req.private_data_len = sizeof data;
req.flow_control = 0;
req.starting_psn = 0; /* FIXME */
* Pick some arbitrary defaults here; we could make these
* module parameters if anyone cared about setting them.
req.responder_resources = 4;
req.remote_cm_response_timeout = 20;
req.local_cm_response_timeout = 20;
req.retry_count = 0; /* RFC draft warns against retries */
req.rnr_retry_count = 0; /* RFC draft warns against retries */
req.max_cm_retries = 15;
req.srq = ipoib_cm_has_srq(dev);
return ib_send_cm_req(id, &req);

static int ipoib_cm_modify_tx_init(struct net_device *dev,
struct ib_cm_id *cm_id, struct ib_qp *qp)
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_qp_attr qp_attr;
int qp_attr_mask, ret;
ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
qp_attr.qp_state = IB_QPS_INIT;
qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
qp_attr.port_num = priv->port;
qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;
ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
struct ib_sa_path_rec *pathrec)
struct ipoib_dev_priv *priv = netdev_priv(p->dev);
p->tx_ring = __vmalloc(ipoib_sendq_size * sizeof *p->tx_ring,
GFP_NOIO, PAGE_KERNEL);
ipoib_warn(priv, "failed to allocate tx ring\n");
memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);
p->qp = ipoib_cm_create_tx_qp(p->dev, p);
if (IS_ERR(p->qp)) {
ret = PTR_ERR(p->qp);
ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
if (IS_ERR(p->id)) {
ret = PTR_ERR(p->id);
ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp);
ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret);
ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
ipoib_warn(priv, "failed to send cm req: %d\n", ret);
ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n",
p->qp->qp_num, pathrec->dgid.raw, qpn);
ib_destroy_cm_id(p->id);
ib_destroy_qp(p->qp);

static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
struct ipoib_dev_priv *priv = netdev_priv(p->dev);
struct ipoib_tx_buf *tx_req;
unsigned long begin;
ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);
ib_destroy_cm_id(p->id);
/* Wait for all sends to complete */
while ((int) p->tx_tail - (int) p->tx_head < 0) {
if (time_after(jiffies, begin + 5 * HZ)) {
ipoib_warn(priv, "timing out; %d sends not completed\n",
p->tx_head - p->tx_tail);
while ((int) p->tx_tail - (int) p->tx_head < 0) {
tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(tx_req->skb);
netif_tx_lock_bh(p->dev);
if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
netif_queue_stopped(p->dev) &&
test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
netif_wake_queue(p->dev);
netif_tx_unlock_bh(p->dev);
ib_destroy_qp(p->qp);

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
struct ib_cm_event *event)
struct ipoib_cm_tx *tx = cm_id->context;
struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
struct net_device *dev = priv->dev;
struct ipoib_neigh *neigh;
unsigned long flags;
switch (event->event) {
case IB_CM_DREQ_RECEIVED:
ipoib_dbg(priv, "DREQ received.\n");
ib_send_cm_drep(cm_id, NULL, 0);
case IB_CM_REP_RECEIVED:
ipoib_dbg(priv, "REP received.\n");
ret = ipoib_cm_rep_handler(cm_id, event);
ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
case IB_CM_REQ_ERROR:
case IB_CM_REJ_RECEIVED:
case IB_CM_TIMEWAIT_EXIT:
ipoib_dbg(priv, "CM error %d.\n", event->event);
netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
ipoib_neigh_free(neigh);
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
list_move(&tx->list, &priv->cm.reap_list);
queue_work(priv->wq, &priv->cm.reap_task);
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
struct ipoib_neigh *neigh)
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_cm_tx *tx;
tx = kzalloc(sizeof *tx, GFP_ATOMIC);
list_add(&tx->list, &priv->cm.start_list);
set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
queue_work(priv->wq, &priv->cm.start_task);

void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
unsigned long flags;
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
spin_lock_irqsave(&priv->lock, flags);
list_move(&tx->list, &priv->cm.reap_list);
queue_work(priv->wq, &priv->cm.reap_task);
ipoib_dbg(priv, "Reap connection for gid %pI6\n",
tx->neigh->daddr + 4);
spin_unlock_irqrestore(&priv->lock, flags);

#define QPN_AND_OPTIONS_OFFSET 4

static void ipoib_cm_tx_start(struct work_struct *work)
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
struct net_device *dev = priv->dev;
struct ipoib_neigh *neigh;
struct ipoib_cm_tx *p;
unsigned long flags;
struct ipoib_path *path;
struct ib_sa_path_rec pathrec;
netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
while (!list_empty(&priv->cm.start_list)) {
p = list_entry(priv->cm.start_list.next, typeof(*p), list);
list_del_init(&p->list);
qpn = IPOIB_QPN(neigh->daddr);
* As long as the search is with these 2 locks,
* path existence indicates its validity.
path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET);
pr_info("%s ignore not valid path %pI6\n",
neigh->daddr + QPN_AND_OPTIONS_OFFSET);
memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
ret = ipoib_cm_tx_init(p, qpn, &pathrec);
netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
ipoib_neigh_free(neigh);
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
static void ipoib_cm_tx_reap(struct work_struct *work)
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
struct net_device *dev = priv->dev;
struct ipoib_cm_tx *p;
unsigned long flags;
netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
while (!list_empty(&priv->cm.reap_list)) {
p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
list_del_init(&p->list);
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
ipoib_cm_tx_destroy(p);
netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);

static void ipoib_cm_skb_reap(struct work_struct *work)
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
struct net_device *dev = priv->dev;
struct sk_buff *skb;
unsigned long flags;
unsigned mtu = priv->mcast_mtu;
netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
if (skb->protocol == htons(ETH_P_IP)) {
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if IS_ENABLED(CONFIG_IPV6)
else if (skb->protocol == htons(ETH_P_IPV6)) {
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
dev_kfree_skb_any(skb);
netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);

void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
struct ipoib_dev_priv *priv = netdev_priv(dev);
int e = skb_queue_empty(&priv->cm.skb_queue);
skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
skb_queue_tail(&priv->cm.skb_queue, skb);
queue_work(priv->wq, &priv->cm.skb_task);

static void ipoib_cm_rx_reap(struct work_struct *work)
ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
cm.rx_reap_task)->dev);
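
/*
 * passive_ids is kept in LRU order (fresh entries move to the head), so
 * the stale task scans from the tail and stops at the first entry used
 * within IPOIB_CM_RX_TIMEOUT; older entries are pushed onto the error
 * list for the drain/reap machinery above.
 */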
static void ipoib_cm_stale_task(struct work_struct *work)
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
cm.stale_task.work);
struct ipoib_cm_rx *p;
spin_lock_irq(&priv->lock);
while (!list_empty(&priv->cm.passive_ids)) {
/* List is sorted by LRU, start from tail,
* stop when we see a recently used entry */
p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
list_move(&p->list, &priv->cm.rx_error_list);
p->state = IPOIB_CM_RX_ERROR;
spin_unlock_irq(&priv->lock);
ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
spin_lock_irq(&priv->lock);
if (!list_empty(&priv->cm.passive_ids))
queue_delayed_work(priv->wq,
&priv->cm.stale_task, IPOIB_CM_RX_DELAY);
spin_unlock_irq(&priv->lock);

static ssize_t show_mode(struct device *d, struct device_attribute *attr,
struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(d));
if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
return sprintf(buf, "connected\n");
return sprintf(buf, "datagram\n");

static ssize_t set_mode(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
struct net_device *dev = to_net_dev(d);
struct ipoib_dev_priv *priv = netdev_priv(dev);
if (test_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags))
if (!rtnl_trylock())
return restart_syscall();
ret = ipoib_set_mode(dev, buf);
/* ipoib_set_mode() is expected to return with the rtnl lock still held;
 * only if it returned -EBUSY has the lock already been dropped, in which
 * case there is no need to rtnl_unlock().
 */
return (!ret || ret == -EBUSY) ? count : ret;

static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);

int ipoib_cm_add_mode_attr(struct net_device *dev)
return device_create_file(&dev->dev, &dev_attr_mode);

static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_srq_init_attr srq_init_attr = {
.srq_type = IB_SRQT_BASIC,
.max_wr = ipoib_recvq_size,
priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
if (IS_ERR(priv->cm.srq)) {
if (PTR_ERR(priv->cm.srq) != -ENOSYS)
printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
priv->ca->name, PTR_ERR(priv->cm.srq));
priv->cm.srq = NULL;
priv->cm.srq_ring = vzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
if (!priv->cm.srq_ring) {
printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
priv->ca->name, ipoib_recvq_size);
ib_destroy_srq(priv->cm.srq);
priv->cm.srq = NULL;
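
/*
 * Device init: try to create an SRQ sized to the receive queue.  With an
 * SRQ a single shared ring is allocated and pre-posted here; without one,
 * each connection gets its own ring in ipoib_cm_nonsrq_init_rx() and the
 * number of such QPs is capped by the max_nonsrq_conn_qp parameter.
 */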
int ipoib_cm_dev_init(struct net_device *dev)
struct ipoib_dev_priv *priv = netdev_priv(dev);
INIT_LIST_HEAD(&priv->cm.passive_ids);
INIT_LIST_HEAD(&priv->cm.reap_list);
INIT_LIST_HEAD(&priv->cm.start_list);
INIT_LIST_HEAD(&priv->cm.rx_error_list);
INIT_LIST_HEAD(&priv->cm.rx_flush_list);
INIT_LIST_HEAD(&priv->cm.rx_drain_list);
INIT_LIST_HEAD(&priv->cm.rx_reap_list);
INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap);
INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);
skb_queue_head_init(&priv->cm.skb_queue);
ipoib_dbg(priv, "max_srq_sge=%d\n", priv->ca->attrs.max_srq_sge);
max_srq_sge = min_t(int, IPOIB_CM_RX_SG, priv->ca->attrs.max_srq_sge);
ipoib_cm_create_srq(dev, max_srq_sge);
if (ipoib_cm_has_srq(dev)) {
priv->cm.max_cm_mtu = max_srq_sge * PAGE_SIZE - 0x10;
priv->cm.num_frags = max_srq_sge;
ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
priv->cm.max_cm_mtu, priv->cm.num_frags);
priv->cm.max_cm_mtu = IPOIB_CM_MTU;
priv->cm.num_frags = IPOIB_CM_RX_SG;
ipoib_cm_init_rx_wr(dev, &priv->cm.rx_wr, priv->cm.rx_sge);
if (ipoib_cm_has_srq(dev)) {
for (i = 0; i < ipoib_recvq_size; ++i) {
if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
priv->cm.num_frags - 1,
priv->cm.srq_ring[i].mapping,
ipoib_warn(priv, "failed to allocate "
"receive buffer %d\n", i);
ipoib_cm_dev_cleanup(dev);
if (ipoib_cm_post_receive_srq(dev, i)) {
ipoib_warn(priv, "ipoib_cm_post_receive_srq "
"failed for buf %d\n", i);
ipoib_cm_dev_cleanup(dev);
priv->dev->dev_addr[0] = IPOIB_FLAGS_RC;

void ipoib_cm_dev_cleanup(struct net_device *dev)
struct ipoib_dev_priv *priv = netdev_priv(dev);
ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");
ret = ib_destroy_srq(priv->cm.srq);
ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);
priv->cm.srq = NULL;
if (!priv->cm.srq_ring)
ipoib_cm_free_rx_ring(dev, priv->cm.srq_ring);
priv->cm.srq_ring = NULL;