2 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/module.h>
33 #include <linux/list.h>
34 #include <linux/slab.h>
35 #include <linux/workqueue.h>
36 #include <linux/skbuff.h>
37 #include <linux/timer.h>
38 #include <linux/notifier.h>
39 #include <linux/inetdevice.h>
41 #include <net/neighbour.h>
42 #include <net/netevent.h>
43 #include <net/route.h>
46 #include "cxgb3_offload.h"
48 #include "iwch_provider.h"
51 static char *states[] = {
68 module_param(peer2peer, int, 0644);
69 MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
71 static int ep_timeout_secs = 60;
72 module_param(ep_timeout_secs, int, 0644);
73 MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
74 "in seconds (default=60)");
76 static int mpa_rev = 1;
77 module_param(mpa_rev, int, 0644);
78 MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
79 "1 is spec compliant. (default=1)");
81 static int markers_enabled = 0;
82 module_param(markers_enabled, int, 0644);
83 MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default=0, disabled)");
85 static int crc_enabled = 1;
86 module_param(crc_enabled, int, 0644);
87 MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default=1, enabled)");
89 static int rcv_win = 256 * 1024;
90 module_param(rcv_win, int, 0644);
91 MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");
93 static int snd_win = 32 * 1024;
94 module_param(snd_win, int, 0644);
95 MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");
97 static unsigned int nocong = 0;
98 module_param(nocong, uint, 0644);
99 MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");
101 static unsigned int cong_flavor = 1;
102 module_param(cong_flavor, uint, 0644);
103 MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");
105 static struct workqueue_struct *workq;
107 static struct sk_buff_head rxq;
109 static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
110 static void ep_timeout(unsigned long arg);
111 static void connect_reply_upcall(struct iwch_ep *ep, int status);
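/*
 * Arm the per-endpoint watchdog: ep_timeout() fires after ep_timeout_secs
 * seconds if the current CM operation has not completed.
 */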
113 static void start_ep_timer(struct iwch_ep *ep)
115 pr_debug("%s ep %p\n", __func__, ep);
116 if (timer_pending(&ep->timer)) {
117 pr_debug("%s stopped / restarted timer ep %p\n", __func__, ep);
118 del_timer_sync(&ep->timer);
121 ep->timer.expires = jiffies + ep_timeout_secs * HZ;
122 ep->timer.data = (unsigned long)ep;
123 ep->timer.function = ep_timeout;
124 add_timer(&ep->timer);
127 static void stop_ep_timer(struct iwch_ep *ep)
129 pr_debug("%s ep %p\n", __func__, ep);
130 if (!timer_pending(&ep->timer)) {
131 WARN(1, "%s timer stopped when it's not running! ep %p state %u\n",
132 __func__, ep, ep->com.state);
135 del_timer_sync(&ep->timer);
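/*
 * Send helpers: check for a fatal rdev error before handing the skb to the
 * core driver, and fold positive return codes into 0 so callers only see
 * 0 or a negative errno.
 */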
139 static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e)
142 struct cxio_rdev *rdev;
144 rdev = (struct cxio_rdev *)tdev->ulp;
145 if (cxio_fatal_error(rdev)) {
149 error = l2t_send(tdev, skb, l2e);
152 return error < 0 ? error : 0;
155 int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
158 struct cxio_rdev *rdev;
160 rdev = (struct cxio_rdev *)tdev->ulp;
161 if (cxio_fatal_error(rdev)) {
165 error = cxgb3_ofld_send(tdev, skb);
168 return error < 0 ? error : 0;
171 static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
173 struct cpl_tid_release *req;
175 skb = get_skb(skb, sizeof *req, GFP_KERNEL);
178 req = skb_put(skb, sizeof(*req));
179 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
180 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
181 skb->priority = CPL_PRIORITY_SETUP;
182 iwch_cxgb3_ofld_send(tdev, skb);
186 int iwch_quiesce_tid(struct iwch_ep *ep)
188 struct cpl_set_tcb_field *req;
189 struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
193 req = skb_put(skb, sizeof(*req));
194 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
195 req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
196 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
199 req->word = htons(W_TCB_RX_QUIESCE);
200 req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
201 req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);
203 skb->priority = CPL_PRIORITY_DATA;
204 return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
207 int iwch_resume_tid(struct iwch_ep *ep)
209 struct cpl_set_tcb_field *req;
210 struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
214 req = skb_put(skb, sizeof(*req));
215 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
216 req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
217 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
220 req->word = htons(W_TCB_RX_QUIESCE);
221 req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
224 skb->priority = CPL_PRIORITY_DATA;
225 return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
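/*
 * Derive the effective MSS from the adapter MTU table entry selected by the
 * peer's TCP options, less 40 bytes of IP/TCP headers (and less the timestamp
 * option overhead when timestamps were negotiated).
 */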
228 static void set_emss(struct iwch_ep *ep, u16 opt)
230 pr_debug("%s ep %p opt %u\n", __func__, ep, opt);
231 ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;
232 if (G_TCPOPT_TSTAMP(opt))
236 pr_debug("emss=%d\n", ep->emss);
239 static enum iwch_ep_state state_read(struct iwch_ep_common *epc)
242 enum iwch_ep_state state;
244 spin_lock_irqsave(&epc->lock, flags);
246 spin_unlock_irqrestore(&epc->lock, flags);
250 static void __state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
255 static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
259 spin_lock_irqsave(&epc->lock, flags);
260 pr_debug("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
261 __state_set(epc, new);
262 spin_unlock_irqrestore(&epc->lock, flags);
266 static void *alloc_ep(int size, gfp_t gfp)
268 struct iwch_ep_common *epc;
270 epc = kzalloc(size, gfp);
272 kref_init(&epc->kref);
273 spin_lock_init(&epc->lock);
274 init_waitqueue_head(&epc->waitq);
276 pr_debug("%s alloc ep %p\n", __func__, epc);
280 void __free_ep(struct kref *kref)
283 ep = container_of(container_of(kref, struct iwch_ep_common, kref),
284 struct iwch_ep, com);
285 pr_debug("%s ep %p state %s\n",
286 __func__, ep, states[state_read(&ep->com)]);
287 if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
288 cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
289 dst_release(ep->dst);
290 l2t_release(ep->com.tdev, ep->l2t);
295 static void release_ep_resources(struct iwch_ep *ep)
297 pr_debug("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
298 set_bit(RELEASE_RESOURCES, &ep->com.flags);
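/*
 * Map a CPL error status from the adapter to the errno reported to the ULP.
 */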
302 static int status2errno(int status)
307 case CPL_ERR_CONN_RESET:
309 case CPL_ERR_ARP_MISS:
310 return -EHOSTUNREACH;
311 case CPL_ERR_CONN_TIMEDOUT:
313 case CPL_ERR_TCAM_FULL:
315 case CPL_ERR_CONN_EXIST:
323 * Try and reuse skbs already allocated...
325 static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
327 if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
331 skb = alloc_skb(len, gfp);
336 static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
337 __be32 peer_ip, __be16 local_port,
338 __be16 peer_port, u8 tos)
343 rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
344 peer_port, local_port, IPPROTO_TCP,
351 static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
355 while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
360 static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
362 pr_debug("%s t3cdev %p\n", __func__, dev);
367 * Handle an ARP failure for an active open.
369 static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
371 pr_err("ARP failure during connect\n");
376 * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
379 static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
381 struct cpl_abort_req *req = cplhdr(skb);
383 pr_debug("%s t3cdev %p\n", __func__, dev);
384 req->cmd = CPL_ABORT_NO_RST;
385 iwch_cxgb3_ofld_send(dev, skb);
388 static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
390 struct cpl_close_con_req *req;
393 pr_debug("%s ep %p\n", __func__, ep);
394 skb = get_skb(NULL, sizeof(*req), gfp);
396 pr_err("%s - failed to alloc skb\n", __func__);
399 skb->priority = CPL_PRIORITY_DATA;
400 set_arp_failure_handler(skb, arp_failure_discard);
401 req = skb_put(skb, sizeof(*req));
402 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
403 req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
404 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
405 return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
408 static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
410 struct cpl_abort_req *req;
412 pr_debug("%s ep %p\n", __func__, ep);
413 skb = get_skb(skb, sizeof(*req), gfp);
415 pr_err("%s - failed to alloc skb\n", __func__);
418 skb->priority = CPL_PRIORITY_DATA;
419 set_arp_failure_handler(skb, abort_arp_failure);
420 req = skb_put_zero(skb, sizeof(*req));
421 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
422 req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
423 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
424 req->cmd = CPL_ABORT_SEND_RST;
425 return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
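/*
 * Build and send the CPL_ACT_OPEN_REQ for an active open.  The opt0/opt2
 * words carry the MSS index, window scale, receive buffer size, TOS and
 * congestion-control flavor for this connection.
 */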
428 static int send_connect(struct iwch_ep *ep)
430 struct cpl_act_open_req *req;
432 u32 opt0h, opt0l, opt2;
433 unsigned int mtu_idx;
436 pr_debug("%s ep %p\n", __func__, ep);
438 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
440 pr_err("%s - failed to alloc skb\n", __func__);
443 mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
444 wscale = compute_wscale(rcv_win);
449 V_WND_SCALE(wscale) |
451 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
452 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
453 opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
454 V_CONG_CONTROL_FLAVOR(cong_flavor);
455 skb->priority = CPL_PRIORITY_SETUP;
456 set_arp_failure_handler(skb, act_open_req_arp_failure);
458 req = skb_put(skb, sizeof(*req));
459 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
460 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid));
461 req->local_port = ep->com.local_addr.sin_port;
462 req->peer_port = ep->com.remote_addr.sin_port;
463 req->local_ip = ep->com.local_addr.sin_addr.s_addr;
464 req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
465 req->opt0h = htonl(opt0h);
466 req->opt0l = htonl(opt0l);
468 req->opt2 = htonl(opt2);
469 return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
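/*
 * Transmit the MPA start request (key, CRC/marker flags, revision and any
 * private data) as streaming-mode TX data and move the ep to MPA_REQ_SENT.
 */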
472 static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
475 struct tx_data_wr *req;
476 struct mpa_message *mpa;
479 pr_debug("%s ep %p pd_len %d\n", __func__, ep, ep->plen);
481 BUG_ON(skb_cloned(skb));
483 mpalen = sizeof(*mpa) + ep->plen;
484 if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) {
486 skb = alloc_skb(mpalen + sizeof(*req), GFP_KERNEL);
488 connect_reply_upcall(ep, -ENOMEM);
493 skb_reserve(skb, sizeof(*req));
494 skb_put(skb, mpalen);
495 skb->priority = CPL_PRIORITY_DATA;
496 mpa = (struct mpa_message *) skb->data;
497 memset(mpa, 0, sizeof(*mpa));
498 memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
499 mpa->flags = (crc_enabled ? MPA_CRC : 0) |
500 (markers_enabled ? MPA_MARKERS : 0);
501 mpa->private_data_size = htons(ep->plen);
502 mpa->revision = mpa_rev;
505 memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);
508 * Reference the mpa skb. This ensures the data area
509 * will remain in memory until the hw acks the tx.
510 * Function tx_ack() will deref it.
513 set_arp_failure_handler(skb, arp_failure_discard);
514 skb_reset_transport_header(skb);
516 req = skb_push(skb, sizeof(*req));
517 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
518 req->wr_lo = htonl(V_WR_TID(ep->hwtid));
519 req->len = htonl(len);
520 req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
521 V_TX_SNDBUF(snd_win>>15));
522 req->flags = htonl(F_TX_INIT);
523 req->sndseq = htonl(ep->snd_seq);
526 iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
528 state_set(&ep->com, MPA_REQ_SENT);
532 static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
535 struct tx_data_wr *req;
536 struct mpa_message *mpa;
539 pr_debug("%s ep %p plen %d\n", __func__, ep, plen);
541 mpalen = sizeof(*mpa) + plen;
543 skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
545 pr_err("%s - cannot alloc skb!\n", __func__);
548 skb_reserve(skb, sizeof(*req));
549 mpa = skb_put(skb, mpalen);
550 memset(mpa, 0, sizeof(*mpa));
551 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
552 mpa->flags = MPA_REJECT;
553 mpa->revision = mpa_rev;
554 mpa->private_data_size = htons(plen);
556 memcpy(mpa->private_data, pdata, plen);
559 * Reference the mpa skb again. This ensures the data area
560 * will remain in memory until the hw acks the tx.
561 * Function tx_ack() will deref it.
564 skb->priority = CPL_PRIORITY_DATA;
565 set_arp_failure_handler(skb, arp_failure_discard);
566 skb_reset_transport_header(skb);
567 req = skb_push(skb, sizeof(*req));
568 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
569 req->wr_lo = htonl(V_WR_TID(ep->hwtid));
570 req->len = htonl(mpalen);
571 req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
572 V_TX_SNDBUF(snd_win>>15));
573 req->flags = htonl(F_TX_INIT);
574 req->sndseq = htonl(ep->snd_seq);
577 return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
580 static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
583 struct tx_data_wr *req;
584 struct mpa_message *mpa;
588 pr_debug("%s ep %p plen %d\n", __func__, ep, plen);
590 mpalen = sizeof(*mpa) + plen;
592 skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
594 pr_err("%s - cannot alloc skb!\n", __func__);
597 skb->priority = CPL_PRIORITY_DATA;
598 skb_reserve(skb, sizeof(*req));
599 mpa = skb_put(skb, mpalen);
600 memset(mpa, 0, sizeof(*mpa));
601 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
602 mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
603 (markers_enabled ? MPA_MARKERS : 0);
604 mpa->revision = mpa_rev;
605 mpa->private_data_size = htons(plen);
607 memcpy(mpa->private_data, pdata, plen);
610 * Reference the mpa skb. This ensures the data area
611 * will remain in memory until the hw acks the tx.
612 * Function tx_ack() will deref it.
615 set_arp_failure_handler(skb, arp_failure_discard);
616 skb_reset_transport_header(skb);
618 req = skb_push(skb, sizeof(*req));
619 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
620 req->wr_lo = htonl(V_WR_TID(ep->hwtid));
621 req->len = htonl(len);
622 req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
623 V_TX_SNDBUF(snd_win>>15));
624 req->flags = htonl(F_TX_INIT);
625 req->sndseq = htonl(ep->snd_seq);
627 state_set(&ep->com, MPA_REP_SENT);
628 return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
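/*
 * Active open established: record the TCP ISNs and EMSS, swap the atid for
 * the hardware tid, and start MPA negotiation.
 */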
631 static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
633 struct iwch_ep *ep = ctx;
634 struct cpl_act_establish *req = cplhdr(skb);
635 unsigned int tid = GET_TID(req);
637 pr_debug("%s ep %p tid %d\n", __func__, ep, tid);
639 dst_confirm(ep->dst);
641 /* setup the hwtid for this connection */
643 cxgb3_insert_tid(ep->com.tdev, &t3c_client, ep, tid);
645 ep->snd_seq = ntohl(req->snd_isn);
646 ep->rcv_seq = ntohl(req->rcv_isn);
648 set_emss(ep, ntohs(req->tcp_opt));
650 /* dealloc the atid */
651 cxgb3_free_atid(ep->com.tdev, ep->atid);
653 /* start MPA negotiation */
654 send_mpa_req(ep, skb);
659 static void abort_connection(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
661 pr_debug("%s ep %p\n", __func__, ep);
662 state_set(&ep->com, ABORTING);
663 send_abort(ep, skb, gfp);
666 static void close_complete_upcall(struct iwch_ep *ep)
668 struct iw_cm_event event;
670 pr_debug("%s ep %p\n", __func__, ep);
671 memset(&event, 0, sizeof(event));
672 event.event = IW_CM_EVENT_CLOSE;
674 pr_debug("close complete delivered ep %p cm_id %p tid %d\n",
675 ep, ep->com.cm_id, ep->hwtid);
676 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
677 ep->com.cm_id->rem_ref(ep->com.cm_id);
678 ep->com.cm_id = NULL;
683 static void peer_close_upcall(struct iwch_ep *ep)
685 struct iw_cm_event event;
687 pr_debug("%s ep %p\n", __func__, ep);
688 memset(&event, 0, sizeof(event));
689 event.event = IW_CM_EVENT_DISCONNECT;
691 pr_debug("peer close delivered ep %p cm_id %p tid %d\n",
692 ep, ep->com.cm_id, ep->hwtid);
693 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
697 static void peer_abort_upcall(struct iwch_ep *ep)
699 struct iw_cm_event event;
701 pr_debug("%s ep %p\n", __func__, ep);
702 memset(&event, 0, sizeof(event));
703 event.event = IW_CM_EVENT_CLOSE;
704 event.status = -ECONNRESET;
706 pr_debug("abort delivered ep %p cm_id %p tid %d\n", ep,
707 ep->com.cm_id, ep->hwtid);
708 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
709 ep->com.cm_id->rem_ref(ep->com.cm_id);
710 ep->com.cm_id = NULL;
715 static void connect_reply_upcall(struct iwch_ep *ep, int status)
717 struct iw_cm_event event;
719 pr_debug("%s ep %p status %d\n", __func__, ep, status);
720 memset(&event, 0, sizeof(event));
721 event.event = IW_CM_EVENT_CONNECT_REPLY;
722 event.status = status;
723 memcpy(&event.local_addr, &ep->com.local_addr,
724 sizeof(ep->com.local_addr));
725 memcpy(&event.remote_addr, &ep->com.remote_addr,
726 sizeof(ep->com.remote_addr));
728 if ((status == 0) || (status == -ECONNREFUSED)) {
729 event.private_data_len = ep->plen;
730 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
733 pr_debug("%s ep %p tid %d status %d\n", __func__, ep,
735 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
738 ep->com.cm_id->rem_ref(ep->com.cm_id);
739 ep->com.cm_id = NULL;
744 static void connect_request_upcall(struct iwch_ep *ep)
746 struct iw_cm_event event;
748 pr_debug("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
749 memset(&event, 0, sizeof(event));
750 event.event = IW_CM_EVENT_CONNECT_REQUEST;
751 memcpy(&event.local_addr, &ep->com.local_addr,
752 sizeof(ep->com.local_addr));
753 memcpy(&event.remote_addr, &ep->com.remote_addr,
754 sizeof(ep->com.remote_addr));
755 event.private_data_len = ep->plen;
756 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
757 event.provider_data = ep;
759 * Until ird/ord negotiation via MPAv2 support is added, send max supported values.
762 event.ird = event.ord = 8;
763 if (state_read(&ep->parent_ep->com) != DEAD) {
765 ep->parent_ep->com.cm_id->event_handler(
766 ep->parent_ep->com.cm_id,
769 put_ep(&ep->parent_ep->com);
770 ep->parent_ep = NULL;
773 static void established_upcall(struct iwch_ep *ep)
775 struct iw_cm_event event;
777 pr_debug("%s ep %p\n", __func__, ep);
778 memset(&event, 0, sizeof(event));
779 event.event = IW_CM_EVENT_ESTABLISHED;
781 * Until ird/ord negotiation via MPAv2 support is added, send max supported values.
784 event.ird = event.ord = 8;
786 pr_debug("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
787 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
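/*
 * Return RX credits for the streaming-mode data we just consumed so the
 * TOE reopens the TCP receive window.
 */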
791 static int update_rx_credits(struct iwch_ep *ep, u32 credits)
793 struct cpl_rx_data_ack *req;
796 pr_debug("%s ep %p credits %u\n", __func__, ep, credits);
797 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
799 pr_err("update_rx_credits - cannot alloc skb!\n");
803 req = skb_put(skb, sizeof(*req));
804 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
805 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
806 req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
807 skb->priority = CPL_PRIORITY_ACK;
808 iwch_cxgb3_ofld_send(ep->com.tdev, skb);
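/*
 * Accumulate streaming data into mpa_pkt until the complete MPA start reply
 * (header plus private data) has arrived, validate it, then bind the QP and
 * move it to RTS.
 */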
812 static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
814 struct mpa_message *mpa;
816 struct iwch_qp_attributes attrs;
817 enum iwch_qp_attr_mask mask;
820 pr_debug("%s ep %p\n", __func__, ep);
823 * Stop mpa timer. If it expired, then the state has
824 * changed and we bail since ep_timeout already aborted the connection.
828 if (state_read(&ep->com) != MPA_REQ_SENT)
832 * If we get more than the supported amount of private data
833 * then we must fail this connection.
835 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
841 * copy the new data into our accumulation buffer.
843 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
845 ep->mpa_pkt_len += skb->len;
848 * if we don't even have the mpa message, then bail.
850 if (ep->mpa_pkt_len < sizeof(*mpa))
852 mpa = (struct mpa_message *) ep->mpa_pkt;
854 /* Validate MPA header. */
855 if (mpa->revision != mpa_rev) {
859 if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
864 plen = ntohs(mpa->private_data_size);
867 * Fail if there's too much private data.
869 if (plen > MPA_MAX_PRIVATE_DATA) {
875 * If plen does not account for pkt size
877 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
882 ep->plen = (u8) plen;
885 * If we don't have all the pdata yet, then bail.
886 * We'll continue processing when more data arrives.
888 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
891 if (mpa->flags & MPA_REJECT) {
897 * If we get here we have accumulated the entire mpa
898 * start reply message including private data. And
899 * the MPA header is valid.
901 state_set(&ep->com, FPDU_MODE);
902 ep->mpa_attr.initiator = 1;
903 ep->mpa_attr.crc_enabled = ((mpa->flags & MPA_CRC) | crc_enabled) ? 1 : 0;
904 ep->mpa_attr.recv_marker_enabled = markers_enabled;
905 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
906 ep->mpa_attr.version = mpa_rev;
907 pr_debug("%s - crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d\n",
909 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
910 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
912 attrs.mpa_attr = ep->mpa_attr;
913 attrs.max_ird = ep->ird;
914 attrs.max_ord = ep->ord;
915 attrs.llp_stream_handle = ep;
916 attrs.next_state = IWCH_QP_STATE_RTS;
918 mask = IWCH_QP_ATTR_NEXT_STATE |
919 IWCH_QP_ATTR_LLP_STREAM_HANDLE | IWCH_QP_ATTR_MPA_ATTR |
920 IWCH_QP_ATTR_MAX_IRD | IWCH_QP_ATTR_MAX_ORD;
922 /* bind QP and TID with INIT_WR */
923 err = iwch_modify_qp(ep->com.qp->rhp,
924 ep->com.qp, mask, &attrs, 1);
928 if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) {
929 iwch_post_zb_read(ep);
934 abort_connection(ep, skb, GFP_KERNEL);
936 connect_reply_upcall(ep, err);
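/*
 * Passive-side counterpart of process_mpa_reply(): accumulate and validate
 * the peer's MPA start request, then deliver a connection request upcall to
 * the listening cm_id.
 */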
940 static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
942 struct mpa_message *mpa;
945 pr_debug("%s ep %p\n", __func__, ep);
948 * Stop mpa timer. If it expired, then the state has
949 * changed and we bail since ep_timeout already aborted the connection.
953 if (state_read(&ep->com) != MPA_REQ_WAIT)
957 * If we get more than the supported amount of private data
958 * then we must fail this connection.
960 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
961 abort_connection(ep, skb, GFP_KERNEL);
965 pr_debug("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
968 * Copy the new data into our accumulation buffer.
970 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
972 ep->mpa_pkt_len += skb->len;
975 * If we don't even have the mpa message, then bail.
976 * We'll continue processing when more data arrives.
978 if (ep->mpa_pkt_len < sizeof(*mpa))
980 pr_debug("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
981 mpa = (struct mpa_message *) ep->mpa_pkt;
984 * Validate MPA Header.
986 if (mpa->revision != mpa_rev) {
987 abort_connection(ep, skb, GFP_KERNEL);
991 if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
992 abort_connection(ep, skb, GFP_KERNEL);
996 plen = ntohs(mpa->private_data_size);
999 * Fail if there's too much private data.
1001 if (plen > MPA_MAX_PRIVATE_DATA) {
1002 abort_connection(ep, skb, GFP_KERNEL);
1007 * If plen does not account for pkt size
1009 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
1010 abort_connection(ep, skb, GFP_KERNEL);
1013 ep->plen = (u8) plen;
1016 * If we don't have all the pdata yet, then bail.
1018 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
1022 * If we get here we have accumulated the entire mpa
1023 * start request message including private data.
1025 ep->mpa_attr.initiator = 0;
1026 ep->mpa_attr.crc_enabled = ((mpa->flags & MPA_CRC) | crc_enabled) ? 1 : 0;
1027 ep->mpa_attr.recv_marker_enabled = markers_enabled;
1028 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
1029 ep->mpa_attr.version = mpa_rev;
1030 pr_debug("%s - crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d\n",
1032 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
1033 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
1035 state_set(&ep->com, MPA_REQ_RCVD);
1038 connect_request_upcall(ep);
1042 static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1044 struct iwch_ep *ep = ctx;
1045 struct cpl_rx_data *hdr = cplhdr(skb);
1046 unsigned int dlen = ntohs(hdr->len);
1048 pr_debug("%s ep %p dlen %u\n", __func__, ep, dlen);
1050 skb_pull(skb, sizeof(*hdr));
1051 skb_trim(skb, dlen);
1053 ep->rcv_seq += dlen;
1054 BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));
1056 switch (state_read(&ep->com)) {
1058 process_mpa_reply(ep, skb);
1061 process_mpa_request(ep, skb);
1066 pr_err("%s Unexpected streaming data. ep %p state %d tid %d\n",
1067 __func__, ep, state_read(&ep->com), ep->hwtid);
1070 * The ep will timeout and inform the ULP of the failure.
1076 /* update RX credits */
1077 update_rx_credits(ep, dlen);
1079 return CPL_RET_BUF_DONE;
1083 * Upcall from the adapter indicating data has been transmitted.
1084 * For us it's just the single MPA request or reply. We can now free
1085 * the skb holding the mpa message.
1087 static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1089 struct iwch_ep *ep = ctx;
1090 struct cpl_wr_ack *hdr = cplhdr(skb);
1091 unsigned int credits = ntohs(hdr->credits);
1092 unsigned long flags;
1095 pr_debug("%s ep %p credits %u\n", __func__, ep, credits);
1098 pr_debug("%s 0 credit ack ep %p state %u\n",
1099 __func__, ep, state_read(&ep->com));
1100 return CPL_RET_BUF_DONE;
1103 spin_lock_irqsave(&ep->com.lock, flags);
1104 BUG_ON(credits != 1);
1105 dst_confirm(ep->dst);
1107 pr_debug("%s rdma_init wr_ack ep %p state %u\n",
1108 __func__, ep, ep->com.state);
1109 if (ep->mpa_attr.initiator) {
1110 pr_debug("%s initiator ep %p state %u\n",
1111 __func__, ep, ep->com.state);
1112 if (peer2peer && ep->com.state == FPDU_MODE)
1115 pr_debug("%s responder ep %p state %u\n",
1116 __func__, ep, ep->com.state);
1117 if (ep->com.state == MPA_REQ_RCVD) {
1118 ep->com.rpl_done = 1;
1119 wake_up(&ep->com.waitq);
1123 pr_debug("%s lsm ack ep %p state %u freeing skb\n",
1124 __func__, ep, ep->com.state);
1125 kfree_skb(ep->mpa_skb);
1128 spin_unlock_irqrestore(&ep->com.lock, flags);
1130 iwch_post_zb_read(ep);
1131 return CPL_RET_BUF_DONE;
1134 static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1136 struct iwch_ep *ep = ctx;
1137 unsigned long flags;
1140 pr_debug("%s ep %p\n", __func__, ep);
1144 * We get 2 abort replies from the HW. The first one must
1145 * be ignored except for scribbling that we need one more.
1147 if (!test_and_set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags)) {
1148 return CPL_RET_BUF_DONE;
1151 spin_lock_irqsave(&ep->com.lock, flags);
1152 switch (ep->com.state) {
1154 close_complete_upcall(ep);
1155 __state_set(&ep->com, DEAD);
1159 pr_err("%s ep %p state %d\n", __func__, ep, ep->com.state);
1162 spin_unlock_irqrestore(&ep->com.lock, flags);
1165 release_ep_resources(ep);
1166 return CPL_RET_BUF_DONE;
1170 * Return whether a failed active open has allocated a TID
1172 static inline int act_open_has_tid(int status)
1174 return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
1175 status != CPL_ERR_ARP_MISS;
1178 static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1180 struct iwch_ep *ep = ctx;
1181 struct cpl_act_open_rpl *rpl = cplhdr(skb);
1183 pr_debug("%s ep %p status %u errno %d\n", __func__, ep, rpl->status,
1184 status2errno(rpl->status));
1185 connect_reply_upcall(ep, status2errno(rpl->status));
1186 state_set(&ep->com, DEAD);
1187 if (ep->com.tdev->type != T3A && act_open_has_tid(rpl->status))
1188 release_tid(ep->com.tdev, GET_TID(rpl), NULL);
1189 cxgb3_free_atid(ep->com.tdev, ep->atid);
1190 dst_release(ep->dst);
1191 l2t_release(ep->com.tdev, ep->l2t);
1193 return CPL_RET_BUF_DONE;
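/*
 * Program a hardware listening server for the bound address/port with a
 * CPL_PASS_OPEN_REQ.
 */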
1196 static int listen_start(struct iwch_listen_ep *ep)
1198 struct sk_buff *skb;
1199 struct cpl_pass_open_req *req;
1201 pr_debug("%s ep %p\n", __func__, ep);
1202 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
1204 pr_err("t3c_listen_start failed to alloc skb!\n");
1208 req = skb_put(skb, sizeof(*req));
1209 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1210 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid));
1211 req->local_port = ep->com.local_addr.sin_port;
1212 req->local_ip = ep->com.local_addr.sin_addr.s_addr;
1215 req->peer_netmask = 0;
1216 req->opt0h = htonl(F_DELACK | F_TCAM_BYPASS);
1217 req->opt0l = htonl(V_RCV_BUFSIZ(rcv_win>>10));
1218 req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));
1221 return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
1224 static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1226 struct iwch_listen_ep *ep = ctx;
1227 struct cpl_pass_open_rpl *rpl = cplhdr(skb);
1229 pr_debug("%s ep %p status %d error %d\n", __func__, ep,
1230 rpl->status, status2errno(rpl->status));
1231 ep->com.rpl_err = status2errno(rpl->status);
1232 ep->com.rpl_done = 1;
1233 wake_up(&ep->com.waitq);
1235 return CPL_RET_BUF_DONE;
1238 static int listen_stop(struct iwch_listen_ep *ep)
1240 struct sk_buff *skb;
1241 struct cpl_close_listserv_req *req;
1243 pr_debug("%s ep %p\n", __func__, ep);
1244 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
1246 pr_err("%s - failed to alloc skb\n", __func__);
1249 req = skb_put(skb, sizeof(*req));
1250 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1252 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
1254 return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
1257 static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
1260 struct iwch_listen_ep *ep = ctx;
1261 struct cpl_close_listserv_rpl *rpl = cplhdr(skb);
1263 pr_debug("%s ep %p\n", __func__, ep);
1264 ep->com.rpl_err = status2errno(rpl->status);
1265 ep->com.rpl_done = 1;
1266 wake_up(&ep->com.waitq);
1267 return CPL_RET_BUF_DONE;
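/*
 * Accept an incoming SYN: reply with a CPL_PASS_ACCEPT_RPL carrying the same
 * option words used for active opens.
 */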
1270 static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
1272 struct cpl_pass_accept_rpl *rpl;
1273 unsigned int mtu_idx;
1274 u32 opt0h, opt0l, opt2;
1277 pr_debug("%s ep %p\n", __func__, ep);
1278 BUG_ON(skb_cloned(skb));
1279 skb_trim(skb, sizeof(*rpl));
1281 mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
1282 wscale = compute_wscale(rcv_win);
1283 opt0h = V_NAGLE(0) |
1287 V_WND_SCALE(wscale) |
1288 V_MSS_IDX(mtu_idx) |
1289 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
1290 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
1291 opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
1292 V_CONG_CONTROL_FLAVOR(cong_flavor);
1295 rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1296 OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, ep->hwtid));
1297 rpl->peer_ip = peer_ip;
1298 rpl->opt0h = htonl(opt0h);
1299 rpl->opt0l_status = htonl(opt0l | CPL_PASS_OPEN_ACCEPT);
1300 rpl->opt2 = htonl(opt2);
1301 rpl->rsvd = rpl->opt2; /* workaround for HW bug */
1302 skb->priority = CPL_PRIORITY_SETUP;
1303 iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
1308 static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
1309 struct sk_buff *skb)
1311 pr_debug("%s t3cdev %p tid %u peer_ip %x\n", __func__, tdev, hwtid,
1313 BUG_ON(skb_cloned(skb));
1314 skb_trim(skb, sizeof(struct cpl_tid_release));
1317 if (tdev->type != T3A)
1318 release_tid(tdev, hwtid, skb);
1320 struct cpl_pass_accept_rpl *rpl;
1323 skb->priority = CPL_PRIORITY_SETUP;
1324 rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1325 OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
1327 rpl->peer_ip = peer_ip;
1328 rpl->opt0h = htonl(F_TCAM_BYPASS);
1329 rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
1331 rpl->rsvd = rpl->opt2;
1332 iwch_cxgb3_ofld_send(tdev, skb);
1336 static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1338 struct iwch_ep *child_ep, *parent_ep = ctx;
1339 struct cpl_pass_accept_req *req = cplhdr(skb);
1340 unsigned int hwtid = GET_TID(req);
1341 struct dst_entry *dst;
1342 struct l2t_entry *l2t;
1346 pr_debug("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
1348 if (state_read(&parent_ep->com) != LISTEN) {
1349 pr_err("%s - listening ep not in LISTEN\n", __func__);
1354 * Find the netdev for this connection request.
1356 tim.mac_addr = req->dst_mac;
1357 tim.vlan_tag = ntohs(req->vlan_tag);
1358 if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
1359 pr_err("%s bad dst mac %pM\n", __func__, req->dst_mac);
1363 /* Find output route */
1364 rt = find_route(tdev,
1368 req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
1370 pr_err("%s - failed to find dst entry!\n", __func__);
1374 l2t = t3_l2t_get(tdev, dst, NULL, &req->peer_ip);
1376 pr_err("%s - failed to allocate l2t entry!\n", __func__);
1380 child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
1382 pr_err("%s - failed to allocate ep entry!\n", __func__);
1383 l2t_release(tdev, l2t);
1387 state_set(&child_ep->com, CONNECTING);
1388 child_ep->com.tdev = tdev;
1389 child_ep->com.cm_id = NULL;
1390 child_ep->com.local_addr.sin_family = AF_INET;
1391 child_ep->com.local_addr.sin_port = req->local_port;
1392 child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
1393 child_ep->com.remote_addr.sin_family = AF_INET;
1394 child_ep->com.remote_addr.sin_port = req->peer_port;
1395 child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
1396 get_ep(&parent_ep->com);
1397 child_ep->parent_ep = parent_ep;
1398 child_ep->tos = G_PASS_OPEN_TOS(ntohl(req->tos_tid));
1399 child_ep->l2t = l2t;
1400 child_ep->dst = dst;
1401 child_ep->hwtid = hwtid;
1402 init_timer(&child_ep->timer);
1403 cxgb3_insert_tid(tdev, &t3c_client, child_ep, hwtid);
1404 accept_cr(child_ep, req->peer_ip, skb);
1407 reject_cr(tdev, hwtid, req->peer_ip, skb);
1409 return CPL_RET_BUF_DONE;
1412 static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1414 struct iwch_ep *ep = ctx;
1415 struct cpl_pass_establish *req = cplhdr(skb);
1417 pr_debug("%s ep %p\n", __func__, ep);
1418 ep->snd_seq = ntohl(req->snd_isn);
1419 ep->rcv_seq = ntohl(req->rcv_isn);
1421 set_emss(ep, ntohs(req->tcp_opt));
1423 dst_confirm(ep->dst);
1424 state_set(&ep->com, MPA_REQ_WAIT);
1427 return CPL_RET_BUF_DONE;
1430 static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1432 struct iwch_ep *ep = ctx;
1433 struct iwch_qp_attributes attrs;
1434 unsigned long flags;
1438 pr_debug("%s ep %p\n", __func__, ep);
1439 dst_confirm(ep->dst);
1441 spin_lock_irqsave(&ep->com.lock, flags);
1442 switch (ep->com.state) {
1444 __state_set(&ep->com, CLOSING);
1447 __state_set(&ep->com, CLOSING);
1448 connect_reply_upcall(ep, -ECONNRESET);
1453 * We're gonna mark this puppy DEAD, but keep
1454 * the reference on it until the ULP accepts or
1455 * rejects the CR. Also wake up anyone waiting
1456 * in rdma connection migration (see iwch_accept_cr()).
1458 __state_set(&ep->com, CLOSING);
1459 ep->com.rpl_done = 1;
1460 ep->com.rpl_err = -ECONNRESET;
1461 pr_debug("waking up ep %p\n", ep);
1462 wake_up(&ep->com.waitq);
1465 __state_set(&ep->com, CLOSING);
1466 ep->com.rpl_done = 1;
1467 ep->com.rpl_err = -ECONNRESET;
1468 pr_debug("waking up ep %p\n", ep);
1469 wake_up(&ep->com.waitq);
1473 __state_set(&ep->com, CLOSING);
1474 attrs.next_state = IWCH_QP_STATE_CLOSING;
1475 iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
1476 IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
1477 peer_close_upcall(ep);
1483 __state_set(&ep->com, MORIBUND);
1488 if (ep->com.cm_id && ep->com.qp) {
1489 attrs.next_state = IWCH_QP_STATE_IDLE;
1490 iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
1491 IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
1493 close_complete_upcall(ep);
1494 __state_set(&ep->com, DEAD);
1504 spin_unlock_irqrestore(&ep->com.lock, flags);
1506 iwch_ep_disconnect(ep, 0, GFP_KERNEL);
1508 release_ep_resources(ep);
1509 return CPL_RET_BUF_DONE;
1513 * Returns whether an ABORT_REQ_RSS message is a negative advice.
1515 static int is_neg_adv_abort(unsigned int status)
1517 return status == CPL_ERR_RTX_NEG_ADVICE ||
1518 status == CPL_ERR_PERSIST_NEG_ADVICE;
1521 static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1523 struct cpl_abort_req_rss *req = cplhdr(skb);
1524 struct iwch_ep *ep = ctx;
1525 struct cpl_abort_rpl *rpl;
1526 struct sk_buff *rpl_skb;
1527 struct iwch_qp_attributes attrs;
1530 unsigned long flags;
1532 if (is_neg_adv_abort(req->status)) {
1533 pr_debug("%s neg_adv_abort ep %p tid %d\n", __func__, ep,
1535 t3_l2t_send_event(ep->com.tdev, ep->l2t);
1536 return CPL_RET_BUF_DONE;
1540 * We get 2 peer aborts from the HW. The first one must
1541 * be ignored except for scribbling that we need one more.
1543 if (!test_and_set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags)) {
1544 return CPL_RET_BUF_DONE;
1547 spin_lock_irqsave(&ep->com.lock, flags);
1548 pr_debug("%s ep %p state %u\n", __func__, ep, ep->com.state);
1549 switch (ep->com.state) {
1557 connect_reply_upcall(ep, -ECONNRESET);
1560 ep->com.rpl_done = 1;
1561 ep->com.rpl_err = -ECONNRESET;
1562 pr_debug("waking up ep %p\n", ep);
1563 wake_up(&ep->com.waitq);
1568 * We're gonna mark this puppy DEAD, but keep
1569 * the reference on it until the ULP accepts or
1570 * rejects the CR. Also wake up anyone waiting
1571 * in rdma connection migration (see iwch_accept_cr()).
1573 ep->com.rpl_done = 1;
1574 ep->com.rpl_err = -ECONNRESET;
1575 pr_debug("waking up ep %p\n", ep);
1576 wake_up(&ep->com.waitq);
1583 if (ep->com.cm_id && ep->com.qp) {
1584 attrs.next_state = IWCH_QP_STATE_ERROR;
1585 ret = iwch_modify_qp(ep->com.qp->rhp,
1586 ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
1589 pr_err("%s - qp <- error failed!\n", __func__);
1591 peer_abort_upcall(ep);
1596 pr_debug("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
1597 spin_unlock_irqrestore(&ep->com.lock, flags);
1598 return CPL_RET_BUF_DONE;
1603 dst_confirm(ep->dst);
1604 if (ep->com.state != ABORTING) {
1605 __state_set(&ep->com, DEAD);
1608 spin_unlock_irqrestore(&ep->com.lock, flags);
1610 rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
1612 pr_err("%s - cannot allocate skb!\n", __func__);
1616 rpl_skb->priority = CPL_PRIORITY_DATA;
1617 rpl = skb_put(rpl_skb, sizeof(*rpl));
1618 rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
1619 rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
1620 OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
1621 rpl->cmd = CPL_ABORT_NO_RST;
1622 iwch_cxgb3_ofld_send(ep->com.tdev, rpl_skb);
1625 release_ep_resources(ep);
1626 return CPL_RET_BUF_DONE;
1629 static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1631 struct iwch_ep *ep = ctx;
1632 struct iwch_qp_attributes attrs;
1633 unsigned long flags;
1636 pr_debug("%s ep %p\n", __func__, ep);
1639 /* The cm_id may be null if we failed to connect */
1640 spin_lock_irqsave(&ep->com.lock, flags);
1641 switch (ep->com.state) {
1643 __state_set(&ep->com, MORIBUND);
1647 if ((ep->com.cm_id) && (ep->com.qp)) {
1648 attrs.next_state = IWCH_QP_STATE_IDLE;
1649 iwch_modify_qp(ep->com.qp->rhp,
1651 IWCH_QP_ATTR_NEXT_STATE,
1654 close_complete_upcall(ep);
1655 __state_set(&ep->com, DEAD);
1665 spin_unlock_irqrestore(&ep->com.lock, flags);
1667 release_ep_resources(ep);
1668 return CPL_RET_BUF_DONE;
1672 * T3A does 3 things when a TERM is received:
1673 * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
1674 * 2) generate an async event on the QP with the TERMINATE opcode
1675 * 3) post a TERMINATE opcode cqe into the associated CQ.
1677 * For (1), we save the message in the qp for later consumer consumption.
1678 * For (2), we move the QP into TERMINATE, post a QP event and disconnect.
1679 * For (3), we toss the CQE in cxio_poll_cq().
1681 * terminate() handles case (1)...
1683 static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1685 struct iwch_ep *ep = ctx;
1687 if (state_read(&ep->com) != FPDU_MODE)
1688 return CPL_RET_BUF_DONE;
1690 pr_debug("%s ep %p\n", __func__, ep);
1691 skb_pull(skb, sizeof(struct cpl_rdma_terminate));
1692 pr_debug("%s saving %d bytes of term msg\n", __func__, skb->len);
1693 skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
1695 ep->com.qp->attr.terminate_msg_len = skb->len;
1696 ep->com.qp->attr.is_terminate_local = 0;
1697 return CPL_RET_BUF_DONE;
1700 static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1702 struct cpl_rdma_ec_status *rep = cplhdr(skb);
1703 struct iwch_ep *ep = ctx;
1705 pr_debug("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid,
1708 struct iwch_qp_attributes attrs;
1710 pr_err("%s BAD CLOSE - Aborting tid %u\n",
1711 __func__, ep->hwtid);
1713 attrs.next_state = IWCH_QP_STATE_ERROR;
1714 iwch_modify_qp(ep->com.qp->rhp,
1715 ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
1717 abort_connection(ep, NULL, GFP_KERNEL);
1719 return CPL_RET_BUF_DONE;
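/*
 * Endpoint watchdog handler: if MPA negotiation or a close does not complete
 * in time, move the ep to ABORTING (reporting -ETIMEDOUT to a pending active
 * connect) and abort the connection.
 */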
1722 static void ep_timeout(unsigned long arg)
1724 struct iwch_ep *ep = (struct iwch_ep *)arg;
1725 struct iwch_qp_attributes attrs;
1726 unsigned long flags;
1729 spin_lock_irqsave(&ep->com.lock, flags);
1730 pr_debug("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
1732 switch (ep->com.state) {
1734 __state_set(&ep->com, ABORTING);
1735 connect_reply_upcall(ep, -ETIMEDOUT);
1738 __state_set(&ep->com, ABORTING);
1742 if (ep->com.cm_id && ep->com.qp) {
1743 attrs.next_state = IWCH_QP_STATE_ERROR;
1744 iwch_modify_qp(ep->com.qp->rhp,
1745 ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
1748 __state_set(&ep->com, ABORTING);
1751 WARN(1, "%s unexpected state ep %p state %u\n",
1752 __func__, ep, ep->com.state);
1755 spin_unlock_irqrestore(&ep->com.lock, flags);
1757 abort_connection(ep, NULL, GFP_ATOMIC);
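/*
 * The ULP rejected the connection request: either abort outright or send an
 * MPA reject with the supplied private data and start a normal disconnect.
 */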
1761 int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
1764 struct iwch_ep *ep = to_ep(cm_id);
1765 pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1767 if (state_read(&ep->com) == DEAD) {
1771 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
1773 abort_connection(ep, NULL, GFP_KERNEL);
1775 err = send_mpa_reject(ep, pdata, pdata_len);
1776 err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
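/*
 * The ULP accepted the connection request: validate the requested ird/ord,
 * bind the QP to the ep, move the QP to RTS and send the MPA reply.
 */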
1782 int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1785 struct iwch_qp_attributes attrs;
1786 enum iwch_qp_attr_mask mask;
1787 struct iwch_ep *ep = to_ep(cm_id);
1788 struct iwch_dev *h = to_iwch_dev(cm_id->device);
1789 struct iwch_qp *qp = get_qhp(h, conn_param->qpn);
1791 pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1792 if (state_read(&ep->com) == DEAD) {
1797 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
1800 if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
1801 (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
1802 abort_connection(ep, NULL, GFP_KERNEL);
1807 cm_id->add_ref(cm_id);
1808 ep->com.cm_id = cm_id;
1811 ep->ird = conn_param->ird;
1812 ep->ord = conn_param->ord;
1814 if (peer2peer && ep->ird == 0)
1817 pr_debug("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
1819 /* bind QP to EP and move to RTS */
1820 attrs.mpa_attr = ep->mpa_attr;
1821 attrs.max_ird = ep->ird;
1822 attrs.max_ord = ep->ord;
1823 attrs.llp_stream_handle = ep;
1824 attrs.next_state = IWCH_QP_STATE_RTS;
1826 /* bind QP and TID with INIT_WR */
1827 mask = IWCH_QP_ATTR_NEXT_STATE |
1828 IWCH_QP_ATTR_LLP_STREAM_HANDLE |
1829 IWCH_QP_ATTR_MPA_ATTR |
1830 IWCH_QP_ATTR_MAX_IRD |
1831 IWCH_QP_ATTR_MAX_ORD;
1833 err = iwch_modify_qp(ep->com.qp->rhp,
1834 ep->com.qp, mask, &attrs, 1);
1838 /* if needed, wait for wr_ack */
1839 if (iwch_rqes_posted(qp)) {
1840 wait_event(ep->com.waitq, ep->com.rpl_done);
1841 err = ep->com.rpl_err;
1846 err = send_mpa_reply(ep, conn_param->private_data,
1847 conn_param->private_data_len);
1852 state_set(&ep->com, FPDU_MODE);
1853 established_upcall(ep);
1857 ep->com.cm_id = NULL;
1859 cm_id->rem_ref(cm_id);
1865 static int is_loopback_dst(struct iw_cm_id *cm_id)
1867 struct net_device *dev;
1868 struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
1870 dev = ip_dev_find(&init_net, raddr->sin_addr.s_addr);
1877 int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1879 struct iwch_dev *h = to_iwch_dev(cm_id->device);
1883 struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
1884 struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
1886 if (cm_id->m_remote_addr.ss_family != PF_INET) {
1891 if (is_loopback_dst(cm_id)) {
1896 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
1898 pr_err("%s - cannot alloc ep\n", __func__);
1902 init_timer(&ep->timer);
1903 ep->plen = conn_param->private_data_len;
1905 memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
1906 conn_param->private_data, ep->plen);
1907 ep->ird = conn_param->ird;
1908 ep->ord = conn_param->ord;
1910 if (peer2peer && ep->ord == 0)
1913 ep->com.tdev = h->rdev.t3cdev_p;
1915 cm_id->add_ref(cm_id);
1916 ep->com.cm_id = cm_id;
1917 ep->com.qp = get_qhp(h, conn_param->qpn);
1918 BUG_ON(!ep->com.qp);
1919 pr_debug("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
1923 * Allocate an active TID to initiate a TCP connection.
1925 ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep);
1926 if (ep->atid == -1) {
1927 pr_err("%s - cannot alloc atid\n", __func__);
1933 rt = find_route(h->rdev.t3cdev_p, laddr->sin_addr.s_addr,
1934 raddr->sin_addr.s_addr, laddr->sin_port,
1935 raddr->sin_port, IPTOS_LOWDELAY);
1937 pr_err("%s - cannot find route\n", __func__);
1938 err = -EHOSTUNREACH;
1942 ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL,
1943 &raddr->sin_addr.s_addr);
1945 pr_err("%s - cannot alloc l2e\n", __func__);
1950 state_set(&ep->com, CONNECTING);
1951 ep->tos = IPTOS_LOWDELAY;
1952 memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
1953 sizeof(ep->com.local_addr));
1954 memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr,
1955 sizeof(ep->com.remote_addr));
1957 /* send connect request to rnic */
1958 err = send_connect(ep);
1962 l2t_release(h->rdev.t3cdev_p, ep->l2t);
1964 dst_release(ep->dst);
1966 cxgb3_free_atid(ep->com.tdev, ep->atid);
1968 cm_id->rem_ref(cm_id);
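/*
 * Create a listening endpoint: allocate a server TID, issue the pass-open
 * request and wait for its reply before returning to the ULP.
 */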
1974 int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
1977 struct iwch_dev *h = to_iwch_dev(cm_id->device);
1978 struct iwch_listen_ep *ep;
1983 if (cm_id->m_local_addr.ss_family != PF_INET) {
1988 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
1990 pr_err("%s - cannot alloc ep\n", __func__);
1994 pr_debug("%s ep %p\n", __func__, ep);
1995 ep->com.tdev = h->rdev.t3cdev_p;
1996 cm_id->add_ref(cm_id);
1997 ep->com.cm_id = cm_id;
1998 ep->backlog = backlog;
1999 memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
2000 sizeof(ep->com.local_addr));
2003 * Allocate a server TID.
2005 ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep);
2006 if (ep->stid == -1) {
2007 pr_err("%s - cannot alloc stid\n", __func__);
2012 state_set(&ep->com, LISTEN);
2013 err = listen_start(ep);
2017 /* wait for pass_open_rpl */
2018 wait_event(ep->com.waitq, ep->com.rpl_done);
2019 err = ep->com.rpl_err;
2021 cm_id->provider_data = ep;
2025 cxgb3_free_stid(ep->com.tdev, ep->stid);
2027 cm_id->rem_ref(cm_id);
2034 int iwch_destroy_listen(struct iw_cm_id *cm_id)
2037 struct iwch_listen_ep *ep = to_listen_ep(cm_id);
2039 pr_debug("%s ep %p\n", __func__, ep);
2042 state_set(&ep->com, DEAD);
2043 ep->com.rpl_done = 0;
2044 ep->com.rpl_err = 0;
2045 err = listen_stop(ep);
2048 wait_event(ep->com.waitq, ep->com.rpl_done);
2049 cxgb3_free_stid(ep->com.tdev, ep->stid);
2051 err = ep->com.rpl_err;
2052 cm_id->rem_ref(cm_id);
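/*
 * Tear down a connection on behalf of the ULP: an abrupt close sends an
 * ABORT_REQ, a graceful one sends a half-close, and a fatal rdev error
 * short-circuits the state machine straight to DEAD.
 */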
2057 int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
2060 unsigned long flags;
2063 struct t3cdev *tdev;
2064 struct cxio_rdev *rdev;
2066 spin_lock_irqsave(&ep->com.lock, flags);
2068 pr_debug("%s ep %p state %s, abrupt %d\n", __func__, ep,
2069 states[ep->com.state], abrupt);
2071 tdev = (struct t3cdev *)ep->com.tdev;
2072 rdev = (struct cxio_rdev *)tdev->ulp;
2073 if (cxio_fatal_error(rdev)) {
2075 close_complete_upcall(ep);
2076 ep->com.state = DEAD;
2078 switch (ep->com.state) {
2086 ep->com.state = ABORTING;
2088 ep->com.state = CLOSING;
2091 set_bit(CLOSE_SENT, &ep->com.flags);
2094 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
2098 ep->com.state = ABORTING;
2100 ep->com.state = MORIBUND;
2106 pr_debug("%s ignoring disconnect ep %p state %u\n",
2107 __func__, ep, ep->com.state);
2114 spin_unlock_irqrestore(&ep->com.lock, flags);
2117 ret = send_abort(ep, NULL, gfp);
2119 ret = send_halfclose(ep, gfp);
2124 release_ep_resources(ep);
2128 int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
2129 struct l2t_entry *l2t)
2131 struct iwch_ep *ep = ctx;
2136 pr_debug("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
2139 l2t_release(ep->com.tdev, ep->l2t);
2147 * All the CM events are handled on a work queue to have a safe context.
2148 * These are the real handlers that are called from the work queue.
2150 static const cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS] = {
2151 [CPL_ACT_ESTABLISH] = act_establish,
2152 [CPL_ACT_OPEN_RPL] = act_open_rpl,
2153 [CPL_RX_DATA] = rx_data,
2154 [CPL_TX_DMA_ACK] = tx_ack,
2155 [CPL_ABORT_RPL_RSS] = abort_rpl,
2156 [CPL_ABORT_RPL] = abort_rpl,
2157 [CPL_PASS_OPEN_RPL] = pass_open_rpl,
2158 [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
2159 [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
2160 [CPL_PASS_ESTABLISH] = pass_establish,
2161 [CPL_PEER_CLOSE] = peer_close,
2162 [CPL_ABORT_REQ_RSS] = peer_abort,
2163 [CPL_CLOSE_CON_RPL] = close_con_rpl,
2164 [CPL_RDMA_TERMINATE] = terminate,
2165 [CPL_RDMA_EC_STATUS] = ec_status,
2168 static void process_work(struct work_struct *work)
2170 struct sk_buff *skb = NULL;
2172 struct t3cdev *tdev;
2175 while ((skb = skb_dequeue(&rxq))) {
2176 ep = *((void **) (skb->cb));
2177 tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
2178 ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
2179 if (ret & CPL_RET_BUF_DONE)
2183 * ep was referenced in sched(), and is freed here.
2185 put_ep((struct iwch_ep_common *)ep);
2189 static DECLARE_WORK(skb_work, process_work);
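/*
 * CPL messages arrive in the interrupt path; sched() stashes the endpoint
 * context and t3cdev in skb->cb and defers the real work to the workqueue.
 */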
2191 static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
2193 struct iwch_ep_common *epc = ctx;
2198 * Save ctx and tdev in the skb->cb area.
2200 *((void **) skb->cb) = ctx;
2201 *((struct t3cdev **) (skb->cb + sizeof(void *))) = tdev;
2204 * Queue the skb and schedule the worker thread.
2206 skb_queue_tail(&rxq, skb);
2207 queue_work(workq, &skb_work);
2211 static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
2213 struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
2215 if (rpl->status != CPL_ERR_NONE) {
2216 pr_err("Unexpected SET_TCB_RPL status %u for tid %u\n",
2217 rpl->status, GET_TID(rpl));
2219 return CPL_RET_BUF_DONE;
2223 * All upcalls from the T3 Core go to sched() to schedule the
2224 * processing on a work queue.
2226 cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS] = {
2227 [CPL_ACT_ESTABLISH] = sched,
2228 [CPL_ACT_OPEN_RPL] = sched,
2229 [CPL_RX_DATA] = sched,
2230 [CPL_TX_DMA_ACK] = sched,
2231 [CPL_ABORT_RPL_RSS] = sched,
2232 [CPL_ABORT_RPL] = sched,
2233 [CPL_PASS_OPEN_RPL] = sched,
2234 [CPL_CLOSE_LISTSRV_RPL] = sched,
2235 [CPL_PASS_ACCEPT_REQ] = sched,
2236 [CPL_PASS_ESTABLISH] = sched,
2237 [CPL_PEER_CLOSE] = sched,
2238 [CPL_CLOSE_CON_RPL] = sched,
2239 [CPL_ABORT_REQ_RSS] = sched,
2240 [CPL_RDMA_TERMINATE] = sched,
2241 [CPL_RDMA_EC_STATUS] = sched,
2242 [CPL_SET_TCB_RPL] = set_tcb_rpl,
2245 int __init iwch_cm_init(void)
2247 skb_queue_head_init(&rxq);
2249 workq = alloc_ordered_workqueue("iw_cxgb3", WQ_MEM_RECLAIM);
2256 void __exit iwch_cm_term(void)
2258 flush_workqueue(workq);
2259 destroy_workqueue(workq);