/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <libcxgb_cm.h>
#include "cxgbit.h"
#include "clip_tbl.h"

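/*
 * Listen/TCB work requests are answered asynchronously by firmware CPL
 * messages. The wr_wait helpers below pair each request with a
 * completion: the submitter initializes it and sleeps in
 * cxgbit_wait_for_reply(), and the CPL reply handler calls
 * cxgbit_wake_up() with the firmware status.
 */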
static void cxgbit_init_wr_wait(struct cxgbit_wr_wait *wr_waitp)
{
	wr_waitp->ret = 0;
	reinit_completion(&wr_waitp->completion);
}

static void
cxgbit_wake_up(struct cxgbit_wr_wait *wr_waitp, const char *func, u8 ret)
{
	if (ret == CPL_ERR_NONE)
		wr_waitp->ret = 0;
	else
		wr_waitp->ret = -EIO;

	if (wr_waitp->ret)
		pr_err("%s: err:%u", func, ret);

	complete(&wr_waitp->completion);
}

static int
cxgbit_wait_for_reply(struct cxgbit_device *cdev,
		      struct cxgbit_wr_wait *wr_waitp, u32 tid, u32 timeout,
		      const char *func)
{
	int ret;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
		wr_waitp->ret = -EIO;
		goto out;
	}

	ret = wait_for_completion_timeout(&wr_waitp->completion, timeout * HZ);
	if (!ret) {
		pr_info("%s - Device %s not responding tid %u\n",
			func, pci_name(cdev->lldi.pdev), tid);
		wr_waitp->ret = -ETIMEDOUT;
	}
out:
	if (wr_waitp->ret)
		pr_info("%s: FW reply %d tid %u\n",
			pci_name(cdev->lldi.pdev), wr_waitp->ret, tid);
	return wr_waitp->ret;
}

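/*
 * A listening endpoint (cxgbit_np) can be bound on several adapters at
 * once. Each cxgbit_device keeps a small hash table (np_hash_tab) that
 * maps the cnp pointer to the server tid (stid) allocated on that
 * device; buckets are singly linked lists protected by cdev->np_lock.
 */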
static int cxgbit_np_hashfn(const struct cxgbit_np *cnp)
{
	return ((unsigned long)cnp >> 10) & (NP_INFO_HASH_SIZE - 1);
}

static struct np_info *
cxgbit_np_hash_add(struct cxgbit_device *cdev, struct cxgbit_np *cnp,
		   unsigned int stid)
{
	struct np_info *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (p) {
		int bucket = cxgbit_np_hashfn(cnp);

		p->cnp = cnp;
		p->stid = stid;
		spin_lock(&cdev->np_lock);
		p->next = cdev->np_hash_tab[bucket];
		cdev->np_hash_tab[bucket] = p;
		spin_unlock(&cdev->np_lock);
	}

	return p;
}

static int
cxgbit_np_hash_find(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid = -1, bucket = cxgbit_np_hashfn(cnp);
	struct np_info *p;

	spin_lock(&cdev->np_lock);
	for (p = cdev->np_hash_tab[bucket]; p; p = p->next) {
		if (p->cnp == cnp) {
			stid = p->stid;
			break;
		}
	}
	spin_unlock(&cdev->np_lock);

	return stid;
}

static int cxgbit_np_hash_del(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid = -1, bucket = cxgbit_np_hashfn(cnp);
	struct np_info *p, **prev = &cdev->np_hash_tab[bucket];

	spin_lock(&cdev->np_lock);
	for (p = *prev; p; prev = &p->next, p = p->next) {
		if (p->cnp == cnp) {
			stid = p->stid;
			*prev = p->next;
			kfree(p);
			break;
		}
	}
	spin_unlock(&cdev->np_lock);

	return stid;
}

void _cxgbit_free_cnp(struct kref *kref)
{
	struct cxgbit_np *cnp;

	cnp = container_of(kref, struct cxgbit_np, kref);
	kfree(cnp);
}

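/*
 * Start a hardware listening server on the given stid. The
 * cxgb4_create_server*() call posts the request to firmware; the
 * CPL_PASS_OPEN_RPL reply lands in cxgbit_pass_open_rpl(), which wakes
 * the wr_wait this function sleeps on. For IPv6, a CLIP (compressed
 * local IP) table entry is claimed first for non-wildcard addresses.
 */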
static int
cxgbit_create_server6(struct cxgbit_device *cdev, unsigned int stid,
		      struct cxgbit_np *cnp)
{
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
				     &cnp->com.local_addr;
	int addr_type;
	int ret;

	pr_debug("%s: dev = %s; stid = %u; sin6_port = %u\n",
		 __func__, cdev->lldi.ports[0]->name, stid, sin6->sin6_port);

	addr_type = ipv6_addr_type((const struct in6_addr *)
				   &sin6->sin6_addr);
	if (addr_type != IPV6_ADDR_ANY) {
		ret = cxgb4_clip_get(cdev->lldi.ports[0],
				     (const u32 *)&sin6->sin6_addr.s6_addr, 1);
		if (ret) {
			pr_err("Unable to find clip table entry. laddr %pI6. Error:%d.\n",
			       sin6->sin6_addr.s6_addr, ret);
			return -ENOMEM;
		}
	}

	cxgbit_get_cnp(cnp);
	cxgbit_init_wr_wait(&cnp->com.wr_wait);

	ret = cxgb4_create_server6(cdev->lldi.ports[0],
				   stid, &sin6->sin6_addr,
				   sin6->sin6_port,
				   cdev->lldi.rxq_ids[0]);
	if (!ret)
		ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
					    0, 10, __func__);
	else if (ret > 0)
		ret = net_xmit_errno(ret);
	else
		cxgbit_put_cnp(cnp);

	if (ret) {
		if (ret != -ETIMEDOUT)
			cxgb4_clip_release(cdev->lldi.ports[0],
					   (const u32 *)&sin6->sin6_addr.s6_addr, 1);

		pr_err("create server6 err %d stid %d laddr %pI6 lport %d\n",
		       ret, stid, sin6->sin6_addr.s6_addr,
		       ntohs(sin6->sin6_port));
	}

	return ret;
}

static int
cxgbit_create_server4(struct cxgbit_device *cdev, unsigned int stid,
		      struct cxgbit_np *cnp)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)
				   &cnp->com.local_addr;
	int ret;

	pr_debug("%s: dev = %s; stid = %u; sin_port = %u\n",
		 __func__, cdev->lldi.ports[0]->name, stid, sin->sin_port);

	cxgbit_get_cnp(cnp);
	cxgbit_init_wr_wait(&cnp->com.wr_wait);

	ret = cxgb4_create_server(cdev->lldi.ports[0],
				  stid, sin->sin_addr.s_addr,
				  sin->sin_port, 0,
				  cdev->lldi.rxq_ids[0]);
	if (!ret)
		ret = cxgbit_wait_for_reply(cdev,
					    &cnp->com.wr_wait,
					    0, 10, __func__);
	else if (ret > 0)
		ret = net_xmit_errno(ret);
	else
		cxgbit_put_cnp(cnp);

	if (ret)
		pr_err("create server failed err %d stid %d laddr %pI4 lport %d\n",
		       ret, stid, &sin->sin_addr, ntohs(sin->sin_port));
	return ret;
}

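/*
 * Map a net_device to its owning cxgbit_device by scanning the global
 * adapter list; optionally returns the matching port index through
 * *port_id.
 */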
struct cxgbit_device *cxgbit_find_device(struct net_device *ndev, u8 *port_id)
{
	struct cxgbit_device *cdev;
	u8 i;

	list_for_each_entry(cdev, &cdev_list_head, list) {
		struct cxgb4_lld_info *lldi = &cdev->lldi;

		for (i = 0; i < lldi->nports; i++) {
			if (lldi->ports[i] == ndev) {
				if (port_id)
					*port_id = i;
				return cdev;
			}
		}
	}

	return NULL;
}

static struct net_device *cxgbit_get_real_dev(struct net_device *ndev)
{
	if (ndev->priv_flags & IFF_BONDING) {
		pr_err("Bond devices are not supported. Interface:%s\n",
		       ndev->name);
		return NULL;
	}

	if (is_vlan_dev(ndev))
		return vlan_dev_real_dev(ndev);

	return ndev;
}

static struct net_device *cxgbit_ipv4_netdev(__be32 saddr)
{
	struct net_device *ndev;

	ndev = __ip_dev_find(&init_net, saddr, false);
	if (!ndev)
		return NULL;

	return cxgbit_get_real_dev(ndev);
}

static struct net_device *cxgbit_ipv6_netdev(struct in6_addr *addr6)
{
	struct net_device *ndev = NULL;
	bool found = false;

	if (IS_ENABLED(CONFIG_IPV6)) {
		for_each_netdev_rcu(&init_net, ndev)
			if (ipv6_chk_addr(&init_net, addr6, ndev, 1)) {
				found = true;
				break;
			}
	}
	if (!found)
		return NULL;

	return cxgbit_get_real_dev(ndev);
}

static struct cxgbit_device *cxgbit_find_np_cdev(struct cxgbit_np *cnp)
{
	struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
	int ss_family = sockaddr->ss_family;
	struct net_device *ndev = NULL;
	struct cxgbit_device *cdev = NULL;

	rcu_read_lock();
	if (ss_family == AF_INET) {
		struct sockaddr_in *sin;

		sin = (struct sockaddr_in *)sockaddr;
		ndev = cxgbit_ipv4_netdev(sin->sin_addr.s_addr);
	} else if (ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)sockaddr;
		ndev = cxgbit_ipv6_netdev(&sin6->sin6_addr);
	}
	if (!ndev)
		goto out;

	cdev = cxgbit_find_device(ndev, NULL);
out:
	rcu_read_unlock();
	return cdev;
}

static bool cxgbit_inaddr_any(struct cxgbit_np *cnp)
{
	struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
	int ss_family = sockaddr->ss_family;
	int addr_type;

	if (ss_family == AF_INET) {
		struct sockaddr_in *sin;

		sin = (struct sockaddr_in *)sockaddr;
		if (sin->sin_addr.s_addr == htonl(INADDR_ANY))
			return true;
	} else if (ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)sockaddr;
		addr_type = ipv6_addr_type((const struct in6_addr *)
					   &sin6->sin6_addr);
		if (addr_type == IPV6_ADDR_ANY)
			return true;
	}
	return false;
}

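/*
 * Bind the listening endpoint on one adapter: allocate an stid, record
 * it in the np hash and create the hardware server. On failure the
 * stid is freed only when the error is not -ETIMEDOUT; after a
 * firmware timeout the stid may still be in use by hardware, so it is
 * left allocated rather than reused.
 */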
static int
__cxgbit_setup_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid, ret;
	int ss_family = cnp->com.local_addr.ss_family;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags))
		return -EINVAL;

	stid = cxgb4_alloc_stid(cdev->lldi.tids, ss_family, cnp);
	if (stid < 0)
		return -EINVAL;

	if (!cxgbit_np_hash_add(cdev, cnp, stid)) {
		cxgb4_free_stid(cdev->lldi.tids, stid, ss_family);
		return -EINVAL;
	}

	if (ss_family == AF_INET)
		ret = cxgbit_create_server4(cdev, stid, cnp);
	else
		ret = cxgbit_create_server6(cdev, stid, cnp);

	if (ret) {
		if (ret != -ETIMEDOUT)
			cxgb4_free_stid(cdev->lldi.tids, stid,
					ss_family);
		cxgbit_np_hash_del(cdev, cnp);
		return ret;
	}
	return ret;
}

static int cxgbit_setup_cdev_np(struct cxgbit_np *cnp)
{
	struct cxgbit_device *cdev;
	int ret = -1;

	mutex_lock(&cdev_list_lock);
	cdev = cxgbit_find_np_cdev(cnp);
	if (!cdev)
		goto out;

	if (cxgbit_np_hash_find(cdev, cnp) >= 0)
		goto out;

	if (__cxgbit_setup_cdev_np(cdev, cnp))
		goto out;

	cnp->com.cdev = cdev;
	ret = 0;
out:
	mutex_unlock(&cdev_list_lock);
	return ret;
}

static int cxgbit_setup_all_np(struct cxgbit_np *cnp)
{
	struct cxgbit_device *cdev;
	int ret;
	u32 count = 0;

	mutex_lock(&cdev_list_lock);
	list_for_each_entry(cdev, &cdev_list_head, list) {
		if (cxgbit_np_hash_find(cdev, cnp) >= 0) {
			mutex_unlock(&cdev_list_lock);
			return -1;
		}
	}

	list_for_each_entry(cdev, &cdev_list_head, list) {
		ret = __cxgbit_setup_cdev_np(cdev, cnp);
		if (ret == -ETIMEDOUT)
			break;
		if (ret != 0)
			continue;
		count++;
	}
	mutex_unlock(&cdev_list_lock);

	return count ? 0 : -1;
}

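/*
 * Top-level listen entry point called by the iSCSI target core. A
 * wildcard address (INADDR_ANY/IPV6_ADDR_ANY) is bound on every
 * registered adapter; a specific address only on the adapter that owns
 * the matching net_device.
 */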
int cxgbit_setup_np(struct iscsi_np *np, struct sockaddr_storage *ksockaddr)
{
	struct cxgbit_np *cnp;
	int ret;

	if ((ksockaddr->ss_family != AF_INET) &&
	    (ksockaddr->ss_family != AF_INET6))
		return -EINVAL;

	cnp = kzalloc(sizeof(*cnp), GFP_KERNEL);
	if (!cnp)
		return -ENOMEM;

	init_waitqueue_head(&cnp->accept_wait);
	init_completion(&cnp->com.wr_wait.completion);
	init_completion(&cnp->accept_comp);
	INIT_LIST_HEAD(&cnp->np_accept_list);
	spin_lock_init(&cnp->np_accept_lock);
	kref_init(&cnp->kref);
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct sockaddr_storage));
	memcpy(&cnp->com.local_addr, &np->np_sockaddr,
	       sizeof(cnp->com.local_addr));

	cnp->np = np;
	cnp->com.cdev = NULL;

	if (cxgbit_inaddr_any(cnp))
		ret = cxgbit_setup_all_np(cnp);
	else
		ret = cxgbit_setup_cdev_np(cnp);

	if (ret) {
		cxgbit_put_cnp(cnp);
		return -EINVAL;
	}

	np->np_context = cnp;
	cnp->com.state = CSK_STATE_LISTEN;
	return 0;
}

static void
cxgbit_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		     struct cxgbit_sock *csk)
{
	conn->login_family = np->np_sockaddr.ss_family;
	conn->login_sockaddr = csk->com.remote_addr;
	conn->local_sockaddr = csk->com.local_addr;
}

int cxgbit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct cxgbit_np *cnp = np->np_context;
	struct cxgbit_sock *csk;
	int ret = 0;

accept_wait:
	ret = wait_for_completion_interruptible(&cnp->accept_comp);
	if (ret)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		/**
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 **/
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	spin_lock_bh(&cnp->np_accept_lock);
	if (list_empty(&cnp->np_accept_list)) {
		spin_unlock_bh(&cnp->np_accept_lock);
		goto accept_wait;
	}

	csk = list_first_entry(&cnp->np_accept_list,
			       struct cxgbit_sock,
			       accept_node);

	list_del_init(&csk->accept_node);
	spin_unlock_bh(&cnp->np_accept_lock);
	conn->context = csk;
	csk->conn = conn;

	cxgbit_set_conn_info(np, conn, csk);
	return 0;
}

static int
__cxgbit_free_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid, ret;
	bool ipv6 = false;

	stid = cxgbit_np_hash_del(cdev, cnp);
	if (stid < 0)
		return -EINVAL;
	if (!test_bit(CDEV_STATE_UP, &cdev->flags))
		return -EINVAL;

	if (cnp->np->np_sockaddr.ss_family == AF_INET6)
		ipv6 = true;

	cxgbit_get_cnp(cnp);
	cxgbit_init_wr_wait(&cnp->com.wr_wait);
	ret = cxgb4_remove_server(cdev->lldi.ports[0], stid,
				  cdev->lldi.rxq_ids[0], ipv6);

	if (ret > 0)
		ret = net_xmit_errno(ret);

	if (ret) {
		cxgbit_put_cnp(cnp);
		return ret;
	}

	ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
				    0, 10, __func__);
	if (ret == -ETIMEDOUT)
		return ret;

	if (ipv6 && cnp->com.cdev) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)&cnp->com.local_addr;
		cxgb4_clip_release(cdev->lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr,
				   1);
	}

	cxgb4_free_stid(cdev->lldi.tids, stid,
			cnp->com.local_addr.ss_family);
	return 0;
}

static void cxgbit_free_all_np(struct cxgbit_np *cnp)
{
	struct cxgbit_device *cdev;
	int ret;

	mutex_lock(&cdev_list_lock);
	list_for_each_entry(cdev, &cdev_list_head, list) {
		ret = __cxgbit_free_cdev_np(cdev, cnp);
		if (ret == -ETIMEDOUT)
			break;
	}
	mutex_unlock(&cdev_list_lock);
}

static void cxgbit_free_cdev_np(struct cxgbit_np *cnp)
{
	struct cxgbit_device *cdev;
	bool found = false;

	mutex_lock(&cdev_list_lock);
	list_for_each_entry(cdev, &cdev_list_head, list) {
		if (cdev == cnp->com.cdev) {
			found = true;
			break;
		}
	}
	if (!found)
		goto out;

	__cxgbit_free_cdev_np(cdev, cnp);
out:
	mutex_unlock(&cdev_list_lock);
}

void cxgbit_free_np(struct iscsi_np *np)
{
	struct cxgbit_np *cnp = np->np_context;

	cnp->com.state = CSK_STATE_DEAD;
	if (cnp->com.cdev)
		cxgbit_free_cdev_np(cnp);
	else
		cxgbit_free_all_np(cnp);

	np->np_context = NULL;
	cxgbit_put_cnp(cnp);
}

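/*
 * Connection teardown uses two CPL paths: a graceful half-close
 * (CPL_CLOSE_CON_REQ) when the initiator is logging out, and an abort
 * (CPL_ABORT_REQ) otherwise. Both are driven from cxgbit_free_conn()
 * below based on the current connection state.
 */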
static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	u32 len = roundup(sizeof(struct cpl_close_con_req), 16);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;

	cxgb_mk_close_con_req(skb, len, csk->tid, csk->txq_idx,
			      NULL, NULL);

	cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL;
	__skb_queue_tail(&csk->txq, skb);
	cxgbit_push_tx_frames(csk);
}

static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb)
{
	struct cxgbit_sock *csk = handle;

	pr_debug("%s cxgbit_device %p\n", __func__, handle);
	kfree_skb(skb);
	cxgbit_put_csk(csk);
}

static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbit_device *cdev = handle;
	struct cpl_abort_req *req = cplhdr(skb);

	pr_debug("%s cdev %p\n", __func__, cdev);
	req->cmd = CPL_ABORT_NO_RST;
	cxgbit_ofld_send(cdev, skb);
}

static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	u32 len = roundup(sizeof(struct cpl_abort_req), 16);

	pr_debug("%s: csk %p tid %u; state %d\n",
		 __func__, csk, csk->tid, csk->com.state);

	__skb_queue_purge(&csk->txq);

	if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
		cxgbit_send_tx_flowc_wr(csk);

	skb = __skb_dequeue(&csk->skbq);
	cxgb_mk_abort_req(skb, len, csk->tid, csk->txq_idx,
			  csk->com.cdev, cxgbit_abort_arp_failure);

	return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}

void cxgbit_free_conn(struct iscsi_conn *conn)
{
	struct cxgbit_sock *csk = conn->context;
	bool release = false;

	pr_debug("%s: state %d\n",
		 __func__, csk->com.state);

	spin_lock_bh(&csk->lock);
	switch (csk->com.state) {
	case CSK_STATE_ESTABLISHED:
		if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
			csk->com.state = CSK_STATE_CLOSING;
			cxgbit_send_halfclose(csk);
		} else {
			csk->com.state = CSK_STATE_ABORTING;
			cxgbit_send_abort_req(csk);
		}
		break;
	case CSK_STATE_CLOSING:
		csk->com.state = CSK_STATE_MORIBUND;
		cxgbit_send_halfclose(csk);
		break;
	case CSK_STATE_DEAD:
		release = true;
		break;
	default:
		pr_err("%s: csk %p; state %d\n",
		       __func__, csk, csk->com.state);
	}
	spin_unlock_bh(&csk->lock);

	if (release)
		cxgbit_put_csk(csk);
}

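/*
 * Derive the effective MSS from the firmware MTU table entry selected
 * by the peer, minus IP/TCP header overhead and, when negotiated, the
 * TCP timestamp option.
 */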
static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
{
	csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
		    ((csk->com.remote_addr.ss_family == AF_INET) ?
		     sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
		    sizeof(struct tcphdr);
	csk->mss = csk->emss;
	if (TCPOPT_TSTAMP_G(opt))
		csk->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
	if (csk->emss < 128)
		csk->emss = 128;
	if (csk->emss & 7)
		pr_info("Warning: misaligned mtu idx %u mss %u emss=%u\n",
			TCPOPT_MSS_G(opt), csk->mss, csk->emss);
	pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
		 csk->mss, csk->emss);
}

static void cxgbit_free_skb(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;

	__skb_queue_purge(&csk->txq);
	__skb_queue_purge(&csk->rxq);
	__skb_queue_purge(&csk->backlogq);
	__skb_queue_purge(&csk->ppodq);
	__skb_queue_purge(&csk->skbq);

	while ((skb = cxgbit_sock_dequeue_wr(csk)))
		kfree_skb(skb);

	__kfree_skb(csk->lro_hskb);
}

void _cxgbit_free_csk(struct kref *kref)
{
	struct cxgbit_sock *csk;
	struct cxgbit_device *cdev;

	csk = container_of(kref, struct cxgbit_sock, kref);

	pr_debug("%s csk %p state %d\n", __func__, csk, csk->com.state);

	if (csk->com.local_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
					     &csk->com.local_addr;
		cxgb4_clip_release(csk->com.cdev->lldi.ports[0],
				   (const u32 *)
				   &sin6->sin6_addr.s6_addr, 1);
	}

	cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid,
			 csk->com.local_addr.ss_family);
	dst_release(csk->dst);
	cxgb4_l2t_release(csk->l2t);

	cdev = csk->com.cdev;
	spin_lock_bh(&cdev->cskq.lock);
	list_del(&csk->list);
	spin_unlock_bh(&cdev->cskq.lock);

	cxgbit_free_skb(csk);
	cxgbit_put_cdev(cdev);

	kfree(csk);
}

static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi)
{
	unsigned int linkspeed;
	u8 scale;

	linkspeed = pi->link_cfg.speed;
	scale = linkspeed / SPEED_10000;

#define CXGBIT_10G_RCV_WIN (256 * 1024)
	csk->rcv_win = CXGBIT_10G_RCV_WIN;
	if (scale)
		csk->rcv_win *= scale;

#define CXGBIT_10G_SND_WIN (256 * 1024)
	csk->snd_win = CXGBIT_10G_SND_WIN;
	if (scale)
		csk->snd_win *= scale;

	pr_debug("%s snd_win %d rcv_win %d\n",
		 __func__, csk->snd_win, csk->rcv_win);
}

#ifdef CONFIG_CHELSIO_T4_DCB
static u8 cxgbit_get_iscsi_dcb_state(struct net_device *ndev)
{
	return ndev->dcbnl_ops->getstate(ndev);
}

static int cxgbit_select_priority(int pri_mask)
{
	if (!pri_mask)
		return 0;

	return (ffs(pri_mask) - 1);
}

static u8 cxgbit_get_iscsi_dcb_priority(struct net_device *ndev, u16 local_port)
{
	int ret;
	u8 caps;

	struct dcb_app iscsi_dcb_app = {
		.protocol = local_port
	};

	ret = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);

	if (ret)
		return 0;

	if (caps & DCB_CAP_DCBX_VER_IEEE) {
		iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;

		ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);

	} else if (caps & DCB_CAP_DCBX_VER_CEE) {
		iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;

		ret = dcb_getapp(ndev, &iscsi_dcb_app);
	}

	pr_info("iSCSI priority is set to %u\n", cxgbit_select_priority(ret));

	return cxgbit_select_priority(ret);
}
#endif

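/*
 * Resolve everything the offloaded connection needs from the route:
 * the L2T (ARP) entry, egress channel, tx/rx queue indices (spread
 * across the port's queues via the per-port selectq counters) and the
 * TCP windows. Loopback destinations are mapped back to the real port
 * device first.
 */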
static int
cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip,
		    u16 local_port, struct dst_entry *dst,
		    struct cxgbit_device *cdev)
{
	struct neighbour *n;
	int ret, step;
	struct net_device *ndev;
	u16 rxq_idx, port_id;
#ifdef CONFIG_CHELSIO_T4_DCB
	u8 priority = 0;
#endif

	n = dst_neigh_lookup(dst, peer_ip);
	if (!n)
		return -ENODEV;

	rcu_read_lock();
	ret = -ENOMEM;
	if (n->dev->flags & IFF_LOOPBACK) {
		if (iptype == 4)
			ndev = cxgbit_ipv4_netdev(*(__be32 *)peer_ip);
		else if (IS_ENABLED(CONFIG_IPV6))
			ndev = cxgbit_ipv6_netdev((struct in6_addr *)peer_ip);
		else
			ndev = NULL;

		if (!ndev) {
			ret = -ENODEV;
			goto out;
		}

		csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t,
					 n, ndev, 0);
		if (!csk->l2t)
			goto out;
		csk->mtu = ndev->mtu;
		csk->tx_chan = cxgb4_port_chan(ndev);
		csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi.adapter_type,
						 cxgb4_port_viid(ndev));
		step = cdev->lldi.ntxq /
		       cdev->lldi.nchan;
		csk->txq_idx = cxgb4_port_idx(ndev) * step;
		step = cdev->lldi.nrxq /
		       cdev->lldi.nchan;
		csk->ctrlq_idx = cxgb4_port_idx(ndev);
		csk->rss_qid = cdev->lldi.rxq_ids[
				cxgb4_port_idx(ndev) * step];
		csk->port_id = cxgb4_port_idx(ndev);
		cxgbit_set_tcp_window(csk,
				      (struct port_info *)netdev_priv(ndev));
	} else {
		ndev = cxgbit_get_real_dev(n->dev);
		if (!ndev) {
			ret = -ENODEV;
			goto out;
		}

#ifdef CONFIG_CHELSIO_T4_DCB
		if (cxgbit_get_iscsi_dcb_state(ndev))
			priority = cxgbit_get_iscsi_dcb_priority(ndev,
								 local_port);

		csk->dcb_priority = priority;

		csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority);
#else
		csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0);
#endif
		if (!csk->l2t)
			goto out;
		port_id = cxgb4_port_idx(ndev);
		csk->mtu = dst_mtu(dst);
		csk->tx_chan = cxgb4_port_chan(ndev);
		csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi.adapter_type,
						 cxgb4_port_viid(ndev));
		step = cdev->lldi.ntxq /
		       cdev->lldi.nports;
		csk->txq_idx = (port_id * step) +
			       (cdev->selectq[port_id][0]++ % step);
		csk->ctrlq_idx = cxgb4_port_idx(ndev);
		step = cdev->lldi.nrxq /
		       cdev->lldi.nports;
		rxq_idx = (port_id * step) +
			  (cdev->selectq[port_id][1]++ % step);
		csk->rss_qid = cdev->lldi.rxq_ids[rxq_idx];
		csk->port_id = port_id;
		cxgbit_set_tcp_window(csk,
				      (struct port_info *)netdev_priv(ndev));
	}
	ret = 0;
out:
	rcu_read_unlock();
	neigh_release(n);
	return ret;
}

int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	int ret = 0;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
		kfree_skb(skb);
		pr_err("%s - device not up - dropping\n", __func__);
		return -EIO;
	}

	ret = cxgb4_ofld_send(cdev->lldi.ports[0], skb);
	if (ret < 0)
		kfree_skb(skb);
	return ret < 0 ? ret : 0;
}

static void cxgbit_release_tid(struct cxgbit_device *cdev, u32 tid)
{
	u32 len = roundup(sizeof(struct cpl_tid_release), 16);
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;

	cxgb_mk_tid_release(skb, len, tid, 0);
	cxgbit_ofld_send(cdev, skb);
}

int
cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb,
		struct l2t_entry *l2e)
{
	int ret = 0;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
		kfree_skb(skb);
		pr_err("%s - device not up - dropping\n", __func__);
		return -EIO;
	}

	ret = cxgb4_l2t_send(cdev->lldi.ports[0], skb, l2e);
	if (ret < 0)
		kfree_skb(skb);
	return ret < 0 ? ret : 0;
}

static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	if (csk->com.state != CSK_STATE_ESTABLISHED) {
		__kfree_skb(skb);
		return;
	}

	cxgbit_ofld_send(csk->com.cdev, skb);
}

/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message.
 * Returns the number of credits sent.
 */
int cxgbit_rx_data_ack(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	u32 len = roundup(sizeof(struct cpl_rx_data_ack), 16);
	u32 credit_dack;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -1;

	credit_dack = RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) |
		      RX_CREDITS_V(csk->rx_credits);

	cxgb_mk_rx_data_ack(skb, len, csk->tid, csk->ctrlq_idx,
			    credit_dack);

	csk->rx_credits = 0;

	spin_lock_bh(&csk->lock);
	if (csk->lock_owner) {
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_send_rx_credits;
		__skb_queue_tail(&csk->backlogq, skb);
		spin_unlock_bh(&csk->lock);
		return 0;
	}

	cxgbit_send_rx_credits(csk, skb);
	spin_unlock_bh(&csk->lock);

	return 0;
}

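/*
 * Reserve skbs for messages that must never fail allocation in the
 * tx/teardown paths (abort req/rpl and the flowc WR); they are sized
 * for the largest of those and parked on csk->skbq at connection
 * setup.
 */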
#define FLOWC_WR_NPARAMS_MIN 9
#define FLOWC_WR_NPARAMS_MAX 11
static int cxgbit_alloc_csk_skb(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	u32 len, flowclen;
	u8 i;

	flowclen = offsetof(struct fw_flowc_wr,
			    mnemval[FLOWC_WR_NPARAMS_MAX]);

	len = max_t(u32, sizeof(struct cpl_abort_req),
		    sizeof(struct cpl_abort_rpl));

	len = max(len, flowclen);
	len = roundup(len, 16);

	for (i = 0; i < 3; i++) {
		skb = alloc_skb(len, GFP_ATOMIC);
		if (!skb)
			goto out;
		__skb_queue_tail(&csk->skbq, skb);
	}

	skb = alloc_skb(LRO_SKB_MIN_HEADROOM, GFP_ATOMIC);
	if (!skb)
		goto out;

	memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
	csk->lro_hskb = skb;

	return 0;
out:
	__skb_queue_purge(&csk->skbq);
	return -ENOMEM;
}

static void
cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
{
	struct sk_buff *skb;
	const struct tcphdr *tcph;
	struct cpl_t5_pass_accept_rpl *rpl5;
	struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
	unsigned int len = roundup(sizeof(*rpl5), 16);
	unsigned int mtu_idx;
	u64 opt0;
	u32 opt2, hlen;
	u32 wscale;
	u32 win;

	pr_debug("%s csk %p tid %u\n", __func__, csk, csk->tid);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb) {
		cxgbit_put_csk(csk);
		return;
	}

	rpl5 = __skb_put_zero(skb, len);

	INIT_TP_WR(rpl5, csk->tid);
	OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						     csk->tid));
	cxgb_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
		      req->tcpopt.tstamp,
		      (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
	wscale = cxgb_compute_wscale(csk->rcv_win);
	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = csk->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;
	opt0 = TCAM_BYPASS_F |
	       WND_SCALE_V(wscale) |
	       MSS_IDX_V(mtu_idx) |
	       L2T_IDX_V(csk->l2t->idx) |
	       TX_CHAN_V(csk->tx_chan) |
	       SMAC_SEL_V(csk->smac_idx) |
	       DSCP_V(csk->tos >> 2) |
	       ULP_MODE_V(ULP_MODE_ISCSI) |
	       RCV_BUFSIZ_V(win);

	opt2 = RX_CHANNEL_V(0) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);

	if (!is_t5(lldi->adapter_type))
		opt2 |= RX_FC_DISABLE_F;

	if (req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN_F;
	if (req->tcpopt.sack)
		opt2 |= SACK_EN_F;
	if (wscale)
		opt2 |= WND_SCALE_EN_F;

	hlen = ntohl(req->hdr_len);

	if (is_t5(lldi->adapter_type))
		tcph = (struct tcphdr *)((u8 *)(req + 1) +
		       ETH_HDR_LEN_G(hlen) + IP_HDR_LEN_G(hlen));
	else
		tcph = (struct tcphdr *)((u8 *)(req + 1) +
		       T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen));

	if (tcph->ece && tcph->cwr)
		opt2 |= CCTRL_ECN_V(1);

	opt2 |= RX_COALESCE_V(3);
	opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);

	opt2 |= T5_ISS_F;
	rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);

	opt2 |= T5_OPT_2_VALID_F;

	rpl5->opt0 = cpu_to_be64(opt0);
	rpl5->opt2 = cpu_to_be32(opt2);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx);
	t4_set_arp_err_handler(skb, csk, cxgbit_arp_failure_discard);
	cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}

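/*
 * Handle CPL_PASS_ACCEPT_REQ: validate the listening parent, resolve a
 * route back to the peer, allocate and initialize a cxgbit_sock,
 * insert the hardware tid and answer with CPL_PASS_ACCEPT_RPL.
 */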
static void
cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cxgbit_sock *csk = NULL;
	struct cxgbit_np *cnp;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
	struct tid_info *t = cdev->lldi.tids;
	unsigned int tid = GET_TID(req);
	u16 peer_mss = ntohs(req->tcpopt.mss);
	unsigned short hdrs;

	struct dst_entry *dst;
	__u8 local_ip[16], peer_ip[16];
	__be16 local_port, peer_port;
	int ret;
	int iptype;

	pr_debug("%s: cdev = %p; stid = %u; tid = %u\n",
		 __func__, cdev, stid, tid);

	cnp = lookup_stid(t, stid);
	if (!cnp) {
		pr_err("%s connect request on invalid stid %d\n",
		       __func__, stid);
		goto rel_skb;
	}

	if (cnp->com.state != CSK_STATE_LISTEN) {
		pr_err("%s - listening parent not in CSK_STATE_LISTEN\n",
		       __func__);
		goto reject;
	}

	csk = lookup_tid(t, tid);
	if (csk) {
		pr_err("%s csk not null tid %u\n",
		       __func__, tid);
		goto rel_skb;
	}

	cxgb_get_4tuple(req, cdev->lldi.adapter_type, &iptype, local_ip,
			peer_ip, &local_port, &peer_port);

	/* Find output route */
	if (iptype == 4) {
		pr_debug("%s parent sock %p tid %u laddr %pI4 raddr %pI4 "
			 "lport %d rport %d peer_mss %d\n"
			 , __func__, cnp, tid,
			 local_ip, peer_ip, ntohs(local_port),
			 ntohs(peer_port), peer_mss);
		dst = cxgb_find_route(&cdev->lldi, cxgbit_get_real_dev,
				      *(__be32 *)local_ip,
				      *(__be32 *)peer_ip,
				      local_port, peer_port,
				      PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
	} else {
		pr_debug("%s parent sock %p tid %u laddr %pI6 raddr %pI6 "
			 "lport %d rport %d peer_mss %d\n"
			 , __func__, cnp, tid,
			 local_ip, peer_ip, ntohs(local_port),
			 ntohs(peer_port), peer_mss);
		dst = cxgb_find_route6(&cdev->lldi, cxgbit_get_real_dev,
				       local_ip, peer_ip,
				       local_port, peer_port,
				       PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
				       ((struct sockaddr_in6 *)
					&cnp->com.local_addr)->sin6_scope_id);
	}
	if (!dst) {
		pr_err("%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}

	csk = kzalloc(sizeof(*csk), GFP_ATOMIC);
	if (!csk) {
		dst_release(dst);
		goto rel_skb;
	}

	ret = cxgbit_offload_init(csk, iptype, peer_ip, ntohs(local_port),
				  dst, cdev);
	if (ret) {
		pr_err("%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		kfree(csk);
		goto reject;
	}

	kref_init(&csk->kref);
	init_completion(&csk->com.wr_wait.completion);

	INIT_LIST_HEAD(&csk->accept_node);

	hdrs = (iptype == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
		sizeof(struct tcphdr) + (req->tcpopt.tstamp ? 12 : 0);
	if (peer_mss && csk->mtu > (peer_mss + hdrs))
		csk->mtu = peer_mss + hdrs;

	csk->com.state = CSK_STATE_CONNECTING;
	csk->com.cdev = cdev;
	csk->cnp = cnp;
	csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
	csk->dst = dst;
	csk->tid = tid;
	csk->wr_cred = cdev->lldi.wr_cred -
		       DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
	csk->wr_max_cred = csk->wr_cred;
	csk->wr_una_cred = 0;

	if (iptype == 4) {
		struct sockaddr_in *sin = (struct sockaddr_in *)
					  &csk->com.local_addr;
		sin->sin_family = AF_INET;
		sin->sin_port = local_port;
		sin->sin_addr.s_addr = *(__be32 *)local_ip;

		sin = (struct sockaddr_in *)&csk->com.remote_addr;
		sin->sin_family = AF_INET;
		sin->sin_port = peer_port;
		sin->sin_addr.s_addr = *(__be32 *)peer_ip;
	} else {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
					    &csk->com.local_addr;

		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = local_port;
		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
		cxgb4_clip_get(cdev->lldi.ports[0],
			       (const u32 *)&sin6->sin6_addr.s6_addr,
			       1);

		sin6 = (struct sockaddr_in6 *)&csk->com.remote_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = peer_port;
		memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
	}

	skb_queue_head_init(&csk->rxq);
	skb_queue_head_init(&csk->txq);
	skb_queue_head_init(&csk->ppodq);
	skb_queue_head_init(&csk->backlogq);
	skb_queue_head_init(&csk->skbq);
	cxgbit_sock_reset_wr_list(csk);
	spin_lock_init(&csk->lock);
	init_waitqueue_head(&csk->waitq);
	init_waitqueue_head(&csk->ack_waitq);
	csk->lock_owner = false;

	if (cxgbit_alloc_csk_skb(csk)) {
		dst_release(dst);
		kfree(csk);
		goto rel_skb;
	}

	cxgbit_get_cdev(cdev);

	spin_lock(&cdev->cskq.lock);
	list_add_tail(&csk->list, &cdev->cskq.list);
	spin_unlock(&cdev->cskq.lock);
	cxgb4_insert_tid(t, csk, tid, csk->com.local_addr.ss_family);
	cxgbit_pass_accept_rpl(csk, req);
	goto rel_skb;

reject:
	cxgbit_release_tid(cdev, tid);
rel_skb:
	__kfree_skb(skb);
}

static u32
cxgbit_tx_flowc_wr_credits(struct cxgbit_sock *csk, u32 *nparamsp,
			   u32 *flowclenp)
{
	u32 nparams, flowclen16, flowclen;

	nparams = FLOWC_WR_NPARAMS_MIN;

	if (csk->snd_wscale)
		nparams++;

#ifdef CONFIG_CHELSIO_T4_DCB
	nparams++;
#endif
	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
	flowclen16 = DIV_ROUND_UP(flowclen, 16);
	flowclen = flowclen16 * 16;
	/*
	 * Return the number of 16-byte credits used by the flowc request.
	 * Pass back the nparams and actual flowc length if requested.
	 */
	if (nparamsp)
		*nparamsp = nparams;
	if (flowclenp)
		*flowclenp = flowclen;
	return flowclen16;
}

u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *csk)
{
	struct cxgbit_device *cdev = csk->com.cdev;
	struct fw_flowc_wr *flowc;
	u32 nparams, flowclen16, flowclen;
	struct sk_buff *skb;
	u8 index;

#ifdef CONFIG_CHELSIO_T4_DCB
	u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
#endif

	flowclen16 = cxgbit_tx_flowc_wr_credits(csk, &nparams, &flowclen);

	skb = __skb_dequeue(&csk->skbq);
	flowc = __skb_put_zero(skb, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
					  FW_WR_FLOWID_V(csk->tid));
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
					    (csk->com.cdev->lldi.pf));
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(csk->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(csk->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(csk->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(csk->snd_nxt);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(csk->rcv_nxt);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(csk->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(csk->emss);

	flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
	if (test_bit(CDEV_ISO_ENABLE, &cdev->flags))
		flowc->mnemval[8].val = cpu_to_be32(CXGBIT_MAX_ISO_PAYLOAD);
	else
		flowc->mnemval[8].val = cpu_to_be32(16384);

	index = 9;

	if (csk->snd_wscale) {
		flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_RCV_SCALE;
		flowc->mnemval[index].val = cpu_to_be32(csk->snd_wscale);
		index++;
	}

#ifdef CONFIG_CHELSIO_T4_DCB
	flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
	if (vlan == VLAN_NONE) {
		pr_warn("csk %u without VLAN Tag on DCB Link\n", csk->tid);
		flowc->mnemval[index].val = cpu_to_be32(0);
	} else {
		flowc->mnemval[index].val = cpu_to_be32(
				(vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT);
	}
#endif

	pr_debug("%s: csk %p; tx_chan = %u; rss_qid = %u; snd_seq = %u;"
		 " rcv_seq = %u; snd_win = %u; emss = %u\n",
		 __func__, csk, csk->tx_chan, csk->rss_qid, csk->snd_nxt,
		 csk->rcv_nxt, csk->snd_win, csk->emss);
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
	cxgbit_ofld_send(csk->com.cdev, skb);
	return flowclen16;
}

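/*
 * Program iSCSI header/data digest offload by rewriting the ULP CRC
 * bits in the connection's TCB via CPL_SET_TCB_FIELD (mask 0x3 << 4),
 * then wait for the CPL_SET_TCB_RPL acknowledgment.
 */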
int cxgbit_setup_conn_digest(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;
	u8 hcrc = csk->submode & CXGBIT_SUBMODE_HCRC;
	u8 dcrc = csk->submode & CXGBIT_SUBMODE_DCRC;
	unsigned int len = roundup(sizeof(*req), 16);
	int ret;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* set up ulp submode */
	req = __skb_put_zero(skb, len);

	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 4);
	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
				(dcrc ? ULP_CRC_DATA : 0)) << 4);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);

	cxgbit_get_csk(csk);
	cxgbit_init_wr_wait(&csk->com.wr_wait);

	cxgbit_ofld_send(csk->com.cdev, skb);

	ret = cxgbit_wait_for_reply(csk->com.cdev,
				    &csk->com.wr_wait,
				    csk->tid, 5, __func__);
	if (ret)
		return -1;

	return 0;
}

int cxgbit_setup_conn_pgidx(struct cxgbit_sock *csk, u32 pg_idx)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;
	unsigned int len = roundup(sizeof(*req), 16);
	int ret;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = __skb_put_zero(skb, len);

	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 8);
	req->val = cpu_to_be64(pg_idx << 8);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);

	cxgbit_get_csk(csk);
	cxgbit_init_wr_wait(&csk->com.wr_wait);

	cxgbit_ofld_send(csk->com.cdev, skb);

	ret = cxgbit_wait_for_reply(csk->com.cdev,
				    &csk->com.wr_wait,
				    csk->tid, 5, __func__);
	if (ret)
		return -1;

	return 0;
}

static void
cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
	struct tid_info *t = cdev->lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct cxgbit_np *cnp = lookup_stid(t, stid);

	pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
		 __func__, cnp, stid, rpl->status);

	if (!cnp) {
		pr_info("%s stid %d lookup failure\n", __func__, stid);
		goto rel_skb;
	}

	cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
	cxgbit_put_cnp(cnp);
rel_skb:
	__kfree_skb(skb);
}

static void
cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
	struct tid_info *t = cdev->lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct cxgbit_np *cnp = lookup_stid(t, stid);

	pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
		 __func__, cnp, stid, rpl->status);

	if (!cnp) {
		pr_info("%s stid %d lookup failure\n", __func__, stid);
		goto rel_skb;
	}

	cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
	cxgbit_put_cnp(cnp);
rel_skb:
	__kfree_skb(skb);
}

static void
cxgbit_pass_establish(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cpl_pass_establish *req = cplhdr(skb);
	struct tid_info *t = cdev->lldi.tids;
	unsigned int tid = GET_TID(req);
	struct cxgbit_sock *csk;
	struct cxgbit_np *cnp;
	u16 tcp_opt = be16_to_cpu(req->tcp_opt);
	u32 snd_isn = be32_to_cpu(req->snd_isn);
	u32 rcv_isn = be32_to_cpu(req->rcv_isn);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	cnp = csk->cnp;

	pr_debug("%s: csk %p; tid %u; cnp %p\n",
		 __func__, csk, tid, cnp);

	csk->write_seq = snd_isn;
	csk->snd_una = snd_isn;
	csk->snd_nxt = snd_isn;

	csk->rcv_nxt = rcv_isn;

	if (csk->rcv_win > (RCV_BUFSIZ_M << 10))
		csk->rx_credits = (csk->rcv_win - (RCV_BUFSIZ_M << 10));

	csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
	cxgbit_set_emss(csk, tcp_opt);
	dst_confirm(csk->dst);
	csk->com.state = CSK_STATE_ESTABLISHED;
	spin_lock_bh(&cnp->np_accept_lock);
	list_add_tail(&csk->accept_node, &cnp->np_accept_list);
	spin_unlock_bh(&cnp->np_accept_lock);
	complete(&cnp->accept_comp);
rel_skb:
	__kfree_skb(skb);
}

static void cxgbit_queue_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	cxgbit_skcb_flags(skb) = 0;
	spin_lock_bh(&csk->rxq.lock);
	__skb_queue_tail(&csk->rxq, skb);
	spin_unlock_bh(&csk->rxq.lock);
	wake_up(&csk->waitq);
}

static void cxgbit_peer_close(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	pr_debug("%s: csk %p; tid %u; state %d\n",
		 __func__, csk, csk->tid, csk->com.state);

	switch (csk->com.state) {
	case CSK_STATE_ESTABLISHED:
		csk->com.state = CSK_STATE_CLOSING;
		cxgbit_queue_rx_skb(csk, skb);
		return;
	case CSK_STATE_CLOSING:
		/* simultaneous close */
		csk->com.state = CSK_STATE_MORIBUND;
		break;
	case CSK_STATE_MORIBUND:
		csk->com.state = CSK_STATE_DEAD;
		cxgbit_put_csk(csk);
		break;
	case CSK_STATE_ABORTING:
		break;
	default:
		pr_info("%s: cpl_peer_close in bad state %d\n",
			__func__, csk->com.state);
	}

	__kfree_skb(skb);
}

static void cxgbit_close_con_rpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	pr_debug("%s: csk %p; tid %u; state %d\n",
		 __func__, csk, csk->tid, csk->com.state);

	switch (csk->com.state) {
	case CSK_STATE_CLOSING:
		csk->com.state = CSK_STATE_MORIBUND;
		break;
	case CSK_STATE_MORIBUND:
		csk->com.state = CSK_STATE_DEAD;
		cxgbit_put_csk(csk);
		break;
	case CSK_STATE_ABORTING:
	case CSK_STATE_DEAD:
		break;
	default:
		pr_info("%s: cpl_close_con_rpl in bad state %d\n",
			__func__, csk->com.state);
	}

	__kfree_skb(skb);
}

static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *hdr = cplhdr(skb);
	unsigned int tid = GET_TID(hdr);
	struct sk_buff *rpl_skb;
	bool release = false;
	bool wakeup_thread = false;
	u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);

	pr_debug("%s: csk %p; tid %u; state %d\n",
		 __func__, csk, tid, csk->com.state);

	if (cxgb_is_neg_adv(hdr->status)) {
		pr_err("%s: got neg advise %d on tid %u\n",
		       __func__, hdr->status, tid);
		goto rel_skb;
	}

	switch (csk->com.state) {
	case CSK_STATE_CONNECTING:
	case CSK_STATE_MORIBUND:
		csk->com.state = CSK_STATE_DEAD;
		release = true;
		break;
	case CSK_STATE_ESTABLISHED:
		csk->com.state = CSK_STATE_DEAD;
		wakeup_thread = true;
		break;
	case CSK_STATE_CLOSING:
		csk->com.state = CSK_STATE_DEAD;
		if (!csk->conn)
			release = true;
		break;
	case CSK_STATE_ABORTING:
		break;
	default:
		pr_info("%s: cpl_abort_req_rss in bad state %d\n",
			__func__, csk->com.state);
		csk->com.state = CSK_STATE_DEAD;
	}

	__skb_queue_purge(&csk->txq);

	if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
		cxgbit_send_tx_flowc_wr(csk);

	rpl_skb = __skb_dequeue(&csk->skbq);

	cxgb_mk_abort_rpl(rpl_skb, len, csk->tid, csk->txq_idx);
	cxgbit_ofld_send(csk->com.cdev, rpl_skb);

	if (wakeup_thread) {
		cxgbit_queue_rx_skb(csk, skb);
		return;
	}

	if (release)
		cxgbit_put_csk(csk);
rel_skb:
	__kfree_skb(skb);
}

static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	pr_debug("%s: csk %p; tid %u; state %d\n",
		 __func__, csk, csk->tid, csk->com.state);

	switch (csk->com.state) {
	case CSK_STATE_ABORTING:
		csk->com.state = CSK_STATE_DEAD;
		cxgbit_put_csk(csk);
		break;
	default:
		pr_info("%s: cpl_abort_rpl_rss in state %d\n",
			__func__, csk->com.state);
	}

	__kfree_skb(skb);
}

static bool cxgbit_credit_err(const struct cxgbit_sock *csk)
{
	const struct sk_buff *skb = csk->wr_pending_head;
	u32 credit = 0;

	if (unlikely(csk->wr_cred > csk->wr_max_cred)) {
		pr_err("csk 0x%p, tid %u, credit %u > %u\n",
		       csk, csk->tid, csk->wr_cred, csk->wr_max_cred);
		return true;
	}

	while (skb) {
		credit += (__force u32)skb->csum;
		skb = cxgbit_skcb_tx_wr_next(skb);
	}

	if (unlikely((csk->wr_cred + credit) != csk->wr_max_cred)) {
		pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
		       csk, csk->tid, csk->wr_cred,
		       credit, csk->wr_max_cred);

		return true;
	}

	return false;
}

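/*
 * CPL_FW4_ACK returns tx credits. The 16-byte credit count of each
 * pending WR was stashed in skb->csum when it was queued, so fully
 * acked WRs are popped from the pending list and a partially acked
 * head has its remaining credit count written back.
 */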
static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)cplhdr(skb);
	u32 credits = rpl->credits;
	u32 snd_una = ntohl(rpl->snd_una);

	csk->wr_cred += credits;
	if (csk->wr_una_cred > (csk->wr_max_cred - csk->wr_cred))
		csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;

	while (credits) {
		struct sk_buff *p = cxgbit_sock_peek_wr(csk);
		u32 csum;

		if (unlikely(!p)) {
			pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n",
			       csk, csk->tid, credits,
			       csk->wr_cred, csk->wr_una_cred);
			break;
		}

		csum = (__force u32)p->csum;
		if (unlikely(credits < csum)) {
			pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n",
				csk, csk->tid,
				credits, csk->wr_cred, csk->wr_una_cred,
				csum);
			p->csum = (__force __wsum)(csum - credits);
			break;
		}

		cxgbit_sock_dequeue_wr(csk);
		credits -= csum;
		kfree_skb(p);
	}

	if (unlikely(cxgbit_credit_err(csk))) {
		cxgbit_queue_rx_skb(csk, skb);
		return;
	}

	if (rpl->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) {
		if (unlikely(before(snd_una, csk->snd_una))) {
			pr_warn("csk 0x%p,%u, snd_una %u/%u.",
				csk, csk->tid, snd_una,
				csk->snd_una);
			goto rel_skb;
		}

		if (csk->snd_una != snd_una) {
			csk->snd_una = snd_una;
			dst_confirm(csk->dst);
			wake_up(&csk->ack_waitq);
		}
	}

	if (skb_queue_len(&csk->txq))
		cxgbit_push_tx_frames(csk);

rel_skb:
	__kfree_skb(skb);
}

static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cxgbit_sock *csk;
	struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
	cxgbit_put_csk(csk);
rel_skb:
	__kfree_skb(skb);
}

static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cxgbit_sock *csk;
	struct cpl_rx_data *cpl = cplhdr(skb);
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	cxgbit_queue_rx_skb(csk, skb);
	return;
rel_skb:
	__kfree_skb(skb);
}

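/*
 * CPLs must run under the sock lock. If the rx thread currently owns
 * the sock (lock_owner), the CPL is deferred to the backlog queue with
 * its handler saved in the skb; the owner replays the backlog when it
 * drops the lock.
 */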
static void
__cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	spin_lock(&csk->lock);
	if (csk->lock_owner) {
		__skb_queue_tail(&csk->backlogq, skb);
		spin_unlock(&csk->lock);
		return;
	}

	cxgbit_skcb_rx_backlog_fn(skb)(csk, skb);
	spin_unlock(&csk->lock);
}

static void cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	cxgbit_get_csk(csk);
	__cxgbit_process_rx_cpl(csk, skb);
	cxgbit_put_csk(csk);
}

static void cxgbit_rx_cpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cxgbit_sock *csk;
	struct cpl_tx_data *cpl = cplhdr(skb);
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	struct tid_info *t = lldi->tids;
	unsigned int tid = GET_TID(cpl);
	u8 opcode = cxgbit_skcb_rx_opcode(skb);
	bool ref = true;

	switch (opcode) {
	case CPL_FW4_ACK:
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_fw4_ack;
		ref = false;
		break;
	case CPL_PEER_CLOSE:
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_peer_close;
		break;
	case CPL_CLOSE_CON_RPL:
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_close_con_rpl;
		break;
	case CPL_ABORT_REQ_RSS:
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_req_rss;
		break;
	case CPL_ABORT_RPL_RSS:
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_rpl_rss;
		break;
	default:
		goto rel_skb;
	}

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	if (ref)
		cxgbit_process_rx_cpl(csk, skb);
	else
		__cxgbit_process_rx_cpl(csk, skb);

	return;
rel_skb:
	__kfree_skb(skb);
}

cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS] = {
	[CPL_PASS_OPEN_RPL] = cxgbit_pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = cxgbit_close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ] = cxgbit_pass_accept_req,
	[CPL_PASS_ESTABLISH] = cxgbit_pass_establish,
	[CPL_SET_TCB_RPL] = cxgbit_set_tcb_rpl,
	[CPL_RX_DATA] = cxgbit_rx_data,
	[CPL_FW4_ACK] = cxgbit_rx_cpl,
	[CPL_PEER_CLOSE] = cxgbit_rx_cpl,
	[CPL_CLOSE_CON_RPL] = cxgbit_rx_cpl,
	[CPL_ABORT_REQ_RSS] = cxgbit_rx_cpl,
	[CPL_ABORT_RPL_RSS] = cxgbit_rx_cpl,
};