/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sched/signal.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/if_vlan.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/dst.h>

#include "chtls.h"
#include "chtls_cm.h"
/*
 * State transitions and actions for close. Note that if we are in SYN_SENT
 * we remain in that state as we cannot control a connection while it's in
 * SYN_SENT; such connections are allowed to establish and are then aborted.
 */
static unsigned char new_state[16] = {
	/* current state:     new state:       action: */
	/* (Invalid)       */ TCP_CLOSE,
	/* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
	/* TCP_SYN_SENT    */ TCP_SYN_SENT,
	/* TCP_SYN_RECV    */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
	/* TCP_FIN_WAIT1   */ TCP_FIN_WAIT1,
	/* TCP_FIN_WAIT2   */ TCP_FIN_WAIT2,
	/* TCP_TIME_WAIT   */ TCP_CLOSE,
	/* TCP_CLOSE       */ TCP_CLOSE,
	/* TCP_CLOSE_WAIT  */ TCP_LAST_ACK | TCP_ACTION_FIN,
	/* TCP_LAST_ACK    */ TCP_LAST_ACK,
	/* TCP_LISTEN      */ TCP_CLOSE,
	/* TCP_CLOSING     */ TCP_CLOSING,
};
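/*
 * Allocate and initialize the per-connection offload state. The csk
 * tracks the TX queue, the list of pending work-request skbs and the
 * TLS hardware state; key slots start out unprogrammed at -1.
 */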
static struct chtls_sock *chtls_sock_create(struct chtls_dev *cdev)
{
	struct chtls_sock *csk = kzalloc(sizeof(*csk), GFP_ATOMIC);

	if (!csk)
		return NULL;

	csk->txdata_skb_cache = alloc_skb(TXDATA_SKB_LEN, GFP_ATOMIC);
	if (!csk->txdata_skb_cache) {
		kfree(csk);
		return NULL;
	}

	kref_init(&csk->kref);
	csk->cdev = cdev;
	skb_queue_head_init(&csk->txq);
	csk->wr_skb_head = NULL;
	csk->wr_skb_tail = NULL;
	csk->tlshws.txkey = -1;
	csk->tlshws.rxkey = -1;
	csk->tlshws.mfs = TLS_MFS;
	skb_queue_head_init(&csk->tlshws.sk_recv_queue);
	return csk;
}
static void chtls_sock_release(struct kref *ref)
{
	struct chtls_sock *csk =
		container_of(ref, struct chtls_sock, kref);

	kfree(csk);
}
static struct net_device *chtls_ipv4_netdev(struct chtls_dev *cdev,
					    struct sock *sk)
{
	struct net_device *ndev = cdev->ports[0];

	if (likely(!inet_sk(sk)->inet_rcv_saddr))
		return ndev;

	ndev = ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr);
	if (!ndev)
		return NULL;

	if (is_vlan_dev(ndev))
		return vlan_dev_real_dev(ndev);
	return ndev;
}
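/*
 * Record the TCP options the hardware negotiated for an offloaded
 * connection, as encoded in the CPL option word: MSS-table index,
 * timestamp, SACK and window-scale bits. mss_clamp is derived from the
 * adapter's MTU table entry minus 40 bytes of IP/TCP header.
 */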
static void assign_rxopt(struct sock *sk, unsigned int opt)
{
	const struct chtls_dev *cdev;
	struct chtls_sock *csk;
	struct tcp_sock *tp;

	csk = rcu_dereference_sk_user_data(sk);
	tp = tcp_sk(sk);
	cdev = csk->cdev;

	tp->tcp_header_len = sizeof(struct tcphdr);
	tp->rx_opt.mss_clamp = cdev->mtus[TCPOPT_MSS_G(opt)] - 40;
	tp->mss_cache = tp->rx_opt.mss_clamp;
	tp->rx_opt.tstamp_ok = TCPOPT_TSTAMP_G(opt);
	tp->rx_opt.snd_wscale = TCPOPT_SACK_G(opt);
	tp->rx_opt.wscale_ok = TCPOPT_WSCALE_OK_G(opt);
	SND_WSCALE(tp) = TCPOPT_SND_WSCALE_G(opt);
	if (!tp->rx_opt.wscale_ok)
		tp->rx_opt.rcv_wscale = 0;
	if (tp->rx_opt.tstamp_ok) {
		tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
		tp->rx_opt.mss_clamp -= TCPOLEN_TSTAMP_ALIGNED;
	} else if (csk->opt2 & TSTAMPS_EN_F) {
		csk->opt2 &= ~TSTAMPS_EN_F;
		csk->mtu_idx = TCPOPT_MSS_G(opt);
	}
}
static void chtls_purge_receive_queue(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		skb_dst_set(skb, (void *)NULL);
		kfree_skb(skb);
	}
}
static void chtls_purge_write_queue(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&csk->txq))) {
		sk->sk_wmem_queued -= skb->truesize;
		__kfree_skb(skb);
	}
}
static void chtls_purge_recv_queue(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct chtls_hws *tlsk = &csk->tlshws;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&tlsk->sk_recv_queue)) != NULL) {
		skb_dst_set(skb, NULL);
		kfree_skb(skb);
	}
}
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);
	struct chtls_dev *cdev;

	cdev = (struct chtls_dev *)handle;
	req->cmd = CPL_ABORT_NO_RST;
	cxgb4_ofld_send(cdev->lldi->ports[0], skb);
}
static struct sk_buff *alloc_ctrl_skb(struct sk_buff *skb, int len)
{
	if (likely(skb && !skb_shared(skb) && !skb_cloned(skb))) {
		__skb_trim(skb, 0);
		refcount_inc(&skb->users);
	} else {
		skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
	}
	return skb;
}
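/*
 * Build and send a CPL_ABORT_REQ for this connection. rsvd0 carries the
 * current snd_nxt, and rsvd1 is set when no payload has been sent on the
 * tid yet (i.e. no flowc work request has preceded this one).
 */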
static void chtls_send_abort(struct sock *sk, int mode, struct sk_buff *skb)
{
	struct cpl_abort_req *req;
	struct chtls_sock *csk;
	struct tcp_sock *tp;

	csk = rcu_dereference_sk_user_data(sk);
	tp = tcp_sk(sk);

	if (!skb)
		skb = alloc_ctrl_skb(csk->txdata_skb_cache, sizeof(*req));

	req = (struct cpl_abort_req *)skb_put(skb, sizeof(*req));
	INIT_TP_WR_CPL(req, CPL_ABORT_REQ, csk->tid);
	skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);
	req->rsvd0 = htonl(tp->snd_nxt);
	req->rsvd1 = !csk_flag_nochk(csk, CSK_TX_DATA_SENT);
	req->cmd = mode;
	t4_set_arp_err_handler(skb, csk->cdev, abort_arp_failure);
	send_or_defer(sk, tp, skb, mode == CPL_ABORT_SEND_RST);
}
static void chtls_send_reset(struct sock *sk, int mode, struct sk_buff *skb)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

	if (unlikely(csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN) ||
		     !csk->cdev)) {
		if (sk->sk_state == TCP_SYN_RECV)
			csk_set_flag(csk, CSK_RST_ABORTED);
		goto out;
	}

	if (!csk_flag_nochk(csk, CSK_TX_DATA_SENT)) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (send_tx_flowc_wr(sk, 0, tp->snd_nxt, tp->rcv_nxt) < 0)
			WARN_ONCE(1, "send tx flowc error");
		csk_set_flag(csk, CSK_TX_DATA_SENT);
	}

	csk_set_flag(csk, CSK_ABORT_RPL_PENDING);
	chtls_purge_write_queue(sk);

	csk_set_flag(csk, CSK_ABORT_SHUTDOWN);
	if (sk->sk_state != TCP_SYN_RECV)
		chtls_send_abort(sk, mode, skb);
	else
		goto out;

	return;
out:
	kfree_skb(skb);
}
static void release_tcp_port(struct sock *sk)
{
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);
}
static void tcp_uncork(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->nonagle & TCP_NAGLE_CORK) {
		tp->nonagle &= ~TCP_NAGLE_CORK;
		chtls_tcp_push(sk, 0);
	}
}
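/*
 * Send a CPL_CLOSE_CON_REQ work request to initiate a graceful close
 * (FIN) on an offloaded connection. The request is entailed on the TX
 * queue so the FIN is ordered behind any in-flight payload.
 */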
static void chtls_close_conn(struct sock *sk)
{
	struct cpl_close_con_req *req;
	struct chtls_sock *csk;
	struct sk_buff *skb;
	unsigned int tid;
	unsigned int len;

	len = roundup(sizeof(struct cpl_close_con_req), 16);
	csk = rcu_dereference_sk_user_data(sk);
	tid = csk->tid;

	skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
	req = (struct cpl_close_con_req *)__skb_put(skb, len);
	memset(req, 0, len);
	req->wr.wr_hi = htonl(FW_WR_OP_V(FW_TP_WR) |
			      FW_WR_IMMDLEN_V(sizeof(*req) -
					      sizeof(req->wr)));
	req->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)) |
			       FW_WR_FLOWID_V(tid));

	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));

	tcp_uncork(sk);
	skb_entail(sk, skb, ULPCB_FLAG_NO_HDR | ULPCB_FLAG_NO_APPEND);
	if (sk->sk_state != TCP_SYN_SENT)
		chtls_push_frames(csk, 1);
}
/*
 * Perform a state transition during close and return the actions indicated
 * for the transition. Do not make this function inline, the main reason
 * it exists at all is to avoid multiple inlining of tcp_set_state.
 */
static int make_close_transition(struct sock *sk)
{
	int next = (int)new_state[sk->sk_state];

	tcp_set_state(sk, next & TCP_STATE_MASK);
	return next & TCP_ACTION_FIN;
}
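/*
 * Protocol hook for close(2) on an offloaded connection. Mirrors
 * tcp_close(): purge both receive queues, send a RST if data was lost
 * or the connection never left SYN_SENT, otherwise run the FIN state
 * machine above and wait up to 'timeout' for the close to complete.
 */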
void chtls_close(struct sock *sk, long timeout)
{
	int data_lost, prev_state;
	struct chtls_sock *csk;

	csk = rcu_dereference_sk_user_data(sk);

	lock_sock(sk);
	sk->sk_shutdown |= SHUTDOWN_MASK;

	data_lost = skb_queue_len(&sk->sk_receive_queue);
	data_lost |= skb_queue_len(&csk->tlshws.sk_recv_queue);
	chtls_purge_recv_queue(sk);
	chtls_purge_receive_queue(sk);

	if (sk->sk_state == TCP_CLOSE) {
		goto wait;
	} else if (data_lost || sk->sk_state == TCP_SYN_SENT) {
		chtls_send_reset(sk, CPL_ABORT_SEND_RST, NULL);
		release_tcp_port(sk);
		goto unlock;
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		sk->sk_prot->disconnect(sk, 0);
	} else if (make_close_transition(sk)) {
		chtls_close_conn(sk);
	}

wait:
	if (timeout)
		sk_stream_wait_close(sk, timeout);

unlock:
	prev_state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);
	release_sock(sk);

	local_bh_disable();
	bh_lock_sock(sk);

	if (prev_state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
		goto out;

	if (sk->sk_state == TCP_FIN_WAIT2 && tcp_sk(sk)->linger2 < 0 &&
	    !csk_flag(sk, CSK_ABORT_SHUTDOWN)) {
		struct sk_buff *skb;

		skb = alloc_skb(sizeof(struct cpl_abort_req), GFP_ATOMIC);
		if (skb)
			chtls_send_reset(sk, CPL_ABORT_SEND_RST, skb);
	}

	if (sk->sk_state == TCP_CLOSE)
		inet_csk_destroy_sock(sk);

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}
/*
 * Wait until a socket enters one of the given states.
 */
static int wait_for_states(struct sock *sk, unsigned int states)
{
	DECLARE_WAITQUEUE(wait, current);
	struct socket_wq _sk_wq;
	long current_timeo;
	int err = 0;

	current_timeo = 200;

	/*
	 * We want this to work even when there's no associated struct socket.
	 * In that case we provide a temporary wait_queue_head_t.
	 */
	if (!sk->sk_wq) {
		init_waitqueue_head(&_sk_wq.wait);
		_sk_wq.fasync_list = NULL;
		init_rcu_head_on_stack(&_sk_wq.rcu);
		RCU_INIT_POINTER(sk->sk_wq, &_sk_wq);
	}

	add_wait_queue(sk_sleep(sk), &wait);
	while (!sk_in_state(sk, states)) {
		if (!current_timeo) {
			err = -EBUSY;
			break;
		}
		if (signal_pending(current)) {
			err = sock_intr_errno(current_timeo);
			break;
		}
		set_current_state(TASK_UNINTERRUPTIBLE);
		release_sock(sk);
		if (!sk_in_state(sk, states))
			current_timeo = schedule_timeout(current_timeo);
		__set_current_state(TASK_RUNNING);
		lock_sock(sk);
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	if (rcu_dereference(sk->sk_wq) == &_sk_wq)
		sk->sk_wq = NULL;
	return err;
}
int chtls_disconnect(struct sock *sk, int flags)
{
	struct chtls_sock *csk;
	struct tcp_sock *tp;
	int err;

	tp = tcp_sk(sk);
	csk = rcu_dereference_sk_user_data(sk);
	chtls_purge_recv_queue(sk);
	chtls_purge_receive_queue(sk);
	chtls_purge_write_queue(sk);

	if (sk->sk_state != TCP_CLOSE) {
		sk->sk_err = ECONNRESET;
		chtls_send_reset(sk, CPL_ABORT_SEND_RST, NULL);
		err = wait_for_states(sk, TCPF_CLOSE);
		if (err)
			return err;
	}
	chtls_purge_recv_queue(sk);
	chtls_purge_receive_queue(sk);
	tp->max_window = 0xFFFF << (tp->rx_opt.snd_wscale);
	return tcp_disconnect(sk, flags);
}
#define SHUTDOWN_ELIGIBLE_STATE (TCPF_ESTABLISHED | \
				 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)
void chtls_shutdown(struct sock *sk, int how)
{
	if ((how & SEND_SHUTDOWN) &&
	    sk_in_state(sk, SHUTDOWN_ELIGIBLE_STATE) &&
	    make_close_transition(sk))
		chtls_close_conn(sk);
}
void chtls_destroy_sock(struct sock *sk)
{
	struct chtls_sock *csk;

	csk = rcu_dereference_sk_user_data(sk);
	chtls_purge_recv_queue(sk);
	csk->ulp_mode = ULP_MODE_NONE;
	chtls_purge_write_queue(sk);
	kref_put(&csk->kref, chtls_sock_release);
	sk->sk_prot = &tcp_prot;
	sk->sk_prot->destroy(sk);
}
static void reset_listen_child(struct sock *child)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(child);
	struct sk_buff *skb;

	skb = alloc_ctrl_skb(csk->txdata_skb_cache,
			     sizeof(struct cpl_abort_req));
	chtls_send_reset(child, CPL_ABORT_SEND_RST, skb);
	sock_orphan(child);
	INC_ORPHAN_COUNT(child);
	if (child->sk_state == TCP_CLOSE)
		inet_csk_destroy_sock(child);
}
static void chtls_disconnect_acceptq(struct sock *listen_sk)
{
	struct request_sock **pprev;

	pprev = ACCEPT_QUEUE(listen_sk);
	while (*pprev) {
		struct request_sock *req = *pprev;

		if (req->rsk_ops == &chtls_rsk_ops) {
			struct sock *child = req->sk;

			*pprev = req->dl_next;
			sk_acceptq_removed(listen_sk);
			chtls_reqsk_free(req);
			sock_hold(child);
			local_bh_disable();
			bh_lock_sock(child);
			release_tcp_port(child);
			reset_listen_child(child);
			bh_unlock_sock(child);
			local_bh_enable();
			sock_put(child);
		} else {
			pprev = &req->dl_next;
		}
	}
}
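/*
 * The driver keeps its own hash table of offloaded listening sockets,
 * hashed on the socket pointer value, so that a server tid (stid) can
 * be mapped back to the owning listener and vice versa.
 */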
static int listen_hashfn(const struct sock *sk)
{
	return ((unsigned long)sk >> 10) & (LISTEN_INFO_HASH_SIZE - 1);
}
static struct listen_info *listen_hash_add(struct chtls_dev *cdev,
					   struct sock *sk,
					   unsigned int stid)
{
	struct listen_info *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (p) {
		int key = listen_hashfn(sk);

		p->sk = sk;
		p->stid = stid;
		spin_lock(&cdev->listen_lock);
		p->next = cdev->listen_hash_tab[key];
		cdev->listen_hash_tab[key] = p;
		spin_unlock(&cdev->listen_lock);
	}
	return p;
}
static int listen_hash_find(struct chtls_dev *cdev,
			    struct sock *sk)
{
	struct listen_info *p;
	int stid = -1;
	int key;

	key = listen_hashfn(sk);

	spin_lock(&cdev->listen_lock);
	for (p = cdev->listen_hash_tab[key]; p; p = p->next)
		if (p->sk == sk) {
			stid = p->stid;
			break;
		}
	spin_unlock(&cdev->listen_lock);
	return stid;
}
static int listen_hash_del(struct chtls_dev *cdev,
			   struct sock *sk)
{
	struct listen_info *p, **prev;
	int stid = -1;
	int key;

	key = listen_hashfn(sk);
	prev = &cdev->listen_hash_tab[key];

	spin_lock(&cdev->listen_lock);
	for (p = *prev; p; prev = &p->next, p = p->next)
		if (p->sk == sk) {
			stid = p->stid;
			*prev = p->next;
			kfree(p);
			break;
		}
	spin_unlock(&cdev->listen_lock);
	return stid;
}
static void cleanup_syn_rcv_conn(struct sock *child, struct sock *parent)
{
	struct request_sock *req;
	struct chtls_sock *csk;

	csk = rcu_dereference_sk_user_data(child);
	req = csk->passive_reap_next;

	reqsk_queue_removed(&inet_csk(parent)->icsk_accept_queue, req);
	__skb_unlink((struct sk_buff *)&csk->synq, &csk->listen_ctx->synq);
	chtls_reqsk_free(req);
	csk->passive_reap_next = NULL;
}
static void chtls_reset_synq(struct listen_ctx *listen_ctx)
{
	struct sock *listen_sk = listen_ctx->lsk;

	while (!skb_queue_empty(&listen_ctx->synq)) {
		struct chtls_sock *csk =
			container_of((struct synq *)skb_peek
				(&listen_ctx->synq), struct chtls_sock, synq);
		struct sock *child = csk->sk;

		cleanup_syn_rcv_conn(child, listen_sk);
		sock_hold(child);
		local_bh_disable();
		bh_lock_sock(child);
		release_tcp_port(child);
		reset_listen_child(child);
		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);
	}
}
int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk)
{
	struct net_device *ndev;
	struct listen_ctx *ctx;
	struct adapter *adap;
	struct port_info *pi;
	int stid;
	int ret;

	if (sk->sk_family != PF_INET)
		return -EAGAIN;

	ndev = chtls_ipv4_netdev(cdev, sk);
	if (!ndev)
		return -EBADF;

	pi = netdev_priv(ndev);
	adap = pi->adapter;
	if (!(adap->flags & FULL_INIT_DONE))
		return -EBADF;

	if (listen_hash_find(cdev, sk) >= 0)   /* already have it */
		return -EADDRINUSE;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	__module_get(THIS_MODULE);
	ctx->lsk = sk;
	ctx->cdev = cdev;
	ctx->state = T4_LISTEN_START_PENDING;
	skb_queue_head_init(&ctx->synq);

	stid = cxgb4_alloc_stid(cdev->tids, sk->sk_family, ctx);
	if (stid < 0)
		goto free_ctx;

	sock_hold(sk);
	if (!listen_hash_add(cdev, sk, stid))
		goto free_stid;

	ret = cxgb4_create_server(ndev, stid,
				  inet_sk(sk)->inet_rcv_saddr,
				  inet_sk(sk)->inet_sport, 0,
				  cdev->lldi->rxq_ids[0]);
	if (ret > 0)
		ret = net_xmit_errno(ret);
	if (ret)
		goto del_hash;
	return 0;
del_hash:
	listen_hash_del(cdev, sk);
free_stid:
	cxgb4_free_stid(cdev->tids, stid, sk->sk_family);
	sock_put(sk);
free_ctx:
	kfree(ctx);
	module_put(THIS_MODULE);
	return -EBADF;
}
void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk)
{
	struct listen_ctx *listen_ctx;
	int stid;

	stid = listen_hash_del(cdev, sk);
	if (stid < 0)
		return;

	listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
	chtls_reset_synq(listen_ctx);

	cxgb4_remove_server(cdev->lldi->ports[0], stid,
			    cdev->lldi->rxq_ids[0], 0);
	chtls_disconnect_acceptq(sk);
}
static int chtls_pass_open_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb) + RSS_HDR;
	unsigned int stid = GET_TID(rpl);
	struct listen_ctx *listen_ctx;

	listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
	if (!listen_ctx)
		return CPL_RET_BUF_DONE;

	if (listen_ctx->state == T4_LISTEN_START_PENDING) {
		listen_ctx->state = T4_LISTEN_STARTED;
		return CPL_RET_BUF_DONE;
	}

	if (rpl->status != CPL_ERR_NONE) {
		pr_info("Unexpected PASS_OPEN_RPL status %u for STID %u\n",
			rpl->status, stid);
		return CPL_RET_BUF_DONE;
	}
	cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
	sock_put(listen_ctx->lsk);
	kfree(listen_ctx);
	module_put(THIS_MODULE);

	return CPL_RET_BUF_DONE;
}
static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb) + RSS_HDR;
	struct listen_ctx *listen_ctx;
	unsigned int stid;
	void *data;

	stid = GET_TID(rpl);
	data = lookup_stid(cdev->tids, stid);
	listen_ctx = (struct listen_ctx *)data;

	if (rpl->status != CPL_ERR_NONE) {
		pr_info("Unexpected CLOSE_LISTSRV_RPL status %u for STID %u\n",
			rpl->status, stid);
		return CPL_RET_BUF_DONE;
	}

	cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
	sock_put(listen_ctx->lsk);
	kfree(listen_ctx);
	module_put(THIS_MODULE);

	return CPL_RET_BUF_DONE;
}
static void chtls_purge_wr_queue(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = dequeue_wr(sk)) != NULL)
		kfree_skb(skb);
}
static void chtls_release_resources(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct chtls_dev *cdev = csk->cdev;
	unsigned int tid = csk->tid;
	struct tid_info *tids;

	if (!cdev)
		return;

	tids = cdev->tids;
	kfree_skb(csk->txdata_skb_cache);
	csk->txdata_skb_cache = NULL;

	if (csk->wr_credits != csk->wr_max_credits) {
		chtls_purge_wr_queue(sk);
		chtls_reset_wr_list(csk);
	}

	if (csk->l2t_entry) {
		cxgb4_l2t_release(csk->l2t_entry);
		csk->l2t_entry = NULL;
	}

	cxgb4_remove_tid(tids, csk->port_id, tid, sk->sk_family);
	sock_put(sk);
}
static void chtls_conn_done(struct sock *sk)
{
	if (sock_flag(sk, SOCK_DEAD))
		chtls_purge_receive_queue(sk);
	sk_wakeup_sleepers(sk, 0);
	tcp_done(sk);
}
static void do_abort_syn_rcv(struct sock *child, struct sock *parent)
{
	/*
	 * If the server is still open we clean up the child connection,
	 * otherwise the server already did the clean up as it was purging
	 * its SYN queue and the skb was just sitting in its backlog.
	 */
	if (likely(parent->sk_state == TCP_LISTEN)) {
		cleanup_syn_rcv_conn(child, parent);
		/* Without the below call to sock_orphan,
		 * we leak the socket resource with syn_flood test
		 * as inet_csk_destroy_sock will not be called
		 * in tcp_done since SOCK_DEAD flag is not set.
		 * Kernel handles this differently where new socket is
		 * created only after 3 way handshake is done.
		 */
		sock_orphan(child);
		percpu_counter_inc((child)->sk_prot->orphan_count);
		chtls_release_resources(child);
		chtls_conn_done(child);
	} else {
		if (csk_flag(child, CSK_RST_ABORTED)) {
			chtls_release_resources(child);
			chtls_conn_done(child);
		}
	}
}
static void pass_open_abort(struct sock *child, struct sock *parent,
			    struct sk_buff *skb)
{
	do_abort_syn_rcv(child, parent);
	kfree_skb(skb);
}

static void bl_pass_open_abort(struct sock *lsk, struct sk_buff *skb)
{
	pass_open_abort(skb->sk, lsk, skb);
}
static void chtls_pass_open_arp_failure(struct sock *sk,
					struct sk_buff *skb)
{
	const struct request_sock *oreq;
	struct chtls_sock *csk;
	struct chtls_dev *cdev;
	struct sock *parent;
	void *data;

	csk = rcu_dereference_sk_user_data(sk);
	cdev = csk->cdev;

	/*
	 * If the connection is being aborted due to the parent listening
	 * socket going away there's nothing to do, the ABORT_REQ will close
	 * the connection.
	 */
	if (csk_flag(sk, CSK_ABORT_RPL_PENDING)) {
		kfree_skb(skb);
		return;
	}

	oreq = csk->passive_reap_next;
	data = lookup_stid(cdev->tids, oreq->ts_recent);
	parent = ((struct listen_ctx *)data)->lsk;

	bh_lock_sock(parent);
	if (!sock_owned_by_user(parent)) {
		pass_open_abort(sk, parent, skb);
	} else {
		BLOG_SKB_CB(skb)->backlog_rcv = bl_pass_open_abort;
		__sk_add_backlog(parent, skb);
	}
	bh_unlock_sock(parent);
}
static void chtls_accept_rpl_arp_failure(void *handle,
					 struct sk_buff *skb)
{
	struct sock *sk = (struct sock *)handle;

	process_cpl_msg(chtls_pass_open_arp_failure, sk, skb);
}
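/*
 * Pick the MSS to advertise: start from the route's advertised MSS,
 * clamp to any user-set MSS, the path MTU and the MSS option in the
 * incoming SYN, then map the result onto the adapter's MTU table via
 * cxgb4_best_aligned_mtu(). Returns the chosen MTU-table index.
 */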
static unsigned int chtls_select_mss(const struct chtls_sock *csk,
				     unsigned int pmtu,
				     struct cpl_pass_accept_req *req)
{
	struct chtls_dev *cdev;
	struct dst_entry *dst;
	struct tcp_sock *tp;
	unsigned int tcpoptsz;
	unsigned int iphdrsz;
	unsigned int mtu_idx;
	unsigned int mss;
	struct sock *sk;

	mss = ntohs(req->tcpopt.mss);
	sk = csk->sk;
	dst = __sk_dst_get(sk);
	cdev = csk->cdev;
	tp = tcp_sk(sk);
	tcpoptsz = 0;

	iphdrsz = sizeof(struct iphdr) + sizeof(struct tcphdr);
	if (req->tcpopt.tstamp)
		tcpoptsz += round_up(TCPOLEN_TIMESTAMP, 4);

	tp->advmss = dst_metric_advmss(dst);
	if (USER_MSS(tp) && tp->advmss > USER_MSS(tp))
		tp->advmss = USER_MSS(tp);
	if (tp->advmss > pmtu - iphdrsz)
		tp->advmss = pmtu - iphdrsz;
	if (mss && tp->advmss > mss)
		tp->advmss = mss;

	tp->advmss = cxgb4_best_aligned_mtu(cdev->lldi->mtus,
					    iphdrsz + tcpoptsz,
					    tp->advmss - tcpoptsz,
					    8, &mtu_idx);
	tp->advmss -= iphdrsz;

	inet_csk(sk)->icsk_pmtu_cookie = pmtu;
	return mtu_idx;
}
static unsigned int select_rcv_wnd(struct chtls_sock *csk)
{
	unsigned int rcvwnd;
	unsigned int wnd;
	struct sock *sk;

	sk = csk->sk;
	wnd = tcp_full_space(sk);

	if (wnd < MIN_RCV_WND)
		wnd = MIN_RCV_WND;

	rcvwnd = MAX_RCV_WND;

	csk_set_flag(csk, CSK_UPDATE_RCV_WND);
	return min(wnd, rcvwnd);
}
static unsigned int select_rcv_wscale(int space, int wscale_ok, int win_clamp)
{
	int wscale = 0;

	if (space > MAX_RCV_WND)
		space = MAX_RCV_WND;
	if (win_clamp && win_clamp < space)
		space = win_clamp;

	if (wscale_ok) {
		while (wscale < 14 && (65535 << wscale) < space)
			wscale++;
	}
	return wscale;
}
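/*
 * Build and send the CPL_PASS_ACCEPT_RPL that commits the hardware to a
 * passive-open connection: opt0/opt2 encode the window scale, MSS index,
 * L2T entry, congestion/ECN settings and ULP_MODE_TLS. The reply goes
 * out through the L2T entry so ARP resolution is handled for us.
 */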
static void chtls_pass_accept_rpl(struct sk_buff *skb,
				  struct cpl_pass_accept_req *req,
				  unsigned int tid)
{
	struct cpl_t5_pass_accept_rpl *rpl5;
	struct cxgb4_lld_info *lldi;
	const struct tcphdr *tcph;
	const struct tcp_sock *tp;
	struct chtls_sock *csk;
	unsigned int len;
	struct sock *sk;
	u32 opt2, hlen;
	u64 opt0;

	sk = skb->sk;
	tp = tcp_sk(sk);
	csk = sk->sk_user_data;
	csk->tid = tid;
	lldi = csk->cdev->lldi;
	len = roundup(sizeof(*rpl5), 16);

	rpl5 = __skb_put_zero(skb, len);
	INIT_TP_WR(rpl5, tid);

	OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						     csk->tid));
	csk->mtu_idx = chtls_select_mss(csk, dst_mtu(__sk_dst_get(sk)),
					req);
	opt0 = TCAM_BYPASS_F |
	       WND_SCALE_V((tp)->rx_opt.rcv_wscale) |
	       MSS_IDX_V(csk->mtu_idx) |
	       L2T_IDX_V(csk->l2t_entry->idx) |
	       NAGLE_V(!(tp->nonagle & TCP_NAGLE_OFF)) |
	       TX_CHAN_V(csk->tx_chan) |
	       SMAC_SEL_V(csk->smac_idx) |
	       DSCP_V(csk->tos >> 2) |
	       ULP_MODE_V(ULP_MODE_TLS) |
	       RCV_BUFSIZ_V(min(tp->rcv_wnd >> 10, RCV_BUFSIZ_M));

	opt2 = RX_CHANNEL_V(0) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);

	if (!is_t5(lldi->adapter_type))
		opt2 |= RX_FC_DISABLE_F;
	if (req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN_F;
	if (req->tcpopt.sack)
		opt2 |= SACK_EN_F;
	hlen = ntohl(req->hdr_len);

	tcph = (struct tcphdr *)((u8 *)(req + 1) +
			T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen));
	if (tcph->ece && tcph->cwr)
		opt2 |= CCTRL_ECN_V(1);
	opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);
	opt2 |= T5_OPT_2_VALID_F;

	rpl5->opt0 = cpu_to_be64(opt0);
	rpl5->opt2 = cpu_to_be32(opt2);
	rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
	t4_set_arp_err_handler(skb, sk, chtls_accept_rpl_arp_failure);
	cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
}
static void inet_inherit_port(struct inet_hashinfo *hash_info,
			      struct sock *lsk, struct sock *newsk)
{
	local_bh_disable();
	__inet_inherit_port(lsk, newsk);
	local_bh_enable();
}
static int chtls_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol) {
		kfree_skb(skb);
		return 0;
	}
	BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
	return 0;
}
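/*
 * Create the child socket for a passive open: clone the listener via
 * tcp_create_openreq_child(), resolve the L2 neighbour for the return
 * path, bind the new chtls_sock to the egress port, and choose the RSS
 * queue, TX queue and receive window for the hardware connection.
 */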
static struct sock *chtls_recv_sock(struct sock *lsk,
				    struct request_sock *oreq,
				    void *network_hdr,
				    const struct cpl_pass_accept_req *req,
				    struct chtls_dev *cdev)
{
	struct adapter *adap = pci_get_drvdata(cdev->pdev);
	const struct tcphdr *tcph;
	struct inet_sock *newinet;
	const struct iphdr *iph;
	struct net_device *ndev;
	struct chtls_sock *csk;
	struct dst_entry *dst;
	struct neighbour *n;
	struct tcp_sock *tp;
	struct sock *newsk;
	u16 port_id;
	int rxq_idx;
	int step;
	unsigned int i;

	iph = (const struct iphdr *)network_hdr;
	newsk = tcp_create_openreq_child(lsk, oreq, cdev->askb);
	if (!newsk)
		goto free_oreq;

	dst = inet_csk_route_child_sock(lsk, newsk, oreq);
	if (!dst)
		goto free_sk;

	tcph = (struct tcphdr *)(iph + 1);
	n = dst_neigh_lookup(dst, &iph->saddr);
	if (!n)
		goto free_sk;

	ndev = n->dev;
	if (!ndev)
		goto free_dst;
	if (is_vlan_dev(ndev))
		ndev = vlan_dev_real_dev(ndev);

	for_each_port(adap, i)
		if (cdev->ports[i] == ndev)
			break;

	if (i == adap->params.nports)
		goto free_dst;

	port_id = cxgb4_port_idx(ndev);

	csk = chtls_sock_create(cdev);
	if (!csk)
		goto free_dst;

	csk->l2t_entry = cxgb4_l2t_get(cdev->lldi->l2t, n, ndev, 0);
	if (!csk->l2t_entry)
		goto free_csk;

	newsk->sk_user_data = csk;
	newsk->sk_backlog_rcv = chtls_backlog_rcv;

	tp = tcp_sk(newsk);
	newinet = inet_sk(newsk);

	newinet->inet_daddr = iph->saddr;
	newinet->inet_rcv_saddr = iph->daddr;
	newinet->inet_saddr = iph->daddr;

	oreq->ts_recent = PASS_OPEN_TID_G(ntohl(req->tos_stid));
	sk_setup_caps(newsk, dst);
	newsk->sk_prot_creator = lsk->sk_prot_creator;
	csk->sk = newsk;
	csk->passive_reap_next = oreq;
	csk->tx_chan = cxgb4_port_chan(ndev);
	csk->port_id = port_id;
	csk->egress_dev = ndev;
	csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
	csk->ulp_mode = ULP_MODE_TLS;
	step = cdev->lldi->nrxq / cdev->lldi->nchan;
	csk->rss_qid = cdev->lldi->rxq_ids[port_id * step];
	rxq_idx = port_id * step;
	csk->txq_idx = (rxq_idx < cdev->lldi->ntxq) ? rxq_idx :
			port_id * step;
	csk->sndbuf = newsk->sk_sndbuf;
	csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi->adapter_type,
					 cxgb4_port_viid(ndev));
	tp->rcv_wnd = select_rcv_wnd(csk);
	RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
					   WSCALE_OK(tp),
					   tp->window_clamp);
	inet_inherit_port(&tcp_hashinfo, lsk, newsk);
	csk_set_flag(csk, CSK_CONN_INLINE);
	bh_unlock_sock(newsk); /* tcp_create_openreq_child ->sk_clone_lock */

	return newsk;
free_csk:
	chtls_sock_release(&csk->kref);
free_dst:
	dst_release(dst);
free_sk:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
free_oreq:
	chtls_reqsk_free(oreq);
	return NULL;
}
/*
 * Populate a TID_RELEASE WR. The skb must be already properly sized.
 */
static void mk_tid_release(struct sk_buff *skb,
			   unsigned int chan, unsigned int tid)
{
	struct cpl_tid_release *req;
	unsigned int len;

	len = roundup(sizeof(struct cpl_tid_release), 16);
	req = (struct cpl_tid_release *)__skb_put(skb, len);
	memset(req, 0, len);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	INIT_TP_WR_CPL(req, CPL_TID_RELEASE, tid);
}
static int chtls_get_module(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (!try_module_get(icsk->icsk_ulp_ops->owner))
		return -1;

	return 0;
}
static void chtls_pass_accept_request(struct sock *sk,
				      struct sk_buff *skb)
{
	struct cpl_t5_pass_accept_rpl *rpl;
	struct cpl_pass_accept_req *req;
	struct listen_ctx *listen_ctx;
	struct request_sock *oreq;
	struct sk_buff *reply_skb;
	struct chtls_sock *csk;
	struct chtls_dev *cdev;
	struct tcphdr *tcph;
	struct sock *newsk;
	struct ethhdr *eh;
	struct iphdr *iph;
	void *network_hdr;
	unsigned int stid;
	unsigned int len;
	unsigned int tid;

	req = cplhdr(skb) + RSS_HDR;
	tid = GET_TID(req);
	cdev = BLOG_SKB_CB(skb)->cdev;
	newsk = lookup_tid(cdev->tids, tid);
	stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
	if (newsk) {
		pr_info("tid (%d) already in use\n", tid);
		return;
	}

	len = roundup(sizeof(*rpl), 16);
	reply_skb = alloc_skb(len, GFP_ATOMIC);
	if (!reply_skb) {
		cxgb4_remove_tid(cdev->tids, 0, tid, sk->sk_family);
		kfree_skb(skb);
		return;
	}

	if (sk->sk_state != TCP_LISTEN)
		goto reject;

	if (inet_csk_reqsk_queue_is_full(sk))
		goto reject;

	if (sk_acceptq_is_full(sk))
		goto reject;

	oreq = inet_reqsk_alloc(&chtls_rsk_ops, sk, true);
	if (!oreq)
		goto reject;

	oreq->rsk_rcv_wnd = 0;
	oreq->rsk_window_clamp = 0;
	oreq->cookie_ts = 0;
	oreq->ts_recent = 0;

	eh = (struct ethhdr *)(req + 1);
	iph = (struct iphdr *)(eh + 1);
	if (iph->version != 0x4)
		goto free_oreq;

	network_hdr = (void *)(eh + 1);
	tcph = (struct tcphdr *)(iph + 1);

	tcp_rsk(oreq)->tfo_listener = false;
	tcp_rsk(oreq)->rcv_isn = ntohl(tcph->seq);
	chtls_set_req_port(oreq, tcph->source, tcph->dest);
	inet_rsk(oreq)->ecn_ok = 0;
	chtls_set_req_addr(oreq, iph->daddr, iph->saddr);
	if (req->tcpopt.wsf <= 14) {
		inet_rsk(oreq)->wscale_ok = 1;
		inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf;
	}
	inet_rsk(oreq)->ir_iif = sk->sk_bound_dev_if;

	newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
	if (!newsk)
		goto reject;

	if (chtls_get_module(newsk))
		goto reject;
	inet_csk_reqsk_queue_added(sk);
	reply_skb->sk = newsk;
	chtls_install_cpl_ops(newsk);
	cxgb4_insert_tid(cdev->tids, newsk, tid, newsk->sk_family);
	csk = rcu_dereference_sk_user_data(newsk);
	listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
	csk->listen_ctx = listen_ctx;
	__skb_queue_tail(&listen_ctx->synq, (struct sk_buff *)&csk->synq);
	chtls_pass_accept_rpl(reply_skb, req, tid);
	kfree_skb(skb);
	return;

free_oreq:
	chtls_reqsk_free(oreq);
reject:
	mk_tid_release(reply_skb, 0, tid);
	cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
	kfree_skb(skb);
}
/*
 * Handle a CPL_PASS_ACCEPT_REQ message.
 */
static int chtls_pass_accept_req(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_pass_accept_req *req = cplhdr(skb) + RSS_HDR;
	struct listen_ctx *ctx;
	unsigned int stid;
	unsigned int tid;
	struct sock *lsk;
	void *data;

	stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
	tid = GET_TID(req);

	data = lookup_stid(cdev->tids, stid);
	if (!data)
		return 1;

	ctx = (struct listen_ctx *)data;
	lsk = ctx->lsk;

	if (unlikely(tid >= cdev->tids->ntids)) {
		pr_info("passive open TID %u too large\n", tid);
		return 1;
	}

	BLOG_SKB_CB(skb)->cdev = cdev;
	process_cpl_msg(chtls_pass_accept_request, lsk, skb);
	return 0;
}
/*
 * Completes some final bits of initialization for just established connections
 * and changes their state to TCP_ESTABLISHED.
 *
 * snd_isn here is the ISN after the SYN, i.e., the true ISN + 1.
 */
static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->pushed_seq = snd_isn;
	tp->write_seq = snd_isn;
	tp->snd_nxt = snd_isn;
	tp->snd_una = snd_isn;
	inet_sk(sk)->inet_id = prandom_u32();
	assign_rxopt(sk, opt);

	if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10))
		tp->rcv_wup -= tp->rcv_wnd - (RCV_BUFSIZ_M << 10);

	smp_mb();
	tcp_set_state(sk, TCP_ESTABLISHED);
}
static void chtls_abort_conn(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *abort_skb;

	abort_skb = alloc_skb(sizeof(struct cpl_abort_req), GFP_ATOMIC);
	if (abort_skb)
		chtls_send_reset(sk, CPL_ABORT_SEND_RST, abort_skb);
}
static struct sock *reap_list;
static DEFINE_SPINLOCK(reap_list_lock);

/*
 * Process the reap list.
 */
DECLARE_TASK_FUNC(process_reap_list, task_param)
{
	spin_lock_bh(&reap_list_lock);
	while (reap_list) {
		struct sock *sk = reap_list;
		struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

		reap_list = csk->passive_reap_next;
		csk->passive_reap_next = NULL;
		spin_unlock(&reap_list_lock);
		sock_hold(sk);

		bh_lock_sock(sk);
		chtls_abort_conn(sk, NULL);
		sock_orphan(sk);
		if (sk->sk_state == TCP_CLOSE)
			inet_csk_destroy_sock(sk);
		bh_unlock_sock(sk);
		sock_put(sk);

		spin_lock(&reap_list_lock);
	}
	spin_unlock_bh(&reap_list_lock);
}
static DECLARE_WORK(reap_task, process_reap_list);

static void add_to_reap_list(struct sock *sk)
{
	struct chtls_sock *csk = sk->sk_user_data;

	local_bh_disable();
	bh_lock_sock(sk);
	release_tcp_port(sk); /* release the port immediately */

	spin_lock(&reap_list_lock);
	csk->passive_reap_next = reap_list;
	reap_list = sk;
	if (!csk->passive_reap_next)
		schedule_work(&reap_task);
	spin_unlock(&reap_list_lock);
	bh_unlock_sock(sk);
	local_bh_enable();
}
static void add_pass_open_to_parent(struct sock *child, struct sock *lsk,
				    struct chtls_dev *cdev)
{
	struct request_sock *oreq;
	struct chtls_sock *csk;

	if (lsk->sk_state != TCP_LISTEN)
		return;

	csk = child->sk_user_data;
	oreq = csk->passive_reap_next;
	csk->passive_reap_next = NULL;

	reqsk_queue_removed(&inet_csk(lsk)->icsk_accept_queue, oreq);
	__skb_unlink((struct sk_buff *)&csk->synq, &csk->listen_ctx->synq);

	if (sk_acceptq_is_full(lsk)) {
		chtls_reqsk_free(oreq);
		add_to_reap_list(child);
	} else {
		refcount_set(&oreq->rsk_refcnt, 1);
		inet_csk_reqsk_queue_add(lsk, oreq, child);
		lsk->sk_data_ready(lsk);
	}
}
static void bl_add_pass_open_to_parent(struct sock *lsk, struct sk_buff *skb)
{
	struct sock *child = skb->sk;

	skb->sk = NULL;
	add_pass_open_to_parent(child, lsk, BLOG_SKB_CB(skb)->cdev);
	kfree_skb(skb);
}
static int chtls_pass_establish(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_pass_establish *req = cplhdr(skb) + RSS_HDR;
	struct chtls_sock *csk;
	struct sock *lsk, *sk;
	unsigned int hwtid;

	hwtid = GET_TID(req);
	sk = lookup_tid(cdev->tids, hwtid);
	if (!sk)
		return (CPL_RET_UNKNOWN_TID | CPL_RET_BUF_DONE);

	bh_lock_sock(sk);
	if (unlikely(sock_owned_by_user(sk))) {
		kfree_skb(skb);
	} else {
		unsigned int stid;
		void *data;

		csk = sk->sk_user_data;
		csk->wr_max_credits = 64;
		csk->wr_credits = 64;
		csk->wr_unacked = 0;
		make_established(sk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
		stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
		sk->sk_state_change(sk);
		if (unlikely(sk->sk_socket))
			sk_wake_async(sk, 0, POLL_OUT);

		data = lookup_stid(cdev->tids, stid);
		if (!data) {
			/* listening server close */
			kfree_skb(skb);
			goto unlock;
		}
		lsk = ((struct listen_ctx *)data)->lsk;

		bh_lock_sock(lsk);
		if (unlikely(skb_queue_empty(&csk->listen_ctx->synq))) {
			/* removed from synq */
			bh_unlock_sock(lsk);
			kfree_skb(skb);
			goto unlock;
		}

		if (likely(!sock_owned_by_user(lsk))) {
			kfree_skb(skb);
			add_pass_open_to_parent(sk, lsk, cdev);
		} else {
			skb->sk = sk;
			BLOG_SKB_CB(skb)->cdev = cdev;
			BLOG_SKB_CB(skb)->backlog_rcv =
				bl_add_pass_open_to_parent;
			__sk_add_backlog(lsk, skb);
			bh_unlock_sock(lsk);
		}
	}
unlock:
	bh_unlock_sock(sk);
	return 0;
}
/*
 * Handle receipt of an urgent pointer.
 */
static void handle_urg_ptr(struct sock *sk, u32 urg_seq)
{
	struct tcp_sock *tp = tcp_sk(sk);

	urg_seq--;
	if (tp->urg_data && !after(urg_seq, tp->urg_seq))
		return;	/* duplicate pointer */

	sk_send_sigurg(sk);
	if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
	    !sock_flag(sk, SOCK_URGINLINE) &&
	    tp->copied_seq != tp->rcv_nxt) {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

		tp->copied_seq++;
		if (skb && tp->copied_seq - ULP_SKB_CB(skb)->seq >= skb->len)
			chtls_free_skb(sk, skb);
	}

	tp->urg_data = TCP_URG_NOTYET;
	tp->urg_seq = urg_seq;
}
static void check_sk_callbacks(struct chtls_sock *csk)
{
	struct sock *sk = csk->sk;

	if (unlikely(sk->sk_user_data &&
		     !csk_flag_nochk(csk, CSK_CALLBACKS_CHKD)))
		csk_set_flag(csk, CSK_CALLBACKS_CHKD);
}
/*
 * Handles Rx data that arrives in a state where the socket isn't accepting
 * new data.
 */
static void handle_excess_rx(struct sock *sk, struct sk_buff *skb)
{
	if (!csk_flag(sk, CSK_ABORT_SHUTDOWN))
		chtls_abort_conn(sk, skb);

	kfree_skb(skb);
}
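/*
 * Plain (non-TLS) receive path: strip the CPL_RX_DATA header, record the
 * sequence number and PSH flag in the skb control block, track
 * delayed-ACK mode changes reported by the hardware, then queue the
 * payload on sk_receive_queue and wake the reader.
 */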
static void chtls_recv_data(struct sock *sk, struct sk_buff *skb)
{
	struct cpl_rx_data *hdr = cplhdr(skb) + RSS_HDR;
	struct chtls_sock *csk;
	struct tcp_sock *tp;

	csk = rcu_dereference_sk_user_data(sk);
	tp = tcp_sk(sk);

	if (unlikely(sk->sk_shutdown & RCV_SHUTDOWN)) {
		handle_excess_rx(sk, skb);
		return;
	}

	ULP_SKB_CB(skb)->seq = ntohl(hdr->seq);
	ULP_SKB_CB(skb)->psh = hdr->psh;
	skb_ulp_mode(skb) = ULP_MODE_NONE;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*hdr) + RSS_HDR);
	if (!skb->data_len)
		__skb_trim(skb, ntohs(hdr->len));

	if (unlikely(hdr->urg))
		handle_urg_ptr(sk, tp->rcv_nxt + ntohs(hdr->urg));
	if (unlikely(tp->urg_data == TCP_URG_NOTYET &&
		     tp->urg_seq - tp->rcv_nxt < skb->len))
		tp->urg_data = TCP_URG_VALID |
			       skb->data[tp->urg_seq - tp->rcv_nxt];

	if (unlikely(hdr->dack_mode != csk->delack_mode)) {
		csk->delack_mode = hdr->dack_mode;
		csk->delack_seq = tp->rcv_nxt;
	}

	tcp_hdr(skb)->fin = 0;
	tp->rcv_nxt += skb->len;

	__skb_queue_tail(&sk->sk_receive_queue, skb);

	if (!sock_flag(sk, SOCK_DEAD)) {
		check_sk_callbacks(csk);
		sk->sk_data_ready(sk);
	}
}
static int chtls_rx_data(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_rx_data *req = cplhdr(skb) + RSS_HDR;
	unsigned int hwtid = GET_TID(req);
	struct sock *sk;

	sk = lookup_tid(cdev->tids, hwtid);
	if (unlikely(!sk)) {
		pr_err("can't find conn. for hwtid %u.\n", hwtid);
		return -EINVAL;
	}
	skb_dst_set(skb, NULL);
	process_cpl_msg(chtls_recv_data, sk, skb);
	return 0;
}
static void chtls_recv_pdu(struct sock *sk, struct sk_buff *skb)
{
	struct cpl_tls_data *hdr = cplhdr(skb);
	struct chtls_sock *csk;
	struct chtls_hws *tlsk;
	struct tcp_sock *tp;

	csk = rcu_dereference_sk_user_data(sk);
	tlsk = &csk->tlshws;
	tp = tcp_sk(sk);

	if (unlikely(sk->sk_shutdown & RCV_SHUTDOWN)) {
		handle_excess_rx(sk, skb);
		return;
	}

	ULP_SKB_CB(skb)->seq = ntohl(hdr->seq);
	ULP_SKB_CB(skb)->flags = 0;
	skb_ulp_mode(skb) = ULP_MODE_TLS;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*hdr));
	if (!skb->data_len)
		__skb_trim(skb,
			   CPL_TLS_DATA_LENGTH_G(ntohl(hdr->length_pkd)));

	if (unlikely(tp->urg_data == TCP_URG_NOTYET && tp->urg_seq -
		     tp->rcv_nxt < skb->len))
		tp->urg_data = TCP_URG_VALID |
			       skb->data[tp->urg_seq - tp->rcv_nxt];

	tcp_hdr(skb)->fin = 0;
	tlsk->pldlen = CPL_TLS_DATA_LENGTH_G(ntohl(hdr->length_pkd));
	__skb_queue_tail(&tlsk->sk_recv_queue, skb);
}
static int chtls_rx_pdu(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_tls_data *req = cplhdr(skb);
	unsigned int hwtid = GET_TID(req);
	struct sock *sk;

	sk = lookup_tid(cdev->tids, hwtid);
	if (unlikely(!sk)) {
		pr_err("can't find conn. for hwtid %u.\n", hwtid);
		return -EINVAL;
	}
	skb_dst_set(skb, NULL);
	process_cpl_msg(chtls_recv_pdu, sk, skb);
	return 0;
}
static void chtls_set_hdrlen(struct sk_buff *skb, unsigned int nlen)
{
	struct tlsrx_cmp_hdr *tls_cmp_hdr = cplhdr(skb);

	skb->hdr_len = ntohs((__force __be16)tls_cmp_hdr->length);
	tls_cmp_hdr->length = ntohs((__force __be16)nlen);
}
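/*
 * A CPL_RX_TLS_CMP completion carries the TLS record header after the
 * hardware has processed the record payload delivered via CPL_TLS_DATA.
 * Flag decrypt errors in the header, pair the completion with the
 * payload skb queued on the TLS receive queue, and hand both to the
 * socket in order.
 */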
static void chtls_rx_hdr(struct sock *sk, struct sk_buff *skb)
{
	struct tlsrx_cmp_hdr *tls_hdr_pkt;
	struct cpl_rx_tls_cmp *cmp_cpl;
	struct sk_buff *skb_rec;
	struct chtls_sock *csk;
	struct chtls_hws *tlsk;
	struct tcp_sock *tp;

	cmp_cpl = cplhdr(skb);
	csk = rcu_dereference_sk_user_data(sk);
	tlsk = &csk->tlshws;
	tp = tcp_sk(sk);

	ULP_SKB_CB(skb)->seq = ntohl(cmp_cpl->seq);
	ULP_SKB_CB(skb)->flags = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*cmp_cpl));
	tls_hdr_pkt = (struct tlsrx_cmp_hdr *)skb->data;
	if (tls_hdr_pkt->res_to_mac_error & TLSRX_HDR_PKT_ERROR_M)
		tls_hdr_pkt->type = CONTENT_TYPE_ERROR;
	if (!skb->data_len)
		__skb_trim(skb, TLS_HEADER_LENGTH);

	tp->rcv_nxt +=
		CPL_RX_TLS_CMP_PDULENGTH_G(ntohl(cmp_cpl->pdulength_length));

	ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_TLS_HDR;
	skb_rec = __skb_dequeue(&tlsk->sk_recv_queue);
	if (!skb_rec) {
		__skb_queue_tail(&sk->sk_receive_queue, skb);
	} else {
		chtls_set_hdrlen(skb, tlsk->pldlen);
		tlsk->pldlen = 0;
		__skb_queue_tail(&sk->sk_receive_queue, skb);
		__skb_queue_tail(&sk->sk_receive_queue, skb_rec);
	}

	if (!sock_flag(sk, SOCK_DEAD)) {
		check_sk_callbacks(csk);
		sk->sk_data_ready(sk);
	}
}
static int chtls_rx_cmp(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_rx_tls_cmp *req = cplhdr(skb);
	unsigned int hwtid = GET_TID(req);
	struct sock *sk;

	sk = lookup_tid(cdev->tids, hwtid);
	if (unlikely(!sk)) {
		pr_err("can't find conn. for hwtid %u.\n", hwtid);
		return -EINVAL;
	}
	skb_dst_set(skb, NULL);
	process_cpl_msg(chtls_rx_hdr, sk, skb);

	return 0;
}
static void chtls_timewait(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->rcv_nxt++;
	tp->rx_opt.ts_recent_stamp = ktime_get_seconds();
	tp->srtt_us = 0;
	tcp_time_wait(sk, TCP_TIME_WAIT, 0);
}
static void chtls_peer_close(struct sock *sk, struct sk_buff *skb)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

	sk->sk_shutdown |= RCV_SHUTDOWN;
	sock_set_flag(sk, SOCK_DONE);

	switch (sk->sk_state) {
	case TCP_SYN_RECV:
	case TCP_ESTABLISHED:
		tcp_set_state(sk, TCP_CLOSE_WAIT);
		break;
	case TCP_FIN_WAIT1:
		tcp_set_state(sk, TCP_CLOSING);
		break;
	case TCP_FIN_WAIT2:
		chtls_release_resources(sk);
		if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
			chtls_conn_done(sk);
		else
			chtls_timewait(sk);
		break;
	default:
		pr_info("cpl_peer_close in bad state %d\n", sk->sk_state);
	}

	if (!sock_flag(sk, SOCK_DEAD)) {
		sk->sk_state_change(sk);
		/* Do not send POLL_HUP for half duplex close. */

		if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
		    sk->sk_state == TCP_CLOSE)
			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
		else
			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	}
	kfree_skb(skb);
}
static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
{
	struct cpl_close_con_rpl *rpl = cplhdr(skb) + RSS_HDR;
	struct chtls_sock *csk;
	struct tcp_sock *tp;

	csk = rcu_dereference_sk_user_data(sk);
	tp = tcp_sk(sk);

	tp->snd_una = ntohl(rpl->snd_nxt) - 1;  /* exclude FIN */

	switch (sk->sk_state) {
	case TCP_CLOSING:
		chtls_release_resources(sk);
		if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
			chtls_conn_done(sk);
		else
			chtls_timewait(sk);
		break;
	case TCP_LAST_ACK:
		chtls_release_resources(sk);
		chtls_conn_done(sk);
		break;
	case TCP_FIN_WAIT1:
		tcp_set_state(sk, TCP_FIN_WAIT2);
		sk->sk_shutdown |= SEND_SHUTDOWN;

		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_state_change(sk);
		else if (tcp_sk(sk)->linger2 < 0 &&
			 !csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN))
			chtls_abort_conn(sk, skb);
		break;
	default:
		pr_info("close_con_rpl in bad state %d\n", sk->sk_state);
	}
	kfree_skb(skb);
}
static struct sk_buff *get_cpl_skb(struct sk_buff *skb,
				   size_t len, gfp_t gfp)
{
	if (likely(!skb_is_nonlinear(skb) && !skb_cloned(skb))) {
		WARN_ONCE(skb->len < len, "skb alloc error");
		__skb_trim(skb, len);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
		if (skb)
			__skb_put(skb, len);
	}
	return skb;
}
static void set_abort_rpl_wr(struct sk_buff *skb, unsigned int tid,
			     int cmd)
{
	struct cpl_abort_rpl *rpl = cplhdr(skb);

	INIT_TP_WR_CPL(rpl, CPL_ABORT_RPL, tid);
	rpl->cmd = cmd;
}
static void send_defer_abort_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct sk_buff *reply_skb;

	reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
			      GFP_KERNEL | __GFP_NOFAIL);
	__skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
	set_abort_rpl_wr(reply_skb, GET_TID(req),
			 (req->status & CPL_ABORT_NO_RST));
	set_wr_txq(reply_skb, CPL_PRIORITY_DATA, req->status >> 1);
	cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
	kfree_skb(skb);
}
/*
 * Add an skb to the deferred skb queue for processing from process context.
 */
static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
			   defer_handler_t handler)
{
	DEFERRED_SKB_CB(skb)->handler = handler;
	spin_lock_bh(&cdev->deferq.lock);
	__skb_queue_tail(&cdev->deferq, skb);
	if (skb_queue_len(&cdev->deferq) == 1)
		schedule_work(&cdev->deferq_task);
	spin_unlock_bh(&cdev->deferq.lock);
}
static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
				 struct chtls_dev *cdev,
				 int status, int queue)
{
	struct cpl_abort_req_rss *req = cplhdr(skb) + RSS_HDR;
	struct sk_buff *reply_skb;
	struct chtls_sock *csk;
	unsigned int tid;

	csk = rcu_dereference_sk_user_data(sk);
	tid = GET_TID(req);

	reply_skb = get_cpl_skb(skb, sizeof(struct cpl_abort_rpl), gfp_any());
	if (!reply_skb) {
		req->status = (queue << 1) | status;
		t4_defer_reply(skb, cdev, send_defer_abort_rpl);
		return;
	}

	set_abort_rpl_wr(reply_skb, tid, status);
	kfree_skb(skb);
	set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
	if (csk_conn_inline(csk)) {
		struct l2t_entry *e = csk->l2t_entry;

		if (e && sk->sk_state != TCP_SYN_RECV) {
			cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
			return;
		}
	}
	cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
}
/*
 * This is run from a listener's backlog to abort a child connection in
 * SYN_RCV state (i.e., one on the listener's SYN queue).
 */
static void bl_abort_syn_rcv(struct sock *lsk, struct sk_buff *skb)
{
	struct chtls_sock *csk;
	struct sock *child;
	int queue;

	child = skb->sk;
	csk = rcu_dereference_sk_user_data(child);
	queue = csk->txq_idx;

	chtls_send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
			     CPL_ABORT_NO_RST, queue);
	do_abort_syn_rcv(child, lsk);
}
static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
{
	const struct request_sock *oreq;
	struct listen_ctx *listen_ctx;
	struct chtls_sock *csk;
	struct chtls_dev *cdev;
	struct sock *psk;
	void *ctx;

	csk = sk->sk_user_data;
	oreq = csk->passive_reap_next;
	cdev = csk->cdev;

	if (!oreq)
		return -1;

	ctx = lookup_stid(cdev->tids, oreq->ts_recent);
	if (!ctx)
		return -1;

	listen_ctx = (struct listen_ctx *)ctx;
	psk = listen_ctx->lsk;

	bh_lock_sock(psk);
	if (!sock_owned_by_user(psk)) {
		int queue = csk->txq_idx;

		chtls_send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
		do_abort_syn_rcv(sk, psk);
	} else {
		skb->sk = sk;
		BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv;
		__sk_add_backlog(psk, skb);
	}
	bh_unlock_sock(psk);
	return 0;
}
static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
{
	const struct cpl_abort_req_rss *req = cplhdr(skb) + RSS_HDR;
	struct chtls_sock *csk = sk->sk_user_data;
	int rst_status = CPL_ABORT_NO_RST;
	int queue = csk->txq_idx;

	if (is_neg_adv(req->status)) {
		kfree_skb(skb);
		return;
	}

	csk_reset_flag(csk, CSK_ABORT_REQ_RCVD);

	if (!csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN) &&
	    !csk_flag_nochk(csk, CSK_TX_DATA_SENT)) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (send_tx_flowc_wr(sk, 0, tp->snd_nxt, tp->rcv_nxt) < 0)
			WARN_ONCE(1, "send_tx_flowc error");
		csk_set_flag(csk, CSK_TX_DATA_SENT);
	}

	csk_set_flag(csk, CSK_ABORT_SHUTDOWN);

	if (!csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING)) {
		sk->sk_err = ETIMEDOUT;

		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);

		if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb))
			return;
	}

	chtls_send_abort_rpl(sk, skb, csk->cdev, rst_status, queue);
	chtls_release_resources(sk);
	chtls_conn_done(sk);
}
static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb)
{
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb) + RSS_HDR;
	struct chtls_sock *csk;
	struct chtls_dev *cdev;

	csk = rcu_dereference_sk_user_data(sk);
	cdev = csk->cdev;

	if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING)) {
		csk_reset_flag(csk, CSK_ABORT_RPL_PENDING);
		if (!csk_flag_nochk(csk, CSK_ABORT_REQ_RCVD)) {
			if (sk->sk_state == TCP_SYN_SENT) {
				cxgb4_remove_tid(cdev->tids,
						 csk->port_id,
						 GET_TID(rpl),
						 sk->sk_family);
				sock_put(sk);
			}
			chtls_release_resources(sk);
			chtls_conn_done(sk);
		}
	}
	kfree_skb(skb);
}
static int chtls_conn_cpl(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_peer_close *req = cplhdr(skb) + RSS_HDR;
	void (*fn)(struct sock *sk, struct sk_buff *skb);
	unsigned int hwtid = GET_TID(req);
	struct sock *sk;
	u8 opcode;

	opcode = ((const struct rss_header *)cplhdr(skb))->opcode;

	sk = lookup_tid(cdev->tids, hwtid);
	if (!sk)
		goto rel_skb;

	switch (opcode) {
	case CPL_PEER_CLOSE:
		fn = chtls_peer_close;
		break;
	case CPL_CLOSE_CON_RPL:
		fn = chtls_close_con_rpl;
		break;
	case CPL_ABORT_REQ_RSS:
		fn = chtls_abort_req_rss;
		break;
	case CPL_ABORT_RPL_RSS:
		fn = chtls_abort_rpl_rss;
		break;
	default:
		goto rel_skb;
	}

	process_cpl_msg(fn, sk, skb);
	return 0;

rel_skb:
	kfree_skb(skb);
	return 0;
}
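/*
 * CPL_FW4_ACK returns TX work-request credits and may carry a cumulative
 * snd_una update. Completed skbs are popped off the pending-WR list
 * (pskb->csum holds the credits each one consumed), snd_una is advanced,
 * and queued TX is pushed if the returned credits now allow it.
 */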
static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
{
	struct cpl_fw4_ack *hdr = cplhdr(skb) + RSS_HDR;
	struct chtls_sock *csk = sk->sk_user_data;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 credits = hdr->credits;
	u32 snd_una;

	snd_una = ntohl(hdr->snd_una);
	csk->wr_credits += credits;

	if (csk->wr_unacked > csk->wr_max_credits - csk->wr_credits)
		csk->wr_unacked = csk->wr_max_credits - csk->wr_credits;

	while (credits) {
		struct sk_buff *pskb = csk->wr_skb_head;
		u32 csum;

		if (unlikely(!pskb)) {
			if (csk->wr_nondata)
				csk->wr_nondata -= credits;
			break;
		}
		csum = (__force u32)pskb->csum;
		if (unlikely(credits < csum)) {
			pskb->csum = (__force __wsum)(csum - credits);
			break;
		}
		dequeue_wr(sk);
		credits -= csum;
		kfree_skb(pskb);
	}
	if (hdr->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) {
		if (unlikely(before(snd_una, tp->snd_una))) {
			kfree_skb(skb);
			return;
		}

		if (tp->snd_una != snd_una) {
			tp->snd_una = snd_una;
			tp->rcv_tstamp = tcp_time_stamp(tp);
			if (tp->snd_una == tp->snd_nxt &&
			    !csk_flag_nochk(csk, CSK_TX_FAILOVER))
				csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
		}
	}
	if (hdr->seq_vld & CPL_FW4_ACK_FLAGS_CH) {
		unsigned int fclen16 = roundup(failover_flowc_wr_len, 16);

		csk->wr_credits -= fclen16;
		csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
		csk_reset_flag(csk, CSK_TX_FAILOVER);
	}
	if (skb_queue_len(&csk->txq) && chtls_push_frames(csk, 0))
		sk->sk_write_space(sk);

	kfree_skb(skb);
}
static int chtls_wr_ack(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_fw4_ack *rpl = cplhdr(skb) + RSS_HDR;
	unsigned int hwtid = GET_TID(rpl);
	struct sock *sk;

	sk = lookup_tid(cdev->tids, hwtid);
	if (unlikely(!sk)) {
		pr_err("can't find conn. for hwtid %u.\n", hwtid);
		return -EINVAL;
	}
	process_cpl_msg(chtls_rx_ack, sk, skb);

	return 0;
}
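/*
 * Dispatch table mapping CPL opcodes received from the adapter to their
 * handlers. Connection-lifetime messages (peer close, close reply and
 * both abort directions) share chtls_conn_cpl, which re-dispatches on
 * the opcode in the RSS header.
 */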
chtls_handler_func chtls_handlers[NUM_CPL_CMDS] = {
	[CPL_PASS_OPEN_RPL]     = chtls_pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = chtls_close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ]   = chtls_pass_accept_req,
	[CPL_PASS_ESTABLISH]    = chtls_pass_establish,
	[CPL_RX_DATA]           = chtls_rx_data,
	[CPL_TLS_DATA]          = chtls_rx_pdu,
	[CPL_RX_TLS_CMP]        = chtls_rx_cmp,
	[CPL_PEER_CLOSE]        = chtls_conn_cpl,
	[CPL_CLOSE_CON_RPL]     = chtls_conn_cpl,
	[CPL_ABORT_REQ_RSS]     = chtls_conn_cpl,
	[CPL_ABORT_RPL_RSS]     = chtls_conn_cpl,
	[CPL_FW4_ACK]           = chtls_wr_ack,
};