/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
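
/* Rough sketch of the contract above: the ISN is a keyed hash of the
 * 4-tuple fed in (daddr, saddr, dest port, source port) plus, as far
 * as secure_seq.c is concerned, a coarse clock component, so reusing
 * a 4-tuple still yields advancing initial sequence numbers.
 */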
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr)) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type & IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
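
/* Note on the IPV6_ADDR_MAPPED branch above: connect() to
 * ::ffff:a.b.c.d swaps icsk_af_ops to ipv6_mapped and delegates to
 * tcp_v4_connect(), so a single AF_INET6 socket can carry plain IPv4
 * traffic; on failure the IPv6 ops are restored before bailing out.
 */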
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	u32 mtu;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	mtu = READ_ONCE(tcp_sk(sk)->mtu_info);

	/* Drop requests trying to increase our current mss.
	 * Check done in __ip6_rt_update_pmtu() is too late.
	 */
	if (tcp_mtu_to_mss(sk, mtu) >= tcp_sk(sk)->mss_cache)
		return;

	dst = inet6_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}
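
/* Pairing note: tcp_v6_err() publishes the ICMP-supplied MTU with
 * WRITE_ONCE(tp->mtu_info, ...) and either calls this function
 * directly or defers it via TCP_MTU_REDUCED_DEFERRED; the READ_ONCE()
 * above is the matching lockless read.
 */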
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex);

	if (!sk) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		u32 mtu = ntohl(info);

		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always <576 bytes so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		if (mtu < IPV6_MIN_MTU)
			goto out;

		WRITE_ONCE(tp->mtu_info, mtu);

		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}

	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      bool attach_req)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, attach_req);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
			       np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet_rsk(req)->pktopts);
}
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}
static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
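
/* Userspace view of the parser above (a minimal sketch, not part of
 * this file):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in6 *a = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *	a->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &a->sin6_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * A zero tcpm_keylen deletes the key, matching the !cmd.tcpm_keylen
 * branch above; v4-mapped peers are stored as AF_INET keys.
 */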
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
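
/* The tcp6_pseudohdr hashed above mirrors the RFC 2460 checksum
 * pseudo-header: source address, destination address, upper-layer
 * packet length, and the next-header (protocol) value, in that order.
 */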
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
#endif
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}
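
/* Decision table for the checks above:
 *
 *	key configured	MD5 option in segment	verdict
 *	no		no			accept (common case)
 *	yes		no			drop, TCPMD5NOTFOUND
 *	no		yes			drop, TCPMD5UNEXPECTED
 *	yes		yes			recompute and compare
 */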
static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}
static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
};
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, u32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even if it is for a RST.
	 * The underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
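
/* tcp_v6_send_response() is the one raw-reply builder: callers choose
 * RST vs ACK through the rst argument (see tcp_v6_send_reset() and
 * tcp_v6_send_ack() below), and the reply goes out on the per-netns
 * control socket rather than on any flow's own socket.
 */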
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * We are not losing security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					   &tcp_hashinfo, &ipv6h->saddr,
					   th->source, &ipv6h->daddr,
					   ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    u32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
			0, 0);
}
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
		IP6_INC_STATS_BH(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
		return 0;
	}

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0; /* don't send reset */
}
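
/* Two IPv4 escape hatches above: ETH_P_IP frames (seen via a v4-mapped
 * socket) are handed to tcp_v4_conn_request(), while a v4-mapped
 * *source* address arriving in a native IPv6 header is counted as a
 * header error and dropped, since no legitimate peer generates it.
 */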
static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_mc_list = NULL;
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet_iif(skb);
		newnp->mcast_hops  = ip_hdr(skb)->ttl;
		newnp->rcv_flowinfo = 0;
		if (np->repflow)
			newnp->flow_label = 0;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_mc_list = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt	  = NULL;
	newnp->mcast_oif  = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_atomic(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_atomic(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions) {
				tcp_v6_restore_cb(newnp->pktoptions);
				skb_set_owner_r(newnp->pktoptions, newsk);
			}
		}
	}

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
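
/* Note on the pktoptions hand-off above: tcp_v6_init_req() pinned the
 * SYN's skb; once the child owns the request, the options are cloned
 * for IPV6_PKTOPTIONS and the original skb is consumed, so the request
 * never leaks it.
 */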
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (tcp_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			tcp_v6_restore_cb(opt_skb);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
}
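
/* The end_seq arithmetic above follows the usual TCP sequence-space
 * rule: SYN and FIN each consume one sequence number, so
 * end_seq = seq + syn + fin + payload, with payload being skb->len
 * minus the header length (th->doff * 4).
 */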
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
				inet6_iif(skb));
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		tcp_v6_fill_cb(skb, hdr, th);
		if (tcp_v6_inbound_md5_hash(sk, skb)) {
			reqsk_put(req);
			goto discard_it;
		}
		if (tcp_checksum_complete(skb)) {
			reqsk_put(req);
			goto csum_error;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_restore_cb(skb);
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
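
/* Early demux runs before routing: when an established socket matches,
 * the skb is charged to it and the socket's cached rx dst, revalidated
 * via its cookie, is attached, saving a full route lookup per packet.
 */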
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
{
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int rx_queue;
	int state;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = sk_state_load(sp);
	if (state == TCP_LISTEN)
		rx_queue = sp->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   state,
		   tp->write_seq - tp->snd_una,
		   rx_queue,
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   state == TCP_LISTEN ?
			fastopenq->max_qlen :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait6_sock(seq, v, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq6(seq, v, st->num);
	else
		get_tcp6_sock(seq, v, st->num);
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};
int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}
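
/* Keeping pinet6 intact matters because tcpv6_prot is created with
 * SLAB_DESTROY_BY_RCU: a concurrent lookup may still dereference a
 * socket whose slab object is being recycled, so pinet6 must remain
 * valid across such reuse.
 */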
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
	.clear_sk		= tcp_v6_clear_sk,
};
static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
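
/* The per-netns control socket created above (net->ipv6.tcp_sk) is the
 * one tcp_v6_send_response() uses to transmit RSTs and ACKs.
 */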
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}