/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI
 *	and Alexey Kuznetsov	:	Support the IPV6_V6ONLY socket option,
 *					which allows both IPv4 and IPv6 sockets
 *					to bind to a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>
72 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
73 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
74 struct request_sock *req);
76 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
78 static const struct inet_connection_sock_af_ops ipv6_mapped;
79 static const struct inet_connection_sock_af_ops ipv6_specific;
80 #ifdef CONFIG_TCP_MD5SIG
81 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
82 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
84 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
85 const struct in6_addr *addr)
91 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
93 struct dst_entry *dst = skb_dst(skb);
95 if (dst && dst_hold_safe(dst)) {
96 const struct rt6_info *rt = (const struct rt6_info *)dst;
99 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
100 inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
104 static u32 tcp_v6_init_seq(const struct sk_buff *skb)
106 return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
107 ipv6_hdr(skb)->saddr.s6_addr32,
109 tcp_hdr(skb)->source);
112 static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
114 return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
115 ipv6_hdr(skb)->saddr.s6_addr32);
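/* Establish an outgoing IPv6 TCP connection: validate the destination,
 * handle flow labels and v4-mapped addresses (falling back to
 * tcp_v4_connect() for the latter), route the flow, bind a source
 * address and port, and send the initial SYN.
 */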
118 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
121 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
122 struct inet_sock *inet = inet_sk(sk);
123 struct inet_connection_sock *icsk = inet_csk(sk);
124 struct ipv6_pinfo *np = inet6_sk(sk);
125 struct tcp_sock *tp = tcp_sk(sk);
126 struct in6_addr *saddr = NULL, *final_p, final;
127 struct ipv6_txoptions *opt;
129 struct dst_entry *dst;
132 struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
134 if (addr_len < SIN6_LEN_RFC2133)
137 if (usin->sin6_family != AF_INET6)
138 return -EAFNOSUPPORT;
140 memset(&fl6, 0, sizeof(fl6));
	fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
	IP6_ECN_flow_init(fl6.flowlabel);
	if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
146 struct ip6_flowlabel *flowlabel;
147 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
150 fl6_sock_release(flowlabel);
	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */
158 if (ipv6_addr_any(&usin->sin6_addr)) {
159 if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
160 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
163 usin->sin6_addr = in6addr_loopback;
166 addr_type = ipv6_addr_type(&usin->sin6_addr);
168 if (addr_type & IPV6_ADDR_MULTICAST)
	if (addr_type & IPV6_ADDR_LINKLOCAL) {
172 if (addr_len >= sizeof(struct sockaddr_in6) &&
173 usin->sin6_scope_id) {
			/* If an interface is set while binding, the indices
			 * must coincide.
			 */
177 if (sk->sk_bound_dev_if &&
178 sk->sk_bound_dev_if != usin->sin6_scope_id)
181 sk->sk_bound_dev_if = usin->sin6_scope_id;
184 /* Connect to link-local address requires an interface */
185 if (!sk->sk_bound_dev_if)
189 if (tp->rx_opt.ts_recent_stamp &&
190 !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
191 tp->rx_opt.ts_recent = 0;
192 tp->rx_opt.ts_recent_stamp = 0;
196 sk->sk_v6_daddr = usin->sin6_addr;
197 np->flow_label = fl6.flowlabel;
203 if (addr_type & IPV6_ADDR_MAPPED) {
204 u32 exthdrlen = icsk->icsk_ext_hdr_len;
205 struct sockaddr_in sin;
207 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
209 if (__ipv6_only_sock(sk))
212 sin.sin_family = AF_INET;
213 sin.sin_port = usin->sin6_port;
214 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
216 icsk->icsk_af_ops = &ipv6_mapped;
217 sk->sk_backlog_rcv = tcp_v4_do_rcv;
218 #ifdef CONFIG_TCP_MD5SIG
219 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
222 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
225 icsk->icsk_ext_hdr_len = exthdrlen;
226 icsk->icsk_af_ops = &ipv6_specific;
227 sk->sk_backlog_rcv = tcp_v6_do_rcv;
228 #ifdef CONFIG_TCP_MD5SIG
229 tp->af_specific = &tcp_sock_ipv6_specific;
233 np->saddr = sk->sk_v6_rcv_saddr;
238 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
239 saddr = &sk->sk_v6_rcv_saddr;
241 fl6.flowi6_proto = IPPROTO_TCP;
242 fl6.daddr = sk->sk_v6_daddr;
243 fl6.saddr = saddr ? *saddr : np->saddr;
244 fl6.flowi6_oif = sk->sk_bound_dev_if;
245 fl6.flowi6_mark = sk->sk_mark;
246 fl6.fl6_dport = usin->sin6_port;
247 fl6.fl6_sport = inet->inet_sport;
248 fl6.flowi6_uid = sk->sk_uid;
250 opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
251 final_p = fl6_update_dst(&fl6, opt, &final);
253 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
255 dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
263 sk->sk_v6_rcv_saddr = *saddr;
266 /* set the source address */
268 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
270 sk->sk_gso_type = SKB_GSO_TCPV6;
271 ip6_dst_store(sk, dst, NULL, NULL);
273 icsk->icsk_ext_hdr_len = 0;
275 icsk->icsk_ext_hdr_len = opt->opt_flen +
278 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
280 inet->inet_dport = usin->sin6_port;
282 tcp_set_state(sk, TCP_SYN_SENT);
283 err = inet6_hash_connect(tcp_death_row, sk);
289 if (likely(!tp->repair)) {
291 tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
292 sk->sk_v6_daddr.s6_addr32,
295 tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
297 sk->sk_v6_daddr.s6_addr32);
300 if (tcp_fastopen_defer_connect(sk, &err))
305 err = tcp_connect(sk);
312 tcp_set_state(sk, TCP_CLOSE);
314 inet->inet_dport = 0;
315 sk->sk_route_caps = 0;
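/* Invoked when an ICMPv6 "Packet Too Big" lowered the path MTU, either
 * directly from tcp_v6_err() or deferred while the socket was owned by
 * the user: re-sync the MSS against the new route MTU and retransmit if
 * our cached MSS is now too large.
 */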
319 static void tcp_v6_mtu_reduced(struct sock *sk)
321 struct dst_entry *dst;
324 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
327 mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
	/* Drop requests trying to increase our current mss.
	 * The check done in __ip6_rt_update_pmtu() is too late.
	 */
332 if (tcp_mtu_to_mss(sk, mtu) >= tcp_sk(sk)->mss_cache)
335 dst = inet6_csk_update_pmtu(sk, mtu);
339 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
340 tcp_sync_mss(sk, dst_mtu(dst));
341 tcp_simple_retransmit(sk);
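/* ICMPv6 error handler for TCP: map the ICMP type/code to an errno, handle
 * redirects and Packet Too Big messages, and report other errors to the
 * socket (immediately if it is not owned by the user, deferred otherwise).
 */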
345 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
346 u8 type, u8 code, int offset, __be32 info)
348 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
349 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
350 struct net *net = dev_net(skb->dev);
351 struct request_sock *fastopen;
352 struct ipv6_pinfo *np;
359 sk = __inet6_lookup_established(net, &tcp_hashinfo,
360 &hdr->daddr, th->dest,
361 &hdr->saddr, ntohs(th->source),
362 skb->dev->ifindex, inet6_sdif(skb));
365 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
370 if (sk->sk_state == TCP_TIME_WAIT) {
371 inet_twsk_put(inet_twsk(sk));
374 seq = ntohl(th->seq);
375 fatal = icmpv6_err_convert(type, code, &err);
376 if (sk->sk_state == TCP_NEW_SYN_RECV)
377 return tcp_req_err(sk, seq, fatal);
380 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
381 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
383 if (sk->sk_state == TCP_CLOSE)
386 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
387 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
392 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
393 fastopen = tp->fastopen_rsk;
394 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
395 if (sk->sk_state != TCP_LISTEN &&
396 !between(seq, snd_una, tp->snd_nxt)) {
397 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
403 if (type == NDISC_REDIRECT) {
404 if (!sock_owned_by_user(sk)) {
405 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
408 dst->ops->redirect(dst, sk, skb);
413 if (type == ICMPV6_PKT_TOOBIG) {
414 u32 mtu = ntohl(info);
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
		 * they should go through unfragmented).
		 */
420 if (sk->sk_state == TCP_LISTEN)
423 if (!ip6_sk_accept_pmtu(sk))
426 if (mtu < IPV6_MIN_MTU)
429 WRITE_ONCE(tp->mtu_info, mtu);
431 if (!sock_owned_by_user(sk))
432 tcp_v6_mtu_reduced(sk);
433 else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
	/* Might be for a request_sock */
441 switch (sk->sk_state) {
		/* Only in fast or simultaneous open. If a fast open socket is
		 * already accepted, it is treated as a connected one below.
		 */
447 if (fastopen && !fastopen->sk)
450 if (!sock_owned_by_user(sk)) {
452 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
456 sk->sk_err_soft = err;
460 if (!sock_owned_by_user(sk) && np->recverr) {
462 sk->sk_error_report(sk);
464 sk->sk_err_soft = err;
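/* Build and transmit a SYN-ACK for a request socket, routing it first if
 * the caller did not supply a destination entry.
 */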
472 static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
474 struct request_sock *req,
475 struct tcp_fastopen_cookie *foc,
476 enum tcp_synack_type synack_type)
478 struct inet_request_sock *ireq = inet_rsk(req);
479 struct ipv6_pinfo *np = inet6_sk(sk);
480 struct ipv6_txoptions *opt;
481 struct flowi6 *fl6 = &fl->u.ip6;
485 /* First, grab a route. */
486 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
487 IPPROTO_TCP)) == NULL)
490 skb = tcp_make_synack(sk, dst, req, foc, synack_type);
493 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
494 &ireq->ir_v6_rmt_addr);
496 fl6->daddr = ireq->ir_v6_rmt_addr;
497 if (np->repflow && ireq->pktopts)
498 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
501 opt = ireq->ipv6_opt;
503 opt = rcu_dereference(np->opt);
504 err = ip6_xmit(sk, skb, fl6, skb->mark ? : sk->sk_mark, opt,
507 err = net_xmit_eval(err);
515 static void tcp_v6_reqsk_destructor(struct request_sock *req)
517 kfree(inet_rsk(req)->ipv6_opt);
518 kfree_skb(inet_rsk(req)->pktopts);
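/* TCP-MD5 (RFC 2385) signature support: key lookup, the TCP_MD5SIG /
 * TCP_MD5SIG_EXT setsockopt parser, and hashing helpers for both bare
 * headers and full skbs.
 */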
521 #ifdef CONFIG_TCP_MD5SIG
522 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
523 const struct in6_addr *addr)
525 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
528 static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
529 const struct sock *addr_sk)
531 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
534 static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
535 char __user *optval, int optlen)
537 struct tcp_md5sig cmd;
538 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
541 if (optlen < sizeof(cmd))
544 if (copy_from_user(&cmd, optval, sizeof(cmd)))
547 if (sin6->sin6_family != AF_INET6)
550 if (optname == TCP_MD5SIG_EXT &&
551 cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
552 prefixlen = cmd.tcpm_prefixlen;
553 if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
557 prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
560 if (!cmd.tcpm_keylen) {
561 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
562 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
564 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
565 AF_INET6, prefixlen);
568 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
571 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
572 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
573 AF_INET, prefixlen, cmd.tcpm_key,
574 cmd.tcpm_keylen, GFP_KERNEL);
576 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
577 AF_INET6, prefixlen, cmd.tcpm_key,
578 cmd.tcpm_keylen, GFP_KERNEL);
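/* Illustrative userspace sketch (not part of this file) of how the option
 * parsed above is typically set, using the uapi struct tcp_md5sig from
 * <linux/tcp.h>; error handling omitted:
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in6 *a = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *	a->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &a->sin6_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */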
581 static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
582 const struct in6_addr *daddr,
583 const struct in6_addr *saddr,
584 const struct tcphdr *th, int nbytes)
586 struct tcp6_pseudohdr *bp;
587 struct scatterlist sg;
591 /* 1. TCP pseudo-header (RFC2460) */
594 bp->protocol = cpu_to_be32(IPPROTO_TCP);
595 bp->len = cpu_to_be32(nbytes);
597 _th = (struct tcphdr *)(bp + 1);
598 memcpy(_th, th, sizeof(*th));
601 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
602 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
603 sizeof(*bp) + sizeof(*th));
604 return crypto_ahash_update(hp->md5_req);
607 static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
608 const struct in6_addr *daddr, struct in6_addr *saddr,
609 const struct tcphdr *th)
611 struct tcp_md5sig_pool *hp;
612 struct ahash_request *req;
614 hp = tcp_get_md5sig_pool();
616 goto clear_hash_noput;
619 if (crypto_ahash_init(req))
621 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
623 if (tcp_md5_hash_key(hp, key))
625 ahash_request_set_crypt(req, NULL, md5_hash, 0);
626 if (crypto_ahash_final(req))
629 tcp_put_md5sig_pool();
633 tcp_put_md5sig_pool();
635 memset(md5_hash, 0, 16);
639 static int tcp_v6_md5_hash_skb(char *md5_hash,
640 const struct tcp_md5sig_key *key,
641 const struct sock *sk,
642 const struct sk_buff *skb)
644 const struct in6_addr *saddr, *daddr;
645 struct tcp_md5sig_pool *hp;
646 struct ahash_request *req;
647 const struct tcphdr *th = tcp_hdr(skb);
649 if (sk) { /* valid for establish/request sockets */
650 saddr = &sk->sk_v6_rcv_saddr;
651 daddr = &sk->sk_v6_daddr;
653 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
654 saddr = &ip6h->saddr;
655 daddr = &ip6h->daddr;
658 hp = tcp_get_md5sig_pool();
660 goto clear_hash_noput;
663 if (crypto_ahash_init(req))
666 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
668 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
670 if (tcp_md5_hash_key(hp, key))
672 ahash_request_set_crypt(req, NULL, md5_hash, 0);
673 if (crypto_ahash_final(req))
676 tcp_put_md5sig_pool();
680 tcp_put_md5sig_pool();
682 memset(md5_hash, 0, 16);
688 static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
689 const struct sk_buff *skb)
691 #ifdef CONFIG_TCP_MD5SIG
692 const __u8 *hash_location = NULL;
693 struct tcp_md5sig_key *hash_expected;
694 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
695 const struct tcphdr *th = tcp_hdr(skb);
699 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
700 hash_location = tcp_parse_md5sig_option(th);
702 /* We've parsed the options - do we have a hash? */
703 if (!hash_expected && !hash_location)
706 if (hash_expected && !hash_location) {
707 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
711 if (!hash_expected && hash_location) {
712 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
716 /* check the signature */
717 genhash = tcp_v6_md5_hash_skb(newhash,
721 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
722 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
723 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
724 genhash ? "failed" : "mismatch",
725 &ip6h->saddr, ntohs(th->source),
726 &ip6h->daddr, ntohs(th->dest));
733 static void tcp_v6_init_req(struct request_sock *req,
734 const struct sock *sk_listener,
737 struct inet_request_sock *ireq = inet_rsk(req);
738 const struct ipv6_pinfo *np = inet6_sk(sk_listener);
740 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
741 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
743 /* So that link locals have meaning */
744 if (!sk_listener->sk_bound_dev_if &&
745 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
746 ireq->ir_iif = tcp_v6_iif(skb);
748 if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
749 (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
750 np->rxopt.bits.rxinfo ||
751 np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
752 np->rxopt.bits.rxohlim || np->repflow)) {
753 refcount_inc(&skb->users);
758 static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
760 const struct request_sock *req)
762 return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
765 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
767 .obj_size = sizeof(struct tcp6_request_sock),
768 .rtx_syn_ack = tcp_rtx_synack,
769 .send_ack = tcp_v6_reqsk_send_ack,
770 .destructor = tcp_v6_reqsk_destructor,
771 .send_reset = tcp_v6_send_reset,
772 .syn_ack_timeout = tcp_syn_ack_timeout,
775 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
776 .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
777 sizeof(struct ipv6hdr),
778 #ifdef CONFIG_TCP_MD5SIG
779 .req_md5_lookup = tcp_v6_md5_lookup,
780 .calc_md5_hash = tcp_v6_md5_hash_skb,
782 .init_req = tcp_v6_init_req,
783 #ifdef CONFIG_SYN_COOKIES
784 .cookie_init_seq = cookie_v6_init_sequence,
786 .route_req = tcp_v6_route_req,
787 .init_seq = tcp_v6_init_seq,
788 .init_ts_off = tcp_v6_init_ts_off,
789 .send_synack = tcp_v6_send_synack,
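/* Construct and send a bare TCP segment (RST or ACK) on the per-netns
 * control socket in response to an incoming skb; used by the RST,
 * timewait-ACK and request-sock-ACK paths below.
 */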
792 static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
793 u32 ack, u32 win, u32 tsval, u32 tsecr,
794 int oif, struct tcp_md5sig_key *key, int rst,
795 u8 tclass, __be32 label)
797 const struct tcphdr *th = tcp_hdr(skb);
799 struct sk_buff *buff;
801 struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
802 struct sock *ctl_sk = net->ipv6.tcp_sk;
803 unsigned int tot_len = sizeof(struct tcphdr);
804 struct dst_entry *dst;
808 tot_len += TCPOLEN_TSTAMP_ALIGNED;
809 #ifdef CONFIG_TCP_MD5SIG
811 tot_len += TCPOLEN_MD5SIG_ALIGNED;
814 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
819 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
821 t1 = skb_push(buff, tot_len);
822 skb_reset_transport_header(buff);
824 /* Swap the send and the receive. */
825 memset(t1, 0, sizeof(*t1));
826 t1->dest = th->source;
827 t1->source = th->dest;
828 t1->doff = tot_len / 4;
829 t1->seq = htonl(seq);
830 t1->ack_seq = htonl(ack);
831 t1->ack = !rst || !th->ack;
833 t1->window = htons(win);
835 topt = (__be32 *)(t1 + 1);
838 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
839 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
840 *topt++ = htonl(tsval);
841 *topt++ = htonl(tsecr);
844 #ifdef CONFIG_TCP_MD5SIG
846 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
847 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
848 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
849 &ipv6_hdr(skb)->saddr,
850 &ipv6_hdr(skb)->daddr, t1);
854 memset(&fl6, 0, sizeof(fl6));
855 fl6.daddr = ipv6_hdr(skb)->saddr;
856 fl6.saddr = ipv6_hdr(skb)->daddr;
857 fl6.flowlabel = label;
859 buff->ip_summed = CHECKSUM_PARTIAL;
862 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
864 fl6.flowi6_proto = IPPROTO_TCP;
865 if (rt6_need_strict(&fl6.daddr) && !oif)
866 fl6.flowi6_oif = tcp_v6_iif(skb);
868 if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
871 fl6.flowi6_oif = oif;
874 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
875 fl6.fl6_dport = t1->dest;
876 fl6.fl6_sport = t1->source;
877 fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
878 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
	/* Pass a socket to ip6_dst_lookup whether or not this is for a RST;
	 * the underlying function will use it to retrieve the network
	 * namespace.
	 */
884 dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
886 skb_dst_set(buff, dst);
887 ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
888 TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
890 TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
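/* Send a RST for an incoming segment. When MD5 is enabled and the segment
 * is signed but no local socket matched, the key is looked up via the
 * listener so that the RST can be signed as well.
 */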
897 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
899 const struct tcphdr *th = tcp_hdr(skb);
900 u32 seq = 0, ack_seq = 0;
901 struct tcp_md5sig_key *key = NULL;
902 #ifdef CONFIG_TCP_MD5SIG
903 const __u8 *hash_location = NULL;
904 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
905 unsigned char newhash[16];
907 struct sock *sk1 = NULL;
914 /* If sk not NULL, it means we did a successful lookup and incoming
915 * route had to be correct. prequeue might have dropped our dst.
917 if (!sk && !ipv6_unicast_destination(skb))
920 #ifdef CONFIG_TCP_MD5SIG
922 hash_location = tcp_parse_md5sig_option(th);
923 if (sk && sk_fullsock(sk)) {
924 key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
925 } else if (hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not lose any security here:
		 * the incoming packet is checked against the md5 hash of the
		 * found key, and no RST is generated if the hash doesn't match.
		 */
933 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
934 &tcp_hashinfo, NULL, 0,
936 th->source, &ipv6h->daddr,
938 tcp_v6_iif_l3_slave(skb),
943 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
947 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
948 if (genhash || memcmp(hash_location, newhash, 16) != 0)
954 seq = ntohl(th->ack_seq);
956 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
959 oif = sk ? sk->sk_bound_dev_if : 0;
960 tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
962 #ifdef CONFIG_TCP_MD5SIG
968 static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
969 u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
970 struct tcp_md5sig_key *key, u8 tclass,
973 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
977 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
979 struct inet_timewait_sock *tw = inet_twsk(sk);
980 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
982 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
983 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
984 tcp_time_stamp_raw() + tcptw->tw_ts_offset,
985 tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
986 tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
991 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
992 struct request_sock *req)
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
1002 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
1003 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
1004 tcp_rsk(req)->rcv_nxt,
1005 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
1006 tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
1007 req->ts_recent, sk->sk_bound_dev_if,
1008 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
1013 static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
1015 #ifdef CONFIG_SYN_COOKIES
1016 const struct tcphdr *th = tcp_hdr(skb);
1019 sk = cookie_v6_check(sk, skb);
1024 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1026 if (skb->protocol == htons(ETH_P_IP))
1027 return tcp_v4_conn_request(sk, skb);
1029 if (!ipv6_unicast_destination(skb))
1032 if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
1033 __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
1037 return tcp_conn_request(&tcp6_request_sock_ops,
1038 &tcp_request_sock_ipv6_ops, sk, skb);
1042 return 0; /* don't send reset */
1045 static void tcp_v6_restore_cb(struct sk_buff *skb)
	/* We need to move the header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
	 */
1051 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1052 sizeof(struct inet6_skb_parm));
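/* Create the child socket once the 3WHS (or Fast Open) completes: the
 * v4-mapped case is delegated to tcp_v4_syn_recv_sock(), while the native
 * case copies addresses, IPv6 options and (optionally) the MD5 key from
 * the listener and the request sock.
 */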
1055 static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1056 struct request_sock *req,
1057 struct dst_entry *dst,
1058 struct request_sock *req_unhash,
1061 struct inet_request_sock *ireq;
1062 struct ipv6_pinfo *newnp;
1063 const struct ipv6_pinfo *np = inet6_sk(sk);
1064 struct ipv6_txoptions *opt;
1065 struct tcp6_sock *newtcp6sk;
1066 struct inet_sock *newinet;
1067 struct tcp_sock *newtp;
1069 #ifdef CONFIG_TCP_MD5SIG
1070 struct tcp_md5sig_key *key;
1074 if (skb->protocol == htons(ETH_P_IP)) {
1079 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1080 req_unhash, own_req);
1085 newtcp6sk = (struct tcp6_sock *)newsk;
1086 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1088 newinet = inet_sk(newsk);
1089 newnp = inet6_sk(newsk);
1090 newtp = tcp_sk(newsk);
1092 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1094 newnp->saddr = newsk->sk_v6_rcv_saddr;
1096 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1097 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1098 #ifdef CONFIG_TCP_MD5SIG
1099 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1102 newnp->ipv6_mc_list = NULL;
1103 newnp->ipv6_ac_list = NULL;
1104 newnp->ipv6_fl_list = NULL;
1105 newnp->pktoptions = NULL;
1107 newnp->mcast_oif = inet_iif(skb);
1108 newnp->mcast_hops = ip_hdr(skb)->ttl;
1109 newnp->rcv_flowinfo = 0;
1111 newnp->flow_label = 0;
		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */
		/* This is a tricky place. Until this moment the IPv4 tcp code
		 * worked with the IPv6 icsk.icsk_af_ops.
		 * Sync it now.
		 */
1123 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1128 ireq = inet_rsk(req);
1130 if (sk_acceptq_is_full(sk))
1134 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
1139 newsk = tcp_create_openreq_child(sk, req, skb);
	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */
1149 newsk->sk_gso_type = SKB_GSO_TCPV6;
1150 ip6_dst_store(newsk, dst, NULL, NULL);
1151 inet6_sk_rx_dst_set(newsk, skb);
1153 newtcp6sk = (struct tcp6_sock *)newsk;
1154 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1156 newtp = tcp_sk(newsk);
1157 newinet = inet_sk(newsk);
1158 newnp = inet6_sk(newsk);
1160 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1162 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1163 newnp->saddr = ireq->ir_v6_loc_addr;
1164 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1165 newsk->sk_bound_dev_if = ireq->ir_iif;
	/* Now IPv6 options...
	 *
	 * First: no IPv4 options.
	 */
1171 newinet->inet_opt = NULL;
1172 newnp->ipv6_mc_list = NULL;
1173 newnp->ipv6_ac_list = NULL;
1174 newnp->ipv6_fl_list = NULL;
1177 newnp->rxopt.all = np->rxopt.all;
1179 newnp->pktoptions = NULL;
1181 newnp->mcast_oif = tcp_v6_iif(skb);
1182 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1183 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1185 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
	/* Clone native IPv6 options from the listening socket (if any).
	 *
	 * Yes, keeping a reference count would be much more clever,
	 * but we do one more thing here: reattach optmem to newsk.
	 */
1193 opt = ireq->ipv6_opt;
1195 opt = rcu_dereference(np->opt);
1197 opt = ipv6_dup_options(newsk, opt);
1198 RCU_INIT_POINTER(newnp->opt, opt);
1200 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1202 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1205 tcp_ca_openreq_child(newsk, dst);
1207 tcp_sync_mss(newsk, dst_mtu(dst));
1208 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1210 tcp_initialize_rcv_mss(newsk);
1212 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1213 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1215 #ifdef CONFIG_TCP_MD5SIG
1216 /* Copy over the MD5 key from the original socket */
1217 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
1224 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1225 AF_INET6, 128, key->key, key->keylen,
1226 sk_gfp_mask(sk, GFP_ATOMIC));
1230 if (__inet_inherit_port(sk, newsk) < 0) {
1231 inet_csk_prepare_forced_close(newsk);
1235 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1237 tcp_move_syn(newtp, req);
1239 /* Clone pktoptions received with SYN, if we own the req */
1240 if (ireq->pktopts) {
1241 newnp->pktoptions = skb_clone(ireq->pktopts,
1242 sk_gfp_mask(sk, GFP_ATOMIC));
1243 consume_skb(ireq->pktopts);
1244 ireq->pktopts = NULL;
1245 if (newnp->pktoptions) {
1246 tcp_v6_restore_cb(newnp->pktoptions);
1247 skb_set_owner_r(newnp->pktoptions, newsk);
1255 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
1271 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1273 struct ipv6_pinfo *np = inet6_sk(sk);
1274 struct tcp_sock *tp;
1275 struct sk_buff *opt_skb = NULL;
	/* Imagine: socket is IPv6. IPv4 packet arrives,
	 * goes to IPv4 receive handler and backlogged.
	 * From backlog it always goes here. Kerboom...
	 * Fortunately, tcp_rcv_established and rcv_established
	 * handle them correctly, but that is not the case with
	 * tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */
1285 if (skb->protocol == htons(ETH_P_IP))
1286 return tcp_v4_do_rcv(sk, skb);
	/*
	 * socket locking is here for SMP purposes as backlog rcv
	 * is currently called with bh processing disabled.
	 */
	/* Do Stevens' IPV6_PKTOPTIONS.
	 *
	 * Yes, guys, it is the only place in our code where we
	 * may make it not affecting IPv4.
	 * The rest of the code is protocol independent,
	 * and I do not like the idea of uglifying IPv4.
	 *
	 * Actually, the whole idea behind IPV6_PKTOPTIONS
	 * looks not very well thought out. For now we latch
	 * the options received in the last packet enqueued
	 * by tcp. Feel free to propose a better solution.
	 */
1307 opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
1309 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1310 struct dst_entry *dst = sk->sk_rx_dst;
1312 sock_rps_save_rxhash(sk, skb);
1313 sk_mark_napi_id(sk, skb);
1315 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1316 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1318 sk->sk_rx_dst = NULL;
1322 tcp_rcv_established(sk, skb, tcp_hdr(skb));
1324 goto ipv6_pktoptions;
1328 if (tcp_checksum_complete(skb))
1331 if (sk->sk_state == TCP_LISTEN) {
1332 struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1338 if (tcp_child_process(sk, nsk, skb))
1341 __kfree_skb(opt_skb);
1345 sock_rps_save_rxhash(sk, skb);
1347 if (tcp_rcv_state_process(sk, skb))
1350 goto ipv6_pktoptions;
1354 tcp_v6_send_reset(sk, skb);
1357 __kfree_skb(opt_skb);
1361 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1362 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	/* You may ask, what is all this about?
	 *
	 * 1. skb was enqueued by tcp.
	 * 2. skb is added to the tail of the read queue, rather than out of order.
	 * 3. socket is not in passive state.
	 * 4. Finally, it really contains options, which the user wants to receive.
	 */
1375 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1376 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1377 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1378 np->mcast_oif = tcp_v6_iif(opt_skb);
1379 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1380 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1381 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1382 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1384 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1385 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1386 skb_set_owner_r(opt_skb, sk);
1387 tcp_v6_restore_cb(opt_skb);
1388 opt_skb = xchg(&np->pktoptions, opt_skb);
1390 __kfree_skb(opt_skb);
1391 opt_skb = xchg(&np->pktoptions, NULL);
1399 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1400 const struct tcphdr *th)
	/* This is tricky: we move IP6CB to its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure the compiler won't play aliasing games.
	 */
1407 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1408 sizeof(struct inet6_skb_parm));
1411 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1412 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1413 skb->len - th->doff*4);
1414 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1415 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1416 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1417 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1418 TCP_SKB_CB(skb)->sacked = 0;
1419 TCP_SKB_CB(skb)->has_rxtstamp =
1420 skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
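/* Main receive entry point for IPv6 TCP segments: validate and checksum
 * the header, look the socket up, and hand the skb to the request-sock,
 * established, listen or time-wait processing paths.
 */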
1423 static int tcp_v6_rcv(struct sk_buff *skb)
1425 int sdif = inet6_sdif(skb);
1426 const struct tcphdr *th;
1427 const struct ipv6hdr *hdr;
1431 struct net *net = dev_net(skb->dev);
1433 if (skb->pkt_type != PACKET_HOST)
	/*
	 * Count it even if it's bad.
	 */
1439 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1441 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1444 th = (const struct tcphdr *)skb->data;
1446 if (unlikely(th->doff < sizeof(struct tcphdr)/4))
1448 if (!pskb_may_pull(skb, th->doff*4))
1451 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1454 th = (const struct tcphdr *)skb->data;
1455 hdr = ipv6_hdr(skb);
1458 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
1459 th->source, th->dest, inet6_iif(skb), sdif,
1465 if (sk->sk_state == TCP_TIME_WAIT)
1468 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1469 struct request_sock *req = inet_reqsk(sk);
1472 sk = req->rsk_listener;
1473 if (tcp_v6_inbound_md5_hash(sk, skb)) {
1474 sk_drops_add(sk, skb);
1478 if (tcp_checksum_complete(skb)) {
1482 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1483 inet_csk_reqsk_queue_drop_and_put(sk, req);
1489 if (!tcp_filter(sk, skb)) {
1490 th = (const struct tcphdr *)skb->data;
1491 hdr = ipv6_hdr(skb);
1492 tcp_v6_fill_cb(skb, hdr, th);
1493 nsk = tcp_check_req(sk, skb, req, false);
1497 goto discard_and_relse;
1501 tcp_v6_restore_cb(skb);
1502 } else if (tcp_child_process(sk, nsk, skb)) {
1503 tcp_v6_send_reset(nsk, skb);
1504 goto discard_and_relse;
1510 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1511 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1512 goto discard_and_relse;
1515 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1516 goto discard_and_relse;
1518 if (tcp_v6_inbound_md5_hash(sk, skb))
1519 goto discard_and_relse;
1521 if (tcp_filter(sk, skb))
1522 goto discard_and_relse;
1523 th = (const struct tcphdr *)skb->data;
1524 hdr = ipv6_hdr(skb);
1525 tcp_v6_fill_cb(skb, hdr, th);
1529 if (sk->sk_state == TCP_LISTEN) {
1530 ret = tcp_v6_do_rcv(sk, skb);
1531 goto put_and_return;
1534 sk_incoming_cpu_update(sk);
1536 bh_lock_sock_nested(sk);
1537 tcp_segs_in(tcp_sk(sk), skb);
1539 if (!sock_owned_by_user(sk)) {
1540 ret = tcp_v6_do_rcv(sk, skb);
1541 } else if (tcp_add_backlog(sk, skb)) {
1542 goto discard_and_relse;
1549 return ret ? -1 : 0;
1552 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1555 tcp_v6_fill_cb(skb, hdr, th);
1557 if (tcp_checksum_complete(skb)) {
1559 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1561 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1563 tcp_v6_send_reset(NULL, skb);
1571 sk_drops_add(sk, skb);
1577 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1578 inet_twsk_put(inet_twsk(sk));
1582 tcp_v6_fill_cb(skb, hdr, th);
1584 if (tcp_checksum_complete(skb)) {
1585 inet_twsk_put(inet_twsk(sk));
1589 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1594 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1595 skb, __tcp_hdrlen(th),
1596 &ipv6_hdr(skb)->saddr, th->source,
1597 &ipv6_hdr(skb)->daddr,
1599 tcp_v6_iif_l3_slave(skb),
1602 struct inet_timewait_sock *tw = inet_twsk(sk);
1603 inet_twsk_deschedule_put(tw);
1605 tcp_v6_restore_cb(skb);
1609 /* Fall through to ACK */
1612 tcp_v6_timewait_ack(sk, skb);
1615 tcp_v6_send_reset(sk, skb);
1616 inet_twsk_deschedule_put(inet_twsk(sk));
1618 case TCP_TW_SUCCESS:
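/* Early demux: look up an established socket from the IPv6/TCP headers
 * before routing, so that a cached rx dst can be attached to the skb.
 */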
1624 static void tcp_v6_early_demux(struct sk_buff *skb)
1626 const struct ipv6hdr *hdr;
1627 const struct tcphdr *th;
1630 if (skb->pkt_type != PACKET_HOST)
1633 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1636 hdr = ipv6_hdr(skb);
1639 if (th->doff < sizeof(struct tcphdr) / 4)
1642 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
1643 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1644 &hdr->saddr, th->source,
1645 &hdr->daddr, ntohs(th->dest),
1646 inet6_iif(skb), inet6_sdif(skb));
1649 skb->destructor = sock_edemux;
1650 if (sk_fullsock(sk)) {
1651 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1654 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1656 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1657 skb_dst_set_noref(skb, dst);
1662 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1663 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1664 .twsk_unique = tcp_twsk_unique,
1665 .twsk_destructor = tcp_twsk_destructor,
1668 static const struct inet_connection_sock_af_ops ipv6_specific = {
1669 .queue_xmit = inet6_csk_xmit,
1670 .send_check = tcp_v6_send_check,
1671 .rebuild_header = inet6_sk_rebuild_header,
1672 .sk_rx_dst_set = inet6_sk_rx_dst_set,
1673 .conn_request = tcp_v6_conn_request,
1674 .syn_recv_sock = tcp_v6_syn_recv_sock,
1675 .net_header_len = sizeof(struct ipv6hdr),
1676 .net_frag_header_len = sizeof(struct frag_hdr),
1677 .setsockopt = ipv6_setsockopt,
1678 .getsockopt = ipv6_getsockopt,
1679 .addr2sockaddr = inet6_csk_addr2sockaddr,
1680 .sockaddr_len = sizeof(struct sockaddr_in6),
1681 #ifdef CONFIG_COMPAT
1682 .compat_setsockopt = compat_ipv6_setsockopt,
1683 .compat_getsockopt = compat_ipv6_getsockopt,
1685 .mtu_reduced = tcp_v6_mtu_reduced,
1688 #ifdef CONFIG_TCP_MD5SIG
1689 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1690 .md5_lookup = tcp_v6_md5_lookup,
1691 .calc_md5_hash = tcp_v6_md5_hash_skb,
1692 .md5_parse = tcp_v6_parse_md5_keys,
1697 * TCP over IPv4 via INET6 API
1699 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1700 .queue_xmit = ip_queue_xmit,
1701 .send_check = tcp_v4_send_check,
1702 .rebuild_header = inet_sk_rebuild_header,
1703 .sk_rx_dst_set = inet_sk_rx_dst_set,
1704 .conn_request = tcp_v6_conn_request,
1705 .syn_recv_sock = tcp_v6_syn_recv_sock,
1706 .net_header_len = sizeof(struct iphdr),
1707 .setsockopt = ipv6_setsockopt,
1708 .getsockopt = ipv6_getsockopt,
1709 .addr2sockaddr = inet6_csk_addr2sockaddr,
1710 .sockaddr_len = sizeof(struct sockaddr_in6),
1711 #ifdef CONFIG_COMPAT
1712 .compat_setsockopt = compat_ipv6_setsockopt,
1713 .compat_getsockopt = compat_ipv6_getsockopt,
1715 .mtu_reduced = tcp_v4_mtu_reduced,
1718 #ifdef CONFIG_TCP_MD5SIG
1719 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1720 .md5_lookup = tcp_v4_md5_lookup,
1721 .calc_md5_hash = tcp_v4_md5_hash_skb,
1722 .md5_parse = tcp_v6_parse_md5_keys,
/* NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
1729 static int tcp_v6_init_sock(struct sock *sk)
1731 struct inet_connection_sock *icsk = inet_csk(sk);
1735 icsk->icsk_af_ops = &ipv6_specific;
1737 #ifdef CONFIG_TCP_MD5SIG
1738 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1744 static void tcp_v6_destroy_sock(struct sock *sk)
1746 tcp_v4_destroy_sock(sk);
1747 inet6_destroy_sock(sk);
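/* /proc/net/tcp6 dumping: one formatted line per request sock, established
 * socket or time-wait socket, matching the layout of the IPv4 /proc/net/tcp.
 */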
1750 #ifdef CONFIG_PROC_FS
1751 /* Proc filesystem TCPv6 sock list dumping. */
1752 static void get_openreq6(struct seq_file *seq,
1753 const struct request_sock *req, int i)
1755 long ttd = req->rsk_timer.expires - jiffies;
1756 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1757 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1763 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1764 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1766 src->s6_addr32[0], src->s6_addr32[1],
1767 src->s6_addr32[2], src->s6_addr32[3],
1768 inet_rsk(req)->ir_num,
1769 dest->s6_addr32[0], dest->s6_addr32[1],
1770 dest->s6_addr32[2], dest->s6_addr32[3],
1771 ntohs(inet_rsk(req)->ir_rmt_port),
1773 0, 0, /* could print option size, but that is af dependent. */
1774 1, /* timers active (only the expire timer) */
1775 jiffies_to_clock_t(ttd),
1777 from_kuid_munged(seq_user_ns(seq),
1778 sock_i_uid(req->rsk_listener)),
1779 0, /* non standard timer */
1780 0, /* open_requests have no inode */
1784 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1786 const struct in6_addr *dest, *src;
1789 unsigned long timer_expires;
1790 const struct inet_sock *inet = inet_sk(sp);
1791 const struct tcp_sock *tp = tcp_sk(sp);
1792 const struct inet_connection_sock *icsk = inet_csk(sp);
1793 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
1797 dest = &sp->sk_v6_daddr;
1798 src = &sp->sk_v6_rcv_saddr;
1799 destp = ntohs(inet->inet_dport);
1800 srcp = ntohs(inet->inet_sport);
1802 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
1803 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
1804 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
1806 timer_expires = icsk->icsk_timeout;
1807 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1809 timer_expires = icsk->icsk_timeout;
1810 } else if (timer_pending(&sp->sk_timer)) {
1812 timer_expires = sp->sk_timer.expires;
1815 timer_expires = jiffies;
1818 state = sk_state_load(sp);
1819 if (state == TCP_LISTEN)
1820 rx_queue = sp->sk_ack_backlog;
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
1825 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1828 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1829 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1831 src->s6_addr32[0], src->s6_addr32[1],
1832 src->s6_addr32[2], src->s6_addr32[3], srcp,
1833 dest->s6_addr32[0], dest->s6_addr32[1],
1834 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1836 tp->write_seq - tp->snd_una,
1839 jiffies_delta_to_clock_t(timer_expires - jiffies),
1840 icsk->icsk_retransmits,
1841 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1842 icsk->icsk_probes_out,
1844 refcount_read(&sp->sk_refcnt), sp,
1845 jiffies_to_clock_t(icsk->icsk_rto),
1846 jiffies_to_clock_t(icsk->icsk_ack.ato),
1847 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1849 state == TCP_LISTEN ?
1850 fastopenq->max_qlen :
1851 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1855 static void get_timewait6_sock(struct seq_file *seq,
1856 struct inet_timewait_sock *tw, int i)
1858 long delta = tw->tw_timer.expires - jiffies;
1859 const struct in6_addr *dest, *src;
1862 dest = &tw->tw_v6_daddr;
1863 src = &tw->tw_v6_rcv_saddr;
1864 destp = ntohs(tw->tw_dport);
1865 srcp = ntohs(tw->tw_sport);
1868 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1869 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1871 src->s6_addr32[0], src->s6_addr32[1],
1872 src->s6_addr32[2], src->s6_addr32[3], srcp,
1873 dest->s6_addr32[0], dest->s6_addr32[1],
1874 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1875 tw->tw_substate, 0, 0,
1876 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1877 refcount_read(&tw->tw_refcnt), tw);
1880 static int tcp6_seq_show(struct seq_file *seq, void *v)
1882 struct tcp_iter_state *st;
1883 struct sock *sk = v;
1885 if (v == SEQ_START_TOKEN) {
1890 "st tx_queue rx_queue tr tm->when retrnsmt"
1891 " uid timeout inode\n");
1896 if (sk->sk_state == TCP_TIME_WAIT)
1897 get_timewait6_sock(seq, v, st->num);
1898 else if (sk->sk_state == TCP_NEW_SYN_RECV)
1899 get_openreq6(seq, v, st->num);
1901 get_tcp6_sock(seq, v, st->num);
1906 static const struct file_operations tcp6_afinfo_seq_fops = {
1907 .owner = THIS_MODULE,
1908 .open = tcp_seq_open,
1910 .llseek = seq_lseek,
1911 .release = seq_release_net
1914 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1917 .seq_fops = &tcp6_afinfo_seq_fops,
1919 .show = tcp6_seq_show,
1923 int __net_init tcp6_proc_init(struct net *net)
1925 return tcp_proc_register(net, &tcp6_seq_afinfo);
1928 void tcp6_proc_exit(struct net *net)
1930 tcp_proc_unregister(net, &tcp6_seq_afinfo);
1934 struct proto tcpv6_prot = {
1936 .owner = THIS_MODULE,
1938 .connect = tcp_v6_connect,
1939 .disconnect = tcp_disconnect,
1940 .accept = inet_csk_accept,
1942 .init = tcp_v6_init_sock,
1943 .destroy = tcp_v6_destroy_sock,
1944 .shutdown = tcp_shutdown,
1945 .setsockopt = tcp_setsockopt,
1946 .getsockopt = tcp_getsockopt,
1947 .keepalive = tcp_set_keepalive,
1948 .recvmsg = tcp_recvmsg,
1949 .sendmsg = tcp_sendmsg,
1950 .sendpage = tcp_sendpage,
1951 .backlog_rcv = tcp_v6_do_rcv,
1952 .release_cb = tcp_release_cb,
1954 .unhash = inet_unhash,
1955 .get_port = inet_csk_get_port,
1956 .enter_memory_pressure = tcp_enter_memory_pressure,
1957 .leave_memory_pressure = tcp_leave_memory_pressure,
1958 .stream_memory_free = tcp_stream_memory_free,
1959 .sockets_allocated = &tcp_sockets_allocated,
1960 .memory_allocated = &tcp_memory_allocated,
1961 .memory_pressure = &tcp_memory_pressure,
1962 .orphan_count = &tcp_orphan_count,
1963 .sysctl_mem = sysctl_tcp_mem,
1964 .sysctl_wmem = sysctl_tcp_wmem,
1965 .sysctl_rmem = sysctl_tcp_rmem,
1966 .max_header = MAX_TCP_HEADER,
1967 .obj_size = sizeof(struct tcp6_sock),
1968 .slab_flags = SLAB_TYPESAFE_BY_RCU,
1969 .twsk_prot = &tcp6_timewait_sock_ops,
1970 .rsk_prot = &tcp6_request_sock_ops,
1971 .h.hashinfo = &tcp_hashinfo,
1972 .no_autobind = true,
1973 #ifdef CONFIG_COMPAT
1974 .compat_setsockopt = compat_tcp_setsockopt,
1975 .compat_getsockopt = compat_tcp_getsockopt,
1977 .diag_destroy = tcp_abort,
/* Thinking of making this const? Don't.
 * early_demux can change based on sysctl.
 */
1983 static struct inet6_protocol tcpv6_protocol = {
1984 .early_demux = tcp_v6_early_demux,
1985 .early_demux_handler = tcp_v6_early_demux,
1986 .handler = tcp_v6_rcv,
1987 .err_handler = tcp_v6_err,
1988 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1991 static struct inet_protosw tcpv6_protosw = {
1992 .type = SOCK_STREAM,
1993 .protocol = IPPROTO_TCP,
1994 .prot = &tcpv6_prot,
1995 .ops = &inet6_stream_ops,
1996 .flags = INET_PROTOSW_PERMANENT |
2000 static int __net_init tcpv6_net_init(struct net *net)
2002 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2003 SOCK_RAW, IPPROTO_TCP, net);
2006 static void __net_exit tcpv6_net_exit(struct net *net)
2008 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2011 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2013 inet_twsk_purge(&tcp_hashinfo, AF_INET6);
2016 static struct pernet_operations tcpv6_net_ops = {
2017 .init = tcpv6_net_init,
2018 .exit = tcpv6_net_exit,
2019 .exit_batch = tcpv6_net_exit_batch,
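/* Module init/exit: register the IPv6 TCP protocol handler, the stream
 * protosw and the per-namespace control sockets, unwinding in reverse
 * order on failure.
 */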
2022 int __init tcpv6_init(void)
2026 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2030 /* register inet6 protocol */
2031 ret = inet6_register_protosw(&tcpv6_protosw);
2033 goto out_tcpv6_protocol;
2035 ret = register_pernet_subsys(&tcpv6_net_ops);
2037 goto out_tcpv6_protosw;
2042 inet6_unregister_protosw(&tcpv6_protosw);
2044 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2048 void tcpv6_exit(void)
2050 unregister_pernet_subsys(&tcpv6_net_ops);
2051 inet6_unregister_protosw(&tcpv6_protosw);
2052 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);