// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetics in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 *				for datagram xmit
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <linux/bpf-cgroup.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>
#include <linux/mroute6.h>
#include <net/l3mdev.h>
#include <net/lwtunnel.h>
#include <net/ip_tunnels.h>
static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	int delta = hh_len - skb_headroom(skb);
	const struct in6_addr *nexthop;
	struct neighbour *neigh;
	int ret;

	/* Be paranoid, rather than too clever. */
	if (unlikely(delta > 0) && dev->header_ops) {
		/* pskb_expand_head() might crash, if skb is shared */
		if (skb_shared(skb)) {
			struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

			if (likely(nskb)) {
				if (skb->sk)
					skb_set_owner_w(nskb, skb->sk);
				consume_skb(skb);
			} else {
				kfree_skb(skb);
			}
			skb = nskb;
		}
		if (skb &&
		    pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
			kfree_skb(skb);
			skb = NULL;
		}
		if (!skb) {
			IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
			return -ENOMEM;
		}
	}

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) &&
		    ((mroute6_is_socket(net, skb) &&
		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
					 &ipv6_hdr(skb)->saddr))) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			 * is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
					net, sk, newskb, NULL, newskb->dev,
					dev_loopback_xmit);

			if (ipv6_hdr(skb)->hop_limit == 0) {
				IP6_INC_STATS(net, idev,
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, skb->len);

		if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
		    IPV6_ADDR_SCOPE_NODELOCAL &&
		    !(dev->flags & IFF_LOOPBACK)) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
		int res = lwtunnel_xmit(skb);

		if (res != LWTUNNEL_XMIT_CONTINUE)
			return res;
	}

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		ret = neigh_output(neigh, skb, false);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}
static int
ip6_finish_output_gso_slowpath_drop(struct net *net, struct sock *sk,
				    struct sk_buff *skb, unsigned int mtu)
{
	struct sk_buff *segs, *nskb;
	netdev_features_t features;
	int ret = 0;

	/* Please see corresponding comment in ip_finish_output_gso
	 * describing the cases where GSO segment length exceeds the
	 * egress MTU.
	 */
	features = netif_skb_features(skb);
	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR_OR_NULL(segs)) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	consume_skb(skb);

	skb_list_walk_safe(segs, segs, nskb) {
		int err;

		skb_mark_not_on_list(segs);
		/* Last GSO segment can be smaller than gso_size (and MTU).
		 * Adding a fragment header would produce an "atomic fragment",
		 * which is considered harmful (RFC-8021). Avoid that.
		 */
		err = segs->len > mtu ?
			ip6_fragment(net, sk, segs, ip6_finish_output2) :
			ip6_finish_output2(net, sk, segs);
		if (err && ret == 0)
			ret = err;
	}

	return ret;
}
static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	unsigned int mtu;

#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm) {
		IP6CB(skb)->flags |= IP6SKB_REROUTED;
		return dst_output(net, sk, skb);
	}
#endif

	mtu = ip6_skb_dst_mtu(skb);
	if (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu))
		return ip6_finish_output_gso_slowpath_drop(net, sk, skb, mtu);

	if ((skb->len > mtu && !skb_is_gso(skb)) ||
	    dst_allfrag(skb_dst(skb)) ||
	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
		return ip6_fragment(net, sk, skb, ip6_finish_output2);

	return ip6_finish_output2(net, sk, skb);
}
static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int ret;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	switch (ret) {
	case NET_XMIT_SUCCESS:
		return __ip6_finish_output(net, sk, skb);
	case NET_XMIT_CN:
		return __ip6_finish_output(net, sk, skb) ? : ret;
	default:
		kfree_skb(skb);
		return ret;
	}
}
int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;
	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (unlikely(idev->cnf.disable_ipv6)) {
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
		return 0;
	}

	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, indev, dev,
			    ip6_finish_output,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
{
	if (!np->autoflowlabel_set)
		return ip6_default_np_autolabel(net);
	else
		return np->autoflowlabel;
}
/*
 * xmit an sk_buff (used by TCP, SCTP and DCCP)
 * Note: the socket lock is not held for SYNACK packets, but the socket
 * might be modified by calls to skb_set_owner_w() and ipv6_local_error(),
 * which use proper atomic operations or spinlocks.
 */
int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
	     __u32 mark, struct ipv6_txoptions *opt, int tclass, u32 priority)
{
	struct net *net = sock_net(sk);
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl6->daddr;
	struct dst_entry *dst = skb_dst(skb);
	unsigned int head_room;
	struct ipv6hdr *hdr;
	u8 proto = fl6->flowi6_proto;
	int seg_len = skb->len;
	int hlimit = -1;
	u32 mtu;

	head_room = sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
	if (opt)
		head_room += opt->opt_nflen + opt->opt_flen;

	if (unlikely(skb_headroom(skb) < head_room)) {
		struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
		if (!skb2) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_OUTDISCARDS);
			kfree_skb(skb);
			return -ENOBUFS;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		consume_skb(skb);
		skb = skb2;
	}

	if (opt) {
		seg_len += opt->opt_nflen + opt->opt_flen;

		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);

		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop,
					     &fl6->saddr);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	/*
	 *	Fill in the IPv6 header
	 */
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);

	ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
				ip6_autoflowlabel(net, np), fl6));

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	hdr->saddr = fl6->saddr;
	hdr->daddr = *first_hop;

	skb->protocol = htons(ETH_P_IPV6);
	skb->priority = priority;
	skb->mark = mark;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_OUT, skb->len);

		/* if egress device is enslaved to an L3 master device pass the
		 * skb to its handler for processing
		 */
		skb = l3mdev_ip6_out((struct sock *)sk, skb);
		if (unlikely(!skb))
			return 0;

		/* hooks should never assume socket lock is held.
		 * we promote our socket to non const
		 */
		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
			       net, (struct sock *)sk, skb, NULL, dst->dev,
			       dst_output);
	}

	skb->dev = dst->dev;
	/* ipv6_local_error() does not require socket lock,
	 * we promote our socket to non const
	 */
	ipv6_local_error((struct sock *)sk, EMSGSIZE, fl6, mtu);

	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(ip6_xmit);
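/*
 * Usage sketch (illustrative only, loosely modelled on what the TCP path
 * does in inet6_csk_xmit(); variable names and error handling here are
 * simplified assumptions, not code from this file). The caller owns the
 * dst attached to the skb and describes the packet with a flowi6:
 *
 *	struct ipv6_pinfo *np = inet6_sk(sk);
 *	struct flowi6 fl6;
 *
 *	memset(&fl6, 0, sizeof(fl6));
 *	fl6.flowi6_proto = IPPROTO_TCP;
 *	fl6.daddr = sk->sk_v6_daddr;
 *	fl6.saddr = np->saddr;
 *	skb_dst_set_noref(skb, dst);
 *	err = ip6_xmit(sk, skb, &fl6, sk->sk_mark,
 *		       rcu_dereference(np->opt), np->tclass, sk->sk_priority);
 */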
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			if (np && np->rtalert_isolate &&
			    !net_eq(sock_net(sk), dev_net(skb->dev))) {
				continue;
			}
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}
static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	u8 nexthdr = hdr->nexthdr;
	__be16 frag_off;
	int offset;

	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		if (!pskb_may_pull(skb, (skb_network_header(skb) +
					 offset + 1 - skb->data)))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			/* For reaction involving unicast neighbor discovery
			 * message destined to the proxied address, pass it to
			 * input function.
			 */
			return 1;
		default:
			break;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}
static inline int ip6_forward_finish(struct net *net, struct sock *sk,
				     struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	__IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);

#ifdef CONFIG_NET_SWITCHDEV
	if (skb->offload_l3_fwd_mark) {
		consume_skb(skb);
		return 0;
	}
#endif

	skb->tstamp = 0;
	return dst_output(net, sk, skb);
}
static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	/* ipv6 conntrack defrag sets max_frag_size + ignore_df */
	if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
		return true;

	if (skb->ignore_df)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
		return false;

	return true;
}
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct net *net = dev_net(dst->dev);
	struct inet6_dev *idev;
	u32 mtu;

	idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
	if (net->ipv6.devconf_all->forwarding == 0)
		goto error;

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	if (unlikely(skb->sk))
		goto drop;

	if (skb_warn_if_lro(skb))
		goto drop;

	if (!net->ipv6.devconf_all->disable_policy &&
	    (!idev || !idev->cnf.disable_policy) &&
	    !xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb_forward_csum(skb);

	/*
	 *	We DO NOT make any processing on
	 *	RA packets, pushing them to user level AS IS
	 *	without any WARRANTY that the application will be able
	 *	to interpret them. The reason is that we
	 *	cannot make anything clever here.
	 *
	 *	We are not end-node, so that if packet contains
	 *	AH/ESP, we cannot make anything.
	 *	Defragmentation also would be a mistake, RA packets
	 *	cannot be fragmented, because there is no warranty
	 *	that different fragments will go along one path. --ANK
	 */
	if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
		if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (net->ipv6.devconf_all->proxy_ndp &&
	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb_dst(skb);

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	   We don't send redirects to frames decapsulated from IPsec.
	 */
	if (IP6CB(skb)->iif == dst->dev->ifindex &&
	    opt->srcrt == 0 && !skb_sec_path(skb)) {
		struct in6_addr *target = NULL;
		struct inet_peer *peer;
		struct rt6_info *rt;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if (rt->rt6i_flags & RTF_GATEWAY)
			target = &rt->rt6i_gateway;
		else
			target = &hdr->daddr;

		peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr, 1);

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (inet_peer_xrlim_allow(peer, 1*HZ))
			ndisc_send_redirect(skb, target);
		if (peer)
			inet_putpeer(peer);
	} else {
		int addrtype = ipv6_addr_type(&hdr->saddr);

		/* This check is security critical. */
		if (addrtype == IPV6_ADDR_ANY ||
		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
			goto error;
		if (addrtype & IPV6_ADDR_LINKLOCAL) {
			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
				    ICMPV6_NOT_NEIGHBOUR, 0);
			goto error;
		}
	}

	mtu = ip6_dst_mtu_forward(dst);
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;

	if (ip6_pkt_too_big(skb, mtu)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INTOOBIGERRORS);
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = ipv6_hdr(skb);

	/* Mangling hops number delayed to point after skb COW */

	hdr->hop_limit--;

	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
		       net, NULL, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}
static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_set(to, dst_clone(skb_dst(from)));
	to->dev = from->dev;
	to->mark = from->mark;

	skb_copy_hash(to, from);

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
	skb_ext_copy(to, from);
	skb_copy_secmark(to, from);
}
int ip6_fraglist_init(struct sk_buff *skb, unsigned int hlen, u8 *prevhdr,
		      u8 nexthdr, __be32 frag_id,
		      struct ip6_fraglist_iter *iter)
{
	unsigned int first_len;
	struct frag_hdr *fh;

	/* BUILD HEADER */
	*prevhdr = NEXTHDR_FRAGMENT;
	iter->tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
	if (!iter->tmp_hdr)
		return -ENOMEM;

	iter->frag = skb_shinfo(skb)->frag_list;
	skb_frag_list_init(skb);

	iter->offset = 0;
	iter->hlen = hlen;
	iter->frag_id = frag_id;
	iter->nexthdr = nexthdr;

	__skb_pull(skb, hlen);
	fh = __skb_push(skb, sizeof(struct frag_hdr));
	__skb_push(skb, hlen);
	skb_reset_network_header(skb);
	memcpy(skb_network_header(skb), iter->tmp_hdr, hlen);

	fh->nexthdr = nexthdr;
	fh->reserved = 0;
	fh->frag_off = htons(IP6_MF);
	fh->identification = frag_id;

	first_len = skb_pagelen(skb);
	skb->data_len = first_len - skb_headlen(skb);
	skb->len = first_len;
	ipv6_hdr(skb)->payload_len = htons(first_len - sizeof(struct ipv6hdr));

	return 0;
}
EXPORT_SYMBOL(ip6_fraglist_init);
void ip6_fraglist_prepare(struct sk_buff *skb,
			  struct ip6_fraglist_iter *iter)
{
	struct sk_buff *frag = iter->frag;
	unsigned int hlen = iter->hlen;
	struct frag_hdr *fh;

	frag->ip_summed = CHECKSUM_NONE;
	skb_reset_transport_header(frag);
	fh = __skb_push(frag, sizeof(struct frag_hdr));
	__skb_push(frag, hlen);
	skb_reset_network_header(frag);
	memcpy(skb_network_header(frag), iter->tmp_hdr, hlen);
	iter->offset += skb->len - hlen - sizeof(struct frag_hdr);
	fh->nexthdr = iter->nexthdr;
	fh->reserved = 0;
	fh->frag_off = htons(iter->offset);
	if (frag->next)
		fh->frag_off |= htons(IP6_MF);
	fh->identification = iter->frag_id;
	ipv6_hdr(frag)->payload_len = htons(frag->len - sizeof(struct ipv6hdr));
	ip6_copy_metadata(frag, skb);
}
EXPORT_SYMBOL(ip6_fraglist_prepare);
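/*
 * The ip6_fraglist_* helpers form an iterator over an skb whose frag_list
 * already holds correctly sized fragments; ip6_fragment() below is the
 * in-tree user. The pattern, with error handling elided, is:
 *
 *	struct ip6_fraglist_iter iter;
 *
 *	err = ip6_fraglist_init(skb, hlen, prevhdr, nexthdr, frag_id, &iter);
 *	for (;;) {
 *		if (iter.frag)
 *			ip6_fraglist_prepare(skb, &iter);
 *		err = output(net, sk, skb);
 *		if (err || !iter.frag)
 *			break;
 *		skb = ip6_fraglist_next(&iter);
 *	}
 *	kfree(iter.tmp_hdr);
 */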
void ip6_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int mtu,
		   unsigned short needed_tailroom, int hdr_room, u8 *prevhdr,
		   u8 nexthdr, __be32 frag_id, struct ip6_frag_state *state)
{
	state->prevhdr = prevhdr;
	state->nexthdr = nexthdr;
	state->frag_id = frag_id;

	state->hlen = hlen;
	state->mtu = mtu;

	state->left = skb->len - hlen;	/* Space per frame */
	state->ptr = hlen;		/* Where to start from */

	state->hroom = hdr_room;
	state->troom = needed_tailroom;

	state->offset = 0;
}
EXPORT_SYMBOL(ip6_frag_init);
struct sk_buff *ip6_frag_next(struct sk_buff *skb, struct ip6_frag_state *state)
{
	u8 *prevhdr = state->prevhdr, *fragnexthdr_offset;
	struct sk_buff *frag;
	struct frag_hdr *fh;
	unsigned int len;

	len = state->left;
	/* IF: it doesn't fit, use 'mtu' - the data space left */
	if (len > state->mtu)
		len = state->mtu;
	/* IF: we are not sending up to and including the packet end
	   then align the next start on an eight byte boundary */
	if (len < state->left)
		len &= ~7;

	/* Allocate buffer */
	frag = alloc_skb(len + state->hlen + sizeof(struct frag_hdr) +
			 state->hroom + state->troom, GFP_ATOMIC);
	if (!frag)
		return ERR_PTR(-ENOMEM);

	/*
	 *	Set up data on packet
	 */

	ip6_copy_metadata(frag, skb);
	skb_reserve(frag, state->hroom);
	skb_put(frag, len + state->hlen + sizeof(struct frag_hdr));
	skb_reset_network_header(frag);
	fh = (struct frag_hdr *)(skb_network_header(frag) + state->hlen);
	frag->transport_header = (frag->network_header + state->hlen +
				  sizeof(struct frag_hdr));

	/*
	 *	Charge the memory for the fragment to any owner
	 *	it might possess
	 */
	if (skb->sk)
		skb_set_owner_w(frag, skb->sk);

	/*
	 *	Copy the packet header into the new buffer.
	 */
	skb_copy_from_linear_data(skb, skb_network_header(frag), state->hlen);

	fragnexthdr_offset = skb_network_header(frag);
	fragnexthdr_offset += prevhdr - skb_network_header(skb);
	*fragnexthdr_offset = NEXTHDR_FRAGMENT;

	/*
	 *	Build fragment header.
	 */
	fh->nexthdr = state->nexthdr;
	fh->reserved = 0;
	fh->identification = state->frag_id;

	/*
	 *	Copy a block of the IP datagram.
	 */
	BUG_ON(skb_copy_bits(skb, state->ptr, skb_transport_header(frag),
			     len));
	state->left -= len;

	fh->frag_off = htons(state->offset);
	if (state->left > 0)
		fh->frag_off |= htons(IP6_MF);
	ipv6_hdr(frag)->payload_len = htons(frag->len - sizeof(struct ipv6hdr));

	state->ptr += len;
	state->offset += len;

	return frag;
}
EXPORT_SYMBOL(ip6_frag_next);
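/*
 * ip6_frag_init()/ip6_frag_next() are the slow-path counterpart: each call
 * to ip6_frag_next() allocates one fragment and copies the next block of
 * the datagram into it. The slow_path loop in ip6_fragment() below is the
 * in-tree user; a minimal sketch (error handling elided):
 *
 *	struct ip6_frag_state state;
 *
 *	ip6_frag_init(skb, hlen, mtu, tailroom, hroom, prevhdr, nexthdr,
 *		      frag_id, &state);
 *	while (state.left > 0) {
 *		struct sk_buff *frag = ip6_frag_next(skb, &state);
 *
 *		if (IS_ERR(frag))
 *			break;
 *		err = output(net, sk, frag);
 *	}
 *	consume_skb(skb);
 */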
int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		 int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
				inet6_sk(skb->sk) : NULL;
	struct ip6_frag_state state;
	unsigned int mtu, hlen, nexthdr_offset;
	ktime_t tstamp = skb->tstamp;
	int hroom, err = 0;
	__be32 frag_id;
	u8 *prevhdr, nexthdr = 0;

	err = ip6_find_1stfragopt(skb, &prevhdr);
	if (err < 0)
		goto fail;
	hlen = err;
	nexthdr = *prevhdr;
	nexthdr_offset = prevhdr - skb_network_header(skb);

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb is not generated by a local socket.
	 */
	if (unlikely(!skb->ignore_df && skb->len > mtu))
		goto fail_toobig;

	if (IP6CB(skb)->frag_max_size) {
		if (IP6CB(skb)->frag_max_size > mtu)
			goto fail_toobig;

		/* don't send fragments larger than what we received */
		mtu = IP6CB(skb)->frag_max_size;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
	}

	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	if (mtu < hlen + sizeof(struct frag_hdr) + 8)
		goto fail_toobig;
	mtu -= hlen + sizeof(struct frag_hdr);

	frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
				    &ipv6_hdr(skb)->saddr);

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto fail;

	prevhdr = skb_network_header(skb) + nexthdr_offset;
	hroom = LL_RESERVED_SPACE(rt->dst.dev);
	if (skb_has_frag_list(skb)) {
		unsigned int first_len = skb_pagelen(skb);
		struct ip6_fraglist_iter iter;
		struct sk_buff *frag2;

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb) ||
		    skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < (hlen + hroom + sizeof(struct frag_hdr)))
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		err = ip6_fraglist_init(skb, hlen, prevhdr, nexthdr, frag_id,
					&iter);
		if (err < 0)
			goto fail;

		/* We prevent @rt from being freed. */
		rcu_read_lock();

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (iter.frag)
				ip6_fraglist_prepare(skb, &iter);

			skb->tstamp = tstamp;
			err = output(net, sk, skb);
			if (!err)
				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
					      IPSTATS_MIB_FRAGCREATES);

			if (err || !iter.frag)
				break;

			skb = ip6_fraglist_next(&iter);
		}

		kfree(iter.tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
				      IPSTATS_MIB_FRAGOKS);
			rcu_read_unlock();
			return 0;
		}

		kfree_skb_list(iter.frag);

		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
			      IPSTATS_MIB_FRAGFAILS);
		rcu_read_unlock();
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	/*
	 *	Fragment the datagram.
	 */

	ip6_frag_init(skb, hlen, mtu, rt->dst.dev->needed_tailroom,
		      LL_RESERVED_SPACE(rt->dst.dev), prevhdr, nexthdr, frag_id,
		      &state);

	/*
	 *	Keep copying data until we run out.
	 */

	while (state.left > 0) {
		frag = ip6_frag_next(skb, &state);
		if (IS_ERR(frag)) {
			err = PTR_ERR(frag);
			goto fail;
		}

		/*
		 *	Put this fragment into the sending queue.
		 */
		frag->tstamp = tstamp;
		err = output(net, sk, frag);
		if (err)
			goto fail;

		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGOKS);
	consume_skb(skb);
	return err;

fail_toobig:
	if (skb->sk && dst_allfrag(skb_dst(skb)))
		sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);

	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
	err = -EMSGSIZE;

fail:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}
static inline int ip6_rt_check(const struct rt6key *rt_key,
			       const struct in6_addr *fl_addr,
			       const struct in6_addr *addr_cache)
{
	return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(!addr_cache || !ipv6_addr_equal(fl_addr, addr_cache));
}
static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
					  struct dst_entry *dst,
					  const struct flowi6 *fl6)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt;

	if (!dst)
		goto out;

	if (dst->ops->family != AF_INET6) {
		dst_release(dst);
		return NULL;
	}

	rt = (struct rt6_info *)dst;
	/* Yes, checking route validity in not connected
	 * case is not very simple. Take into account that
	 * we do not support routing by source, TOS,
	 * and MSG_DONTROUTE		--ANK (980726)
	 *
	 * 1. ip6_rt_check(): If route was host route,
	 *    check that cached destination is current.
	 *    If it is network route, we still may
	 *    check its validity using saved pointer
	 *    to the last used address: daddr_cache.
	 *    We do not want to save whole address now,
	 *    (because main consumer of this service
	 *    is tcp, which does not have this problem),
	 *    so that the last trick works only on connected
	 *    sockets.
	 * 2. oif also should be the same.
	 */
	if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
	    ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
#endif
	    (!(fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) &&
	     (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex))) {
		dst_release(dst);
		dst = NULL;
	}

out:
	return dst;
}
static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
			       struct dst_entry **dst, struct flowi6 *fl6)
{
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	struct neighbour *n;
	struct rt6_info *rt;
#endif
	int err;
	int flags = 0;

	/* The correct way to handle this would be to do
	 * ip6_route_get_saddr, and then ip6_route_output; however,
	 * the route-specific preferred source forces the
	 * ip6_route_output call _before_ ip6_route_get_saddr.
	 *
	 * In source specific routing (no src=any default route),
	 * ip6_route_output will fail given src=any saddr, though, so
	 * that's why we try it again later.
	 */
	if (ipv6_addr_any(&fl6->saddr) && (!*dst || !(*dst)->error)) {
		struct fib6_info *from;
		struct rt6_info *rt;
		bool had_dst = *dst != NULL;

		if (!had_dst)
			*dst = ip6_route_output(net, sk, fl6);
		rt = (*dst)->error ? NULL : (struct rt6_info *)*dst;

		rcu_read_lock();
		from = rt ? rcu_dereference(rt->from) : NULL;
		err = ip6_route_get_saddr(net, from, &fl6->daddr,
					  sk ? inet6_sk(sk)->srcprefs : 0,
					  &fl6->saddr);
		rcu_read_unlock();

		if (err)
			goto out_err_release;

		/* If we had an erroneous initial result, pretend it
		 * never existed and let the SA-enabled version take
		 * over.
		 */
		if (!had_dst && (*dst)->error) {
			dst_release(*dst);
			*dst = NULL;
		}

		if (fl6->flowi6_oif)
			flags |= RT6_LOOKUP_F_IFACE;
	}

	if (!*dst)
		*dst = ip6_route_output_flags(net, sk, fl6, flags);

	err = (*dst)->error;
	if (err)
		goto out_err_release;

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	/*
	 * Here if the dst entry we've looked up
	 * has a neighbour entry that is in the INCOMPLETE
	 * state and the src address from the flow is
	 * marked as OPTIMISTIC, we release the found
	 * dst entry and replace it instead with the
	 * dst entry of the nexthop router
	 */
	rt = (struct rt6_info *) *dst;
	rcu_read_lock_bh();
	n = __ipv6_neigh_lookup_noref(rt->dst.dev,
				      rt6_nexthop(rt, &fl6->daddr));
	err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
	rcu_read_unlock_bh();

	if (err) {
		struct inet6_ifaddr *ifp;
		struct flowi6 fl_gw6;
		int redirect;

		ifp = ipv6_get_ifaddr(net, &fl6->saddr,
				      (*dst)->dev, 1);

		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
		if (ifp)
			in6_ifa_put(ifp);

		if (redirect) {
			/*
			 * We need to get the dst entry for the
			 * default router instead
			 */
			dst_release(*dst);
			memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
			memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
			*dst = ip6_route_output(net, sk, &fl_gw6);
			err = (*dst)->error;
			if (err)
				goto out_err_release;
		}
	}
#endif
	if (ipv6_addr_v4mapped(&fl6->saddr) &&
	    !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) {
		err = -EAFNOSUPPORT;
		goto out_err_release;
	}

	return 0;

out_err_release:
	dst_release(*dst);
	*dst = NULL;

	if (err == -ENETUNREACH)
		IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
	return err;
}
/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@net: Network namespace to perform lookup in
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry * for result
 *	@fl6: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
		   struct flowi6 *fl6)
{
	*dst = NULL;
	return ip6_dst_lookup_tail(net, sk, dst, fl6);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);
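/*
 * A hedged caller sketch (illustrative, not code from this file): fill a
 * flowi6, leave fl6.saddr as the unspecified address to let the lookup
 * choose a source, and release the dst when done. "oif" and "daddr" are
 * placeholder variables:
 *
 *	struct flowi6 fl6 = {
 *		.flowi6_proto = IPPROTO_UDP,
 *		.flowi6_oif = oif,
 *		.daddr = *daddr,
 *	};
 *	struct dst_entry *dst;
 *	int err = ip6_dst_lookup(net, sk, &dst, &fl6);
 *
 *	if (err)
 *		return err;
 *	... transmit via dst ...
 *	dst_release(dst);
 */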
/**
 *	ip6_dst_lookup_flow - perform route lookup on flow with ipsec
 *	@net: Network namespace to perform lookup in
 *	@sk: socket which provides route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns a valid dst pointer on success, or a pointer encoded
 *	error code.
 */
struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6,
				      const struct in6_addr *final_dst)
{
	struct dst_entry *dst = NULL;
	int err;

	err = ip6_dst_lookup_tail(net, sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);
	if (final_dst)
		fl6->daddr = *final_dst;

	return xfrm_lookup_route(net, dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
/**
 *	ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *	@connected: whether @sk is connected or not
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	In addition, for a connected socket, cache the dst in the socket
 *	if the current cache is not valid.
 *
 *	It returns a valid dst pointer on success, or a pointer encoded
 *	error code.
 */
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
					 const struct in6_addr *final_dst,
					 bool connected)
{
	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);

	dst = ip6_sk_dst_check(sk, dst, fl6);
	if (dst)
		return dst;

	dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_dst);
	if (connected && !IS_ERR(dst))
		ip6_sk_dst_store_flow(sk, dst_clone(dst), fl6);

	return dst;
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
/**
 *	ip6_dst_lookup_tunnel - perform route lookup on tunnel
 *	@skb: Packet for which lookup is done
 *	@dev: Tunnel device
 *	@net: Network namespace of tunnel device
 *	@sock: Socket which provides route info
 *	@saddr: Memory to store the src ip address
 *	@info: Tunnel information
 *	@protocol: IP protocol
 *	@use_cache: Flag to enable cache usage
 *	This function performs a route lookup on a tunnel.
 *
 *	It returns a valid dst pointer and stores src address to be used in
 *	tunnel in param saddr on success, else a pointer encoded error code.
 */
struct dst_entry *ip6_dst_lookup_tunnel(struct sk_buff *skb,
					struct net_device *dev,
					struct net *net,
					struct socket *sock,
					struct in6_addr *saddr,
					const struct ip_tunnel_info *info,
					u8 protocol,
					bool use_cache)
{
	struct dst_entry *dst = NULL;
#ifdef CONFIG_DST_CACHE
	struct dst_cache *dst_cache;
#endif
	struct flowi6 fl6;
	__u8 prio;

#ifdef CONFIG_DST_CACHE
	dst_cache = (struct dst_cache *)&info->dst_cache;
	if (use_cache) {
		dst = dst_cache_get_ip6(dst_cache, saddr);
		if (dst)
			return dst;
	}
#endif
	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_mark = skb->mark;
	fl6.flowi6_proto = protocol;
	fl6.daddr = info->key.u.ipv6.dst;
	fl6.saddr = info->key.u.ipv6.src;
	prio = info->key.tos;
	fl6.flowlabel = ip6_make_flowinfo(prio, info->key.label);

	dst = ipv6_stub->ipv6_dst_lookup_flow(net, sock->sk, &fl6,
					      NULL);
	if (IS_ERR(dst)) {
		netdev_dbg(dev, "no route to %pI6\n", &fl6.daddr);
		return ERR_PTR(-ENETUNREACH);
	}
	if (dst->dev == dev) { /* is this necessary? */
		netdev_dbg(dev, "circular route to %pI6\n", &fl6.daddr);
		dst_release(dst);
		return ERR_PTR(-ELOOP);
	}
#ifdef CONFIG_DST_CACHE
	if (use_cache)
		dst_cache_set_ip6(dst_cache, dst, &fl6.saddr);
#endif
	*saddr = fl6.saddr;
	return dst;
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup_tunnel);
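/*
 * Tunnel drivers typically call this from their xmit path; a hedged sketch,
 * assuming @info was obtained from the skb's tunnel metadata (roughly the
 * shape of what UDP tunnel drivers such as geneve do). "use_cache" and
 * "sock" are placeholder variables:
 *
 *	struct in6_addr saddr;
 *	struct dst_entry *ndst;
 *
 *	ndst = ip6_dst_lookup_tunnel(skb, dev, net, sock, &saddr, info,
 *				     IPPROTO_UDP, use_cache);
 *	if (IS_ERR(ndst))
 *		return PTR_ERR(ndst);
 *	... saddr now holds the source address for the outer header ...
 */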
static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
					       gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
						gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}
static void ip6_append_data_mtu(unsigned int *mtu,
				int *maxfraglen,
				unsigned int fragheaderlen,
				struct sk_buff *skb,
				struct rt6_info *rt,
				unsigned int orig_mtu)
{
	if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
		if (!skb) {
			/* first fragment, reserve header_len */
			*mtu = orig_mtu - rt->dst.header_len;

		} else {
			/*
			 * this fragment is not first, the headers
			 * space is regarded as data space.
			 */
			*mtu = orig_mtu;
		}
		*maxfraglen = ((*mtu - fragheaderlen) & ~7)
			      + fragheaderlen - sizeof(struct frag_hdr);
	}
}
static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
			  struct inet6_cork *v6_cork, struct ipcm6_cookie *ipc6,
			  struct rt6_info *rt, struct flowi6 *fl6)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	unsigned int mtu;
	struct ipv6_txoptions *opt = ipc6->opt;

	/*
	 * setup for corking
	 */
	if (opt) {
		if (WARN_ON(v6_cork->opt))
			return -EINVAL;

		v6_cork->opt = kzalloc(sizeof(*opt), sk->sk_allocation);
		if (unlikely(!v6_cork->opt))
			return -ENOBUFS;

		v6_cork->opt->tot_len = sizeof(*opt);
		v6_cork->opt->opt_flen = opt->opt_flen;
		v6_cork->opt->opt_nflen = opt->opt_nflen;

		v6_cork->opt->dst0opt = ip6_opt_dup(opt->dst0opt,
						    sk->sk_allocation);
		if (opt->dst0opt && !v6_cork->opt->dst0opt)
			return -ENOBUFS;

		v6_cork->opt->dst1opt = ip6_opt_dup(opt->dst1opt,
						    sk->sk_allocation);
		if (opt->dst1opt && !v6_cork->opt->dst1opt)
			return -ENOBUFS;

		v6_cork->opt->hopopt = ip6_opt_dup(opt->hopopt,
						   sk->sk_allocation);
		if (opt->hopopt && !v6_cork->opt->hopopt)
			return -ENOBUFS;

		v6_cork->opt->srcrt = ip6_rthdr_dup(opt->srcrt,
						    sk->sk_allocation);
		if (opt->srcrt && !v6_cork->opt->srcrt)
			return -ENOBUFS;

		/* need source address above miyazawa */
	}
	dst_hold(&rt->dst);
	cork->base.dst = &rt->dst;
	cork->fl.u.ip6 = *fl6;
	v6_cork->hop_limit = ipc6->hlimit;
	v6_cork->tclass = ipc6->tclass;
	if (rt->dst.flags & DST_XFRM_TUNNEL)
		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
		      READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst);
	else
		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
			READ_ONCE(rt->dst.dev->mtu) : dst_mtu(xfrm_dst_path(&rt->dst));
	if (np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	cork->base.fragsize = mtu;
	cork->base.gso_size = ipc6->gso_size;
	cork->base.tx_flags = 0;
	cork->base.mark = ipc6->sockc.mark;
	sock_tx_timestamp(sk, ipc6->sockc.tsflags, &cork->base.tx_flags);

	if (dst_allfrag(xfrm_dst_path(&rt->dst)))
		cork->base.flags |= IPCORK_ALLFRAG;
	cork->base.length = 0;

	cork->base.transmit_time = ipc6->sockc.transmit_time;

	return 0;
}
static int __ip6_append_data(struct sock *sk,
			     struct flowi6 *fl6,
			     struct sk_buff_head *queue,
			     struct inet_cork *cork,
			     struct inet6_cork *v6_cork,
			     struct page_frag *pfrag,
			     int getfrag(void *from, char *to, int offset,
					 int len, int odd, struct sk_buff *skb),
			     void *from, int length, int transhdrlen,
			     unsigned int flags, struct ipcm6_cookie *ipc6)
{
	struct sk_buff *skb, *skb_prev = NULL;
	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu, pmtu;
	struct ubuf_info *uarg = NULL;
	int exthdrlen = 0;
	int dst_exthdrlen = 0;
	int hh_len;
	int copy;
	int err;
	int offset = 0;
	u32 tskey = 0;
	struct rt6_info *rt = (struct rt6_info *)cork->dst;
	struct ipv6_txoptions *opt = v6_cork->opt;
	int csummode = CHECKSUM_NONE;
	unsigned int maxnonfragsize, headersize;
	unsigned int wmem_alloc_delta = 0;
	bool paged, extra_uref = false;

	skb = skb_peek_tail(queue);
	if (!skb) {
		exthdrlen = opt ? opt->opt_flen : 0;
		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
	}

	paged = !!cork->gso_size;
	mtu = cork->gso_size ? IP6_MAX_MTU : cork->fragsize;
	orig_mtu = mtu;

	if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
	    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
		tskey = sk->sk_tskey++;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);

	headersize = sizeof(struct ipv6hdr) +
		     (opt ? opt->opt_flen + opt->opt_nflen : 0) +
		     (dst_allfrag(&rt->dst) ?
		      sizeof(struct frag_hdr) : 0) +
		     rt->rt6i_nfheader_len;

	if (mtu <= fragheaderlen ||
	    ((mtu - fragheaderlen) & ~7) + fragheaderlen <= sizeof(struct frag_hdr))
		goto emsgsize;

	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
		     sizeof(struct frag_hdr);

	/* as per RFC 7112 section 5, the entire IPv6 Header Chain must fit
	 * the first fragment
	 */
	if (headersize + transhdrlen > mtu)
		goto emsgsize;

	if (cork->length + length > mtu - headersize && ipc6->dontfrag &&
	    (sk->sk_protocol == IPPROTO_UDP ||
	     sk->sk_protocol == IPPROTO_RAW)) {
		ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
				sizeof(struct ipv6hdr));
		goto emsgsize;
	}

	if (ip6_sk_ignore_df(sk))
		maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
	else
		maxnonfragsize = mtu;

	if (cork->length + length > maxnonfragsize - headersize) {
emsgsize:
		pmtu = max_t(int, mtu - headersize + sizeof(struct ipv6hdr), 0);
		ipv6_local_error(sk, EMSGSIZE, fl6, pmtu);
		return -EMSGSIZE;
	}

	/* CHECKSUM_PARTIAL only with no extension headers and when
	 * we are not going to fragment
	 */
	if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
	    headersize == sizeof(struct ipv6hdr) &&
	    length <= mtu - headersize &&
	    (!(flags & MSG_MORE) || cork->gso_size) &&
	    rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
		csummode = CHECKSUM_PARTIAL;

	if (flags & MSG_ZEROCOPY && length && sock_flag(sk, SOCK_ZEROCOPY)) {
		uarg = sock_zerocopy_realloc(sk, length, skb_zcopy(skb));
		if (!uarg)
			return -ENOBUFS;
		extra_uref = !skb_zcopy(skb);	/* only ref on new uarg */
		if (rt->dst.dev->features & NETIF_F_SG &&
		    csummode == CHECKSUM_PARTIAL) {
			paged = true;
		} else {
			uarg->zerocopy = 0;
			skb_zcopy_set(skb, uarg, &extra_uref);
		}
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	cork->length += length;
	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen, alloc_extra;
			unsigned int pagedlen;
alloc_new_skb:
			/* There's no room in the current skb */
			if (skb)
				fraggap = skb->len - maxfraglen;
			else
				fraggap = 0;
			/* update mtu and maxfraglen if necessary */
			if (!skb || !skb_prev)
				ip6_append_data_mtu(&mtu, &maxfraglen,
						    fragheaderlen, skb, rt,
						    orig_mtu);

			skb_prev = skb;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;

			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
			fraglen = datalen + fragheaderlen;
			pagedlen = 0;

			alloc_extra = hh_len;
			alloc_extra += dst_exthdrlen;
			alloc_extra += rt->dst.trailer_len;

			/* We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloc_extra += sizeof(struct frag_hdr);

			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else if (!paged &&
				 (fraglen + alloc_extra < SKB_MAX_ALLOC ||
				  !(rt->dst.dev->features & NETIF_F_SG)))
				alloclen = fraglen;
			else {
				alloclen = min_t(int, fraglen, MAX_HEADER);
				pagedlen = fraglen - alloclen;
			}
			alloclen += alloc_extra;

			if (datalen != length + fraggap) {
				/*
				 * this is not the last fragment, the trailer
				 * space is regarded as data space.
				 */
				datalen += rt->dst.trailer_len;
			}

			fraglen = datalen + fragheaderlen;

			copy = datalen - transhdrlen - fraggap - pagedlen;
			if (copy < 0) {
				err = -EINVAL;
				goto error;
			}
			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk, alloclen,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
				    2 * sk->sk_sndbuf)
					skb = alloc_skb(alloclen,
							sk->sk_allocation);
				if (unlikely(!skb))
					err = -ENOBUFS;
			}
			if (!skb)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->protocol = htons(ETH_P_IPV6);
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation and ipsec header */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
				    dst_exthdrlen);

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen - pagedlen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			if (copy > 0 &&
			    getfrag(from, data + transhdrlen, offset,
				    copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= copy + transhdrlen;
			transhdrlen = 0;
			exthdrlen = 0;
			dst_exthdrlen = 0;

			/* Only the initial fragment is time stamped */
			skb_shinfo(skb)->tx_flags = cork->tx_flags;
			cork->tx_flags = 0;
			skb_shinfo(skb)->tskey = tskey;
			tskey = 0;
			skb_zcopy_set(skb, uarg, &extra_uref);

			if ((flags & MSG_CONFIRM) && !skb_prev)
				skb_set_dst_pending_confirm(skb, 1);

			/*
			 * Put the packet on the pending queue
			 */
			if (!skb->destructor) {
				skb->destructor = sock_wfree;
				skb->sk = sk;
				wmem_alloc_delta += skb->truesize;
			}
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG) &&
		    skb_tailroom(skb) >= copy) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else if (!uarg || !uarg->zerocopy) {
			int i = skb_shinfo(skb)->nr_frags;

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			wmem_alloc_delta += copy;
		} else {
			err = skb_zerocopy_iter_dgram(skb, from, copy);
			if (err < 0)
				goto error;
		}
		offset += copy;
		length -= copy;
	}

	if (wmem_alloc_delta)
		refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
	return 0;

error_efault:
	err = -EFAULT;
error:
	if (uarg)
		sock_zerocopy_put_abort(uarg, extra_uref);
	cork->length -= length;
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
	return err;
}
int ip6_append_data(struct sock *sk,
		    int getfrag(void *from, char *to, int offset, int len,
				int odd, struct sk_buff *skb),
		    void *from, int length, int transhdrlen,
		    struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
		    struct rt6_info *rt, unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	int exthdrlen;
	int err;

	if (flags&MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		err = ip6_setup_cork(sk, &inet->cork, &np->cork,
				     ipc6, rt, fl6);
		if (err)
			return err;

		exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		fl6 = &inet->cork.fl.u.ip6;
		transhdrlen = 0;
	}

	return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base,
				 &np->cork, sk_page_frag(sk), getfrag,
				 from, length, transhdrlen, flags, ipc6);
}
EXPORT_SYMBOL_GPL(ip6_append_data);
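/*
 * Datagram sockets pair ip6_append_data() with ip6_push_pending_frames()
 * and ip6_flush_pending_frames() below; udpv6_sendmsg() is the main
 * in-tree user. A simplified, hedged sketch of the corking pattern (the
 * UDP header setup udpv6 performs before pushing is omitted):
 *
 *	lock_sock(sk);
 *	err = ip6_append_data(sk, getfrag, msg, len, sizeof(struct udphdr),
 *			      &ipc6, &fl6, rt, msg->msg_flags);
 *	if (err)
 *		ip6_flush_pending_frames(sk);
 *	else if (!(msg->msg_flags & MSG_MORE))
 *		err = ip6_push_pending_frames(sk);
 *	release_sock(sk);
 */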
static void ip6_cork_release(struct inet_cork_full *cork,
			     struct inet6_cork *v6_cork)
{
	if (v6_cork->opt) {
		kfree(v6_cork->opt->dst0opt);
		kfree(v6_cork->opt->dst1opt);
		kfree(v6_cork->opt->hopopt);
		kfree(v6_cork->opt->srcrt);
		kfree(v6_cork->opt);
		v6_cork->opt = NULL;
	}

	if (cork->base.dst) {
		dst_release(cork->base.dst);
		cork->base.dst = NULL;
		cork->base.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&cork->fl, 0, sizeof(cork->fl));
}
struct sk_buff *__ip6_make_skb(struct sock *sk,
			       struct sk_buff_head *queue,
			       struct inet_cork_full *cork,
			       struct inet6_cork *v6_cork)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = v6_cork->opt;
	struct rt6_info *rt = (struct rt6_info *)cork->base.dst;
	struct flowi6 *fl6 = &cork->fl.u.ip6;
	unsigned char proto = fl6->flowi6_proto;

	skb = __skb_dequeue(queue);
	if (!skb)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Allow local fragmentation. */
	skb->ignore_df = ip6_sk_ignore_df(sk);

	*final_dst = fl6->daddr;
	__skb_pull(skb, skb_network_header_len(skb));
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst, &fl6->saddr);

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	ip6_flow_hdr(hdr, v6_cork->tclass,
		     ip6_make_flowlabel(net, skb, fl6->flowlabel,
					ip6_autoflowlabel(net, np), fl6));
	hdr->hop_limit = v6_cork->hop_limit;
	hdr->nexthdr = proto;
	hdr->saddr = fl6->saddr;
	hdr->daddr = *final_dst;

	skb->priority = sk->sk_priority;
	skb->mark = cork->base.mark;

	skb->tstamp = cork->base.transmit_time;

	skb_dst_set(skb, dst_clone(&rt->dst));
	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
	if (proto == IPPROTO_ICMPV6) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
		u8 icmp6_type;

		if (sk->sk_socket->type == SOCK_RAW && !inet_sk(sk)->hdrincl)
			icmp6_type = fl6->fl6_icmp_type;
		else
			icmp6_type = icmp6_hdr(skb)->icmp6_type;
		ICMP6MSGOUT_INC_STATS(net, idev, icmp6_type);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
	}

	ip6_cork_release(cork, v6_cork);
out:
	return skb;
}
int ip6_send_skb(struct sk_buff *skb)
{
	struct net *net = sock_net(skb->sk);
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	int err;

	err = ip6_local_out(net, skb->sk, skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			IP6_INC_STATS(net, rt->rt6i_idev,
				      IPSTATS_MIB_OUTDISCARDS);
	}

	return err;
}
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;

	skb = ip6_finish_skb(sk);
	if (!skb)
		return 0;

	return ip6_send_skb(skb);
}
EXPORT_SYMBOL_GPL(ip6_push_pending_frames);
static void __ip6_flush_pending_frames(struct sock *sk,
				       struct sk_buff_head *queue,
				       struct inet_cork_full *cork,
				       struct inet6_cork *v6_cork)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(queue)) != NULL) {
		if (skb_dst(skb))
			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	ip6_cork_release(cork, v6_cork);
}

void ip6_flush_pending_frames(struct sock *sk)
{
	__ip6_flush_pending_frames(sk, &sk->sk_write_queue,
				   &inet_sk(sk)->cork, &inet6_sk(sk)->cork);
}
EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);
struct sk_buff *ip6_make_skb(struct sock *sk,
			     int getfrag(void *from, char *to, int offset,
					 int len, int odd, struct sk_buff *skb),
			     void *from, int length, int transhdrlen,
			     struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
			     struct rt6_info *rt, unsigned int flags,
			     struct inet_cork_full *cork)
{
	struct inet6_cork v6_cork;
	struct sk_buff_head queue;
	int exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0);
	int err;

	if (flags & MSG_PROBE)
		return NULL;

	__skb_queue_head_init(&queue);

	cork->base.flags = 0;
	cork->base.addr = 0;
	cork->base.opt = NULL;
	cork->base.dst = NULL;
	v6_cork.opt = NULL;
	err = ip6_setup_cork(sk, cork, &v6_cork, ipc6, rt, fl6);
	if (err) {
		ip6_cork_release(cork, &v6_cork);
		return ERR_PTR(err);
	}
	if (ipc6->dontfrag < 0)
		ipc6->dontfrag = inet6_sk(sk)->dontfrag;

	err = __ip6_append_data(sk, fl6, &queue, &cork->base, &v6_cork,
				&current->task_frag, getfrag, from,
				length + exthdrlen, transhdrlen + exthdrlen,
				flags, ipc6);
	if (err) {
		__ip6_flush_pending_frames(sk, &queue, cork, &v6_cork);
		return ERR_PTR(err);
	}

	return __ip6_make_skb(sk, &queue, cork, &v6_cork);
}
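/*
 * ip6_make_skb() is the uncorked, single-shot variant of the append/push
 * sequence above: it builds the whole datagram on a private queue and hands
 * back one skb. A hedged caller sketch (udpv6's lockless fast path does
 * something similar, adding the UDP header before transmission; "msg",
 * "ipc6", "fl6", "rt" and "cork" are placeholder variables):
 *
 *	skb = ip6_make_skb(sk, getfrag, msg, len, sizeof(struct udphdr),
 *			   &ipc6, &fl6, rt, msg->msg_flags, &cork);
 *	if (!IS_ERR_OR_NULL(skb))
 *		err = ip6_send_skb(skb);
 */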