GNU Linux-libre 6.1.90-gnu
net/ipv6/ip6_output.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *      IPv6 output functions
4  *      Linux INET6 implementation
5  *
6  *      Authors:
7  *      Pedro Roque             <roque@di.fc.ul.pt>
8  *
9  *      Based on linux/net/ipv4/ip_output.c
10  *
11  *      Changes:
12  *      A.N.Kuznetsov   :       arithmetic in fragmentation.
13  *                              extension headers are implemented.
14  *                              route changes now work.
15  *                              ip6_forward does not confuse sniffers.
16  *                              etc.
17  *
18  *      H. von Brand    :       Added missing #include <linux/string.h>
19  *      Imran Patel     :       frag id should be in NBO
20  *      Kazunori MIYAZAWA @USAGI
21  *                      :       add ip6_append_data and related functions
22  *                              for datagram xmit
23  */
24
25 #include <linux/errno.h>
26 #include <linux/kernel.h>
27 #include <linux/string.h>
28 #include <linux/socket.h>
29 #include <linux/net.h>
30 #include <linux/netdevice.h>
31 #include <linux/if_arp.h>
32 #include <linux/in6.h>
33 #include <linux/tcp.h>
34 #include <linux/route.h>
35 #include <linux/module.h>
36 #include <linux/slab.h>
37
38 #include <linux/bpf-cgroup.h>
39 #include <linux/netfilter.h>
40 #include <linux/netfilter_ipv6.h>
41
42 #include <net/sock.h>
43 #include <net/snmp.h>
44
45 #include <net/ipv6.h>
46 #include <net/ndisc.h>
47 #include <net/protocol.h>
48 #include <net/ip6_route.h>
49 #include <net/addrconf.h>
50 #include <net/rawv6.h>
51 #include <net/icmp.h>
52 #include <net/xfrm.h>
53 #include <net/checksum.h>
54 #include <linux/mroute6.h>
55 #include <net/l3mdev.h>
56 #include <net/lwtunnel.h>
57 #include <net/ip_tunnels.h>
58
59 static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
60 {
61         struct dst_entry *dst = skb_dst(skb);
62         struct net_device *dev = dst->dev;
63         struct inet6_dev *idev = ip6_dst_idev(dst);
64         unsigned int hh_len = LL_RESERVED_SPACE(dev);
65         const struct in6_addr *daddr, *nexthop;
66         struct ipv6hdr *hdr;
67         struct neighbour *neigh;
68         int ret;
69
70         /* Be paranoid, rather than too clever. */
71         if (unlikely(hh_len > skb_headroom(skb)) && dev->header_ops) {
72                 skb = skb_expand_head(skb, hh_len);
73                 if (!skb) {
74                         IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
75                         return -ENOMEM;
76                 }
77         }
78
79         hdr = ipv6_hdr(skb);
80         daddr = &hdr->daddr;
81         if (ipv6_addr_is_multicast(daddr)) {
82                 if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) &&
83                     ((mroute6_is_socket(net, skb) &&
84                      !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
85                      ipv6_chk_mcast_addr(dev, daddr, &hdr->saddr))) {
86                         struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
87
88                         /* Do not check for IFF_ALLMULTI; multicast routing
89                          * is not supported in any case.
90                          */
91                         if (newskb)
92                                 NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
93                                         net, sk, newskb, NULL, newskb->dev,
94                                         dev_loopback_xmit);
95
96                         if (hdr->hop_limit == 0) {
97                                 IP6_INC_STATS(net, idev,
98                                               IPSTATS_MIB_OUTDISCARDS);
99                                 kfree_skb(skb);
100                                 return 0;
101                         }
102                 }
103
104                 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, skb->len);
105                 if (IPV6_ADDR_MC_SCOPE(daddr) <= IPV6_ADDR_SCOPE_NODELOCAL &&
106                     !(dev->flags & IFF_LOOPBACK)) {
107                         kfree_skb(skb);
108                         return 0;
109                 }
110         }
111
112         if (lwtunnel_xmit_redirect(dst->lwtstate)) {
113                 int res = lwtunnel_xmit(skb);
114
115                 if (res != LWTUNNEL_XMIT_CONTINUE)
116                         return res;
117         }
118
119         rcu_read_lock();
120         nexthop = rt6_nexthop((struct rt6_info *)dst, daddr);
121         neigh = __ipv6_neigh_lookup_noref(dev, nexthop);
122
123         if (unlikely(IS_ERR_OR_NULL(neigh))) {
124                 if (unlikely(!neigh))
125                         neigh = __neigh_create(&nd_tbl, nexthop, dev, false);
126                 if (IS_ERR(neigh)) {
127                         rcu_read_unlock();
128                         IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTNOROUTES);
129                         kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_CREATEFAIL);
130                         return -EINVAL;
131                 }
132         }
133         sock_confirm_neigh(skb, neigh);
134         ret = neigh_output(neigh, skb, false);
135         rcu_read_unlock();
136         return ret;
137 }
138
139 static int
140 ip6_finish_output_gso_slowpath_drop(struct net *net, struct sock *sk,
141                                     struct sk_buff *skb, unsigned int mtu)
142 {
143         struct sk_buff *segs, *nskb;
144         netdev_features_t features;
145         int ret = 0;
146
147         /* Please see corresponding comment in ip_finish_output_gso
148          * describing the cases where GSO segment length exceeds the
149          * egress MTU.
150          */
151         features = netif_skb_features(skb);
152         segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
153         if (IS_ERR_OR_NULL(segs)) {
154                 kfree_skb(skb);
155                 return -ENOMEM;
156         }
157
158         consume_skb(skb);
159
160         skb_list_walk_safe(segs, segs, nskb) {
161                 int err;
162
163                 skb_mark_not_on_list(segs);
164                 /* Last GSO segment can be smaller than gso_size (and MTU).
165                  * Adding a fragment header would produce an "atomic fragment",
166                  * which is considered harmful (RFC-8021). Avoid that.
167                  */
168                 err = segs->len > mtu ?
169                         ip6_fragment(net, sk, segs, ip6_finish_output2) :
170                         ip6_finish_output2(net, sk, segs);
171                 if (err && ret == 0)
172                         ret = err;
173         }
174
175         return ret;
176 }
177
178 static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
179 {
180         unsigned int mtu;
181
182 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
183         /* Policy lookup after SNAT yielded a new policy */
184         if (skb_dst(skb)->xfrm) {
185                 IP6CB(skb)->flags |= IP6SKB_REROUTED;
186                 return dst_output(net, sk, skb);
187         }
188 #endif
189
190         mtu = ip6_skb_dst_mtu(skb);
191         if (skb_is_gso(skb) &&
192             !(IP6CB(skb)->flags & IP6SKB_FAKEJUMBO) &&
193             !skb_gso_validate_network_len(skb, mtu))
194                 return ip6_finish_output_gso_slowpath_drop(net, sk, skb, mtu);
195
196         if ((skb->len > mtu && !skb_is_gso(skb)) ||
197             dst_allfrag(skb_dst(skb)) ||
198             (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
199                 return ip6_fragment(net, sk, skb, ip6_finish_output2);
200         else
201                 return ip6_finish_output2(net, sk, skb);
202 }
203
204 static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
205 {
206         int ret;
207
208         ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
209         switch (ret) {
210         case NET_XMIT_SUCCESS:
211         case NET_XMIT_CN:
212                 return __ip6_finish_output(net, sk, skb) ? : ret;
213         default:
214                 kfree_skb_reason(skb, SKB_DROP_REASON_BPF_CGROUP_EGRESS);
215                 return ret;
216         }
217 }
218
219 int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
220 {
221         struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;
222         struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
223
224         skb->protocol = htons(ETH_P_IPV6);
225         skb->dev = dev;
226
227         if (unlikely(idev->cnf.disable_ipv6)) {
228                 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
229                 kfree_skb_reason(skb, SKB_DROP_REASON_IPV6DISABLED);
230                 return 0;
231         }
232
233         return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
234                             net, sk, skb, indev, dev,
235                             ip6_finish_output,
236                             !(IP6CB(skb)->flags & IP6SKB_REROUTED));
237 }
238 EXPORT_SYMBOL(ip6_output);
239
240 bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
241 {
242         if (!np->autoflowlabel_set)
243                 return ip6_default_np_autolabel(net);
244         else
245                 return np->autoflowlabel;
246 }
247
248 /*
249  * xmit an sk_buff (used by TCP, SCTP and DCCP)
250  * Note: the socket lock is not held for SYNACK packets, but the skb may
251  * still be modified by calls to skb_set_owner_w() and ipv6_local_error(),
252  * which use proper atomic operations or spinlocks.
253  */
254 int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
255              __u32 mark, struct ipv6_txoptions *opt, int tclass, u32 priority)
256 {
257         struct net *net = sock_net(sk);
258         const struct ipv6_pinfo *np = inet6_sk(sk);
259         struct in6_addr *first_hop = &fl6->daddr;
260         struct dst_entry *dst = skb_dst(skb);
261         struct net_device *dev = dst->dev;
262         struct inet6_dev *idev = ip6_dst_idev(dst);
263         struct hop_jumbo_hdr *hop_jumbo;
264         int hoplen = sizeof(*hop_jumbo);
265         unsigned int head_room;
266         struct ipv6hdr *hdr;
267         u8  proto = fl6->flowi6_proto;
268         int seg_len = skb->len;
269         int hlimit = -1;
270         u32 mtu;
271
272         head_room = sizeof(struct ipv6hdr) + hoplen + LL_RESERVED_SPACE(dev);
273         if (opt)
274                 head_room += opt->opt_nflen + opt->opt_flen;
275
276         if (unlikely(head_room > skb_headroom(skb))) {
277                 skb = skb_expand_head(skb, head_room);
278                 if (!skb) {
279                         IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
280                         return -ENOBUFS;
281                 }
282         }
283
284         if (opt) {
285                 seg_len += opt->opt_nflen + opt->opt_flen;
286
287                 if (opt->opt_flen)
288                         ipv6_push_frag_opts(skb, opt, &proto);
289
290                 if (opt->opt_nflen)
291                         ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop,
292                                              &fl6->saddr);
293         }
294
295         if (unlikely(seg_len > IPV6_MAXPLEN)) {
296                 hop_jumbo = skb_push(skb, hoplen);
297
298                 hop_jumbo->nexthdr = proto;
299                 hop_jumbo->hdrlen = 0;
300                 hop_jumbo->tlv_type = IPV6_TLV_JUMBO;
301                 hop_jumbo->tlv_len = 4;
302                 hop_jumbo->jumbo_payload_len = htonl(seg_len + hoplen);
303
304                 proto = IPPROTO_HOPOPTS;
305                 seg_len = 0;
306                 IP6CB(skb)->flags |= IP6SKB_FAKEJUMBO;
307         }
308
309         skb_push(skb, sizeof(struct ipv6hdr));
310         skb_reset_network_header(skb);
311         hdr = ipv6_hdr(skb);
312
313         /*
314          *      Fill in the IPv6 header
315          */
316         if (np)
317                 hlimit = np->hop_limit;
318         if (hlimit < 0)
319                 hlimit = ip6_dst_hoplimit(dst);
320
321         ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
322                                 ip6_autoflowlabel(net, np), fl6));
323
324         hdr->payload_len = htons(seg_len);
325         hdr->nexthdr = proto;
326         hdr->hop_limit = hlimit;
327
328         hdr->saddr = fl6->saddr;
329         hdr->daddr = *first_hop;
330
331         skb->protocol = htons(ETH_P_IPV6);
332         skb->priority = priority;
333         skb->mark = mark;
334
335         mtu = dst_mtu(dst);
336         if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
337                 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
338
339                 /* If the egress device is enslaved to an L3 master device, pass
340                  * the skb to its handler for processing.
341                  */
342                 skb = l3mdev_ip6_out((struct sock *)sk, skb);
343                 if (unlikely(!skb))
344                         return 0;
345
346                 /* Hooks should never assume the socket lock is held,
347                  * so we cast our socket to non-const.
348                  */
349                 return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
350                                net, (struct sock *)sk, skb, NULL, dev,
351                                dst_output);
352         }
353
354         skb->dev = dev;
355         /* ipv6_local_error() does not require the socket lock,
356          * so we cast our socket to non-const.
357          */
358         ipv6_local_error((struct sock *)sk, EMSGSIZE, fl6, mtu);
359
360         IP6_INC_STATS(net, idev, IPSTATS_MIB_FRAGFAILS);
361         kfree_skb(skb);
362         return -EMSGSIZE;
363 }
364 EXPORT_SYMBOL(ip6_xmit);
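
/* Illustrative sketch (editor's assumption, not code from this tree): a
 * typical ip6_xmit() caller, e.g. a TCP-style protocol sending a locally
 * built skb, looks roughly like this; rt, fl6, opt, tclass and prio are
 * caller-owned state. The skb must carry a dst before the call:
 *
 *	skb_dst_set(skb, dst_clone(&rt->dst));
 *	err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, tclass, prio);
 *	if (err)
 *		net_dbg_ratelimited("ip6_xmit failed: %d\n", err);
 */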
365
366 static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
367 {
368         struct ip6_ra_chain *ra;
369         struct sock *last = NULL;
370
371         read_lock(&ip6_ra_lock);
372         for (ra = ip6_ra_chain; ra; ra = ra->next) {
373                 struct sock *sk = ra->sk;
374                 if (sk && ra->sel == sel &&
375                     (!sk->sk_bound_dev_if ||
376                      sk->sk_bound_dev_if == skb->dev->ifindex)) {
377                         struct ipv6_pinfo *np = inet6_sk(sk);
378
379                         if (np && np->rtalert_isolate &&
380                             !net_eq(sock_net(sk), dev_net(skb->dev))) {
381                                 continue;
382                         }
383                         if (last) {
384                                 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
385                                 if (skb2)
386                                         rawv6_rcv(last, skb2);
387                         }
388                         last = sk;
389                 }
390         }
391
392         if (last) {
393                 rawv6_rcv(last, skb);
394                 read_unlock(&ip6_ra_lock);
395                 return 1;
396         }
397         read_unlock(&ip6_ra_lock);
398         return 0;
399 }
400
401 static int ip6_forward_proxy_check(struct sk_buff *skb)
402 {
403         struct ipv6hdr *hdr = ipv6_hdr(skb);
404         u8 nexthdr = hdr->nexthdr;
405         __be16 frag_off;
406         int offset;
407
408         if (ipv6_ext_hdr(nexthdr)) {
409                 offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
410                 if (offset < 0)
411                         return 0;
412         } else
413                 offset = sizeof(struct ipv6hdr);
414
415         if (nexthdr == IPPROTO_ICMPV6) {
416                 struct icmp6hdr *icmp6;
417
418                 if (!pskb_may_pull(skb, (skb_network_header(skb) +
419                                          offset + 1 - skb->data)))
420                         return 0;
421
422                 icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);
423
424                 switch (icmp6->icmp6_type) {
425                 case NDISC_ROUTER_SOLICITATION:
426                 case NDISC_ROUTER_ADVERTISEMENT:
427                 case NDISC_NEIGHBOUR_SOLICITATION:
428                 case NDISC_NEIGHBOUR_ADVERTISEMENT:
429                 case NDISC_REDIRECT:
430                         /* Unicast neighbour discovery messages destined
431                          * to the proxied address are passed to the input
432                          * function.
433                          */
434                         return 1;
435                 default:
436                         break;
437                 }
438         }
439
440         /*
441          * The proxying router can't forward traffic sent to a link-local
442          * address, so signal the sender and discard the packet. This
443          * behavior is clarified by the MIPv6 specification.
444          */
445         if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
446                 dst_link_failure(skb);
447                 return -1;
448         }
449
450         return 0;
451 }
452
453 static inline int ip6_forward_finish(struct net *net, struct sock *sk,
454                                      struct sk_buff *skb)
455 {
456         struct dst_entry *dst = skb_dst(skb);
457
458         __IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
459         __IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
460
461 #ifdef CONFIG_NET_SWITCHDEV
462         if (skb->offload_l3_fwd_mark) {
463                 consume_skb(skb);
464                 return 0;
465         }
466 #endif
467
468         skb_clear_tstamp(skb);
469         return dst_output(net, sk, skb);
470 }
471
472 static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
473 {
474         if (skb->len <= mtu)
475                 return false;
476
477         /* ipv6 conntrack defrag sets max_frag_size + ignore_df */
478         if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
479                 return true;
480
481         if (skb->ignore_df)
482                 return false;
483
484         if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
485                 return false;
486
487         return true;
488 }
489
490 int ip6_forward(struct sk_buff *skb)
491 {
492         struct dst_entry *dst = skb_dst(skb);
493         struct ipv6hdr *hdr = ipv6_hdr(skb);
494         struct inet6_skb_parm *opt = IP6CB(skb);
495         struct net *net = dev_net(dst->dev);
496         struct inet6_dev *idev;
497         SKB_DR(reason);
498         u32 mtu;
499
500         idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
501         if (net->ipv6.devconf_all->forwarding == 0)
502                 goto error;
503
504         if (skb->pkt_type != PACKET_HOST)
505                 goto drop;
506
507         if (unlikely(skb->sk))
508                 goto drop;
509
510         if (skb_warn_if_lro(skb))
511                 goto drop;
512
513         if (!net->ipv6.devconf_all->disable_policy &&
514             (!idev || !idev->cnf.disable_policy) &&
515             !xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
516                 __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
517                 goto drop;
518         }
519
520         skb_forward_csum(skb);
521
522         /*
523          *      We do NOT do any processing on
524          *      RA packets; we push them to user level AS IS
525          *      without any WARRANTY that the application will be
526          *      able to interpret them. The reason is that we
527          *      cannot do anything clever here.
528          *
529          *      We are not an end-node, so if the packet contains
530          *      AH/ESP we cannot do anything with it either.
531          *      Defragmentation would also be a mistake: RA packets
532          *      cannot be fragmented, because there is no guarantee
533          *      that different fragments will go along one path. --ANK
534          */
535         if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
536                 if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
537                         return 0;
538         }
539
540         /*
541          *      check and decrement ttl
542          */
543         if (hdr->hop_limit <= 1) {
544                 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
545                 __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
546
547                 kfree_skb_reason(skb, SKB_DROP_REASON_IP_INHDR);
548                 return -ETIMEDOUT;
549         }
550
551         /* XXX: idev->cnf.proxy_ndp? */
552         if (net->ipv6.devconf_all->proxy_ndp &&
553             pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
554                 int proxied = ip6_forward_proxy_check(skb);
555                 if (proxied > 0) {
556                         /* It's tempting to decrease the hop limit
557                          * here by 1, as we do at the end of the
558                          * function too.
559                          *
560                          * But that would be incorrect, as proxying is
561                          * not forwarding.  The ip6_input function
562                          * will handle this packet locally, and it
563                          * depends on the hop limit being unchanged.
564                          *
565                          * One example is the NDP hop limit, which
566                          * always has to stay 255; another would be
567                          * similar checks around RA packets, where the
568                          * user can even change the desired limit.
569                          */
570                         return ip6_input(skb);
571                 } else if (proxied < 0) {
572                         __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
573                         goto drop;
574                 }
575         }
576
577         if (!xfrm6_route_forward(skb)) {
578                 __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
579                 SKB_DR_SET(reason, XFRM_POLICY);
580                 goto drop;
581         }
582         dst = skb_dst(skb);
583
584         /* The IPv6 specs say nothing about it, but it is clear that we cannot
585          * send redirects to source-routed frames.
586          * We don't send redirects to frames decapsulated from IPsec.
587          */
588         if (IP6CB(skb)->iif == dst->dev->ifindex &&
589             opt->srcrt == 0 && !skb_sec_path(skb)) {
590                 struct in6_addr *target = NULL;
591                 struct inet_peer *peer;
592                 struct rt6_info *rt;
593
594                 /*
595                  *      The incoming and outgoing devices are the same;
596                  *      send a redirect.
597                  */
598
599                 rt = (struct rt6_info *) dst;
600                 if (rt->rt6i_flags & RTF_GATEWAY)
601                         target = &rt->rt6i_gateway;
602                 else
603                         target = &hdr->daddr;
604
605                 peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr, 1);
606
607                 /* Limit redirects both by destination (here)
608                  * and by source (inside ndisc_send_redirect).
609                  */
610                 if (inet_peer_xrlim_allow(peer, 1*HZ))
611                         ndisc_send_redirect(skb, target);
612                 if (peer)
613                         inet_putpeer(peer);
614         } else {
615                 int addrtype = ipv6_addr_type(&hdr->saddr);
616
617                 /* This check is security critical. */
618                 if (addrtype == IPV6_ADDR_ANY ||
619                     addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
620                         goto error;
621                 if (addrtype & IPV6_ADDR_LINKLOCAL) {
622                         icmpv6_send(skb, ICMPV6_DEST_UNREACH,
623                                     ICMPV6_NOT_NEIGHBOUR, 0);
624                         goto error;
625                 }
626         }
627
628         mtu = ip6_dst_mtu_maybe_forward(dst, true);
629         if (mtu < IPV6_MIN_MTU)
630                 mtu = IPV6_MIN_MTU;
631
632         if (ip6_pkt_too_big(skb, mtu)) {
633                 /* Again, force the OUTPUT device to be used for the source address */
634                 skb->dev = dst->dev;
635                 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
636                 __IP6_INC_STATS(net, idev, IPSTATS_MIB_INTOOBIGERRORS);
637                 __IP6_INC_STATS(net, ip6_dst_idev(dst),
638                                 IPSTATS_MIB_FRAGFAILS);
639                 kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_BIG);
640                 return -EMSGSIZE;
641         }
642
643         if (skb_cow(skb, dst->dev->hard_header_len)) {
644                 __IP6_INC_STATS(net, ip6_dst_idev(dst),
645                                 IPSTATS_MIB_OUTDISCARDS);
646                 goto drop;
647         }
648
649         hdr = ipv6_hdr(skb);
650
651         /* Mangling the hop limit is delayed until after the skb COW */
652
653         hdr->hop_limit--;
654
655         return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
656                        net, NULL, skb, skb->dev, dst->dev,
657                        ip6_forward_finish);
658
659 error:
660         __IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
661         SKB_DR_SET(reason, IP_INADDRERRORS);
662 drop:
663         kfree_skb_reason(skb, reason);
664         return -EINVAL;
665 }
666
667 static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
668 {
669         to->pkt_type = from->pkt_type;
670         to->priority = from->priority;
671         to->protocol = from->protocol;
672         skb_dst_drop(to);
673         skb_dst_set(to, dst_clone(skb_dst(from)));
674         to->dev = from->dev;
675         to->mark = from->mark;
676
677         skb_copy_hash(to, from);
678
679 #ifdef CONFIG_NET_SCHED
680         to->tc_index = from->tc_index;
681 #endif
682         nf_copy(to, from);
683         skb_ext_copy(to, from);
684         skb_copy_secmark(to, from);
685 }
686
687 int ip6_fraglist_init(struct sk_buff *skb, unsigned int hlen, u8 *prevhdr,
688                       u8 nexthdr, __be32 frag_id,
689                       struct ip6_fraglist_iter *iter)
690 {
691         unsigned int first_len;
692         struct frag_hdr *fh;
693
694         /* BUILD HEADER */
695         *prevhdr = NEXTHDR_FRAGMENT;
696         iter->tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
697         if (!iter->tmp_hdr)
698                 return -ENOMEM;
699
700         iter->frag = skb_shinfo(skb)->frag_list;
701         skb_frag_list_init(skb);
702
703         iter->offset = 0;
704         iter->hlen = hlen;
705         iter->frag_id = frag_id;
706         iter->nexthdr = nexthdr;
707
708         __skb_pull(skb, hlen);
709         fh = __skb_push(skb, sizeof(struct frag_hdr));
710         __skb_push(skb, hlen);
711         skb_reset_network_header(skb);
712         memcpy(skb_network_header(skb), iter->tmp_hdr, hlen);
713
714         fh->nexthdr = nexthdr;
715         fh->reserved = 0;
716         fh->frag_off = htons(IP6_MF);
717         fh->identification = frag_id;
718
719         first_len = skb_pagelen(skb);
720         skb->data_len = first_len - skb_headlen(skb);
721         skb->len = first_len;
722         ipv6_hdr(skb)->payload_len = htons(first_len - sizeof(struct ipv6hdr));
723
724         return 0;
725 }
726 EXPORT_SYMBOL(ip6_fraglist_init);
727
728 void ip6_fraglist_prepare(struct sk_buff *skb,
729                           struct ip6_fraglist_iter *iter)
730 {
731         struct sk_buff *frag = iter->frag;
732         unsigned int hlen = iter->hlen;
733         struct frag_hdr *fh;
734
735         frag->ip_summed = CHECKSUM_NONE;
736         skb_reset_transport_header(frag);
737         fh = __skb_push(frag, sizeof(struct frag_hdr));
738         __skb_push(frag, hlen);
739         skb_reset_network_header(frag);
740         memcpy(skb_network_header(frag), iter->tmp_hdr, hlen);
741         iter->offset += skb->len - hlen - sizeof(struct frag_hdr);
742         fh->nexthdr = iter->nexthdr;
743         fh->reserved = 0;
744         fh->frag_off = htons(iter->offset);
745         if (frag->next)
746                 fh->frag_off |= htons(IP6_MF);
747         fh->identification = iter->frag_id;
748         ipv6_hdr(frag)->payload_len = htons(frag->len - sizeof(struct ipv6hdr));
749         ip6_copy_metadata(frag, skb);
750 }
751 EXPORT_SYMBOL(ip6_fraglist_prepare);
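
/* Illustrative sketch (editor's assumption): the fraglist fast path drives
 * ip6_fraglist_init()/ip6_fraglist_prepare()/ip6_fraglist_next() as an
 * init-then-loop sequence, much like ip6_fragment() further below; hlen,
 * prevhdr, nexthdr and frag_id are assumed to be precomputed:
 *
 *	struct ip6_fraglist_iter iter;
 *
 *	err = ip6_fraglist_init(skb, hlen, prevhdr, nexthdr, frag_id, &iter);
 *	for (;;) {
 *		if (iter.frag)
 *			ip6_fraglist_prepare(skb, &iter);
 *		err = output(net, sk, skb);
 *		if (err || !iter.frag)
 *			break;
 *		skb = ip6_fraglist_next(&iter);
 *	}
 *	kfree(iter.tmp_hdr);
 */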
752
753 void ip6_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int mtu,
754                    unsigned short needed_tailroom, int hdr_room, u8 *prevhdr,
755                    u8 nexthdr, __be32 frag_id, struct ip6_frag_state *state)
756 {
757         state->prevhdr = prevhdr;
758         state->nexthdr = nexthdr;
759         state->frag_id = frag_id;
760
761         state->hlen = hlen;
762         state->mtu = mtu;
763
764         state->left = skb->len - hlen;  /* Space per frame */
765         state->ptr = hlen;              /* Where to start from */
766
767         state->hroom = hdr_room;
768         state->troom = needed_tailroom;
769
770         state->offset = 0;
771 }
772 EXPORT_SYMBOL(ip6_frag_init);
773
774 struct sk_buff *ip6_frag_next(struct sk_buff *skb, struct ip6_frag_state *state)
775 {
776         u8 *prevhdr = state->prevhdr, *fragnexthdr_offset;
777         struct sk_buff *frag;
778         struct frag_hdr *fh;
779         unsigned int len;
780
781         len = state->left;
782         /* IF: it doesn't fit, use 'mtu' - the data space left */
783         if (len > state->mtu)
784                 len = state->mtu;
785         /* IF: we are not sending up to and including the packet end,
786          * then align the next start on an eight byte boundary */
787         if (len < state->left)
788                 len &= ~7;
789
790         /* Allocate buffer */
791         frag = alloc_skb(len + state->hlen + sizeof(struct frag_hdr) +
792                          state->hroom + state->troom, GFP_ATOMIC);
793         if (!frag)
794                 return ERR_PTR(-ENOMEM);
795
796         /*
797          *      Set up data on packet
798          */
799
800         ip6_copy_metadata(frag, skb);
801         skb_reserve(frag, state->hroom);
802         skb_put(frag, len + state->hlen + sizeof(struct frag_hdr));
803         skb_reset_network_header(frag);
804         fh = (struct frag_hdr *)(skb_network_header(frag) + state->hlen);
805         frag->transport_header = (frag->network_header + state->hlen +
806                                   sizeof(struct frag_hdr));
807
808         /*
809          *      Charge the memory for the fragment to any owner
810          *      it might possess
811          */
812         if (skb->sk)
813                 skb_set_owner_w(frag, skb->sk);
814
815         /*
816          *      Copy the packet header into the new buffer.
817          */
818         skb_copy_from_linear_data(skb, skb_network_header(frag), state->hlen);
819
820         fragnexthdr_offset = skb_network_header(frag);
821         fragnexthdr_offset += prevhdr - skb_network_header(skb);
822         *fragnexthdr_offset = NEXTHDR_FRAGMENT;
823
824         /*
825          *      Build fragment header.
826          */
827         fh->nexthdr = state->nexthdr;
828         fh->reserved = 0;
829         fh->identification = state->frag_id;
830
831         /*
832          *      Copy a block of the IP datagram.
833          */
834         BUG_ON(skb_copy_bits(skb, state->ptr, skb_transport_header(frag),
835                              len));
836         state->left -= len;
837
838         fh->frag_off = htons(state->offset);
839         if (state->left > 0)
840                 fh->frag_off |= htons(IP6_MF);
841         ipv6_hdr(frag)->payload_len = htons(frag->len - sizeof(struct ipv6hdr));
842
843         state->ptr += len;
844         state->offset += len;
845
846         return frag;
847 }
848 EXPORT_SYMBOL(ip6_frag_next);
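
/* Illustrative sketch (editor's assumption): the slow-path helpers pair up
 * the same way ip6_fragment() below uses them, with the caller supplying
 * hlen, mtu, prevhdr, nexthdr and frag_id:
 *
 *	struct ip6_frag_state state;
 *	struct sk_buff *frag;
 *
 *	ip6_frag_init(skb, hlen, mtu, dev->needed_tailroom,
 *		      LL_RESERVED_SPACE(dev), prevhdr, nexthdr, frag_id,
 *		      &state);
 *	while (state.left > 0) {
 *		frag = ip6_frag_next(skb, &state);
 *		if (IS_ERR(frag))
 *			return PTR_ERR(frag);
 *		err = output(net, sk, frag);
 *	}
 */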
849
850 int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
851                  int (*output)(struct net *, struct sock *, struct sk_buff *))
852 {
853         struct sk_buff *frag;
854         struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
855         struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
856                                 inet6_sk(skb->sk) : NULL;
857         bool mono_delivery_time = skb->mono_delivery_time;
858         struct ip6_frag_state state;
859         unsigned int mtu, hlen, nexthdr_offset;
860         ktime_t tstamp = skb->tstamp;
861         int hroom, err = 0;
862         __be32 frag_id;
863         u8 *prevhdr, nexthdr = 0;
864
865         err = ip6_find_1stfragopt(skb, &prevhdr);
866         if (err < 0)
867                 goto fail;
868         hlen = err;
869         nexthdr = *prevhdr;
870         nexthdr_offset = prevhdr - skb_network_header(skb);
871
872         mtu = ip6_skb_dst_mtu(skb);
873
874         /* We must not fragment if the socket is set to force MTU discovery
875          * or if the skb was not generated by a local socket.
876          */
877         if (unlikely(!skb->ignore_df && skb->len > mtu))
878                 goto fail_toobig;
879
880         if (IP6CB(skb)->frag_max_size) {
881                 if (IP6CB(skb)->frag_max_size > mtu)
882                         goto fail_toobig;
883
884                 /* don't send fragments larger than what we received */
885                 mtu = IP6CB(skb)->frag_max_size;
886                 if (mtu < IPV6_MIN_MTU)
887                         mtu = IPV6_MIN_MTU;
888         }
889
890         if (np && np->frag_size < mtu) {
891                 if (np->frag_size)
892                         mtu = np->frag_size;
893         }
894         if (mtu < hlen + sizeof(struct frag_hdr) + 8)
895                 goto fail_toobig;
896         mtu -= hlen + sizeof(struct frag_hdr);
897
898         frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
899                                     &ipv6_hdr(skb)->saddr);
900
901         if (skb->ip_summed == CHECKSUM_PARTIAL &&
902             (err = skb_checksum_help(skb)))
903                 goto fail;
904
905         prevhdr = skb_network_header(skb) + nexthdr_offset;
906         hroom = LL_RESERVED_SPACE(rt->dst.dev);
907         if (skb_has_frag_list(skb)) {
908                 unsigned int first_len = skb_pagelen(skb);
909                 struct ip6_fraglist_iter iter;
910                 struct sk_buff *frag2;
911
912                 if (first_len - hlen > mtu ||
913                     ((first_len - hlen) & 7) ||
914                     skb_cloned(skb) ||
915                     skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
916                         goto slow_path;
917
918                 skb_walk_frags(skb, frag) {
919                         /* Correct geometry. */
920                         if (frag->len > mtu ||
921                             ((frag->len & 7) && frag->next) ||
922                             skb_headroom(frag) < (hlen + hroom + sizeof(struct frag_hdr)))
923                                 goto slow_path_clean;
924
925                         /* Partially cloned skb? */
926                         if (skb_shared(frag))
927                                 goto slow_path_clean;
928
929                         BUG_ON(frag->sk);
930                         if (skb->sk) {
931                                 frag->sk = skb->sk;
932                                 frag->destructor = sock_wfree;
933                         }
934                         skb->truesize -= frag->truesize;
935                 }
936
937                 err = ip6_fraglist_init(skb, hlen, prevhdr, nexthdr, frag_id,
938                                         &iter);
939                 if (err < 0)
940                         goto fail;
941
942                 /* We prevent @rt from being freed. */
943                 rcu_read_lock();
944
945                 for (;;) {
946                         /* Prepare the header of the next frame
947                          * before the previous one goes down. */
948                         if (iter.frag)
949                                 ip6_fraglist_prepare(skb, &iter);
950
951                         skb_set_delivery_time(skb, tstamp, mono_delivery_time);
952                         err = output(net, sk, skb);
953                         if (!err)
954                                 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
955                                               IPSTATS_MIB_FRAGCREATES);
956
957                         if (err || !iter.frag)
958                                 break;
959
960                         skb = ip6_fraglist_next(&iter);
961                 }
962
963                 kfree(iter.tmp_hdr);
964
965                 if (err == 0) {
966                         IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
967                                       IPSTATS_MIB_FRAGOKS);
968                         rcu_read_unlock();
969                         return 0;
970                 }
971
972                 kfree_skb_list(iter.frag);
973
974                 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
975                               IPSTATS_MIB_FRAGFAILS);
976                 rcu_read_unlock();
977                 return err;
978
979 slow_path_clean:
980                 skb_walk_frags(skb, frag2) {
981                         if (frag2 == frag)
982                                 break;
983                         frag2->sk = NULL;
984                         frag2->destructor = NULL;
985                         skb->truesize += frag2->truesize;
986                 }
987         }
988
989 slow_path:
990         /*
991          *      Fragment the datagram.
992          */
993
994         ip6_frag_init(skb, hlen, mtu, rt->dst.dev->needed_tailroom,
995                       LL_RESERVED_SPACE(rt->dst.dev), prevhdr, nexthdr, frag_id,
996                       &state);
997
998         /*
999          *      Keep copying data until we run out.
1000          */
1001
1002         while (state.left > 0) {
1003                 frag = ip6_frag_next(skb, &state);
1004                 if (IS_ERR(frag)) {
1005                         err = PTR_ERR(frag);
1006                         goto fail;
1007                 }
1008
1009                 /*
1010                  *      Put this fragment into the sending queue.
1011                  */
1012                 skb_set_delivery_time(frag, tstamp, mono_delivery_time);
1013                 err = output(net, sk, frag);
1014                 if (err)
1015                         goto fail;
1016
1017                 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
1018                               IPSTATS_MIB_FRAGCREATES);
1019         }
1020         IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
1021                       IPSTATS_MIB_FRAGOKS);
1022         consume_skb(skb);
1023         return err;
1024
1025 fail_toobig:
1026         if (skb->sk && dst_allfrag(skb_dst(skb)))
1027                 sk_gso_disable(skb->sk);
1028
1029         icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1030         err = -EMSGSIZE;
1031
1032 fail:
1033         IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
1034                       IPSTATS_MIB_FRAGFAILS);
1035         kfree_skb(skb);
1036         return err;
1037 }
1038
1039 static inline int ip6_rt_check(const struct rt6key *rt_key,
1040                                const struct in6_addr *fl_addr,
1041                                const struct in6_addr *addr_cache)
1042 {
1043         return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
1044                 (!addr_cache || !ipv6_addr_equal(fl_addr, addr_cache));
1045 }
1046
1047 static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
1048                                           struct dst_entry *dst,
1049                                           const struct flowi6 *fl6)
1050 {
1051         struct ipv6_pinfo *np = inet6_sk(sk);
1052         struct rt6_info *rt;
1053
1054         if (!dst)
1055                 goto out;
1056
1057         if (dst->ops->family != AF_INET6) {
1058                 dst_release(dst);
1059                 return NULL;
1060         }
1061
1062         rt = (struct rt6_info *)dst;
1063         /* Yes, checking route validity in the non-connected
1064          * case is not very simple. Take into account
1065          * that we do not support routing by source, TOS,
1066          * or MSG_DONTROUTE.            --ANK (980726)
1067          *
1068          * 1. ip6_rt_check(): If the route was a host route,
1069          *    check that the cached destination is current.
1070          *    If it is a network route, we may still
1071          *    check its validity using a saved pointer
1072          *    to the last used address: daddr_cache.
1073          *    We do not want to save the whole address now
1074          *    (because the main consumer of this service
1075          *    is TCP, which does not have this problem),
1076          *    so this last trick works only on connected
1077          *    sockets.
1078          * 2. The oif should also be the same.
1079          */
1080         if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
1081 #ifdef CONFIG_IPV6_SUBTREES
1082             ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
1083 #endif
1084            (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
1085                 dst_release(dst);
1086                 dst = NULL;
1087         }
1088
1089 out:
1090         return dst;
1091 }
1092
1093 static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
1094                                struct dst_entry **dst, struct flowi6 *fl6)
1095 {
1096 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1097         struct neighbour *n;
1098         struct rt6_info *rt;
1099 #endif
1100         int err;
1101         int flags = 0;
1102
1103         /* The correct way to handle this would be to do
1104          * ip6_route_get_saddr, and then ip6_route_output; however,
1105          * the route-specific preferred source forces the
1106          * ip6_route_output call _before_ ip6_route_get_saddr.
1107          *
1108          * In source-specific routing (no src=any default route),
1109          * ip6_route_output will fail given a src=any saddr, though,
1110          * which is why we try it again later.
1111          */
1112         if (ipv6_addr_any(&fl6->saddr)) {
1113                 struct fib6_info *from;
1114                 struct rt6_info *rt;
1115
1116                 *dst = ip6_route_output(net, sk, fl6);
1117                 rt = (*dst)->error ? NULL : (struct rt6_info *)*dst;
1118
1119                 rcu_read_lock();
1120                 from = rt ? rcu_dereference(rt->from) : NULL;
1121                 err = ip6_route_get_saddr(net, from, &fl6->daddr,
1122                                           sk ? inet6_sk(sk)->srcprefs : 0,
1123                                           &fl6->saddr);
1124                 rcu_read_unlock();
1125
1126                 if (err)
1127                         goto out_err_release;
1128
1129                 /* If we had an erroneous initial result, pretend it
1130                  * never existed and let the SA-enabled version take
1131                  * over.
1132                  */
1133                 if ((*dst)->error) {
1134                         dst_release(*dst);
1135                         *dst = NULL;
1136                 }
1137
1138                 if (fl6->flowi6_oif)
1139                         flags |= RT6_LOOKUP_F_IFACE;
1140         }
1141
1142         if (!*dst)
1143                 *dst = ip6_route_output_flags(net, sk, fl6, flags);
1144
1145         err = (*dst)->error;
1146         if (err)
1147                 goto out_err_release;
1148
1149 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1150         /*
1151          * Here, if the dst entry we've looked up
1152          * has a neighbour entry that is in the INCOMPLETE
1153          * state and the src address from the flow is
1154          * marked as OPTIMISTIC, we release the found
1155          * dst entry and replace it with the
1156          * dst entry of the nexthop router.
1157          */
1158         rt = (struct rt6_info *) *dst;
1159         rcu_read_lock();
1160         n = __ipv6_neigh_lookup_noref(rt->dst.dev,
1161                                       rt6_nexthop(rt, &fl6->daddr));
1162         err = n && !(READ_ONCE(n->nud_state) & NUD_VALID) ? -EINVAL : 0;
1163         rcu_read_unlock();
1164
1165         if (err) {
1166                 struct inet6_ifaddr *ifp;
1167                 struct flowi6 fl_gw6;
1168                 int redirect;
1169
1170                 ifp = ipv6_get_ifaddr(net, &fl6->saddr,
1171                                       (*dst)->dev, 1);
1172
1173                 redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
1174                 if (ifp)
1175                         in6_ifa_put(ifp);
1176
1177                 if (redirect) {
1178                         /*
1179                          * We need to get the dst entry for the
1180                          * default router instead
1181                          */
1182                         dst_release(*dst);
1183                         memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
1184                         memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
1185                         *dst = ip6_route_output(net, sk, &fl_gw6);
1186                         err = (*dst)->error;
1187                         if (err)
1188                                 goto out_err_release;
1189                 }
1190         }
1191 #endif
1192         if (ipv6_addr_v4mapped(&fl6->saddr) &&
1193             !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) {
1194                 err = -EAFNOSUPPORT;
1195                 goto out_err_release;
1196         }
1197
1198         return 0;
1199
1200 out_err_release:
1201         dst_release(*dst);
1202         *dst = NULL;
1203
1204         if (err == -ENETUNREACH)
1205                 IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
1206         return err;
1207 }
1208
1209 /**
1210  *      ip6_dst_lookup - perform route lookup on flow
1211  *      @net: Network namespace to perform lookup in
1212  *      @sk: socket which provides route info
1213  *      @dst: pointer to dst_entry * for result
1214  *      @fl6: flow to lookup
1215  *
1216  *      This function performs a route lookup on the given flow.
1217  *
1218  *      It returns zero on success, or a standard errno code on error.
1219  */
1220 int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
1221                    struct flowi6 *fl6)
1222 {
1223         *dst = NULL;
1224         return ip6_dst_lookup_tail(net, sk, dst, fl6);
1225 }
1226 EXPORT_SYMBOL_GPL(ip6_dst_lookup);
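
/* Illustrative sketch (editor's assumption): a caller with a populated
 * flowi6 would typically do the lookup and attach the result to its skb:
 *
 *	struct dst_entry *dst;
 *	int err = ip6_dst_lookup(net, sk, &dst, &fl6);
 *
 *	if (err)
 *		return err;
 *	skb_dst_set(skb, dst);
 */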
1227
1228 /**
1229  *      ip6_dst_lookup_flow - perform route lookup on flow with ipsec
1230  *      @net: Network namespace to perform lookup in
1231  *      @sk: socket which provides route info
1232  *      @fl6: flow to lookup
1233  *      @final_dst: final destination address for ipsec lookup
1234  *
1235  *      This function performs a route lookup on the given flow.
1236  *
1237  *      It returns a valid dst pointer on success, or a pointer encoded
1238  *      error code.
1239  */
1240 struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6,
1241                                       const struct in6_addr *final_dst)
1242 {
1243         struct dst_entry *dst = NULL;
1244         int err;
1245
1246         err = ip6_dst_lookup_tail(net, sk, &dst, fl6);
1247         if (err)
1248                 return ERR_PTR(err);
1249         if (final_dst)
1250                 fl6->daddr = *final_dst;
1251
1252         return xfrm_lookup_route(net, dst, flowi6_to_flowi(fl6), sk, 0);
1253 }
1254 EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
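
/* Illustrative sketch (editor's assumption): unlike ip6_dst_lookup(), this
 * variant returns the dst directly (possibly an xfrm bundle) and encodes
 * errors in the pointer:
 *
 *	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);
 */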
1255
1256 /**
1257  *      ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
1258  *      @sk: socket which provides the dst cache and route info
1259  *      @fl6: flow to lookup
1260  *      @final_dst: final destination address for ipsec lookup
1261  *      @connected: whether @sk is connected or not
1262  *
1263  *      This function performs a route lookup on the given flow with the
1264  *      possibility of using the cached route in the socket if it is valid.
1265  *      It will take the socket dst lock when operating on the dst cache.
1266  *      As a result, this function can only be used in process context.
1267  *
1268  *      In addition, for a connected socket, cache the dst in the socket
1269  *      if the current cache is not valid.
1270  *
1271  *      It returns a valid dst pointer on success, or a pointer encoded
1272  *      error code.
1273  */
1274 struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
1275                                          const struct in6_addr *final_dst,
1276                                          bool connected)
1277 {
1278         struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
1279
1280         dst = ip6_sk_dst_check(sk, dst, fl6);
1281         if (dst)
1282                 return dst;
1283
1284         dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_dst);
1285         if (connected && !IS_ERR(dst))
1286                 ip6_sk_dst_store_flow(sk, dst_clone(dst), fl6);
1287
1288         return dst;
1289 }
1290 EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
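
/* Illustrative sketch (editor's assumption): a connected datagram sender,
 * e.g. a UDPv6-style sendmsg path, would use the cached variant:
 *
 *	dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p, connected);
 *	if (IS_ERR(dst)) {
 *		err = PTR_ERR(dst);
 *		goto out;
 *	}
 */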
1291
1292 /**
1293  *      ip6_dst_lookup_tunnel - perform route lookup on tunnel
1294  *      @skb: Packet for which lookup is done
1295  *      @dev: Tunnel device
1296  *      @net: Network namespace of tunnel device
1297  *      @sock: Socket which provides route info
1298  *      @saddr: Memory to store the src ip address
1299  *      @info: Tunnel information
1300  *      @protocol: IP protocol
1301  *      @use_cache: Flag to enable cache usage
1302  *
1303  *      This function performs a route lookup on a tunnel. It returns a
1304  *      valid dst pointer and stores the src address to be used in the
1305  *      tunnel in @saddr on success, else a pointer-encoded error code.
1306  */
1307
1308 struct dst_entry *ip6_dst_lookup_tunnel(struct sk_buff *skb,
1309                                         struct net_device *dev,
1310                                         struct net *net,
1311                                         struct socket *sock,
1312                                         struct in6_addr *saddr,
1313                                         const struct ip_tunnel_info *info,
1314                                         u8 protocol,
1315                                         bool use_cache)
1316 {
1317         struct dst_entry *dst = NULL;
1318 #ifdef CONFIG_DST_CACHE
1319         struct dst_cache *dst_cache;
1320 #endif
1321         struct flowi6 fl6;
1322         __u8 prio;
1323
1324 #ifdef CONFIG_DST_CACHE
1325         dst_cache = (struct dst_cache *)&info->dst_cache;
1326         if (use_cache) {
1327                 dst = dst_cache_get_ip6(dst_cache, saddr);
1328                 if (dst)
1329                         return dst;
1330         }
1331 #endif
1332         memset(&fl6, 0, sizeof(fl6));
1333         fl6.flowi6_mark = skb->mark;
1334         fl6.flowi6_proto = protocol;
1335         fl6.daddr = info->key.u.ipv6.dst;
1336         fl6.saddr = info->key.u.ipv6.src;
1337         prio = info->key.tos;
1338         fl6.flowlabel = ip6_make_flowinfo(prio, info->key.label);
1339
1340         dst = ipv6_stub->ipv6_dst_lookup_flow(net, sock->sk, &fl6,
1341                                               NULL);
1342         if (IS_ERR(dst)) {
1343                 netdev_dbg(dev, "no route to %pI6\n", &fl6.daddr);
1344                 return ERR_PTR(-ENETUNREACH);
1345         }
1346         if (dst->dev == dev) { /* is this necessary? */
1347                 netdev_dbg(dev, "circular route to %pI6\n", &fl6.daddr);
1348                 dst_release(dst);
1349                 return ERR_PTR(-ELOOP);
1350         }
1351 #ifdef CONFIG_DST_CACHE
1352         if (use_cache)
1353                 dst_cache_set_ip6(dst_cache, dst, &fl6.saddr);
1354 #endif
1355         *saddr = fl6.saddr;
1356         return dst;
1357 }
1358 EXPORT_SYMBOL_GPL(ip6_dst_lookup_tunnel);
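
/* Illustrative sketch (editor's assumption): a metadata-based tunnel
 * driver's xmit path might resolve the route like so, with info taken
 * from the skb's tunnel metadata:
 *
 *	struct in6_addr saddr;
 *	struct dst_entry *dst;
 *
 *	dst = ip6_dst_lookup_tunnel(skb, dev, net, sock, &saddr, info,
 *				    IPPROTO_UDP, use_cache);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);
 */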
1359
1360 static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
1361                                                gfp_t gfp)
1362 {
1363         return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1364 }
1365
1366 static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
1367                                                 gfp_t gfp)
1368 {
1369         return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1370 }
1371
1372 static void ip6_append_data_mtu(unsigned int *mtu,
1373                                 int *maxfraglen,
1374                                 unsigned int fragheaderlen,
1375                                 struct sk_buff *skb,
1376                                 struct rt6_info *rt,
1377                                 unsigned int orig_mtu)
1378 {
1379         if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
1380                 if (!skb) {
1381                         /* first fragment, reserve header_len */
1382                         *mtu = orig_mtu - rt->dst.header_len;
1383
1384                 } else {
1385                         /*
1386                          * this fragment is not the first, so the header
1387                          * space is regarded as data space.
1388                          */
1389                         *mtu = orig_mtu;
1390                 }
1391                 *maxfraglen = ((*mtu - fragheaderlen) & ~7)
1392                               + fragheaderlen - sizeof(struct frag_hdr);
1393         }
1394 }
1395
1396 static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
1397                           struct inet6_cork *v6_cork, struct ipcm6_cookie *ipc6,
1398                           struct rt6_info *rt)
1399 {
1400         struct ipv6_pinfo *np = inet6_sk(sk);
1401         unsigned int mtu;
1402         struct ipv6_txoptions *nopt, *opt = ipc6->opt;
1403
1404         /* Callers pass dst together with a reference; set it first so
1405          * ip6_cork_release() can put it down even in case of an error.
1406          */
1407         cork->base.dst = &rt->dst;
1408
1409         /*
1410          * setup for corking
1411          */
1412         if (opt) {
1413                 if (WARN_ON(v6_cork->opt))
1414                         return -EINVAL;
1415
1416                 nopt = v6_cork->opt = kzalloc(sizeof(*opt), sk->sk_allocation);
1417                 if (unlikely(!nopt))
1418                         return -ENOBUFS;
1419
1420                 nopt->tot_len = sizeof(*opt);
1421                 nopt->opt_flen = opt->opt_flen;
1422                 nopt->opt_nflen = opt->opt_nflen;
1423
1424                 nopt->dst0opt = ip6_opt_dup(opt->dst0opt, sk->sk_allocation);
1425                 if (opt->dst0opt && !nopt->dst0opt)
1426                         return -ENOBUFS;
1427
1428                 nopt->dst1opt = ip6_opt_dup(opt->dst1opt, sk->sk_allocation);
1429                 if (opt->dst1opt && !nopt->dst1opt)
1430                         return -ENOBUFS;
1431
1432                 nopt->hopopt = ip6_opt_dup(opt->hopopt, sk->sk_allocation);
1433                 if (opt->hopopt && !nopt->hopopt)
1434                         return -ENOBUFS;
1435
1436                 nopt->srcrt = ip6_rthdr_dup(opt->srcrt, sk->sk_allocation);
1437                 if (opt->srcrt && !nopt->srcrt)
1438                         return -ENOBUFS;
1439
1440                 /* need source address above -- miyazawa */
1441         }
1442         v6_cork->hop_limit = ipc6->hlimit;
1443         v6_cork->tclass = ipc6->tclass;
1444         if (rt->dst.flags & DST_XFRM_TUNNEL)
1445                 mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1446                       READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst);
1447         else
1448                 mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1449                         READ_ONCE(rt->dst.dev->mtu) : dst_mtu(xfrm_dst_path(&rt->dst));
1450         if (np->frag_size < mtu) {
1451                 if (np->frag_size)
1452                         mtu = np->frag_size;
1453         }
1454         cork->base.fragsize = mtu;
1455         cork->base.gso_size = ipc6->gso_size;
1456         cork->base.tx_flags = 0;
1457         cork->base.mark = ipc6->sockc.mark;
1458         sock_tx_timestamp(sk, ipc6->sockc.tsflags, &cork->base.tx_flags);
1459
1460         if (dst_allfrag(xfrm_dst_path(&rt->dst)))
1461                 cork->base.flags |= IPCORK_ALLFRAG;
1462         cork->base.length = 0;
1463
1464         cork->base.transmit_time = ipc6->sockc.transmit_time;
1465
1466         return 0;
1467 }
1468
1469 static int __ip6_append_data(struct sock *sk,
1470                              struct sk_buff_head *queue,
1471                              struct inet_cork_full *cork_full,
1472                              struct inet6_cork *v6_cork,
1473                              struct page_frag *pfrag,
1474                              int getfrag(void *from, char *to, int offset,
1475                                          int len, int odd, struct sk_buff *skb),
1476                              void *from, size_t length, int transhdrlen,
1477                              unsigned int flags, struct ipcm6_cookie *ipc6)
1478 {
1479         struct sk_buff *skb, *skb_prev = NULL;
1480         struct inet_cork *cork = &cork_full->base;
1481         struct flowi6 *fl6 = &cork_full->fl.u.ip6;
1482         unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu, pmtu;
1483         struct ubuf_info *uarg = NULL;
1484         int exthdrlen = 0;
1485         int dst_exthdrlen = 0;
1486         int hh_len;
1487         int copy;
1488         int err;
1489         int offset = 0;
1490         bool zc = false;
1491         u32 tskey = 0;
1492         struct rt6_info *rt = (struct rt6_info *)cork->dst;
1493         struct ipv6_txoptions *opt = v6_cork->opt;
1494         int csummode = CHECKSUM_NONE;
1495         unsigned int maxnonfragsize, headersize;
1496         unsigned int wmem_alloc_delta = 0;
1497         bool paged, extra_uref = false;
1498
1499         skb = skb_peek_tail(queue);
1500         if (!skb) {
1501                 exthdrlen = opt ? opt->opt_flen : 0;
1502                 dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
1503         }
1504
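             /*
              * With UDP GSO (cork->gso_size set) appending is sized
              * against IP6_MAX_MTU to build one large paged skb; the
              * stack segments it into gso_size chunks at transmit time
              * instead of fragmenting it here.
              */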
1505         paged = !!cork->gso_size;
1506         mtu = cork->gso_size ? IP6_MAX_MTU : cork->fragsize;
1507         orig_mtu = mtu;
1508
1509         if (cork->tx_flags & SKBTX_ANY_TSTAMP &&
1510             READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID)
1511                 tskey = atomic_inc_return(&sk->sk_tskey) - 1;
1512
1513         hh_len = LL_RESERVED_SPACE(rt->dst.dev);
1514
1515         fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
1516                         (opt ? opt->opt_nflen : 0);
1517
1518         headersize = sizeof(struct ipv6hdr) +
1519                      (opt ? opt->opt_flen + opt->opt_nflen : 0) +
1520                      (dst_allfrag(&rt->dst) ?
1521                       sizeof(struct frag_hdr) : 0) +
1522                      rt->rt6i_nfheader_len;
1523
1524         if (mtu <= fragheaderlen ||
1525             ((mtu - fragheaderlen) & ~7) + fragheaderlen <= sizeof(struct frag_hdr))
1526                 goto emsgsize;
1527
1528         maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
1529                      sizeof(struct frag_hdr);
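             /*
              * e.g. mtu = 1500 with no extension headers: fragheaderlen
              * is 40, so maxfraglen = (1460 & ~7) + 40 - 8 = 1488; each
              * non-final fragment then carries 1448 bytes past the IPv6
              * header, a multiple of 8 as the fragment offset requires.
              */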
1530
1531         /* as per RFC 7112 section 5, the entire IPv6 Header Chain must fit
1532          * in the first fragment
1533          */
1534         if (headersize + transhdrlen > mtu)
1535                 goto emsgsize;
1536
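             /*
              * IPV6_DONTFRAG: rather than fragment an over-sized
              * datagram, report the path MTU to the application (queued
              * for IPV6_RECVPATHMTU via ipv6_local_rxpmtu()) and fail
              * the send with EMSGSIZE.
              */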
1537         if (cork->length + length > mtu - headersize && ipc6->dontfrag &&
1538             (sk->sk_protocol == IPPROTO_UDP ||
1539              sk->sk_protocol == IPPROTO_ICMPV6 ||
1540              sk->sk_protocol == IPPROTO_RAW)) {
1541                 ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
1542                                 sizeof(struct ipv6hdr));
1543                 goto emsgsize;
1544         }
1545
1546         if (ip6_sk_ignore_df(sk))
1547                 maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
1548         else
1549                 maxnonfragsize = mtu;
1550
1551         if (cork->length + length > maxnonfragsize - headersize) {
1552 emsgsize:
1553                 pmtu = max_t(int, mtu - headersize + sizeof(struct ipv6hdr), 0);
1554                 ipv6_local_error(sk, EMSGSIZE, fl6, pmtu);
1555                 return -EMSGSIZE;
1556         }
1557
1558         /* Use CHECKSUM_PARTIAL only when there are no extension headers
1559          * and we are not going to fragment.
1560          */
1561         if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
1562             headersize == sizeof(struct ipv6hdr) &&
1563             length <= mtu - headersize &&
1564             (!(flags & MSG_MORE) || cork->gso_size) &&
1565             rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
1566                 csummode = CHECKSUM_PARTIAL;
1567
1568         if ((flags & MSG_ZEROCOPY) && length) {
1569                 struct msghdr *msg = from;
1570
1571                 if (getfrag == ip_generic_getfrag && msg->msg_ubuf) {
1572                         if (skb_zcopy(skb) && msg->msg_ubuf != skb_zcopy(skb))
1573                                 return -EINVAL;
1574
1575                         /* Leave uarg NULL if we can't zerocopy; callers should
1576                          * be able to handle it.
1577                          */
1578                         if ((rt->dst.dev->features & NETIF_F_SG) &&
1579                             csummode == CHECKSUM_PARTIAL) {
1580                                 paged = true;
1581                                 zc = true;
1582                                 uarg = msg->msg_ubuf;
1583                         }
1584                 } else if (sock_flag(sk, SOCK_ZEROCOPY)) {
1585                         uarg = msg_zerocopy_realloc(sk, length, skb_zcopy(skb));
1586                         if (!uarg)
1587                                 return -ENOBUFS;
1588                         extra_uref = !skb_zcopy(skb);   /* only ref on new uarg */
1589                         if (rt->dst.dev->features & NETIF_F_SG &&
1590                             csummode == CHECKSUM_PARTIAL) {
1591                                 paged = true;
1592                                 zc = true;
1593                         } else {
1594                                 uarg_to_msgzc(uarg)->zerocopy = 0;
1595                                 skb_zcopy_set(skb, uarg, &extra_uref);
1596                         }
1597                 }
1598         }
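             /*
              * Zerocopy fallbacks when the device lacks SG + checksum
              * offload: with a caller-supplied msg_ubuf, uarg stays NULL
              * and the data is simply copied; with SOCK_ZEROCOPY, uarg
              * stays attached so the completion notification still
              * fires, but it is marked as copied rather than zerocopied.
              */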
1599
1600         /*
1601          * Let's try using as much space as possible.
1602          * Use MTU if total length of the message fits into the MTU.
1603          * Otherwise, we need to reserve the fragment header and
1604          * fragment alignment (= 8-15 octets, in total).
1605          *
1606          * Note that we may need to "move" the data from the tail
1607          * of the buffer to the new fragment when we split
1608          * the message.
1609          *
1610          * FIXME: It may be fragmented into multiple chunks
1611          *        at once if non-fragmentable extension headers
1612          *        are too large.
1613          * --yoshfuji
1614          */
1615
1616         cork->length += length;
1617         if (!skb)
1618                 goto alloc_new_skb;
1619
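             /*
              * Main append loop: top up the tail skb first; once it is
              * full (copy <= 0), allocate a new fragment-sized skb,
              * pulling up to fraggap bytes back from the previous skb so
              * that every fragment except the last stays 8-byte aligned.
              */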
1620         while (length > 0) {
1621                 /* Check if the remaining data fits into current packet. */
1622                 copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
1623                 if (copy < length)
1624                         copy = maxfraglen - skb->len;
1625
1626                 if (copy <= 0) {
1627                         char *data;
1628                         unsigned int datalen;
1629                         unsigned int fraglen;
1630                         unsigned int fraggap;
1631                         unsigned int alloclen, alloc_extra;
1632                         unsigned int pagedlen;
1633 alloc_new_skb:
1634                         /* There's no room in the current skb */
1635                         if (skb)
1636                                 fraggap = skb->len - maxfraglen;
1637                         else
1638                                 fraggap = 0;
1639                         /* update mtu and maxfraglen if necessary */
1640                         if (!skb || !skb_prev)
1641                                 ip6_append_data_mtu(&mtu, &maxfraglen,
1642                                                     fragheaderlen, skb, rt,
1643                                                     orig_mtu);
1644
1645                         skb_prev = skb;
1646
1647                         /*
1648                          * If remaining data exceeds the MTU,
1649                          * we know we need more fragment(s).
1650                          */
1651                         datalen = length + fraggap;
1652
1653                         if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
1654                                 datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
1655                         fraglen = datalen + fragheaderlen;
1656                         pagedlen = 0;
1657
1658                         alloc_extra = hh_len;
1659                         alloc_extra += dst_exthdrlen;
1660                         alloc_extra += rt->dst.trailer_len;
1661
1662                         /* We just reserve space for the fragment header.
1663                          * Note: this may be an overallocation if the message
1664                          * (without MSG_MORE) fits into the MTU.
1665                          */
1666                         alloc_extra += sizeof(struct frag_hdr);
1667
1668                         if ((flags & MSG_MORE) &&
1669                             !(rt->dst.dev->features&NETIF_F_SG))
1670                                 alloclen = mtu;
1671                         else if (!paged &&
1672                                  (fraglen + alloc_extra < SKB_MAX_ALLOC ||
1673                                   !(rt->dst.dev->features & NETIF_F_SG)))
1674                                 alloclen = fraglen;
1675                         else {
1676                                 alloclen = fragheaderlen + transhdrlen;
1677                                 pagedlen = datalen - transhdrlen;
1678                         }
1679                         alloclen += alloc_extra;
1680
1681                         if (datalen != length + fraggap) {
1682                                 /*
1683                                  * this is not the last fragment; the trailer
1684                                  * space is regarded as data space.
1685                                  */
1686                                 datalen += rt->dst.trailer_len;
1687                         }
1688
1689                         fraglen = datalen + fragheaderlen;
1690
1691                         copy = datalen - transhdrlen - fraggap - pagedlen;
1692                         if (copy < 0) {
1693                                 err = -EINVAL;
1694                                 goto error;
1695                         }
1696                         if (transhdrlen) {
1697                                 skb = sock_alloc_send_skb(sk, alloclen,
1698                                                 (flags & MSG_DONTWAIT), &err);
1699                         } else {
1700                                 skb = NULL;
1701                                 if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
1702                                     2 * sk->sk_sndbuf)
1703                                         skb = alloc_skb(alloclen,
1704                                                         sk->sk_allocation);
1705                                 if (unlikely(!skb))
1706                                         err = -ENOBUFS;
1707                         }
1708                         if (!skb)
1709                                 goto error;
1710                         /*
1711                          *      Fill in the control structures
1712                          */
1713                         skb->protocol = htons(ETH_P_IPV6);
1714                         skb->ip_summed = csummode;
1715                         skb->csum = 0;
1716                         /* reserve for fragmentation and IPsec header */
1717                         skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
1718                                     dst_exthdrlen);
1719
1720                         /*
1721                          *      Find where to start putting bytes
1722                          */
1723                         data = skb_put(skb, fraglen - pagedlen);
1724                         skb_set_network_header(skb, exthdrlen);
1725                         data += fragheaderlen;
1726                         skb->transport_header = (skb->network_header +
1727                                                  fragheaderlen);
1728                         if (fraggap) {
1729                                 skb->csum = skb_copy_and_csum_bits(
1730                                         skb_prev, maxfraglen,
1731                                         data + transhdrlen, fraggap);
1732                                 skb_prev->csum = csum_sub(skb_prev->csum,
1733                                                           skb->csum);
1734                                 data += fraggap;
1735                                 pskb_trim_unique(skb_prev, maxfraglen);
1736                         }
1737                         if (copy > 0 &&
1738                             getfrag(from, data + transhdrlen, offset,
1739                                     copy, fraggap, skb) < 0) {
1740                                 err = -EFAULT;
1741                                 kfree_skb(skb);
1742                                 goto error;
1743                         }
1744
1745                         offset += copy;
1746                         length -= copy + transhdrlen;
1747                         transhdrlen = 0;
1748                         exthdrlen = 0;
1749                         dst_exthdrlen = 0;
1750
1751                         /* Only the initial fragment is timestamped */
1752                         skb_shinfo(skb)->tx_flags = cork->tx_flags;
1753                         cork->tx_flags = 0;
1754                         skb_shinfo(skb)->tskey = tskey;
1755                         tskey = 0;
1756                         skb_zcopy_set(skb, uarg, &extra_uref);
1757
1758                         if ((flags & MSG_CONFIRM) && !skb_prev)
1759                                 skb_set_dst_pending_confirm(skb, 1);
1760
1761                         /*
1762                          * Put the packet on the pending queue
1763                          */
1764                         if (!skb->destructor) {
1765                                 skb->destructor = sock_wfree;
1766                                 skb->sk = sk;
1767                                 wmem_alloc_delta += skb->truesize;
1768                         }
1769                         __skb_queue_tail(queue, skb);
1770                         continue;
1771                 }
1772
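                     /*
                      * There is room in the current skb, so append by one
                      * of three paths: copying into linear tailroom (no
                      * SG), coalescing into the socket's page frag, or
                      * mapping user pages directly as zerocopy frags.
                      */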
1773                 if (copy > length)
1774                         copy = length;
1775
1776                 if (!(rt->dst.dev->features&NETIF_F_SG) &&
1777                     skb_tailroom(skb) >= copy) {
1778                         unsigned int off;
1779
1780                         off = skb->len;
1781                         if (getfrag(from, skb_put(skb, copy),
1782                                                 offset, copy, off, skb) < 0) {
1783                                 __skb_trim(skb, off);
1784                                 err = -EFAULT;
1785                                 goto error;
1786                         }
1787                 } else if (!zc) {
1788                         int i = skb_shinfo(skb)->nr_frags;
1789
1790                         err = -ENOMEM;
1791                         if (!sk_page_frag_refill(sk, pfrag))
1792                                 goto error;
1793
1794                         skb_zcopy_downgrade_managed(skb);
1795                         if (!skb_can_coalesce(skb, i, pfrag->page,
1796                                               pfrag->offset)) {
1797                                 err = -EMSGSIZE;
1798                                 if (i == MAX_SKB_FRAGS)
1799                                         goto error;
1800
1801                                 __skb_fill_page_desc(skb, i, pfrag->page,
1802                                                      pfrag->offset, 0);
1803                                 skb_shinfo(skb)->nr_frags = ++i;
1804                                 get_page(pfrag->page);
1805                         }
1806                         copy = min_t(int, copy, pfrag->size - pfrag->offset);
1807                         if (getfrag(from,
1808                                     page_address(pfrag->page) + pfrag->offset,
1809                                     offset, copy, skb->len, skb) < 0)
1810                                 goto error_efault;
1811
1812                         pfrag->offset += copy;
1813                         skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1814                         skb->len += copy;
1815                         skb->data_len += copy;
1816                         skb->truesize += copy;
1817                         wmem_alloc_delta += copy;
1818                 } else {
1819                         err = skb_zerocopy_iter_dgram(skb, from, copy);
1820                         if (err < 0)
1821                                 goto error;
1822                 }
1823                 offset += copy;
1824                 length -= copy;
1825         }
1826
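             /*
              * Charge all of the truesize accumulated above to
              * sk_wmem_alloc in one atomic operation rather than once
              * per skb.
              */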
1827         if (wmem_alloc_delta)
1828                 refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
1829         return 0;
1830
1831 error_efault:
1832         err = -EFAULT;
1833 error:
1834         net_zcopy_put_abort(uarg, extra_uref);
1835         cork->length -= length;
1836         IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
1837         refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
1838         return err;
1839 }
1840
1841 int ip6_append_data(struct sock *sk,
1842                     int getfrag(void *from, char *to, int offset, int len,
1843                                 int odd, struct sk_buff *skb),
1844                     void *from, size_t length, int transhdrlen,
1845                     struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
1846                     struct rt6_info *rt, unsigned int flags)
1847 {
1848         struct inet_sock *inet = inet_sk(sk);
1849         struct ipv6_pinfo *np = inet6_sk(sk);
1850         int exthdrlen;
1851         int err;
1852
1853         if (flags&MSG_PROBE)
1854                 return 0;
1855         if (skb_queue_empty(&sk->sk_write_queue)) {
1856                 /*
1857                  * setup for corking
1858                  */
1859                 dst_hold(&rt->dst);
1860                 err = ip6_setup_cork(sk, &inet->cork, &np->cork,
1861                                      ipc6, rt);
1862                 if (err)
1863                         return err;
1864
1865                 inet->cork.fl.u.ip6 = *fl6;
1866                 exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0);
1867                 length += exthdrlen;
1868                 transhdrlen += exthdrlen;
1869         } else {
1870                 transhdrlen = 0;
1871         }
1872
1873         return __ip6_append_data(sk, &sk->sk_write_queue, &inet->cork,
1874                                  &np->cork, sk_page_frag(sk), getfrag,
1875                                  from, length, transhdrlen, flags, ipc6);
1876 }
1877 EXPORT_SYMBOL_GPL(ip6_append_data);
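
/*
 * Sketch of the corked-send pattern built on ip6_append_data() (cf.
 * rawv6_sendmsg(); socket locking and flowi6/route setup elided):
 *
 *        err = ip6_append_data(sk, getfrag, msg, len, transhdrlen,
 *                              &ipc6, &fl6, rt, msg->msg_flags);
 *        if (err)
 *                ip6_flush_pending_frames(sk);
 *        else if (!(msg->msg_flags & MSG_MORE))
 *                err = ip6_push_pending_frames(sk);
 */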
1878
1879 static void ip6_cork_steal_dst(struct sk_buff *skb, struct inet_cork_full *cork)
1880 {
1881         struct dst_entry *dst = cork->base.dst;
1882
1883         cork->base.dst = NULL;
1884         cork->base.flags &= ~IPCORK_ALLFRAG;
1885         skb_dst_set(skb, dst);
1886 }
1887
1888 static void ip6_cork_release(struct inet_cork_full *cork,
1889                              struct inet6_cork *v6_cork)
1890 {
1891         if (v6_cork->opt) {
1892                 struct ipv6_txoptions *opt = v6_cork->opt;
1893
1894                 kfree(opt->dst0opt);
1895                 kfree(opt->dst1opt);
1896                 kfree(opt->hopopt);
1897                 kfree(opt->srcrt);
1898                 kfree(opt);
1899                 v6_cork->opt = NULL;
1900         }
1901
1902         if (cork->base.dst) {
1903                 dst_release(cork->base.dst);
1904                 cork->base.dst = NULL;
1905                 cork->base.flags &= ~IPCORK_ALLFRAG;
1906         }
1907 }
1908
1909 struct sk_buff *__ip6_make_skb(struct sock *sk,
1910                                struct sk_buff_head *queue,
1911                                struct inet_cork_full *cork,
1912                                struct inet6_cork *v6_cork)
1913 {
1914         struct sk_buff *skb, *tmp_skb;
1915         struct sk_buff **tail_skb;
1916         struct in6_addr *final_dst;
1917         struct ipv6_pinfo *np = inet6_sk(sk);
1918         struct net *net = sock_net(sk);
1919         struct ipv6hdr *hdr;
1920         struct ipv6_txoptions *opt = v6_cork->opt;
1921         struct rt6_info *rt = (struct rt6_info *)cork->base.dst;
1922         struct flowi6 *fl6 = &cork->fl.u.ip6;
1923         unsigned char proto = fl6->flowi6_proto;
1924
1925         skb = __skb_dequeue(queue);
1926         if (!skb)
1927                 goto out;
1928         tail_skb = &(skb_shinfo(skb)->frag_list);
1929
1930         /* move skb->data up to the IP header, past the ext header space */
1931         if (skb->data < skb_network_header(skb))
1932                 __skb_pull(skb, skb_network_offset(skb));
1933         while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
1934                 __skb_pull(tmp_skb, skb_network_header_len(skb));
1935                 *tail_skb = tmp_skb;
1936                 tail_skb = &(tmp_skb->next);
1937                 skb->len += tmp_skb->len;
1938                 skb->data_len += tmp_skb->len;
1939                 skb->truesize += tmp_skb->truesize;
1940                 tmp_skb->destructor = NULL;
1941                 tmp_skb->sk = NULL;
1942         }
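             /*
              * The dequeued skbs now hang off the head skb's frag_list,
              * forming one large packet; if it exceeds the MTU,
              * ip6_fragment() will later split it back along these
              * precomputed boundaries.
              */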
1943
1944         /* Allow local fragmentation. */
1945         skb->ignore_df = ip6_sk_ignore_df(sk);
1946         __skb_pull(skb, skb_network_header_len(skb));
1947
1948         final_dst = &fl6->daddr;
1949         if (opt && opt->opt_flen)
1950                 ipv6_push_frag_opts(skb, opt, &proto);
1951         if (opt && opt->opt_nflen)
1952                 ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst, &fl6->saddr);
1953
1954         skb_push(skb, sizeof(struct ipv6hdr));
1955         skb_reset_network_header(skb);
1956         hdr = ipv6_hdr(skb);
1957
1958         ip6_flow_hdr(hdr, v6_cork->tclass,
1959                      ip6_make_flowlabel(net, skb, fl6->flowlabel,
1960                                         ip6_autoflowlabel(net, np), fl6));
1961         hdr->hop_limit = v6_cork->hop_limit;
1962         hdr->nexthdr = proto;
1963         hdr->saddr = fl6->saddr;
1964         hdr->daddr = *final_dst;
1965
1966         skb->priority = sk->sk_priority;
1967         skb->mark = cork->base.mark;
1968         skb->tstamp = cork->base.transmit_time;
1969
1970         ip6_cork_steal_dst(skb, cork);
1971         IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
1972         if (proto == IPPROTO_ICMPV6) {
1973                 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
1974                 u8 icmp6_type;
1975
1976                 if (sk->sk_socket->type == SOCK_RAW && !inet_sk(sk)->hdrincl)
1977                         icmp6_type = fl6->fl6_icmp_type;
1978                 else
1979                         icmp6_type = icmp6_hdr(skb)->icmp6_type;
1980                 ICMP6MSGOUT_INC_STATS(net, idev, icmp6_type);
1981                 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1982         }
1983
1984         ip6_cork_release(cork, v6_cork);
1985 out:
1986         return skb;
1987 }
1988
1989 int ip6_send_skb(struct sk_buff *skb)
1990 {
1991         struct net *net = sock_net(skb->sk);
1992         struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
1993         int err;
1994
1995         err = ip6_local_out(net, skb->sk, skb);
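             /* ip6_local_out() may return positive NET_XMIT_* codes;
              * net_xmit_errno() maps NET_XMIT_CN (congestion notified)
              * to success and anything else to -ENOBUFS.
              */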
1996         if (err) {
1997                 if (err > 0)
1998                         err = net_xmit_errno(err);
1999                 if (err)
2000                         IP6_INC_STATS(net, rt->rt6i_idev,
2001                                       IPSTATS_MIB_OUTDISCARDS);
2002         }
2003
2004         return err;
2005 }
2006
2007 int ip6_push_pending_frames(struct sock *sk)
2008 {
2009         struct sk_buff *skb;
2010
2011         skb = ip6_finish_skb(sk);
2012         if (!skb)
2013                 return 0;
2014
2015         return ip6_send_skb(skb);
2016 }
2017 EXPORT_SYMBOL_GPL(ip6_push_pending_frames);
2018
2019 static void __ip6_flush_pending_frames(struct sock *sk,
2020                                        struct sk_buff_head *queue,
2021                                        struct inet_cork_full *cork,
2022                                        struct inet6_cork *v6_cork)
2023 {
2024         struct sk_buff *skb;
2025
2026         while ((skb = __skb_dequeue_tail(queue)) != NULL) {
2027                 if (skb_dst(skb))
2028                         IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
2029                                       IPSTATS_MIB_OUTDISCARDS);
2030                 kfree_skb(skb);
2031         }
2032
2033         ip6_cork_release(cork, v6_cork);
2034 }
2035
2036 void ip6_flush_pending_frames(struct sock *sk)
2037 {
2038         __ip6_flush_pending_frames(sk, &sk->sk_write_queue,
2039                                    &inet_sk(sk)->cork, &inet6_sk(sk)->cork);
2040 }
2041 EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);
2042
2043 struct sk_buff *ip6_make_skb(struct sock *sk,
2044                              int getfrag(void *from, char *to, int offset,
2045                                          int len, int odd, struct sk_buff *skb),
2046                              void *from, size_t length, int transhdrlen,
2047                              struct ipcm6_cookie *ipc6, struct rt6_info *rt,
2048                              unsigned int flags, struct inet_cork_full *cork)
2049 {
2050         struct inet6_cork v6_cork;
2051         struct sk_buff_head queue;
2052         int exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0);
2053         int err;
2054
2055         if (flags & MSG_PROBE) {
2056                 dst_release(&rt->dst);
2057                 return NULL;
2058         }
2059
2060         __skb_queue_head_init(&queue);
2061
2062         cork->base.flags = 0;
2063         cork->base.addr = 0;
2064         cork->base.opt = NULL;
2065         v6_cork.opt = NULL;
2066         err = ip6_setup_cork(sk, cork, &v6_cork, ipc6, rt);
2067         if (err) {
2068                 ip6_cork_release(cork, &v6_cork);
2069                 return ERR_PTR(err);
2070         }
2071         if (ipc6->dontfrag < 0)
2072                 ipc6->dontfrag = inet6_sk(sk)->dontfrag;
2073
2074         err = __ip6_append_data(sk, &queue, cork, &v6_cork,
2075                                 &current->task_frag, getfrag, from,
2076                                 length + exthdrlen, transhdrlen + exthdrlen,
2077                                 flags, ipc6);
2078         if (err) {
2079                 __ip6_flush_pending_frames(sk, &queue, cork, &v6_cork);
2080                 return ERR_PTR(err);
2081         }
2082
2083         return __ip6_make_skb(sk, &queue, cork, &v6_cork);
2084 }
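
/*
 * Unlike the corked path above, ip6_make_skb() assembles the whole
 * datagram on a private queue in a single call, leaving no state on the
 * socket between calls; UDPv6 uses it for lockless sends of un-corked
 * datagrams and hands the result to ip6_send_skb() (via
 * udp_v6_send_skb()).
 */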