// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Linux INET6 implementation
 *	FIB front-end.
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 */

/*	Changes:
 *
 *	YOSHIFUJI Hideaki @USAGI
 *		reworked default router selection.
 *		- respect outgoing interface
 *		- select from (probably) reachable routers (i.e.
 *		routers in REACHABLE, STALE, DELAY or PROBE states).
 *		- always select the same router if it is (probably)
 *		reachable.  otherwise, round-robin the list.
 *	Ville Nuorvala
 *		Fixed routing subtrees.
 */

#define pr_fmt(fmt) "IPv6: " fmt
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/times.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/mroute6.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <linux/siphash.h>
#include <net/net_namespace.h>
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/tcp.h>
#include <linux/rtnetlink.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <net/rtnh.h>
#include <net/lwtunnel.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/ip.h>
#include <linux/uaccess.h>
#include <linux/btf_ids.h>

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
static int ip6_rt_type_to_error(u8 fib6_type);

#define CREATE_TRACE_POINTS
#include <trace/events/fib6.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(fib6_table_lookup);
#undef CREATE_TRACE_POINTS
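/* Note: defining CREATE_TRACE_POINTS before including the trace header
 * instantiates the fib6 tracepoints in this translation unit, and the
 * table-lookup tracepoint is exported so that modular callers of
 * fib6_table_lookup() can fire it as well.
 */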
enum rt6_nud_state {
        RT6_NUD_FAIL_HARD = -3,
        RT6_NUD_FAIL_PROBE = -2,
        RT6_NUD_FAIL_DO_RR = -1,
        RT6_NUD_SUCCEED = 1
};

INDIRECT_CALLABLE_SCOPE
struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ip6_default_advmss(const struct dst_entry *dst);
INDIRECT_CALLABLE_SCOPE
unsigned int ip6_mtu(const struct dst_entry *dst);
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void ip6_dst_destroy(struct dst_entry *);
static void ip6_dst_ifdown(struct dst_entry *,
                           struct net_device *dev, int how);
static void ip6_dst_gc(struct dst_ops *ops);

static int ip6_pkt_discard(struct sk_buff *skb);
static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static int ip6_pkt_prohibit(struct sk_buff *skb);
static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static void ip6_link_failure(struct sk_buff *skb);
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
                               struct sk_buff *skb, u32 mtu,
                               bool confirm_neigh);
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
                            struct sk_buff *skb);
static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
                           int strict);
static size_t rt6_nlmsg_size(struct fib6_info *f6i);
static int rt6_fill_node(struct net *net, struct sk_buff *skb,
                         struct fib6_info *rt, struct dst_entry *dst,
                         struct in6_addr *dest, struct in6_addr *src,
                         int iif, int type, u32 portid, u32 seq,
                         unsigned int flags);
static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
                                           const struct in6_addr *daddr,
                                           const struct in6_addr *saddr);
#ifdef CONFIG_IPV6_ROUTE_INFO
static struct fib6_info *rt6_add_route_info(struct net *net,
                                            const struct in6_addr *prefix, int prefixlen,
                                            const struct in6_addr *gwaddr,
                                            struct net_device *dev,
                                            unsigned int pref);
static struct fib6_info *rt6_get_route_info(struct net *net,
                                            const struct in6_addr *prefix, int prefixlen,
                                            const struct in6_addr *gwaddr,
                                            struct net_device *dev);
#endif
struct uncached_list {
        spinlock_t              lock;
        struct list_head        head;
        struct list_head        quarantine;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
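/* The uncached list holds rt6_info entries that live outside the fib
 * tree (e.g. RTF_CACHE clones created for FLOWI_FLAG_KNOWN_NH).  When a
 * device goes away, rt6_uncached_list_flush_dev() rehomes such entries
 * onto blackhole_netdev and parks them on the per-cpu quarantine list.
 */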
void rt6_uncached_list_add(struct rt6_info *rt)
{
        struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);

        rt->rt6i_uncached_list = ul;

        spin_lock_bh(&ul->lock);
        list_add_tail(&rt->rt6i_uncached, &ul->head);
        spin_unlock_bh(&ul->lock);
}

void rt6_uncached_list_del(struct rt6_info *rt)
{
        if (!list_empty(&rt->rt6i_uncached)) {
                struct uncached_list *ul = rt->rt6i_uncached_list;

                spin_lock_bh(&ul->lock);
                list_del_init(&rt->rt6i_uncached);
                spin_unlock_bh(&ul->lock);
        }
}
static void rt6_uncached_list_flush_dev(struct net_device *dev)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
                struct rt6_info *rt, *safe;

                if (list_empty(&ul->head))
                        continue;

                spin_lock_bh(&ul->lock);
                list_for_each_entry_safe(rt, safe, &ul->head, rt6i_uncached) {
                        struct inet6_dev *rt_idev = rt->rt6i_idev;
                        struct net_device *rt_dev = rt->dst.dev;
                        bool handled = false;

                        if (rt_idev->dev == dev) {
                                rt->rt6i_idev = in6_dev_get(blackhole_netdev);
                                in6_dev_put(rt_idev);
                                handled = true;
                        }

                        if (rt_dev == dev) {
                                rt->dst.dev = blackhole_netdev;
                                netdev_ref_replace(rt_dev, blackhole_netdev,
                                                   &rt->dst.dev_tracker,
                                                   GFP_ATOMIC);
                                handled = true;
                        }
                        if (handled)
                                list_move(&rt->rt6i_uncached,
                                          &ul->quarantine);
                }
                spin_unlock_bh(&ul->lock);
        }
}
static inline const void *choose_neigh_daddr(const struct in6_addr *p,
                                             struct sk_buff *skb,
                                             const void *daddr)
{
        if (!ipv6_addr_any(p))
                return (const void *) p;
        else if (skb)
                return &ipv6_hdr(skb)->daddr;
        return daddr;
}

struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
                                   struct net_device *dev,
                                   struct sk_buff *skb,
                                   const void *daddr)
{
        struct neighbour *n;

        daddr = choose_neigh_daddr(gw, skb, daddr);
        n = __ipv6_neigh_lookup(dev, daddr);
        if (n)
                return n;

        n = neigh_create(&nd_tbl, daddr, dev);
        return IS_ERR(n) ? NULL : n;
}

static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
                                              struct sk_buff *skb,
                                              const void *daddr)
{
        const struct rt6_info *rt = container_of(dst, struct rt6_info, dst);

        return ip6_neigh_lookup(rt6_nexthop(rt, &in6addr_any),
                                dst->dev, skb, daddr);
}

static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
        struct net_device *dev = dst->dev;
        struct rt6_info *rt = (struct rt6_info *)dst;

        daddr = choose_neigh_daddr(rt6_nexthop(rt, &in6addr_any), NULL, daddr);
        if (!daddr)
                return;
        if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
                return;
        if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
                return;
        __ipv6_confirm_neigh(dev, daddr);
}
static struct dst_ops ip6_dst_ops_template = {
        .family                 =       AF_INET6,
        .gc                     =       ip6_dst_gc,
        .gc_thresh              =       1024,
        .check                  =       ip6_dst_check,
        .default_advmss         =       ip6_default_advmss,
        .mtu                    =       ip6_mtu,
        .cow_metrics            =       dst_cow_metrics_generic,
        .destroy                =       ip6_dst_destroy,
        .ifdown                 =       ip6_dst_ifdown,
        .negative_advice        =       ip6_negative_advice,
        .link_failure           =       ip6_link_failure,
        .update_pmtu            =       ip6_rt_update_pmtu,
        .redirect               =       rt6_do_redirect,
        .local_out              =       __ip6_local_out,
        .neigh_lookup           =       ip6_dst_neigh_lookup,
        .confirm_neigh          =       ip6_confirm_neigh,
};

static struct dst_ops ip6_dst_blackhole_ops = {
        .family                 = AF_INET6,
        .default_advmss         = ip6_default_advmss,
        .neigh_lookup           = ip6_dst_neigh_lookup,
        .check                  = ip6_dst_check,
        .destroy                = ip6_dst_destroy,
        .cow_metrics            = dst_cow_metrics_generic,
        .update_pmtu            = dst_blackhole_update_pmtu,
        .redirect               = dst_blackhole_redirect,
        .mtu                    = dst_blackhole_mtu,
};

static const u32 ip6_template_metrics[RTAX_MAX] = {
        [RTAX_HOPLIMIT - 1] = 0,
};

static const struct fib6_info fib6_null_entry_template = {
        .fib6_flags     = (RTF_REJECT | RTF_NONEXTHOP),
        .fib6_protocol  = RTPROT_KERNEL,
        .fib6_metric    = ~(u32)0,
        .fib6_ref       = REFCOUNT_INIT(1),
        .fib6_type      = RTN_UNREACHABLE,
        .fib6_metrics   = (struct dst_metrics *)&dst_default_metrics,
};

static const struct rt6_info ip6_null_entry_template = {
        .dst = {
                .__refcnt       = ATOMIC_INIT(1),
                .__use          = 1,
                .obsolete       = DST_OBSOLETE_FORCE_CHK,
                .error          = -ENETUNREACH,
                .input          = ip6_pkt_discard,
                .output         = ip6_pkt_discard_out,
        },
        .rt6i_flags     = (RTF_REJECT | RTF_NONEXTHOP),
};

#ifdef CONFIG_IPV6_MULTIPLE_TABLES

static const struct rt6_info ip6_prohibit_entry_template = {
        .dst = {
                .__refcnt       = ATOMIC_INIT(1),
                .__use          = 1,
                .obsolete       = DST_OBSOLETE_FORCE_CHK,
                .error          = -EACCES,
                .input          = ip6_pkt_prohibit,
                .output         = ip6_pkt_prohibit_out,
        },
        .rt6i_flags     = (RTF_REJECT | RTF_NONEXTHOP),
};

static const struct rt6_info ip6_blk_hole_entry_template = {
        .dst = {
                .__refcnt       = ATOMIC_INIT(1),
                .__use          = 1,
                .obsolete       = DST_OBSOLETE_FORCE_CHK,
                .error          = -EINVAL,
                .input          = dst_discard,
                .output         = dst_discard_out,
        },
        .rt6i_flags     = (RTF_REJECT | RTF_NONEXTHOP),
};

#endif
static void rt6_info_init(struct rt6_info *rt)
{
        memset_after(rt, 0, dst);
        INIT_LIST_HEAD(&rt->rt6i_uncached);
}

/* allocate dst with ip6_dst_ops */
struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
                               int flags)
{
        struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
                                        1, DST_OBSOLETE_FORCE_CHK, flags);

        if (rt) {
                rt6_info_init(rt);
                atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
        }

        return rt;
}
EXPORT_SYMBOL(ip6_dst_alloc);

static void ip6_dst_destroy(struct dst_entry *dst)
{
        struct rt6_info *rt = (struct rt6_info *)dst;
        struct fib6_info *from;
        struct inet6_dev *idev;

        ip_dst_metrics_put(dst);
        rt6_uncached_list_del(rt);

        idev = rt->rt6i_idev;
        if (idev) {
                rt->rt6i_idev = NULL;
                in6_dev_put(idev);
        }

        from = xchg((__force struct fib6_info **)&rt->from, NULL);
        fib6_info_release(from);
}
static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
                           int how)
{
        struct rt6_info *rt = (struct rt6_info *)dst;
        struct inet6_dev *idev = rt->rt6i_idev;

        if (idev && idev->dev != blackhole_netdev) {
                struct inet6_dev *blackhole_idev = in6_dev_get(blackhole_netdev);

                if (blackhole_idev) {
                        rt->rt6i_idev = blackhole_idev;
                        in6_dev_put(idev);
                }
        }
}

static bool __rt6_check_expired(const struct rt6_info *rt)
{
        if (rt->rt6i_flags & RTF_EXPIRES)
                return time_after(jiffies, rt->dst.expires);
        else
                return false;
}

static bool rt6_check_expired(const struct rt6_info *rt)
{
        struct fib6_info *from;

        from = rcu_dereference(rt->from);

        if (rt->rt6i_flags & RTF_EXPIRES) {
                if (time_after(jiffies, rt->dst.expires))
                        return true;
        } else if (from) {
                return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
                        fib6_check_expired(from);
        }
        return false;
}
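/* Multipath route selection is hash-threshold based: fl6->mp_hash is
 * compared against each sibling nexthop's fib_nh_upper_bound and the
 * first nexthop whose bound covers the hash (and that still scores as
 * usable) wins, so a given flow consistently maps to the same path.
 */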
void fib6_select_path(const struct net *net, struct fib6_result *res,
                      struct flowi6 *fl6, int oif, bool have_oif_match,
                      const struct sk_buff *skb, int strict)
{
        struct fib6_info *sibling, *next_sibling;
        struct fib6_info *match = res->f6i;

        if (!match->nh && (!match->fib6_nsiblings || have_oif_match))
                goto out;

        if (match->nh && have_oif_match && res->nh)
                return;

        if (skb)
                IP6CB(skb)->flags |= IP6SKB_MULTIPATH;

        /* We might have already computed the hash for ICMPv6 errors. In such
         * case it will always be non-zero. Otherwise now is the time to do it.
         */
        if (!fl6->mp_hash &&
            (!match->nh || nexthop_is_multipath(match->nh)))
                fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);

        if (unlikely(match->nh)) {
                nexthop_path_fib6_result(res, fl6->mp_hash);
                return;
        }

        if (fl6->mp_hash <= atomic_read(&match->fib6_nh->fib_nh_upper_bound))
                goto out;

        list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings,
                                 fib6_siblings) {
                const struct fib6_nh *nh = sibling->fib6_nh;
                int nh_upper_bound;

                nh_upper_bound = atomic_read(&nh->fib_nh_upper_bound);
                if (fl6->mp_hash > nh_upper_bound)
                        continue;
                if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0)
                        break;
                match = sibling;
                break;
        }

out:
        res->f6i = match;
        res->nh = match->fib6_nh;
}
/*
 *	Route lookup. rcu_read_lock() should be held.
 */

static bool __rt6_device_match(struct net *net, const struct fib6_nh *nh,
                               const struct in6_addr *saddr, int oif, int flags)
{
        const struct net_device *dev;

        if (nh->fib_nh_flags & RTNH_F_DEAD)
                return false;

        dev = nh->fib_nh_dev;
        if (oif) {
                if (dev->ifindex == oif)
                        return true;
        } else {
                if (ipv6_chk_addr(net, saddr, dev,
                                  flags & RT6_LOOKUP_F_IFACE))
                        return true;
        }

        return false;
}

struct fib6_nh_dm_arg {
        struct net              *net;
        const struct in6_addr   *saddr;
        int                     oif;
        int                     flags;
        struct fib6_nh          *nh;
};

static int __rt6_nh_dev_match(struct fib6_nh *nh, void *_arg)
{
        struct fib6_nh_dm_arg *arg = _arg;

        arg->nh = nh;
        return __rt6_device_match(arg->net, nh, arg->saddr, arg->oif,
                                  arg->flags);
}

/* returns fib6_nh from nexthop or NULL */
static struct fib6_nh *rt6_nh_dev_match(struct net *net, struct nexthop *nh,
                                        struct fib6_result *res,
                                        const struct in6_addr *saddr,
                                        int oif, int flags)
{
        struct fib6_nh_dm_arg arg = {
                .net   = net,
                .saddr = saddr,
                .oif   = oif,
                .flags = flags,
        };

        if (nexthop_is_blackhole(nh))
                return NULL;

        if (nexthop_for_each_fib6_nh(nh, __rt6_nh_dev_match, &arg))
                return arg.nh;

        return NULL;
}
static void rt6_device_match(struct net *net, struct fib6_result *res,
                             const struct in6_addr *saddr, int oif, int flags)
{
        struct fib6_info *f6i = res->f6i;
        struct fib6_info *spf6i;
        struct fib6_nh *nh;

        if (!oif && ipv6_addr_any(saddr)) {
                if (unlikely(f6i->nh)) {
                        nh = nexthop_fib6_nh(f6i->nh);
                        if (nexthop_is_blackhole(f6i->nh))
                                goto out_blackhole;
                } else {
                        nh = f6i->fib6_nh;
                }
                if (!(nh->fib_nh_flags & RTNH_F_DEAD))
                        goto out;
        }

        for (spf6i = f6i; spf6i; spf6i = rcu_dereference(spf6i->fib6_next)) {
                bool matched = false;

                if (unlikely(spf6i->nh)) {
                        nh = rt6_nh_dev_match(net, spf6i->nh, res, saddr,
                                              oif, flags);
                        if (nh)
                                matched = true;
                } else {
                        nh = spf6i->fib6_nh;
                        if (__rt6_device_match(net, nh, saddr, oif, flags))
                                matched = true;
                }
                if (matched) {
                        res->f6i = spf6i;
                        goto out;
                }
        }

        if (oif && flags & RT6_LOOKUP_F_IFACE) {
                res->f6i = net->ipv6.fib6_null_entry;
                nh = res->f6i->fib6_nh;
                goto out;
        }

        if (unlikely(f6i->nh)) {
                nh = nexthop_fib6_nh(f6i->nh);
                if (nexthop_is_blackhole(f6i->nh))
                        goto out_blackhole;
        } else {
                nh = f6i->fib6_nh;
        }

        if (nh->fib_nh_flags & RTNH_F_DEAD) {
                res->f6i = net->ipv6.fib6_null_entry;
                nh = res->f6i->fib6_nh;
        }
out:
        res->nh = nh;
        res->fib6_type = res->f6i->fib6_type;
        res->fib6_flags = res->f6i->fib6_flags;
        return;

out_blackhole:
        res->fib6_flags |= RTF_REJECT;
        res->fib6_type = RTN_BLACKHOLE;
        res->nh = nh;
}
#ifdef CONFIG_IPV6_ROUTER_PREF
struct __rt6_probe_work {
        struct work_struct work;
        struct in6_addr target;
        struct net_device *dev;
        netdevice_tracker dev_tracker;
};

static void rt6_probe_deferred(struct work_struct *w)
{
        struct in6_addr mcaddr;
        struct __rt6_probe_work *work =
                container_of(w, struct __rt6_probe_work, work);

        addrconf_addr_solict_mult(&work->target, &mcaddr);
        ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
        netdev_put(work->dev, &work->dev_tracker);
        kfree(work);
}

static void rt6_probe(struct fib6_nh *fib6_nh)
{
        struct __rt6_probe_work *work = NULL;
        const struct in6_addr *nh_gw;
        unsigned long last_probe;
        struct neighbour *neigh;
        struct net_device *dev;
        struct inet6_dev *idev;

        /*
         * Okay, this does not seem to be appropriate
         * for now, however, we need to check if it
         * is really so; aka Router Reachability Probing.
         *
         * Router Reachability Probe MUST be rate-limited
         * to no more than one per minute.
         */
        if (!fib6_nh->fib_nh_gw_family)
                return;

        nh_gw = &fib6_nh->fib_nh_gw6;
        dev = fib6_nh->fib_nh_dev;
        rcu_read_lock_bh();
        last_probe = READ_ONCE(fib6_nh->last_probe);
        idev = __in6_dev_get(dev);
        neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
        if (neigh) {
                if (READ_ONCE(neigh->nud_state) & NUD_VALID)
                        goto out;

                write_lock_bh(&neigh->lock);
                if (!(neigh->nud_state & NUD_VALID) &&
                    time_after(jiffies,
                               neigh->updated + idev->cnf.rtr_probe_interval)) {
                        work = kmalloc(sizeof(*work), GFP_ATOMIC);
                        if (work)
                                __neigh_set_probe_once(neigh);
                }
                write_unlock_bh(&neigh->lock);
        } else if (time_after(jiffies, last_probe +
                                       idev->cnf.rtr_probe_interval)) {
                work = kmalloc(sizeof(*work), GFP_ATOMIC);
        }

        if (!work || cmpxchg(&fib6_nh->last_probe,
                             last_probe, jiffies) != last_probe) {
                kfree(work);
        } else {
                INIT_WORK(&work->work, rt6_probe_deferred);
                work->target = *nh_gw;
                netdev_hold(dev, &work->dev_tracker, GFP_ATOMIC);
                work->dev = dev;
                schedule_work(&work->work);
        }

out:
        rcu_read_unlock_bh();
}
#else
static inline void rt6_probe(struct fib6_nh *fib6_nh)
{
}
#endif
/*
 * Default Router Selection (RFC 2461 6.3.6)
 */
static enum rt6_nud_state rt6_check_neigh(const struct fib6_nh *fib6_nh)
{
        enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
        struct neighbour *neigh;

        rcu_read_lock_bh();
        neigh = __ipv6_neigh_lookup_noref(fib6_nh->fib_nh_dev,
                                          &fib6_nh->fib_nh_gw6);
        if (neigh) {
                u8 nud_state = READ_ONCE(neigh->nud_state);

                if (nud_state & NUD_VALID)
                        ret = RT6_NUD_SUCCEED;
#ifdef CONFIG_IPV6_ROUTER_PREF
                else if (!(nud_state & NUD_FAILED))
                        ret = RT6_NUD_SUCCEED;
                else
                        ret = RT6_NUD_FAIL_PROBE;
#endif
        } else {
                ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
                      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
        }
        rcu_read_unlock_bh();

        return ret;
}
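/* A matching (or unspecified) outgoing interface contributes 2 to the
 * route score; with CONFIG_IPV6_ROUTER_PREF the decoded RA preference
 * is folded in at bits 2-3, and reachability probing can veto a
 * candidate entirely via the negative RT6_NUD_* codes.
 */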
static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
                           int strict)
{
        int m = 0;

        if (!oif || nh->fib_nh_dev->ifindex == oif)
                m = 2;

        if (!m && (strict & RT6_LOOKUP_F_IFACE))
                return RT6_NUD_FAIL_HARD;
#ifdef CONFIG_IPV6_ROUTER_PREF
        m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(fib6_flags)) << 2;
#endif
        if ((strict & RT6_LOOKUP_F_REACHABLE) &&
            !(fib6_flags & RTF_NONEXTHOP) && nh->fib_nh_gw_family) {
                int n = rt6_check_neigh(nh);
                if (n < 0)
                        return n;
        }
        return m;
}

static bool find_match(struct fib6_nh *nh, u32 fib6_flags,
                       int oif, int strict, int *mpri, bool *do_rr)
{
        bool match_do_rr = false;
        bool rc = false;
        int m;

        if (nh->fib_nh_flags & RTNH_F_DEAD)
                goto out;

        if (ip6_ignore_linkdown(nh->fib_nh_dev) &&
            nh->fib_nh_flags & RTNH_F_LINKDOWN &&
            !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
                goto out;

        m = rt6_score_route(nh, fib6_flags, oif, strict);
        if (m == RT6_NUD_FAIL_DO_RR) {
                match_do_rr = true;
                m = 0; /* lowest valid score */
        } else if (m == RT6_NUD_FAIL_HARD) {
                goto out;
        }

        if (strict & RT6_LOOKUP_F_REACHABLE)
                rt6_probe(nh);

        /* note that m can be RT6_NUD_FAIL_PROBE at this point */
        if (m > *mpri) {
                *do_rr = match_do_rr;
                *mpri = m;
                rc = true;
        }
out:
        return rc;
}
struct fib6_nh_frl_arg {
        u32             flags;
        int             oif;
        int             strict;
        int             *mpri;
        bool            *do_rr;
        struct fib6_nh  *nh;
};

static int rt6_nh_find_match(struct fib6_nh *nh, void *_arg)
{
        struct fib6_nh_frl_arg *arg = _arg;

        arg->nh = nh;
        return find_match(nh, arg->flags, arg->oif, arg->strict,
                          arg->mpri, arg->do_rr);
}

static void __find_rr_leaf(struct fib6_info *f6i_start,
                           struct fib6_info *nomatch, u32 metric,
                           struct fib6_result *res, struct fib6_info **cont,
                           int oif, int strict, bool *do_rr, int *mpri)
{
        struct fib6_info *f6i;

        for (f6i = f6i_start;
             f6i && f6i != nomatch;
             f6i = rcu_dereference(f6i->fib6_next)) {
                bool matched = false;
                struct fib6_nh *nh;

                if (cont && f6i->fib6_metric != metric) {
                        *cont = f6i;
                        return;
                }

                if (fib6_check_expired(f6i))
                        continue;

                if (unlikely(f6i->nh)) {
                        struct fib6_nh_frl_arg arg = {
                                .flags  = f6i->fib6_flags,
                                .oif    = oif,
                                .strict = strict,
                                .mpri   = mpri,
                                .do_rr  = do_rr
                        };

                        if (nexthop_is_blackhole(f6i->nh)) {
                                res->fib6_flags = RTF_REJECT;
                                res->fib6_type = RTN_BLACKHOLE;
                                res->nh = nexthop_fib6_nh(f6i->nh);
                                break;
                        }
                        if (nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_find_match,
                                                     &arg)) {
                                matched = true;
                                nh = arg.nh;
                        }
                } else {
                        nh = f6i->fib6_nh;
                        if (find_match(nh, f6i->fib6_flags, oif, strict,
                                       mpri, do_rr))
                                matched = true;
                }
                if (matched) {
                        res->f6i = f6i;
                        res->nh = nh;
                        res->fib6_flags = f6i->fib6_flags;
                        res->fib6_type = f6i->fib6_type;
                }
        }
}
static void find_rr_leaf(struct fib6_node *fn, struct fib6_info *leaf,
                         struct fib6_info *rr_head, int oif, int strict,
                         bool *do_rr, struct fib6_result *res)
{
        u32 metric = rr_head->fib6_metric;
        struct fib6_info *cont = NULL;
        int mpri = -1;

        __find_rr_leaf(rr_head, NULL, metric, res, &cont,
                       oif, strict, do_rr, &mpri);

        __find_rr_leaf(leaf, rr_head, metric, res, &cont,
                       oif, strict, do_rr, &mpri);

        if (res->f6i || !cont)
                return;

        __find_rr_leaf(cont, NULL, metric, res, NULL,
                       oif, strict, do_rr, &mpri);
}
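/* rt6_select() walks the candidates of a fib6_node starting at the
 * round-robin cursor fn->rr_ptr; when find_rr_leaf() requests round
 * robin (do_rr), the cursor is advanced to the next sibling of equal
 * metric so subsequent lookups prefer a different router.
 */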
static void rt6_select(struct net *net, struct fib6_node *fn, int oif,
                       struct fib6_result *res, int strict)
{
        struct fib6_info *leaf = rcu_dereference(fn->leaf);
        struct fib6_info *rt0;
        bool do_rr = false;
        int key_plen;

        /* make sure this function or its helpers sets f6i */
        res->f6i = NULL;

        if (!leaf || leaf == net->ipv6.fib6_null_entry)
                goto out;

        rt0 = rcu_dereference(fn->rr_ptr);
        if (!rt0)
                rt0 = leaf;

        /* Double check to make sure fn is not an intermediate node
         * and fn->leaf does not point to its child's leaf
         * (This might happen if all routes under fn are deleted from
         * the tree and fib6_repair_tree() is called on the node.)
         */
        key_plen = rt0->fib6_dst.plen;
#ifdef CONFIG_IPV6_SUBTREES
        if (rt0->fib6_src.plen)
                key_plen = rt0->fib6_src.plen;
#endif
        if (fn->fn_bit != key_plen)
                goto out;

        find_rr_leaf(fn, leaf, rt0, oif, strict, &do_rr, res);
        if (do_rr) {
                struct fib6_info *next = rcu_dereference(rt0->fib6_next);

                /* no entries matched; do round-robin */
                if (!next || next->fib6_metric != rt0->fib6_metric)
                        next = leaf;

                if (next != rt0) {
                        spin_lock_bh(&leaf->fib6_table->tb6_lock);
                        /* make sure next is not being deleted from the tree */
                        if (next->fib6_node)
                                rcu_assign_pointer(fn->rr_ptr, next);
                        spin_unlock_bh(&leaf->fib6_table->tb6_lock);
                }
        }

out:
        if (!res->f6i) {
                res->f6i = net->ipv6.fib6_null_entry;
                res->nh = res->f6i->fib6_nh;
                res->fib6_flags = res->f6i->fib6_flags;
                res->fib6_type = res->f6i->fib6_type;
        }
}

static bool rt6_is_gw_or_nonexthop(const struct fib6_result *res)
{
        return (res->f6i->fib6_flags & RTF_NONEXTHOP) ||
               res->nh->fib_nh_gw_family;
}
#ifdef CONFIG_IPV6_ROUTE_INFO
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
                  const struct in6_addr *gwaddr)
{
        struct net *net = dev_net(dev);
        struct route_info *rinfo = (struct route_info *) opt;
        struct in6_addr prefix_buf, *prefix;
        unsigned int pref;
        unsigned long lifetime;
        struct fib6_info *rt;

        if (len < sizeof(struct route_info)) {
                return -EINVAL;
        }

        /* Sanity check for prefix_len and length */
        if (rinfo->length > 3) {
                return -EINVAL;
        } else if (rinfo->prefix_len > 128) {
                return -EINVAL;
        } else if (rinfo->prefix_len > 64) {
                if (rinfo->length < 2) {
                        return -EINVAL;
                }
        } else if (rinfo->prefix_len > 0) {
                if (rinfo->length < 1) {
                        return -EINVAL;
                }
        }

        pref = rinfo->route_pref;
        if (pref == ICMPV6_ROUTER_PREF_INVALID)
                return -EINVAL;

        lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);

        if (rinfo->length == 3)
                prefix = (struct in6_addr *)rinfo->prefix;
        else {
                /* this function is safe */
                ipv6_addr_prefix(&prefix_buf,
                                 (struct in6_addr *)rinfo->prefix,
                                 rinfo->prefix_len);
                prefix = &prefix_buf;
        }

        if (rinfo->prefix_len == 0)
                rt = rt6_get_dflt_router(net, gwaddr, dev);
        else
                rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
                                        gwaddr, dev);

        if (rt && !lifetime) {
                ip6_del_rt(net, rt, false);
                rt = NULL;
        }

        if (!rt && lifetime)
                rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
                                        dev, pref);
        else if (rt)
                rt->fib6_flags = RTF_ROUTEINFO |
                                 (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);

        if (rt) {
                if (!addrconf_finite_timeout(lifetime))
                        fib6_clean_expires(rt);
                else
                        fib6_set_expires(rt, jiffies + HZ * lifetime);

                fib6_info_release(rt);
        }
        return 0;
}
#endif
/*
 *	Misc support functions
 */

/* called with rcu_lock held */
static struct net_device *ip6_rt_get_dev_rcu(const struct fib6_result *res)
{
        struct net_device *dev = res->nh->fib_nh_dev;

        if (res->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) {
                /* for copies of local routes, dst->dev needs to be the
                 * device if it is a master device, the master device if
                 * device is enslaved, and the loopback as the default
                 */
                if (netif_is_l3_slave(dev) &&
                    !rt6_need_strict(&res->f6i->fib6_dst.addr))
                        dev = l3mdev_master_dev_rcu(dev);
                else if (!netif_is_l3_master(dev))
                        dev = dev_net(dev)->loopback_dev;
                /* last case is netif_is_l3_master(dev) is true in which
                 * case we want dev returned to be dev
                 */
        }

        return dev;
}

static const int fib6_prop[RTN_MAX + 1] = {
        [RTN_UNSPEC]    = 0,
        [RTN_UNICAST]   = 0,
        [RTN_LOCAL]     = 0,
        [RTN_BROADCAST] = 0,
        [RTN_ANYCAST]   = 0,
        [RTN_MULTICAST] = 0,
        [RTN_BLACKHOLE] = -EINVAL,
        [RTN_UNREACHABLE] = -EHOSTUNREACH,
        [RTN_PROHIBIT]  = -EACCES,
        [RTN_THROW]     = -EAGAIN,
        [RTN_NAT]       = -EINVAL,
        [RTN_XRESOLVE]  = -EINVAL,
};

static int ip6_rt_type_to_error(u8 fib6_type)
{
        return fib6_prop[fib6_type];
}
static unsigned short fib6_info_dst_flags(struct fib6_info *rt)
{
        unsigned short flags = 0;

        if (rt->dst_nocount)
                flags |= DST_NOCOUNT;
        if (rt->dst_nopolicy)
                flags |= DST_NOPOLICY;

        return flags;
}

static void ip6_rt_init_dst_reject(struct rt6_info *rt, u8 fib6_type)
{
        rt->dst.error = ip6_rt_type_to_error(fib6_type);

        switch (fib6_type) {
        case RTN_BLACKHOLE:
                rt->dst.output = dst_discard_out;
                rt->dst.input = dst_discard;
                break;
        case RTN_PROHIBIT:
                rt->dst.output = ip6_pkt_prohibit_out;
                rt->dst.input = ip6_pkt_prohibit;
                break;
        case RTN_THROW:
        case RTN_UNREACHABLE:
        default:
                rt->dst.output = ip6_pkt_discard_out;
                rt->dst.input = ip6_pkt_discard;
                break;
        }
}

static void ip6_rt_init_dst(struct rt6_info *rt, const struct fib6_result *res)
{
        struct fib6_info *f6i = res->f6i;

        if (res->fib6_flags & RTF_REJECT) {
                ip6_rt_init_dst_reject(rt, res->fib6_type);
                return;
        }

        rt->dst.error = 0;
        rt->dst.output = ip6_output;

        if (res->fib6_type == RTN_LOCAL || res->fib6_type == RTN_ANYCAST) {
                rt->dst.input = ip6_input;
        } else if (ipv6_addr_type(&f6i->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
                rt->dst.input = ip6_mc_input;
        } else {
                rt->dst.input = ip6_forward;
        }

        if (res->nh->fib_nh_lws) {
                rt->dst.lwtstate = lwtstate_get(res->nh->fib_nh_lws);
                lwtunnel_set_redirect(&rt->dst);
        }

        rt->dst.lastuse = jiffies;
}

/* Caller must already hold reference to @from */
static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
{
        rt->rt6i_flags &= ~RTF_EXPIRES;
        rcu_assign_pointer(rt->from, from);
        ip_dst_init_metrics(&rt->dst, from->fib6_metrics);
}
/* Caller must already hold reference to f6i in result */
static void ip6_rt_copy_init(struct rt6_info *rt, const struct fib6_result *res)
{
        const struct fib6_nh *nh = res->nh;
        const struct net_device *dev = nh->fib_nh_dev;
        struct fib6_info *f6i = res->f6i;

        ip6_rt_init_dst(rt, res);

        rt->rt6i_dst = f6i->fib6_dst;
        rt->rt6i_idev = dev ? in6_dev_get(dev) : NULL;
        rt->rt6i_flags = res->fib6_flags;
        if (nh->fib_nh_gw_family) {
                rt->rt6i_gateway = nh->fib_nh_gw6;
                rt->rt6i_flags |= RTF_GATEWAY;
        }
        rt6_set_from(rt, f6i);
#ifdef CONFIG_IPV6_SUBTREES
        rt->rt6i_src = f6i->fib6_src;
#endif
}

static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
                                        struct in6_addr *saddr)
{
        struct fib6_node *pn, *sn;
        while (1) {
                if (fn->fn_flags & RTN_TL_ROOT)
                        return NULL;
                pn = rcu_dereference(fn->parent);
                sn = FIB6_SUBTREE(pn);
                if (sn && sn != fn)
                        fn = fib6_node_lookup(sn, NULL, saddr);
                else
                        fn = pn;
                if (fn->fn_flags & RTN_RTINFO)
                        return fn;
        }
}

static bool ip6_hold_safe(struct net *net, struct rt6_info **prt)
{
        struct rt6_info *rt = *prt;

        if (dst_hold_safe(&rt->dst))
                return true;
        if (net) {
                rt = net->ipv6.ip6_null_entry;
                dst_hold(&rt->dst);
        } else {
                rt = NULL;
        }
        *prt = rt;
        return false;
}
/* called with rcu_lock held */
static struct rt6_info *ip6_create_rt_rcu(const struct fib6_result *res)
{
        struct net_device *dev = res->nh->fib_nh_dev;
        struct fib6_info *f6i = res->f6i;
        unsigned short flags;
        struct rt6_info *nrt;

        if (!fib6_info_hold_safe(f6i))
                goto fallback;

        flags = fib6_info_dst_flags(f6i);
        nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
        if (!nrt) {
                fib6_info_release(f6i);
                goto fallback;
        }

        ip6_rt_copy_init(nrt, res);
        return nrt;

fallback:
        nrt = dev_net(dev)->ipv6.ip6_null_entry;
        dst_hold(&nrt->dst);
        return nrt;
}
INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_lookup(struct net *net,
                                             struct fib6_table *table,
                                             struct flowi6 *fl6,
                                             const struct sk_buff *skb,
                                             int flags)
{
        struct fib6_result res = {};
        struct fib6_node *fn;
        struct rt6_info *rt;

        rcu_read_lock();
        fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
        res.f6i = rcu_dereference(fn->leaf);
        if (!res.f6i)
                res.f6i = net->ipv6.fib6_null_entry;
        else
                rt6_device_match(net, &res, &fl6->saddr, fl6->flowi6_oif,
                                 flags);

        if (res.f6i == net->ipv6.fib6_null_entry) {
                fn = fib6_backtrack(fn, &fl6->saddr);
                if (fn)
                        goto restart;

                rt = net->ipv6.ip6_null_entry;
                dst_hold(&rt->dst);
                goto out;
        } else if (res.fib6_flags & RTF_REJECT) {
                goto do_create;
        }

        fib6_select_path(net, &res, fl6, fl6->flowi6_oif,
                         fl6->flowi6_oif != 0, skb, flags);

        /* Search through exception table */
        rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
        if (rt) {
                if (ip6_hold_safe(net, &rt))
                        dst_use_noref(&rt->dst, jiffies);
        } else {
do_create:
                rt = ip6_create_rt_rcu(&res);
        }

out:
        trace_fib6_table_lookup(net, &res, table, fl6);

        rcu_read_unlock();

        return rt;
}

struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
                                   const struct sk_buff *skb, int flags)
{
        return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup);
}
EXPORT_SYMBOL_GPL(ip6_route_lookup);
struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
                            const struct in6_addr *saddr, int oif,
                            const struct sk_buff *skb, int strict)
{
        struct flowi6 fl6 = {
                .flowi6_oif = oif,
                .daddr = *daddr,
        };
        struct dst_entry *dst;
        int flags = strict ? RT6_LOOKUP_F_IFACE : 0;

        if (saddr) {
                memcpy(&fl6.saddr, saddr, sizeof(*saddr));
                flags |= RT6_LOOKUP_F_HAS_SADDR;
        }

        dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
        if (dst->error == 0)
                return (struct rt6_info *) dst;

        dst_release(dst);

        return NULL;
}
EXPORT_SYMBOL(rt6_lookup);
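/* A minimal usage sketch (hypothetical caller): resolve a route to a
 * destination and drop the reference when done.  rt6_lookup() returns
 * NULL on error, so the result must be checked before use:
 *
 *	struct rt6_info *rt;
 *
 *	rt = rt6_lookup(net, &daddr, NULL, 0, NULL, 0);
 *	if (rt) {
 *		... use rt->dst ...
 *		ip6_rt_put(rt);
 *	}
 */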
/* ip6_ins_rt is called with FREE table->tb6_lock.
 * It takes a new route entry; if the addition fails for any reason the
 * route is released.
 * Caller must hold dst before calling it.
 */

static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info,
                        struct netlink_ext_ack *extack)
{
        int err;
        struct fib6_table *table;

        table = rt->fib6_table;
        spin_lock_bh(&table->tb6_lock);
        err = fib6_add(&table->tb6_root, rt, info, extack);
        spin_unlock_bh(&table->tb6_lock);

        return err;
}

int ip6_ins_rt(struct net *net, struct fib6_info *rt)
{
        struct nl_info info = { .nl_net = net, };

        return __ip6_ins_rt(rt, &info, NULL);
}
static struct rt6_info *ip6_rt_cache_alloc(const struct fib6_result *res,
                                           const struct in6_addr *daddr,
                                           const struct in6_addr *saddr)
{
        struct fib6_info *f6i = res->f6i;
        struct net_device *dev;
        struct rt6_info *rt;

        /*
         *	Clone the route.
         */

        if (!fib6_info_hold_safe(f6i))
                return NULL;

        dev = ip6_rt_get_dev_rcu(res);
        rt = ip6_dst_alloc(dev_net(dev), dev, 0);
        if (!rt) {
                fib6_info_release(f6i);
                return NULL;
        }

        ip6_rt_copy_init(rt, res);
        rt->rt6i_flags |= RTF_CACHE;
        rt->rt6i_dst.addr = *daddr;
        rt->rt6i_dst.plen = 128;

        if (!rt6_is_gw_or_nonexthop(res)) {
                if (f6i->fib6_dst.plen != 128 &&
                    ipv6_addr_equal(&f6i->fib6_dst.addr, daddr))
                        rt->rt6i_flags |= RTF_ANYCAST;
#ifdef CONFIG_IPV6_SUBTREES
                if (rt->rt6i_src.plen && saddr) {
                        rt->rt6i_src.addr = *saddr;
                        rt->rt6i_src.plen = 128;
                }
#endif
        }

        return rt;
}

static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res)
{
        struct fib6_info *f6i = res->f6i;
        unsigned short flags = fib6_info_dst_flags(f6i);
        struct net_device *dev;
        struct rt6_info *pcpu_rt;

        if (!fib6_info_hold_safe(f6i))
                return NULL;

        rcu_read_lock();
        dev = ip6_rt_get_dev_rcu(res);
        pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags | DST_NOCOUNT);
        rcu_read_unlock();
        if (!pcpu_rt) {
                fib6_info_release(f6i);
                return NULL;
        }
        ip6_rt_copy_init(pcpu_rt, res);
        pcpu_rt->rt6i_flags |= RTF_PCPU;

        if (f6i->nh)
                pcpu_rt->sernum = rt_genid_ipv6(dev_net(dev));

        return pcpu_rt;
}
static bool rt6_is_valid(const struct rt6_info *rt6)
{
        return rt6->sernum == rt_genid_ipv6(dev_net(rt6->dst.dev));
}

/* It should be called with rcu_read_lock() acquired */
static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
{
        struct rt6_info *pcpu_rt;

        pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu);

        if (pcpu_rt && pcpu_rt->sernum && !rt6_is_valid(pcpu_rt)) {
                struct rt6_info *prev, **p;

                p = this_cpu_ptr(res->nh->rt6i_pcpu);
                prev = xchg(p, NULL);
                if (prev) {
                        dst_dev_put(&prev->dst);
                        dst_release(&prev->dst);
                }

                pcpu_rt = NULL;
        }

        return pcpu_rt;
}

static struct rt6_info *rt6_make_pcpu_route(struct net *net,
                                            const struct fib6_result *res)
{
        struct rt6_info *pcpu_rt, *prev, **p;

        pcpu_rt = ip6_rt_pcpu_alloc(res);
        if (!pcpu_rt)
                return NULL;

        p = this_cpu_ptr(res->nh->rt6i_pcpu);
        prev = cmpxchg(p, NULL, pcpu_rt);
        BUG_ON(prev);

        if (res->f6i->fib6_destroying) {
                struct fib6_info *from;

                from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
                fib6_info_release(from);
        }

        return pcpu_rt;
}
/* exception hash table implementation
 */
static DEFINE_SPINLOCK(rt6_exception_lock);

/* Remove rt6_ex from hash table and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
                                 struct rt6_exception *rt6_ex)
{
        struct fib6_info *from;
        struct net *net;

        if (!bucket || !rt6_ex)
                return;

        net = dev_net(rt6_ex->rt6i->dst.dev);
        net->ipv6.rt6_stats->fib_rt_cache--;

        /* purge completely the exception to allow releasing the held resources:
         * some [sk] cache may keep the dst around for unlimited time
         */
        from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL);
        fib6_info_release(from);
        dst_dev_put(&rt6_ex->rt6i->dst);

        hlist_del_rcu(&rt6_ex->hlist);
        dst_release(&rt6_ex->rt6i->dst);
        kfree_rcu(rt6_ex, rcu);
        WARN_ON_ONCE(!bucket->depth);
        bucket->depth--;
}

/* Remove oldest rt6_ex in bucket and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
{
        struct rt6_exception *rt6_ex, *oldest = NULL;

        if (!bucket)
                return;

        hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
                if (!oldest || time_before(rt6_ex->stamp, oldest->stamp))
                        oldest = rt6_ex;
        }
        rt6_remove_exception(bucket, oldest);
}
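/* The exception table hash is keyed with a boot-time random siphash key
 * so that remote peers cannot predict bucket placement and degenerate
 * the chains (see also the randomized bucket depth limit enforced in
 * rt6_insert_exception()).
 */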
static u32 rt6_exception_hash(const struct in6_addr *dst,
                              const struct in6_addr *src)
{
        static siphash_aligned_key_t rt6_exception_key;
        struct {
                struct in6_addr dst;
                struct in6_addr src;
        } __aligned(SIPHASH_ALIGNMENT) combined = {
                .dst = *dst,
        };
        u64 val;

        net_get_random_once(&rt6_exception_key, sizeof(rt6_exception_key));

#ifdef CONFIG_IPV6_SUBTREES
        if (src)
                combined.src = *src;
#endif
        val = siphash(&combined, sizeof(combined), &rt6_exception_key);

        return hash_64(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
}
/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rt6_exception_lock
 */
static struct rt6_exception *
__rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
                              const struct in6_addr *daddr,
                              const struct in6_addr *saddr)
{
        struct rt6_exception *rt6_ex;
        u32 hval;

        if (!(*bucket) || !daddr)
                return NULL;

        hval = rt6_exception_hash(daddr, saddr);
        *bucket += hval;

        hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
                struct rt6_info *rt6 = rt6_ex->rt6i;
                bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
                if (matched && saddr)
                        matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
                if (matched)
                        return rt6_ex;
        }
        return NULL;
}

/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rcu_read_lock()
 */
static struct rt6_exception *
__rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
                         const struct in6_addr *daddr,
                         const struct in6_addr *saddr)
{
        struct rt6_exception *rt6_ex;
        u32 hval;

        WARN_ON_ONCE(!rcu_read_lock_held());

        if (!(*bucket) || !daddr)
                return NULL;

        hval = rt6_exception_hash(daddr, saddr);
        *bucket += hval;

        hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
                struct rt6_info *rt6 = rt6_ex->rt6i;
                bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
                if (matched && saddr)
                        matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
                if (matched)
                        return rt6_ex;
        }
        return NULL;
}
static unsigned int fib6_mtu(const struct fib6_result *res)
{
        const struct fib6_nh *nh = res->nh;
        unsigned int mtu;

        if (res->f6i->fib6_pmtu) {
                mtu = res->f6i->fib6_pmtu;
        } else {
                struct net_device *dev = nh->fib_nh_dev;
                struct inet6_dev *idev;

                rcu_read_lock();
                idev = __in6_dev_get(dev);
                mtu = idev->cnf.mtu6;
                rcu_read_unlock();
        }

        mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

        return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
}

#define FIB6_EXCEPTION_BUCKET_FLUSHED  0x1UL

/* used when the flushed bit is not relevant, only access to the bucket
 * (ie., all bucket users except rt6_insert_exception);
 *
 * called under rcu lock; sometimes called with rt6_exception_lock held
 */
static
struct rt6_exception_bucket *fib6_nh_get_excptn_bucket(const struct fib6_nh *nh,
                                                       spinlock_t *lock)
{
        struct rt6_exception_bucket *bucket;

        if (lock)
                bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
                                                   lockdep_is_held(lock));
        else
                bucket = rcu_dereference(nh->rt6i_exception_bucket);

        /* remove bucket flushed bit if set */
        if (bucket) {
                unsigned long p = (unsigned long)bucket;

                p &= ~FIB6_EXCEPTION_BUCKET_FLUSHED;
                bucket = (struct rt6_exception_bucket *)p;
        }

        return bucket;
}

static bool fib6_nh_excptn_bucket_flushed(struct rt6_exception_bucket *bucket)
{
        unsigned long p = (unsigned long)bucket;

        return !!(p & FIB6_EXCEPTION_BUCKET_FLUSHED);
}

/* called with rt6_exception_lock held */
static void fib6_nh_excptn_bucket_set_flushed(struct fib6_nh *nh,
                                              spinlock_t *lock)
{
        struct rt6_exception_bucket *bucket;
        unsigned long p;

        bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
                                           lockdep_is_held(lock));

        p = (unsigned long)bucket;
        p |= FIB6_EXCEPTION_BUCKET_FLUSHED;
        bucket = (struct rt6_exception_bucket *)p;
        rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
}
static int rt6_insert_exception(struct rt6_info *nrt,
                                const struct fib6_result *res)
{
        struct net *net = dev_net(nrt->dst.dev);
        struct rt6_exception_bucket *bucket;
        struct fib6_info *f6i = res->f6i;
        struct in6_addr *src_key = NULL;
        struct rt6_exception *rt6_ex;
        struct fib6_nh *nh = res->nh;
        int max_depth;
        int err = 0;

        spin_lock_bh(&rt6_exception_lock);

        bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
                                           lockdep_is_held(&rt6_exception_lock));
        if (!bucket) {
                bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
                                 GFP_ATOMIC);
                if (!bucket) {
                        err = -ENOMEM;
                        goto out;
                }
                rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
        } else if (fib6_nh_excptn_bucket_flushed(bucket)) {
                err = -EINVAL;
                goto out;
        }

#ifdef CONFIG_IPV6_SUBTREES
        /* fib6_src.plen != 0 indicates f6i is in subtree
         * and exception table is indexed by a hash of
         * both fib6_dst and fib6_src.
         * Otherwise, the exception table is indexed by
         * a hash of only fib6_dst.
         */
        if (f6i->fib6_src.plen)
                src_key = &nrt->rt6i_src.addr;
#endif
        /* rt6_mtu_change() might lower mtu on f6i.
         * Only insert this exception route if its mtu
         * is less than f6i's mtu value.
         */
        if (dst_metric_raw(&nrt->dst, RTAX_MTU) >= fib6_mtu(res)) {
                err = -EINVAL;
                goto out;
        }

        rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
                                               src_key);
        if (rt6_ex)
                rt6_remove_exception(bucket, rt6_ex);

        rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC);
        if (!rt6_ex) {
                err = -ENOMEM;
                goto out;
        }
        rt6_ex->rt6i = nrt;
        rt6_ex->stamp = jiffies;
        hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
        bucket->depth++;
        net->ipv6.rt6_stats->fib_rt_cache++;

        /* Randomize max depth to avoid some side-channel attacks. */
        max_depth = FIB6_MAX_DEPTH + prandom_u32_max(FIB6_MAX_DEPTH);
        while (bucket->depth > max_depth)
                rt6_exception_remove_oldest(bucket);

out:
        spin_unlock_bh(&rt6_exception_lock);

        /* Update fn->fn_sernum to invalidate all cached dst */
        if (!err) {
                spin_lock_bh(&f6i->fib6_table->tb6_lock);
                fib6_update_sernum(net, f6i);
                spin_unlock_bh(&f6i->fib6_table->tb6_lock);
                fib6_force_start_gc(net);
        }

        return err;
}
static void fib6_nh_flush_exceptions(struct fib6_nh *nh, struct fib6_info *from)
{
        struct rt6_exception_bucket *bucket;
        struct rt6_exception *rt6_ex;
        struct hlist_node *tmp;
        int i;

        spin_lock_bh(&rt6_exception_lock);

        bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
        if (!bucket)
                goto out;

        /* Prevent rt6_insert_exception() from recreating the bucket list */
        if (!from)
                fib6_nh_excptn_bucket_set_flushed(nh, &rt6_exception_lock);

        for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
                hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist) {
                        if (!from ||
                            rcu_access_pointer(rt6_ex->rt6i->from) == from)
                                rt6_remove_exception(bucket, rt6_ex);
                }
                WARN_ON_ONCE(!from && bucket->depth);
                bucket++;
        }
out:
        spin_unlock_bh(&rt6_exception_lock);
}

static int rt6_nh_flush_exceptions(struct fib6_nh *nh, void *arg)
{
        struct fib6_info *f6i = arg;

        fib6_nh_flush_exceptions(nh, f6i);
        return 0;
}

void rt6_flush_exceptions(struct fib6_info *f6i)
{
        if (f6i->nh)
                nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_flush_exceptions,
                                         f6i);
        else
                fib6_nh_flush_exceptions(f6i->fib6_nh, f6i);
}
/* Find cached rt in the hash table inside passed in rt
 * Caller has to hold rcu_read_lock()
 */
static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
                                           const struct in6_addr *daddr,
                                           const struct in6_addr *saddr)
{
        const struct in6_addr *src_key = NULL;
        struct rt6_exception_bucket *bucket;
        struct rt6_exception *rt6_ex;
        struct rt6_info *ret = NULL;

#ifdef CONFIG_IPV6_SUBTREES
        /* fib6_src.plen != 0 indicates f6i is in subtree
         * and exception table is indexed by a hash of
         * both fib6_dst and fib6_src.
         * However, the src addr used to create the hash
         * might not be exactly the passed in saddr which
         * is a /128 addr from the flow.
         * So we need to use f6i->fib6_src to redo lookup
         * if the passed in saddr does not find anything.
         * (See the logic in ip6_rt_cache_alloc() on how
         * rt->rt6i_src is updated.)
         */
        if (res->f6i->fib6_src.plen)
                src_key = saddr;
find_ex:
#endif
        bucket = fib6_nh_get_excptn_bucket(res->nh, NULL);
        rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);

        if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
                ret = rt6_ex->rt6i;

#ifdef CONFIG_IPV6_SUBTREES
        /* Use fib6_src as src_key and redo lookup */
        if (!ret && src_key && src_key != &res->f6i->fib6_src.addr) {
                src_key = &res->f6i->fib6_src.addr;
                goto find_ex;
        }
#endif

        return ret;
}
/* Remove the passed in cached rt from the hash table that contains it */
static int fib6_nh_remove_exception(const struct fib6_nh *nh, int plen,
                                    const struct rt6_info *rt)
{
        const struct in6_addr *src_key = NULL;
        struct rt6_exception_bucket *bucket;
        struct rt6_exception *rt6_ex;
        int err;

        if (!rcu_access_pointer(nh->rt6i_exception_bucket))
                return -ENOENT;

        spin_lock_bh(&rt6_exception_lock);
        bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);

#ifdef CONFIG_IPV6_SUBTREES
        /* rt6i_src.plen != 0 indicates 'from' is in subtree
         * and exception table is indexed by a hash of
         * both rt6i_dst and rt6i_src.
         * Otherwise, the exception table is indexed by
         * a hash of only rt6i_dst.
         */
        if (plen)
                src_key = &rt->rt6i_src.addr;
#endif
        rt6_ex = __rt6_find_exception_spinlock(&bucket,
                                               &rt->rt6i_dst.addr,
                                               src_key);
        if (rt6_ex) {
                rt6_remove_exception(bucket, rt6_ex);
                err = 0;
        } else {
                err = -ENOENT;
        }

        spin_unlock_bh(&rt6_exception_lock);
        return err;
}

struct fib6_nh_excptn_arg {
        struct rt6_info *rt;
        int             plen;
};

static int rt6_nh_remove_exception_rt(struct fib6_nh *nh, void *_arg)
{
        struct fib6_nh_excptn_arg *arg = _arg;
        int err;

        err = fib6_nh_remove_exception(nh, arg->plen, arg->rt);
        if (err == 0)
                return 1;

        return 0;
}

static int rt6_remove_exception_rt(struct rt6_info *rt)
{
        struct fib6_info *from;

        from = rcu_dereference(rt->from);
        if (!from || !(rt->rt6i_flags & RTF_CACHE))
                return -EINVAL;

        if (from->nh) {
                struct fib6_nh_excptn_arg arg = {
                        .rt = rt,
                        .plen = from->fib6_src.plen
                };
                int rc;

                /* rc = 1 means an entry was found */
                rc = nexthop_for_each_fib6_nh(from->nh,
                                              rt6_nh_remove_exception_rt,
                                              &arg);
                return rc ? 0 : -ENOENT;
        }

        return fib6_nh_remove_exception(from->fib6_nh,
                                        from->fib6_src.plen, rt);
}
/* Find rt6_ex which contains the passed in rt cache and
 * refresh its stamp
 */
static void fib6_nh_update_exception(const struct fib6_nh *nh, int plen,
                                     const struct rt6_info *rt)
{
        const struct in6_addr *src_key = NULL;
        struct rt6_exception_bucket *bucket;
        struct rt6_exception *rt6_ex;

        bucket = fib6_nh_get_excptn_bucket(nh, NULL);
#ifdef CONFIG_IPV6_SUBTREES
        /* rt6i_src.plen != 0 indicates 'from' is in subtree
         * and exception table is indexed by a hash of
         * both rt6i_dst and rt6i_src.
         * Otherwise, the exception table is indexed by
         * a hash of only rt6i_dst.
         */
        if (plen)
                src_key = &rt->rt6i_src.addr;
#endif
        rt6_ex = __rt6_find_exception_rcu(&bucket, &rt->rt6i_dst.addr, src_key);
        if (rt6_ex)
                rt6_ex->stamp = jiffies;
}

struct fib6_nh_match_arg {
        const struct net_device *dev;
        const struct in6_addr   *gw;
        struct fib6_nh          *match;
};

/* determine if fib6_nh has given device and gateway */
static int fib6_nh_find_match(struct fib6_nh *nh, void *_arg)
{
        struct fib6_nh_match_arg *arg = _arg;

        if (arg->dev != nh->fib_nh_dev ||
            (arg->gw && !nh->fib_nh_gw_family) ||
            (!arg->gw && nh->fib_nh_gw_family) ||
            (arg->gw && !ipv6_addr_equal(arg->gw, &nh->fib_nh_gw6)))
                return 0;

        arg->match = nh;

        /* found a match, break the loop */
        return 1;
}

static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
{
        struct fib6_info *from;
        struct fib6_nh *fib6_nh;

        rcu_read_lock();

        from = rcu_dereference(rt->from);
        if (!from || !(rt->rt6i_flags & RTF_CACHE))
                goto unlock;

        if (from->nh) {
                struct fib6_nh_match_arg arg = {
                        .dev = rt->dst.dev,
                        .gw = &rt->rt6i_gateway,
                };

                nexthop_for_each_fib6_nh(from->nh, fib6_nh_find_match, &arg);

                if (!arg.match)
                        goto unlock;
                fib6_nh = arg.match;
        } else {
                fib6_nh = from->fib6_nh;
        }
        fib6_nh_update_exception(fib6_nh, from->fib6_src.plen, rt);
unlock:
        rcu_read_unlock();
}
static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
                                         struct rt6_info *rt, int mtu)
{
        /* If the new MTU is lower than the route PMTU, this new MTU will be the
         * lowest MTU in the path: always allow updating the route PMTU to
         * reflect PMTU decreases.
         *
         * If the new MTU is higher, and the route PMTU is equal to the local
         * MTU, this means the old MTU is the lowest in the path, so allow
         * updating it: if other nodes now have lower MTUs, PMTU discovery will
         * handle this.
         */

        if (dst_mtu(&rt->dst) >= mtu)
                return true;

        if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
                return true;

        return false;
}

static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
                                       const struct fib6_nh *nh, int mtu)
{
        struct rt6_exception_bucket *bucket;
        struct rt6_exception *rt6_ex;
        int i;

        bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
        if (!bucket)
                return;

        for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
                hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
                        struct rt6_info *entry = rt6_ex->rt6i;

                        /* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
                         * route), the metrics of its rt->from have already
                         * been updated.
                         */
                        if (dst_metric_raw(&entry->dst, RTAX_MTU) &&
                            rt6_mtu_change_route_allowed(idev, entry, mtu))
                                dst_metric_set(&entry->dst, RTAX_MTU, mtu);
                }
                bucket++;
        }
}
#define RTF_CACHE_GATEWAY	(RTF_GATEWAY | RTF_CACHE)

static void fib6_nh_exceptions_clean_tohost(const struct fib6_nh *nh,
                                            const struct in6_addr *gateway)
{
        struct rt6_exception_bucket *bucket;
        struct rt6_exception *rt6_ex;
        struct hlist_node *tmp;
        int i;

        if (!rcu_access_pointer(nh->rt6i_exception_bucket))
                return;

        spin_lock_bh(&rt6_exception_lock);
        bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
        if (bucket) {
                for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
                        hlist_for_each_entry_safe(rt6_ex, tmp,
                                                  &bucket->chain, hlist) {
                                struct rt6_info *entry = rt6_ex->rt6i;

                                if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
                                    RTF_CACHE_GATEWAY &&
                                    ipv6_addr_equal(gateway,
                                                    &entry->rt6i_gateway)) {
                                        rt6_remove_exception(bucket, rt6_ex);
                                }
                        }
                        bucket++;
                }
        }
        spin_unlock_bh(&rt6_exception_lock);
}
static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
                                      struct rt6_exception *rt6_ex,
                                      struct fib6_gc_args *gc_args,
                                      unsigned long now)
{
        struct rt6_info *rt = rt6_ex->rt6i;

        /* we are pruning and obsoleting aged-out and non gateway exceptions
         * even if others have still references to them, so that on next
         * dst_check() such references can be dropped.
         * EXPIRES exceptions - e.g. pmtu-generated ones are pruned when
         * expired, independently from their aging, as per RFC 8201 section 4
         */
        if (!(rt->rt6i_flags & RTF_EXPIRES)) {
                if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
                        RT6_TRACE("aging clone %p\n", rt);
                        rt6_remove_exception(bucket, rt6_ex);
                        return;
                }
        } else if (time_after(jiffies, rt->dst.expires)) {
                RT6_TRACE("purging expired route %p\n", rt);
                rt6_remove_exception(bucket, rt6_ex);
                return;
        }

        if (rt->rt6i_flags & RTF_GATEWAY) {
                struct neighbour *neigh;

                neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);

                if (!(neigh && (neigh->flags & NTF_ROUTER))) {
                        RT6_TRACE("purging route %p via non-router but gateway\n",
                                  rt);
                        rt6_remove_exception(bucket, rt6_ex);
                        return;
                }
        }

        gc_args->more++;
}

static void fib6_nh_age_exceptions(const struct fib6_nh *nh,
                                   struct fib6_gc_args *gc_args,
                                   unsigned long now)
{
        struct rt6_exception_bucket *bucket;
        struct rt6_exception *rt6_ex;
        struct hlist_node *tmp;
        int i;

        if (!rcu_access_pointer(nh->rt6i_exception_bucket))
                return;

        rcu_read_lock_bh();
        spin_lock(&rt6_exception_lock);
        bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
        if (bucket) {
                for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
                        hlist_for_each_entry_safe(rt6_ex, tmp,
                                                  &bucket->chain, hlist) {
                                rt6_age_examine_exception(bucket, rt6_ex,
                                                          gc_args, now);
                        }
                        bucket++;
                }
        }
        spin_unlock(&rt6_exception_lock);
        rcu_read_unlock_bh();
}
struct fib6_nh_age_excptn_arg {
        struct fib6_gc_args	*gc_args;
        unsigned long		now;
};

static int rt6_nh_age_exceptions(struct fib6_nh *nh, void *_arg)
{
        struct fib6_nh_age_excptn_arg *arg = _arg;

        fib6_nh_age_exceptions(nh, arg->gc_args, arg->now);
        return 0;
}

void rt6_age_exceptions(struct fib6_info *f6i,
                        struct fib6_gc_args *gc_args,
                        unsigned long now)
{
        if (f6i->nh) {
                struct fib6_nh_age_excptn_arg arg = {
                        .gc_args = gc_args,
                        .now = now
                };

                nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_age_exceptions,
                                         &arg);
        } else {
                fib6_nh_age_exceptions(f6i->fib6_nh, gc_args, now);
        }
}
/* must be called with rcu lock held */
int fib6_table_lookup(struct net *net, struct fib6_table *table, int oif,
                      struct flowi6 *fl6, struct fib6_result *res, int strict)
{
        struct fib6_node *fn, *saved_fn;

        fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
        saved_fn = fn;

redo_rt6_select:
        rt6_select(net, fn, oif, res, strict);
        if (res->f6i == net->ipv6.fib6_null_entry) {
                fn = fib6_backtrack(fn, &fl6->saddr);
                if (fn)
                        goto redo_rt6_select;
                else if (strict & RT6_LOOKUP_F_REACHABLE) {
                        /* also consider unreachable route */
                        strict &= ~RT6_LOOKUP_F_REACHABLE;
                        fn = saved_fn;
                        goto redo_rt6_select;
                }
        }

        trace_fib6_table_lookup(net, res, table, fl6);

        return 0;
}
struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
                               int oif, struct flowi6 *fl6,
                               const struct sk_buff *skb, int flags)
{
        struct fib6_result res = {};
        struct rt6_info *rt = NULL;
        int strict = 0;

        WARN_ON_ONCE((flags & RT6_LOOKUP_F_DST_NOREF) &&
                     !rcu_read_lock_held());

        strict |= flags & RT6_LOOKUP_F_IFACE;
        strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
        if (net->ipv6.devconf_all->forwarding == 0)
                strict |= RT6_LOOKUP_F_REACHABLE;

        rcu_read_lock();

        fib6_table_lookup(net, table, oif, fl6, &res, strict);
        if (res.f6i == net->ipv6.fib6_null_entry)
                goto out;

        fib6_select_path(net, &res, fl6, oif, false, skb, strict);

        /* Search through exception table */
        rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
        if (rt) {
                goto out;
        } else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
                            !res.nh->fib_nh_gw_family)) {
                /* Create a RTF_CACHE clone which will not be
                 * owned by the fib6 tree.  It is for the special case where
                 * the daddr in the skb during the neighbor look-up is different
                 * from the fl6->daddr used to look-up route here.
                 */
                rt = ip6_rt_cache_alloc(&res, &fl6->daddr, NULL);

                if (rt) {
                        /* 1 refcnt is taken during ip6_rt_cache_alloc().
                         * As rt6_uncached_list_add() does not consume refcnt,
                         * this refcnt is always returned to the caller even
                         * if caller sets RT6_LOOKUP_F_DST_NOREF flag.
                         */
                        rt6_uncached_list_add(rt);
                        rcu_read_unlock();

                        return rt;
                }
        } else {
                /* Get a percpu copy */
                local_bh_disable();
                rt = rt6_get_pcpu_route(&res);

                if (!rt)
                        rt = rt6_make_pcpu_route(net, &res);

                local_bh_enable();
        }
out:
        if (!rt)
                rt = net->ipv6.ip6_null_entry;
        if (!(flags & RT6_LOOKUP_F_DST_NOREF))
                ip6_hold_safe(net, &rt);
        rcu_read_unlock();

        return rt;
}
EXPORT_SYMBOL_GPL(ip6_pol_route);
INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_input(struct net *net,
                                            struct fib6_table *table,
                                            struct flowi6 *fl6,
                                            const struct sk_buff *skb,
                                            int flags)
{
        return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags);
}

struct dst_entry *ip6_route_input_lookup(struct net *net,
                                         struct net_device *dev,
                                         struct flowi6 *fl6,
                                         const struct sk_buff *skb,
                                         int flags)
{
        if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
                flags |= RT6_LOOKUP_F_IFACE;

        return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input);
}
EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
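/* For ICMPv6 errors the L3 keys are taken from the offending (inner)
 * packet when one can be dissected, so that errors hash onto the same
 * multipath route as the flow they report on.
 */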
static void ip6_multipath_l3_keys(const struct sk_buff *skb,
                                  struct flow_keys *keys,
                                  struct flow_keys *flkeys)
{
        const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
        const struct ipv6hdr *key_iph = outer_iph;
        struct flow_keys *_flkeys = flkeys;
        const struct ipv6hdr *inner_iph;
        const struct icmp6hdr *icmph;
        struct ipv6hdr _inner_iph;
        struct icmp6hdr _icmph;

        if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
                goto out;

        icmph = skb_header_pointer(skb, skb_transport_offset(skb),
                                   sizeof(_icmph), &_icmph);
        if (!icmph)
                goto out;

        if (!icmpv6_is_err(icmph->icmp6_type))
                goto out;

        inner_iph = skb_header_pointer(skb,
                                       skb_transport_offset(skb) + sizeof(*icmph),
                                       sizeof(_inner_iph), &_inner_iph);
        if (!inner_iph)
                goto out;

        key_iph = inner_iph;
        _flkeys = NULL;
out:
        if (_flkeys) {
                keys->addrs.v6addrs.src = _flkeys->addrs.v6addrs.src;
                keys->addrs.v6addrs.dst = _flkeys->addrs.v6addrs.dst;
                keys->tags.flow_label = _flkeys->tags.flow_label;
                keys->basic.ip_proto = _flkeys->basic.ip_proto;
        } else {
                keys->addrs.v6addrs.src = key_iph->saddr;
                keys->addrs.v6addrs.dst = key_iph->daddr;
                keys->tags.flow_label = ip6_flowlabel(key_iph);
                keys->basic.ip_proto = key_iph->nexthdr;
        }
}
static u32 rt6_multipath_custom_hash_outer(const struct net *net,
                                           const struct sk_buff *skb,
                                           bool *p_has_inner)
{
        u32 hash_fields = ip6_multipath_hash_fields(net);
        struct flow_keys keys, hash_keys;

        if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
                return 0;

        memset(&hash_keys, 0, sizeof(hash_keys));
        skb_flow_dissect_flow_keys(skb, &keys, FLOW_DISSECTOR_F_STOP_AT_ENCAP);

        hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
        if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
                hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
        if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
                hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
        if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
                hash_keys.basic.ip_proto = keys.basic.ip_proto;
        if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
                hash_keys.tags.flow_label = keys.tags.flow_label;
        if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
                hash_keys.ports.src = keys.ports.src;
        if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
                hash_keys.ports.dst = keys.ports.dst;

        *p_has_inner = !!(keys.control.flags & FLOW_DIS_ENCAPSULATION);
        return flow_hash_from_keys(&hash_keys);
}

static u32 rt6_multipath_custom_hash_inner(const struct net *net,
                                           const struct sk_buff *skb,
                                           bool has_inner)
{
        u32 hash_fields = ip6_multipath_hash_fields(net);
        struct flow_keys keys, hash_keys;

        /* We assume the packet carries an encapsulation, but if none was
         * encountered during dissection of the outer flow, then there is no
         * point in calling the flow dissector again.
         */
        if (!has_inner)
                return 0;

        if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK))
                return 0;

        memset(&hash_keys, 0, sizeof(hash_keys));
        skb_flow_dissect_flow_keys(skb, &keys, 0);

        if (!(keys.control.flags & FLOW_DIS_ENCAPSULATION))
                return 0;

        if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
                if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
                        hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
                if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
                        hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
        } else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
                hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
                if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
                        hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
                if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
                        hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
                if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
                        hash_keys.tags.flow_label = keys.tags.flow_label;
        }

        if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
                hash_keys.basic.ip_proto = keys.basic.ip_proto;
        if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
                hash_keys.ports.src = keys.ports.src;
        if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
                hash_keys.ports.dst = keys.ports.dst;

        return flow_hash_from_keys(&hash_keys);
}
static u32 rt6_multipath_custom_hash_skb(const struct net *net,
                                         const struct sk_buff *skb)
{
        u32 mhash, mhash_inner;
        bool has_inner = true;

        mhash = rt6_multipath_custom_hash_outer(net, skb, &has_inner);
        mhash_inner = rt6_multipath_custom_hash_inner(net, skb, has_inner);

        return jhash_2words(mhash, mhash_inner, 0);
}

static u32 rt6_multipath_custom_hash_fl6(const struct net *net,
                                         const struct flowi6 *fl6)
{
        u32 hash_fields = ip6_multipath_hash_fields(net);
        struct flow_keys hash_keys;

        if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
                return 0;

        memset(&hash_keys, 0, sizeof(hash_keys));
        hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
        if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
                hash_keys.addrs.v6addrs.src = fl6->saddr;
        if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
                hash_keys.addrs.v6addrs.dst = fl6->daddr;
        if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
                hash_keys.basic.ip_proto = fl6->flowi6_proto;
        if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
                hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
        if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
                hash_keys.ports.src = fl6->fl6_sport;
        if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
                hash_keys.ports.dst = fl6->fl6_dport;

        return flow_hash_from_keys(&hash_keys);
}
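/* Hash policies (net.ipv6.fib_multipath_hash_policy): 0 hashes on the
 * outer L3 header, 1 on L4 when available, 2 on the inner L3 header of
 * encapsulated packets (falling back to outer L3), and 3 on the custom
 * field set configured via fib_multipath_hash_fields.
 */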
2458 /* if skb is set it will be used and fl6 can be NULL */
2459 u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
2460 const struct sk_buff *skb, struct flow_keys *flkeys)
2462 struct flow_keys hash_keys;
2463 u32 mhash = 0;
2464 
2465 switch (ip6_multipath_hash_policy(net)) {
2466 case 0:
2467 memset(&hash_keys, 0, sizeof(hash_keys));
2468 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2469 if (skb) {
2470 ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
2471 } else {
2472 hash_keys.addrs.v6addrs.src = fl6->saddr;
2473 hash_keys.addrs.v6addrs.dst = fl6->daddr;
2474 hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
2475 hash_keys.basic.ip_proto = fl6->flowi6_proto;
2476 }
2477 mhash = flow_hash_from_keys(&hash_keys);
2478 break;
2479 case 1:
2480 if (skb) {
2481 unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
2482 struct flow_keys keys;
2483 
2484 /* short-circuit if we already have L4 hash present */
2485 if (skb->l4_hash)
2486 return skb_get_hash_raw(skb) >> 1;
2487 
2488 memset(&hash_keys, 0, sizeof(hash_keys));
2489 
2490 if (!flkeys) {
2491 skb_flow_dissect_flow_keys(skb, &keys, flag);
2492 flkeys = &keys;
2493 }
2494 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2495 hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
2496 hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
2497 hash_keys.ports.src = flkeys->ports.src;
2498 hash_keys.ports.dst = flkeys->ports.dst;
2499 hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2500 } else {
2501 memset(&hash_keys, 0, sizeof(hash_keys));
2502 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2503 hash_keys.addrs.v6addrs.src = fl6->saddr;
2504 hash_keys.addrs.v6addrs.dst = fl6->daddr;
2505 hash_keys.ports.src = fl6->fl6_sport;
2506 hash_keys.ports.dst = fl6->fl6_dport;
2507 hash_keys.basic.ip_proto = fl6->flowi6_proto;
2508 }
2509 mhash = flow_hash_from_keys(&hash_keys);
2510 break;
2511 case 2:
2512 memset(&hash_keys, 0, sizeof(hash_keys));
2513 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2514 if (skb) {
2515 struct flow_keys keys;
2516 
2517 if (!flkeys) {
2518 skb_flow_dissect_flow_keys(skb, &keys, 0);
2519 flkeys = &keys;
2520 }
2521 
2522 /* Inner can be v4 or v6 */
2523 if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2524 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2525 hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
2526 hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
2527 } else if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2528 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2529 hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
2530 hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
2531 hash_keys.tags.flow_label = flkeys->tags.flow_label;
2532 hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2533 } else {
2534 /* Same as case 0 */
2535 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2536 ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
2537 }
2538 } else {
2539 /* Same as case 0 */
2540 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2541 hash_keys.addrs.v6addrs.src = fl6->saddr;
2542 hash_keys.addrs.v6addrs.dst = fl6->daddr;
2543 hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
2544 hash_keys.basic.ip_proto = fl6->flowi6_proto;
2545 }
2546 mhash = flow_hash_from_keys(&hash_keys);
2547 break;
2548 case 3:
2549 if (skb)
2550 mhash = rt6_multipath_custom_hash_skb(net, skb);
2551 else
2552 mhash = rt6_multipath_custom_hash_fl6(net, fl6);
2553 break;
2554 }
2555 
2556 return mhash >> 1;
2557 }
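/* Policy summary for net.ipv6.fib_multipath_hash_policy, matching the
 * switch above: 0 - L3 (addresses, flow label, protocol); 1 - L4
 * five-tuple; 2 - L3 taken from the inner packet when an encapsulation
 * is present; 3 - custom field set via fib_multipath_hash_fields.
 * The final ">> 1" keeps the hash within 31 bits so it stays comparable
 * with the nexthop upper bounds computed in rt6_upper_bound_set().
 */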
2559 /* Called with rcu held */
2560 void ip6_route_input(struct sk_buff *skb)
2562 const struct ipv6hdr *iph = ipv6_hdr(skb);
2563 struct net *net = dev_net(skb->dev);
2564 int flags = RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_DST_NOREF;
2565 struct ip_tunnel_info *tun_info;
2566 struct flowi6 fl6 = {
2567 .flowi6_iif = skb->dev->ifindex,
2568 .daddr = iph->daddr,
2569 .saddr = iph->saddr,
2570 .flowlabel = ip6_flowinfo(iph),
2571 .flowi6_mark = skb->mark,
2572 .flowi6_proto = iph->nexthdr,
2574 struct flow_keys *flkeys = NULL, _flkeys;
2576 tun_info = skb_tunnel_info(skb);
2577 if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
2578 fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
2580 if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys))
2581 flkeys = &_flkeys;
2582 
2583 if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
2584 fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys);
2586 skb_dst_set_noref(skb, ip6_route_input_lookup(net, skb->dev,
2587 &fl6, skb, flags));
2588 }
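/* Note on RT6_LOOKUP_F_DST_NOREF above: the result is attached with
 * skb_dst_set_noref(), i.e. without taking a dst reference, which is
 * safe only because input processing runs under rcu_read_lock() for the
 * whole lifetime of this dst use.
 */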
2590 INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_output(struct net *net,
2591 struct fib6_table *table,
2593 const struct sk_buff *skb,
2596 return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags);
2599 struct dst_entry *ip6_route_output_flags_noref(struct net *net,
2600 const struct sock *sk,
2601 struct flowi6 *fl6, int flags)
2605 if (ipv6_addr_type(&fl6->daddr) &
2606 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)) {
2607 struct dst_entry *dst;
2609 /* This function does not take refcnt on the dst */
2610 dst = l3mdev_link_scope_lookup(net, fl6);
2611 if (dst)
2612 return dst;
2613 }
2614 
2615 fl6->flowi6_iif = LOOPBACK_IFINDEX;
2617 flags |= RT6_LOOKUP_F_DST_NOREF;
2618 any_src = ipv6_addr_any(&fl6->saddr);
2619 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
2620 (fl6->flowi6_oif && any_src))
2621 flags |= RT6_LOOKUP_F_IFACE;
2623 if (!any_src)
2624 flags |= RT6_LOOKUP_F_HAS_SADDR;
2625 else if (sk)
2626 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
2627 
2628 return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output);
2630 EXPORT_SYMBOL_GPL(ip6_route_output_flags_noref);
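/* ip6_route_output_flags() below is the reference-taking wrapper around
 * the _noref variant above: callers that may hold the dst beyond the
 * RCU read-side section must use it so a refcount is acquired before
 * rcu_read_unlock().
 */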
2632 struct dst_entry *ip6_route_output_flags(struct net *net,
2633 const struct sock *sk,
2634 struct flowi6 *fl6,
2635 int flags)
2636 {
2637 struct dst_entry *dst;
2638 struct rt6_info *rt6;
2639 
2640 rcu_read_lock();
2641 dst = ip6_route_output_flags_noref(net, sk, fl6, flags);
2642 rt6 = (struct rt6_info *)dst;
2643 /* For dst cached in uncached_list, refcnt is already taken. */
2644 if (list_empty(&rt6->rt6i_uncached) && !dst_hold_safe(dst)) {
2645 dst = &net->ipv6.ip6_null_entry->dst;
2646 dst_hold(dst);
2647 }
2648 rcu_read_unlock();
2649 
2650 return dst;
2651 }
2652 EXPORT_SYMBOL_GPL(ip6_route_output_flags);
2654 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2656 struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
2657 struct net_device *loopback_dev = net->loopback_dev;
2658 struct dst_entry *new = NULL;
2660 rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
2661 DST_OBSOLETE_DEAD, 0);
2662 if (rt) {
2663 rt6_info_init(rt);
2664 atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
2665 
2666 new = &rt->dst;
2667 new->__use = 1;
2668 new->input = dst_discard;
2669 new->output = dst_discard_out;
2670 
2671 dst_copy_metrics(new, &ort->dst);
2672 
2673 rt->rt6i_idev = in6_dev_get(loopback_dev);
2674 rt->rt6i_gateway = ort->rt6i_gateway;
2675 rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
2676 
2677 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
2678 #ifdef CONFIG_IPV6_SUBTREES
2679 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
2680 #endif
2681 }
2682 
2683 dst_release(dst_orig);
2684 return new ? new : ERR_PTR(-ENOMEM);
2685 }
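/* The blackhole dst built above discards traffic in both directions
 * (dst_discard/dst_discard_out) while keeping the original route's
 * metrics; it is typically handed out by the xfrm layer when packets
 * must be held back, e.g. while an IPsec state is still being resolved.
 */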
2687 /*
2688 * Destination cache support functions
2689 */
2691 static bool fib6_check(struct fib6_info *f6i, u32 cookie)
2692 {
2693 u32 rt_cookie = 0;
2694 
2695 if (!fib6_get_cookie_safe(f6i, &rt_cookie) || rt_cookie != cookie)
2696 return false;
2697 
2698 if (fib6_check_expired(f6i))
2699 return false;
2700 
2701 return true;
2702 }
2704 static struct dst_entry *rt6_check(struct rt6_info *rt,
2705 struct fib6_info *from,
2706 u32 cookie)
2707 {
2708 u32 rt_cookie = 0;
2709 
2710 if (!from || !fib6_get_cookie_safe(from, &rt_cookie) ||
2711 rt_cookie != cookie)
2712 return NULL;
2713 
2714 if (rt6_check_expired(rt))
2715 return NULL;
2716 
2717 return &rt->dst;
2718 }
2720 static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt,
2721 struct fib6_info *from,
2722 u32 cookie)
2723 {
2724 if (!__rt6_check_expired(rt) &&
2725 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
2726 fib6_check(from, cookie))
2727 return &rt->dst;
2728 else
2729 return NULL;
2730 }
2732 INDIRECT_CALLABLE_SCOPE struct dst_entry *ip6_dst_check(struct dst_entry *dst,
2733 u32 cookie)
2734 {
2735 struct dst_entry *dst_ret;
2736 struct fib6_info *from;
2737 struct rt6_info *rt;
2739 rt = container_of(dst, struct rt6_info, dst);
2740 
2741 if (rt->sernum)
2742 return rt6_is_valid(rt) ? dst : NULL;
2743 
2744 rcu_read_lock();
2746 /* All IPV6 dsts are created with ->obsolete set to the value
2747 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
2748 * into this function always.
2749 */
2751 from = rcu_dereference(rt->from);
2753 if (from && (rt->rt6i_flags & RTF_PCPU ||
2754 unlikely(!list_empty(&rt->rt6i_uncached))))
2755 dst_ret = rt6_dst_from_check(rt, from, cookie);
2756 else
2757 dst_ret = rt6_check(rt, from, cookie);
2758 
2759 rcu_read_unlock();
2760 
2761 return dst_ret;
2762 }
2763 EXPORT_INDIRECT_CALLABLE(ip6_dst_check);
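/* Validation sketch: a cached dst stays usable only while the cookie
 * derived from the fib6 tree generation (sernum) still matches and
 * neither the rt6_info nor its originating fib6_info has expired;
 * otherwise ip6_dst_check() returns NULL and the caller has to redo the
 * route lookup.
 */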
2765 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
2767 struct rt6_info *rt = (struct rt6_info *) dst;
2770 if (rt->rt6i_flags & RTF_CACHE) {
2772 if (rt6_check_expired(rt)) {
2773 rt6_remove_exception_rt(rt);
2785 static void ip6_link_failure(struct sk_buff *skb)
2787 struct rt6_info *rt;
2789 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
2791 rt = (struct rt6_info *) skb_dst(skb);
2794 if (rt->rt6i_flags & RTF_CACHE) {
2795 rt6_remove_exception_rt(rt);
2797 struct fib6_info *from;
2798 struct fib6_node *fn;
2800 from = rcu_dereference(rt->from);
2802 fn = rcu_dereference(from->fib6_node);
2803 if (fn && (rt->rt6i_flags & RTF_DEFAULT))
2804 WRITE_ONCE(fn->fn_sernum, -1);
2811 static void rt6_update_expires(struct rt6_info *rt0, int timeout)
2813 if (!(rt0->rt6i_flags & RTF_EXPIRES)) {
2814 struct fib6_info *from;
2815 
2816 rcu_read_lock();
2817 from = rcu_dereference(rt0->from);
2818 if (from)
2819 rt0->dst.expires = from->expires;
2820 rcu_read_unlock();
2821 }
2823 dst_set_expires(&rt0->dst, timeout);
2824 rt0->rt6i_flags |= RTF_EXPIRES;
2827 static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
2829 struct net *net = dev_net(rt->dst.dev);
2831 dst_metric_set(&rt->dst, RTAX_MTU, mtu);
2832 rt->rt6i_flags |= RTF_MODIFIED;
2833 rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
2836 static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
2838 return !(rt->rt6i_flags & RTF_CACHE) &&
2839 (rt->rt6i_flags & RTF_PCPU || rcu_access_pointer(rt->from));
2842 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
2843 const struct ipv6hdr *iph, u32 mtu,
2844 bool confirm_neigh)
2845 {
2846 const struct in6_addr *daddr, *saddr;
2847 struct rt6_info *rt6 = (struct rt6_info *)dst;
2849 /* Note: do *NOT* check dst_metric_locked(dst, RTAX_MTU)
2850 * IPv6 pmtu discovery isn't optional, so 'mtu lock' cannot disable it.
2851 * [see also comment in rt6_mtu_change_route()]
2852 */
2853 
2854 if (iph) {
2855 daddr = &iph->daddr;
2856 saddr = &iph->saddr;
2857 } else if (sk) {
2858 daddr = &sk->sk_v6_daddr;
2859 saddr = &inet6_sk(sk)->saddr;
2860 } else {
2861 daddr = NULL;
2862 saddr = NULL;
2863 }
2864 
2865 if (confirm_neigh)
2866 dst_confirm_neigh(dst, daddr);
2868 if (mtu < IPV6_MIN_MTU)
2869 return;
2870 if (mtu >= dst_mtu(dst))
2871 return;
2872 
2873 if (!rt6_cache_allowed_for_pmtu(rt6)) {
2874 rt6_do_update_pmtu(rt6, mtu);
2875 /* update rt6_ex->stamp for cache */
2876 if (rt6->rt6i_flags & RTF_CACHE)
2877 rt6_update_exception_stamp_rt(rt6);
2879 struct fib6_result res = {};
2880 struct rt6_info *nrt6;
2881 
2882 rcu_read_lock();
2883 res.f6i = rcu_dereference(rt6->from);
2884 if (!res.f6i)
2885 goto out_unlock;
2886 
2887 res.fib6_flags = res.f6i->fib6_flags;
2888 res.fib6_type = res.f6i->fib6_type;
2889 
2890 if (res.f6i->nh) {
2891 struct fib6_nh_match_arg arg = {
2892 .dev = dst->dev,
2893 .gw = &rt6->rt6i_gateway,
2894 };
2895 
2896 nexthop_for_each_fib6_nh(res.f6i->nh,
2897 fib6_nh_find_match, &arg);
2898 
2899 /* fib6_info uses a nexthop that does not have fib6_nh
2900 * using the dst->dev + gw. Should be impossible.
2901 */
2902 if (!arg.match)
2903 goto out_unlock;
2904 
2905 res.nh = arg.match;
2906 } else {
2907 res.nh = res.f6i->fib6_nh;
2908 }
2909 
2910 nrt6 = ip6_rt_cache_alloc(&res, daddr, saddr);
2911 if (nrt6) {
2912 rt6_do_update_pmtu(nrt6, mtu);
2913 if (rt6_insert_exception(nrt6, &res))
2914 dst_release_immediate(&nrt6->dst);
2915 }
2916 out_unlock:
2917 rcu_read_unlock();
2918 }
2919 }
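/* PMTU handling above in short: routes that must not be cached get
 * their MTU metric updated in place, while cacheable ones receive a
 * per-destination RTF_CACHE exception carrying the reduced MTU, which
 * ages out after net.ipv6.route.mtu_expires seconds.
 */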
2921 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
2922 struct sk_buff *skb, u32 mtu,
2923 bool confirm_neigh)
2924 {
2925 __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu,
2926 confirm_neigh);
2927 }
2929 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
2930 int oif, u32 mark, kuid_t uid)
2932 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
2933 struct dst_entry *dst;
2934 struct flowi6 fl6 = {
2935 .flowi6_oif = oif,
2936 .flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark),
2937 .daddr = iph->daddr,
2938 .saddr = iph->saddr,
2939 .flowlabel = ip6_flowinfo(iph),
2940 .flowi6_uid = uid,
2941 };
2942 
2943 dst = ip6_route_output(net, NULL, &fl6);
2944 if (!dst->error)
2945 __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu), true);
2946 dst_release(dst);
2947 }
2948 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
2950 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
2952 int oif = sk->sk_bound_dev_if;
2953 struct dst_entry *dst;
2955 if (!oif && skb->dev)
2956 oif = l3mdev_master_ifindex(skb->dev);
2958 ip6_update_pmtu(skb, sock_net(sk), mtu, oif, READ_ONCE(sk->sk_mark),
2961 dst = __sk_dst_get(sk);
2962 if (!dst || !dst->obsolete ||
2963 dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
2967 if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
2968 ip6_datagram_dst_update(sk, false);
2971 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
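/* ip6_sk_update_pmtu() above is the socket-aware entry point used from
 * ICMPv6 "Packet Too Big" handling: it updates the route first, then
 * refreshes the socket's cached dst if the update invalidated it.
 */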
2973 void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
2974 const struct flowi6 *fl6)
2976 #ifdef CONFIG_IPV6_SUBTREES
2977 struct ipv6_pinfo *np = inet6_sk(sk);
2980 ip6_dst_store(sk, dst,
2981 ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr) ?
2982 &sk->sk_v6_daddr : NULL,
2983 #ifdef CONFIG_IPV6_SUBTREES
2984 ipv6_addr_equal(&fl6->saddr, &np->saddr) ?
2990 static bool ip6_redirect_nh_match(const struct fib6_result *res,
2992 const struct in6_addr *gw,
2993 struct rt6_info **ret)
2995 const struct fib6_nh *nh = res->nh;
2997 if (nh->fib_nh_flags & RTNH_F_DEAD || !nh->fib_nh_gw_family ||
2998 fl6->flowi6_oif != nh->fib_nh_dev->ifindex)
3001 /* rt_cache's gateway might be different from its 'parent'
3002 * in the case of an ip redirect.
3003 * So we keep searching in the exception table if the gateway
3004 * is different.
3005 */
3006 if (!ipv6_addr_equal(gw, &nh->fib_nh_gw6)) {
3007 struct rt6_info *rt_cache;
3009 rt_cache = rt6_find_cached_rt(res, &fl6->daddr, &fl6->saddr);
3011 ipv6_addr_equal(gw, &rt_cache->rt6i_gateway)) {
3020 struct fib6_nh_rd_arg {
3021 struct fib6_result *res;
3023 const struct in6_addr *gw;
3024 struct rt6_info **ret;
3027 static int fib6_nh_redirect_match(struct fib6_nh *nh, void *_arg)
3029 struct fib6_nh_rd_arg *arg = _arg;
3032 return ip6_redirect_nh_match(arg->res, arg->fl6, arg->gw, arg->ret);
3035 /* Handle redirects */
3036 struct ip6rd_flowi {
3038 struct in6_addr gateway;
3041 INDIRECT_CALLABLE_SCOPE struct rt6_info *__ip6_route_redirect(struct net *net,
3042 struct fib6_table *table,
3044 const struct sk_buff *skb,
3047 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
3048 struct rt6_info *ret = NULL;
3049 struct fib6_result res = {};
3050 struct fib6_nh_rd_arg arg = {
3053 .gw = &rdfl->gateway,
3056 struct fib6_info *rt;
3057 struct fib6_node *fn;
3059 /* Get the "current" route for this destination and
3060 * check if the redirect has come from the appropriate router.
3061 *
3062 * RFC 4861 specifies that redirects should only be
3063 * accepted if they come from the nexthop to the target.
3064 * Due to the way the routes are chosen, this notion
3065 * is a bit fuzzy and one might need to check all possible
3066 * routes.
3067 */
3068 
3069 rcu_read_lock();
3070 fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
3071 restart:
3072 for_each_fib6_node_rt_rcu(fn) {
3073 res.f6i = rt;
3074 if (fib6_check_expired(rt))
3075 continue;
3076 if (rt->fib6_flags & RTF_REJECT)
3077 break;
3078 if (unlikely(rt->nh)) {
3079 if (nexthop_is_blackhole(rt->nh))
3080 continue;
3081 /* on match, res->nh is filled in and potentially ret */
3082 if (nexthop_for_each_fib6_nh(rt->nh,
3083 fib6_nh_redirect_match,
3084 &arg))
3085 goto out;
3086 } else {
3087 res.nh = rt->fib6_nh;
3088 if (ip6_redirect_nh_match(&res, fl6, &rdfl->gateway,
3089 &ret))
3090 goto out;
3091 }
3092 }
3094 if (!rt)
3095 rt = net->ipv6.fib6_null_entry;
3096 else if (rt->fib6_flags & RTF_REJECT) {
3097 ret = net->ipv6.ip6_null_entry;
3098 goto out;
3099 }
3100 
3101 if (rt == net->ipv6.fib6_null_entry) {
3102 fn = fib6_backtrack(fn, &fl6->saddr);
3103 if (fn)
3104 goto restart;
3105 }
3106 
3107 res.f6i = rt;
3108 res.nh = rt->fib6_nh;
3109 out:
3110 if (ret) {
3111 ip6_hold_safe(net, &ret);
3112 } else {
3113 res.fib6_flags = res.f6i->fib6_flags;
3114 res.fib6_type = res.f6i->fib6_type;
3115 ret = ip6_create_rt_rcu(&res);
3116 }
3117 
3118 rcu_read_unlock();
3119 
3120 trace_fib6_table_lookup(net, &res, table, fl6);
3121 return ret;
3122 }
3124 static struct dst_entry *ip6_route_redirect(struct net *net,
3125 const struct flowi6 *fl6,
3126 const struct sk_buff *skb,
3127 const struct in6_addr *gateway)
3129 int flags = RT6_LOOKUP_F_HAS_SADDR;
3130 struct ip6rd_flowi rdfl;
3133 rdfl.gateway = *gateway;
3135 return fib6_rule_lookup(net, &rdfl.fl6, skb,
3136 flags, __ip6_route_redirect);
3139 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
3142 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
3143 struct dst_entry *dst;
3144 struct flowi6 fl6 = {
3145 .flowi6_iif = LOOPBACK_IFINDEX,
3147 .flowi6_mark = mark,
3148 .daddr = iph->daddr,
3149 .saddr = iph->saddr,
3150 .flowlabel = ip6_flowinfo(iph),
3154 dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr);
3155 rt6_do_redirect(dst, NULL, skb);
3158 EXPORT_SYMBOL_GPL(ip6_redirect);
3160 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif)
3162 const struct ipv6hdr *iph = ipv6_hdr(skb);
3163 const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
3164 struct dst_entry *dst;
3165 struct flowi6 fl6 = {
3166 .flowi6_iif = LOOPBACK_IFINDEX,
3169 .saddr = iph->daddr,
3170 .flowi6_uid = sock_net_uid(net, NULL),
3173 dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr);
3174 rt6_do_redirect(dst, NULL, skb);
3178 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
3180 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if,
3181 READ_ONCE(sk->sk_mark), sk->sk_uid);
3183 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
3185 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
3187 struct net_device *dev = dst->dev;
3188 unsigned int mtu = dst_mtu(dst);
3189 struct net *net = dev_net(dev);
3191 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
3193 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
3194 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
3196 /*
3197 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
3198 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
3199 * IPV6_MAXPLEN is also valid and means: "any MSS,
3200 * rely only on pmtu discovery"
3201 */
3202 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
3203 mtu = IPV6_MAXPLEN - sizeof(struct tcphdr);
3204 return mtu;
3205 }
3207 INDIRECT_CALLABLE_SCOPE unsigned int ip6_mtu(const struct dst_entry *dst)
3209 return ip6_dst_mtu_maybe_forward(dst, false);
3211 EXPORT_INDIRECT_CALLABLE(ip6_mtu);
3213 /* MTU selection:
3214 * 1. mtu on route is locked - use it
3215 * 2. mtu from nexthop exception
3216 * 3. mtu from egress device
3217 *
3218 * based on ip6_dst_mtu_forward and exception logic of
3219 * rt6_find_cached_rt; called with rcu_read_lock
3220 */
3221 u32 ip6_mtu_from_fib6(const struct fib6_result *res,
3222 const struct in6_addr *daddr,
3223 const struct in6_addr *saddr)
3225 const struct fib6_nh *nh = res->nh;
3226 struct fib6_info *f6i = res->f6i;
3227 struct inet6_dev *idev;
3228 struct rt6_info *rt;
3229 u32 mtu = 0;
3230 
3231 if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) {
3232 mtu = f6i->fib6_pmtu;
3233 if (mtu)
3234 goto out;
3235 }
3236 
3237 rt = rt6_find_cached_rt(res, daddr, saddr);
3238 if (rt) {
3239 mtu = dst_metric_raw(&rt->dst, RTAX_MTU);
3240 } else {
3241 struct net_device *dev = nh->fib_nh_dev;
3242 
3243 mtu = IPV6_MIN_MTU;
3244 idev = __in6_dev_get(dev);
3245 if (idev && idev->cnf.mtu6 > mtu)
3246 mtu = idev->cnf.mtu6;
3247 }
3248 
3249 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
3250 out:
3251 return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
3254 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
3255 struct flowi6 *fl6)
3256 {
3257 struct dst_entry *dst;
3258 struct rt6_info *rt;
3259 struct inet6_dev *idev = in6_dev_get(dev);
3260 struct net *net = dev_net(dev);
3262 if (unlikely(!idev))
3263 return ERR_PTR(-ENODEV);
3265 rt = ip6_dst_alloc(net, dev, 0);
3266 if (unlikely(!rt)) {
3267 in6_dev_put(idev);
3268 dst = ERR_PTR(-ENOMEM);
3269 goto out;
3270 }
3272 rt->dst.input = ip6_input;
3273 rt->dst.output = ip6_output;
3274 rt->rt6i_gateway = fl6->daddr;
3275 rt->rt6i_dst.addr = fl6->daddr;
3276 rt->rt6i_dst.plen = 128;
3277 rt->rt6i_idev = idev;
3278 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
3280 /* Add this dst into uncached_list so that rt6_disable_ip() can
3281 * do proper release of the net_device
3283 rt6_uncached_list_add(rt);
3285 dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
3286 
3287 out:
3288 return dst;
3289 }
3291 static void ip6_dst_gc(struct dst_ops *ops)
3293 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
3294 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
3295 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
3296 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
3297 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
3298 unsigned int val;
3299 int entries;
3300 
3301 entries = dst_entries_get_fast(ops);
3302 if (entries > ops->gc_thresh)
3303 entries = dst_entries_get_slow(ops);
3304 
3305 if (time_after(rt_last_gc + rt_min_interval, jiffies))
3306 goto out;
3307 
3308 fib6_run_gc(atomic_inc_return(&net->ipv6.ip6_rt_gc_expire), net, true);
3309 entries = dst_entries_get_slow(ops);
3310 if (entries < ops->gc_thresh)
3311 atomic_set(&net->ipv6.ip6_rt_gc_expire, rt_gc_timeout >> 1);
3312 out:
3313 val = atomic_read(&net->ipv6.ip6_rt_gc_expire);
3314 atomic_set(&net->ipv6.ip6_rt_gc_expire, val - (val >> rt_elasticity));
3315 }
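/* GC pacing sketch: ip6_rt_gc_expire is bumped on every invocation and
 * decayed by val >> gc_elasticity afterwards, so sustained pressure ages
 * cached entries progressively more aggressively; once the entry count
 * drops below gc_thresh it is reset to half of gc_timeout. All knobs
 * live under the net.ipv6.route.* sysctls.
 */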
3317 static int ip6_nh_lookup_table(struct net *net, struct fib6_config *cfg,
3318 const struct in6_addr *gw_addr, u32 tbid,
3319 int flags, struct fib6_result *res)
3321 struct flowi6 fl6 = {
3322 .flowi6_oif = cfg->fc_ifindex,
3324 .saddr = cfg->fc_prefsrc,
3326 struct fib6_table *table;
3329 table = fib6_get_table(net, tbid);
3333 if (!ipv6_addr_any(&cfg->fc_prefsrc))
3334 flags |= RT6_LOOKUP_F_HAS_SADDR;
3336 flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE;
3338 err = fib6_table_lookup(net, table, cfg->fc_ifindex, &fl6, res, flags);
3339 if (!err && res->f6i != net->ipv6.fib6_null_entry)
3340 fib6_select_path(net, res, &fl6, cfg->fc_ifindex,
3341 cfg->fc_ifindex != 0, NULL, flags);
3346 static int ip6_route_check_nh_onlink(struct net *net,
3347 struct fib6_config *cfg,
3348 const struct net_device *dev,
3349 struct netlink_ext_ack *extack)
3351 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
3352 const struct in6_addr *gw_addr = &cfg->fc_gateway;
3353 struct fib6_result res = {};
3356 err = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0, &res);
3357 if (!err && !(res.fib6_flags & RTF_REJECT) &&
3358 /* ignore match if it is the default route */
3359 !ipv6_addr_any(&res.f6i->fib6_dst.addr) &&
3360 (res.fib6_type != RTN_UNICAST || dev != res.nh->fib_nh_dev)) {
3361 NL_SET_ERR_MSG(extack,
3362 "Nexthop has invalid gateway or device mismatch");
3369 static int ip6_route_check_nh(struct net *net,
3370 struct fib6_config *cfg,
3371 struct net_device **_dev,
3372 struct inet6_dev **idev)
3374 const struct in6_addr *gw_addr = &cfg->fc_gateway;
3375 struct net_device *dev = _dev ? *_dev : NULL;
3376 int flags = RT6_LOOKUP_F_IFACE;
3377 struct fib6_result res = {};
3378 int err = -EHOSTUNREACH;
3380 if (cfg->fc_table) {
3381 err = ip6_nh_lookup_table(net, cfg, gw_addr,
3382 cfg->fc_table, flags, &res);
3383 /* gw_addr can not require a gateway or resolve to a reject
3384 * route. If a device is given, it must match the result.
3386 if (err || res.fib6_flags & RTF_REJECT ||
3387 res.nh->fib_nh_gw_family ||
3388 (dev && dev != res.nh->fib_nh_dev))
3389 err = -EHOSTUNREACH;
3393 struct flowi6 fl6 = {
3394 .flowi6_oif = cfg->fc_ifindex,
3398 err = fib6_lookup(net, cfg->fc_ifindex, &fl6, &res, flags);
3399 if (err || res.fib6_flags & RTF_REJECT ||
3400 res.nh->fib_nh_gw_family)
3401 err = -EHOSTUNREACH;
3406 fib6_select_path(net, &res, &fl6, cfg->fc_ifindex,
3407 cfg->fc_ifindex != 0, NULL, flags);
3412 if (dev != res.nh->fib_nh_dev)
3413 err = -EHOSTUNREACH;
3415 *_dev = dev = res.nh->fib_nh_dev;
3417 *idev = in6_dev_get(dev);
3423 static int ip6_validate_gw(struct net *net, struct fib6_config *cfg,
3424 struct net_device **_dev, struct inet6_dev **idev,
3425 struct netlink_ext_ack *extack)
3427 const struct in6_addr *gw_addr = &cfg->fc_gateway;
3428 int gwa_type = ipv6_addr_type(gw_addr);
3429 bool skip_dev = gwa_type & IPV6_ADDR_LINKLOCAL ? false : true;
3430 const struct net_device *dev = *_dev;
3431 bool need_addr_check = !dev;
3434 /* if gw_addr is local we will fail to detect this in case
3435 * address is still TENTATIVE (DAD in progress). rt6_lookup()
3436 * will return already-added prefix route via interface that
3437 * prefix route was assigned to, which might be non-loopback.
3440 ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
3441 NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
3445 if (gwa_type != (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST)) {
3446 /* IPv6 strictly inhibits using non-link-local
3447 * addresses as a nexthop address.
3448 * Otherwise, the router will not be able to send redirects.
3449 * That is very good, but in some (rare!) circumstances
3450 * (SIT, PtP, NBMA NOARP links) it is handy to allow
3451 * some exceptions. --ANK
3452 * We allow IPv4-mapped nexthops to support RFC4798-type
3453 * addressing.
3454 */
3455 if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) {
3456 NL_SET_ERR_MSG(extack, "Invalid gateway address");
3462 if (cfg->fc_flags & RTNH_F_ONLINK)
3463 err = ip6_route_check_nh_onlink(net, cfg, dev, extack);
3465 err = ip6_route_check_nh(net, cfg, _dev, idev);
3473 /* reload in case device was changed */
3478 NL_SET_ERR_MSG(extack, "Egress device not specified");
3480 } else if (dev->flags & IFF_LOOPBACK) {
3481 NL_SET_ERR_MSG(extack,
3482 "Egress device can not be loopback device for this route");
3486 /* if we did not check gw_addr above, do so now that the
3487 * egress device has been resolved.
3489 if (need_addr_check &&
3490 ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
3491 NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
3500 static bool fib6_is_reject(u32 flags, struct net_device *dev, int addr_type)
3502 if ((flags & RTF_REJECT) ||
3503 (dev && (dev->flags & IFF_LOOPBACK) &&
3504 !(addr_type & IPV6_ADDR_LOOPBACK) &&
3505 !(flags & (RTF_ANYCAST | RTF_LOCAL))))
3511 int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
3512 struct fib6_config *cfg, gfp_t gfp_flags,
3513 struct netlink_ext_ack *extack)
3515 struct net_device *dev = NULL;
3516 struct inet6_dev *idev = NULL;
3520 fib6_nh->fib_nh_family = AF_INET6;
3521 #ifdef CONFIG_IPV6_ROUTER_PREF
3522 fib6_nh->last_probe = jiffies;
3524 if (cfg->fc_is_fdb) {
3525 fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
3526 fib6_nh->fib_nh_gw_family = AF_INET6;
3531 if (cfg->fc_ifindex) {
3532 dev = dev_get_by_index(net, cfg->fc_ifindex);
3535 idev = in6_dev_get(dev);
3540 if (cfg->fc_flags & RTNH_F_ONLINK) {
3542 NL_SET_ERR_MSG(extack,
3543 "Nexthop device required for onlink");
3547 if (!(dev->flags & IFF_UP)) {
3548 NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3553 fib6_nh->fib_nh_flags |= RTNH_F_ONLINK;
3556 fib6_nh->fib_nh_weight = 1;
3558 /* We cannot add true routes via loopback here,
3559 * they would result in kernel looping; promote them to reject routes
3561 addr_type = ipv6_addr_type(&cfg->fc_dst);
3562 if (fib6_is_reject(cfg->fc_flags, dev, addr_type)) {
3563 /* hold loopback dev/idev if we haven't done so. */
3564 if (dev != net->loopback_dev) {
3569 dev = net->loopback_dev;
3571 idev = in6_dev_get(dev);
3580 if (cfg->fc_flags & RTF_GATEWAY) {
3581 err = ip6_validate_gw(net, cfg, &dev, &idev, extack);
3585 fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
3586 fib6_nh->fib_nh_gw_family = AF_INET6;
3593 if (idev->cnf.disable_ipv6) {
3594 NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
3599 if (!(dev->flags & IFF_UP) && !cfg->fc_ignore_dev_down) {
3600 NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3605 if (!(cfg->fc_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
3606 !netif_carrier_ok(dev))
3607 fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
3609 err = fib_nh_common_init(net, &fib6_nh->nh_common, cfg->fc_encap,
3610 cfg->fc_encap_type, cfg, gfp_flags, extack);
3615 fib6_nh->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, gfp_flags);
3616 if (!fib6_nh->rt6i_pcpu) {
3621 fib6_nh->fib_nh_dev = dev;
3622 netdev_tracker_alloc(dev, &fib6_nh->fib_nh_dev_tracker, gfp_flags);
3624 fib6_nh->fib_nh_oif = dev->ifindex;
3631 lwtstate_put(fib6_nh->fib_nh_lws);
3632 fib6_nh->fib_nh_lws = NULL;
3639 void fib6_nh_release(struct fib6_nh *fib6_nh)
3641 struct rt6_exception_bucket *bucket;
3645 fib6_nh_flush_exceptions(fib6_nh, NULL);
3646 bucket = fib6_nh_get_excptn_bucket(fib6_nh, NULL);
3648 rcu_assign_pointer(fib6_nh->rt6i_exception_bucket, NULL);
3654 fib6_nh_release_dsts(fib6_nh);
3655 free_percpu(fib6_nh->rt6i_pcpu);
3657 fib_nh_common_release(&fib6_nh->nh_common);
3660 void fib6_nh_release_dsts(struct fib6_nh *fib6_nh)
3664 if (!fib6_nh->rt6i_pcpu)
3667 for_each_possible_cpu(cpu) {
3668 struct rt6_info *pcpu_rt, **ppcpu_rt;
3670 ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
3671 pcpu_rt = xchg(ppcpu_rt, NULL);
3673 dst_dev_put(&pcpu_rt->dst);
3674 dst_release(&pcpu_rt->dst);
3679 static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
3681 struct netlink_ext_ack *extack)
3683 struct net *net = cfg->fc_nlinfo.nl_net;
3684 struct fib6_info *rt = NULL;
3685 struct nexthop *nh = NULL;
3686 struct fib6_table *table;
3687 struct fib6_nh *fib6_nh;
3691 /* RTF_PCPU is an internal flag; can not be set by userspace */
3692 if (cfg->fc_flags & RTF_PCPU) {
3693 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
3697 /* RTF_CACHE is an internal flag; can not be set by userspace */
3698 if (cfg->fc_flags & RTF_CACHE) {
3699 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE");
3703 if (cfg->fc_type > RTN_MAX) {
3704 NL_SET_ERR_MSG(extack, "Invalid route type");
3708 if (cfg->fc_dst_len > 128) {
3709 NL_SET_ERR_MSG(extack, "Invalid prefix length");
3712 if (cfg->fc_src_len > 128) {
3713 NL_SET_ERR_MSG(extack, "Invalid source address length");
3716 #ifndef CONFIG_IPV6_SUBTREES
3717 if (cfg->fc_src_len) {
3718 NL_SET_ERR_MSG(extack,
3719 "Specifying source address requires IPV6_SUBTREES to be enabled");
3723 if (cfg->fc_nh_id) {
3724 nh = nexthop_find_by_id(net, cfg->fc_nh_id);
3726 NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
3729 err = fib6_check_nexthop(nh, cfg, extack);
3735 if (cfg->fc_nlinfo.nlh &&
3736 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
3737 table = fib6_get_table(net, cfg->fc_table);
3739 pr_warn("NLM_F_CREATE should be specified when creating new route\n");
3740 table = fib6_new_table(net, cfg->fc_table);
3743 table = fib6_new_table(net, cfg->fc_table);
3750 rt = fib6_info_alloc(gfp_flags, !nh);
3754 rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len,
3756 if (IS_ERR(rt->fib6_metrics)) {
3757 err = PTR_ERR(rt->fib6_metrics);
3758 /* Do not leave garbage there. */
3759 rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics;
3763 if (cfg->fc_flags & RTF_ADDRCONF)
3764 rt->dst_nocount = true;
3766 if (cfg->fc_flags & RTF_EXPIRES)
3767 fib6_set_expires(rt, jiffies +
3768 clock_t_to_jiffies(cfg->fc_expires));
3770 fib6_clean_expires(rt);
3772 if (cfg->fc_protocol == RTPROT_UNSPEC)
3773 cfg->fc_protocol = RTPROT_BOOT;
3774 rt->fib6_protocol = cfg->fc_protocol;
3776 rt->fib6_table = table;
3777 rt->fib6_metric = cfg->fc_metric;
3778 rt->fib6_type = cfg->fc_type ? : RTN_UNICAST;
3779 rt->fib6_flags = cfg->fc_flags & ~RTF_GATEWAY;
3781 ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
3782 rt->fib6_dst.plen = cfg->fc_dst_len;
3784 #ifdef CONFIG_IPV6_SUBTREES
3785 ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len);
3786 rt->fib6_src.plen = cfg->fc_src_len;
3789 if (rt->fib6_src.plen) {
3790 NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
3793 if (!nexthop_get(nh)) {
3794 NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
3798 fib6_nh = nexthop_fib6_nh(rt->nh);
3800 err = fib6_nh_init(net, rt->fib6_nh, cfg, gfp_flags, extack);
3804 fib6_nh = rt->fib6_nh;
3806 /* We cannot add true routes via loopback here, they would
3807 * result in kernel looping; promote them to reject routes
3809 addr_type = ipv6_addr_type(&cfg->fc_dst);
3810 if (fib6_is_reject(cfg->fc_flags, rt->fib6_nh->fib_nh_dev,
3812 rt->fib6_flags = RTF_REJECT | RTF_NONEXTHOP;
3815 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
3816 struct net_device *dev = fib6_nh->fib_nh_dev;
3818 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
3819 NL_SET_ERR_MSG(extack, "Invalid source address");
3823 rt->fib6_prefsrc.addr = cfg->fc_prefsrc;
3824 rt->fib6_prefsrc.plen = 128;
3826 rt->fib6_prefsrc.plen = 0;
3830 fib6_info_release(rt);
3831 return ERR_PTR(err);
3833 ip_fib_metrics_put(rt->fib6_metrics);
3835 return ERR_PTR(err);
3838 int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
3839 struct netlink_ext_ack *extack)
3841 struct fib6_info *rt;
3844 rt = ip6_route_info_create(cfg, gfp_flags, extack);
3848 err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
3849 fib6_info_release(rt);
3854 static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info)
3856 struct net *net = info->nl_net;
3857 struct fib6_table *table;
3860 if (rt == net->ipv6.fib6_null_entry) {
3865 table = rt->fib6_table;
3866 spin_lock_bh(&table->tb6_lock);
3867 err = fib6_del(rt, info);
3868 spin_unlock_bh(&table->tb6_lock);
3871 fib6_info_release(rt);
3875 int ip6_del_rt(struct net *net, struct fib6_info *rt, bool skip_notify)
3877 struct nl_info info = {
3879 .skip_notify = skip_notify
3882 return __ip6_del_rt(rt, &info);
3885 static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg)
3887 struct nl_info *info = &cfg->fc_nlinfo;
3888 struct net *net = info->nl_net;
3889 struct sk_buff *skb = NULL;
3890 struct fib6_table *table;
3893 if (rt == net->ipv6.fib6_null_entry)
3895 table = rt->fib6_table;
3896 spin_lock_bh(&table->tb6_lock);
3898 if (rt->fib6_nsiblings && cfg->fc_delete_all_nh) {
3899 struct fib6_info *sibling, *next_sibling;
3900 struct fib6_node *fn;
3902 /* prefer to send a single notification with all hops */
3903 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
3905 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
3907 if (rt6_fill_node(net, skb, rt, NULL,
3908 NULL, NULL, 0, RTM_DELROUTE,
3909 info->portid, seq, 0) < 0) {
3913 info->skip_notify = 1;
3916 /* 'rt' points to the first sibling route. If it is not the
3917 * leaf, then we do not need to send a notification. Otherwise,
3918 * we need to check if the last sibling has a next route or not
3919 * and emit a replace or delete notification, respectively.
3921 info->skip_notify_kernel = 1;
3922 fn = rcu_dereference_protected(rt->fib6_node,
3923 lockdep_is_held(&table->tb6_lock));
3924 if (rcu_access_pointer(fn->leaf) == rt) {
3925 struct fib6_info *last_sibling, *replace_rt;
3927 last_sibling = list_last_entry(&rt->fib6_siblings,
3930 replace_rt = rcu_dereference_protected(
3931 last_sibling->fib6_next,
3932 lockdep_is_held(&table->tb6_lock));
3934 call_fib6_entry_notifiers_replace(net,
3937 call_fib6_multipath_entry_notifiers(net,
3938 FIB_EVENT_ENTRY_DEL,
3939 rt, rt->fib6_nsiblings,
3942 list_for_each_entry_safe(sibling, next_sibling,
3945 err = fib6_del(sibling, info);
3951 err = fib6_del(rt, info);
3953 spin_unlock_bh(&table->tb6_lock);
3955 fib6_info_release(rt);
3958 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
3959 info->nlh, gfp_any());
3964 static int __ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
3965 {
3966 int rc = -ESRCH;
3967 
3968 if (cfg->fc_ifindex && rt->dst.dev->ifindex != cfg->fc_ifindex)
3969 goto out;
3970 
3971 if (cfg->fc_flags & RTF_GATEWAY &&
3972 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
3973 goto out;
3974 
3975 rc = rt6_remove_exception_rt(rt);
3976 out:
3977 return rc;
3978 }
3980 static int ip6_del_cached_rt(struct fib6_config *cfg, struct fib6_info *rt,
3983 struct fib6_result res = {
3987 struct rt6_info *rt_cache;
3989 rt_cache = rt6_find_cached_rt(&res, &cfg->fc_dst, &cfg->fc_src);
3991 return __ip6_del_cached_rt(rt_cache, cfg);
3996 struct fib6_nh_del_cached_rt_arg {
3997 struct fib6_config *cfg;
3998 struct fib6_info *f6i;
4001 static int fib6_nh_del_cached_rt(struct fib6_nh *nh, void *_arg)
4003 struct fib6_nh_del_cached_rt_arg *arg = _arg;
4006 rc = ip6_del_cached_rt(arg->cfg, arg->f6i, nh);
4007 return rc != -ESRCH ? rc : 0;
4010 static int ip6_del_cached_rt_nh(struct fib6_config *cfg, struct fib6_info *f6i)
4012 struct fib6_nh_del_cached_rt_arg arg = {
4017 return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_del_cached_rt, &arg);
4020 static int ip6_route_del(struct fib6_config *cfg,
4021 struct netlink_ext_ack *extack)
4023 struct fib6_table *table;
4024 struct fib6_info *rt;
4025 struct fib6_node *fn;
4028 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
4030 NL_SET_ERR_MSG(extack, "FIB table does not exist");
4036 fn = fib6_locate(&table->tb6_root,
4037 &cfg->fc_dst, cfg->fc_dst_len,
4038 &cfg->fc_src, cfg->fc_src_len,
4039 !(cfg->fc_flags & RTF_CACHE));
4042 for_each_fib6_node_rt_rcu(fn) {
4045 if (rt->nh && cfg->fc_nh_id &&
4046 rt->nh->id != cfg->fc_nh_id)
4049 if (cfg->fc_flags & RTF_CACHE) {
4053 rc = ip6_del_cached_rt_nh(cfg, rt);
4054 } else if (cfg->fc_nh_id) {
4058 rc = ip6_del_cached_rt(cfg, rt, nh);
4067 if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric)
4069 if (cfg->fc_protocol &&
4070 cfg->fc_protocol != rt->fib6_protocol)
4074 if (!fib6_info_hold_safe(rt))
4078 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
4084 if (cfg->fc_ifindex &&
4086 nh->fib_nh_dev->ifindex != cfg->fc_ifindex))
4088 if (cfg->fc_flags & RTF_GATEWAY &&
4089 !ipv6_addr_equal(&cfg->fc_gateway, &nh->fib_nh_gw6))
4091 if (!fib6_info_hold_safe(rt))
4095 /* if gateway was specified only delete the one hop */
4096 if (cfg->fc_flags & RTF_GATEWAY)
4097 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
4099 return __ip6_del_rt_siblings(rt, cfg);
4107 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
4109 struct netevent_redirect netevent;
4110 struct rt6_info *rt, *nrt = NULL;
4111 struct fib6_result res = {};
4112 struct ndisc_options ndopts;
4113 struct inet6_dev *in6_dev;
4114 struct neighbour *neigh;
4116 int optlen, on_link;
4119 optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
4120 optlen -= sizeof(*msg);
4123 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
4127 msg = (struct rd_msg *)icmp6_hdr(skb);
4129 if (ipv6_addr_is_multicast(&msg->dest)) {
4130 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
4135 if (ipv6_addr_equal(&msg->dest, &msg->target)) {
4137 } else if (ipv6_addr_type(&msg->target) !=
4138 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
4139 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
4143 in6_dev = __in6_dev_get(skb->dev);
4144 if (!in6_dev)
4145 return;
4146 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
4147 return;
4148 
4149 /* RFC2461 8.1:
4150 * The IP source address of the Redirect MUST be the same as the current
4151 * first-hop router for the specified ICMP Destination Address.
4152 */
4154 if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
4155 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
4160 if (ndopts.nd_opts_tgt_lladdr) {
4161 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
4164 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
4169 rt = (struct rt6_info *) dst;
4170 if (rt->rt6i_flags & RTF_REJECT) {
4171 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
4175 /* Redirect received -> path was valid.
4176 * Look, redirects are sent only in response to data packets,
4177 * so that this nexthop apparently is reachable. --ANK
4179 dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
4181 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
4186 * We have finally decided to accept it.
4189 ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
4190 NEIGH_UPDATE_F_WEAK_OVERRIDE|
4191 NEIGH_UPDATE_F_OVERRIDE|
4192 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
4193 NEIGH_UPDATE_F_ISROUTER)),
4194 NDISC_REDIRECT, &ndopts);
4197 res.f6i = rcu_dereference(rt->from);
4202 struct fib6_nh_match_arg arg = {
4204 .gw = &rt->rt6i_gateway,
4207 nexthop_for_each_fib6_nh(res.f6i->nh,
4208 fib6_nh_find_match, &arg);
4210 /* fib6_info uses a nexthop that does not have fib6_nh
4211 * using the dst->dev. Should be impossible
4217 res.nh = res.f6i->fib6_nh;
4220 res.fib6_flags = res.f6i->fib6_flags;
4221 res.fib6_type = res.f6i->fib6_type;
4222 nrt = ip6_rt_cache_alloc(&res, &msg->dest, NULL);
4226 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
4228 nrt->rt6i_flags &= ~RTF_GATEWAY;
4230 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
4232 /* rt6_insert_exception() will take care of duplicated exceptions */
4233 if (rt6_insert_exception(nrt, &res)) {
4234 dst_release_immediate(&nrt->dst);
4238 netevent.old = &rt->dst;
4239 netevent.new = &nrt->dst;
4240 netevent.daddr = &msg->dest;
4241 netevent.neigh = neigh;
4242 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
4246 neigh_release(neigh);
4249 #ifdef CONFIG_IPV6_ROUTE_INFO
4250 static struct fib6_info *rt6_get_route_info(struct net *net,
4251 const struct in6_addr *prefix, int prefixlen,
4252 const struct in6_addr *gwaddr,
4253 struct net_device *dev)
4255 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
4256 int ifindex = dev->ifindex;
4257 struct fib6_node *fn;
4258 struct fib6_info *rt = NULL;
4259 struct fib6_table *table;
4261 table = fib6_get_table(net, tb_id);
4266 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
4270 for_each_fib6_node_rt_rcu(fn) {
4271 /* these routes do not use nexthops */
4274 if (rt->fib6_nh->fib_nh_dev->ifindex != ifindex)
4276 if (!(rt->fib6_flags & RTF_ROUTEINFO) ||
4277 !rt->fib6_nh->fib_nh_gw_family)
4279 if (!ipv6_addr_equal(&rt->fib6_nh->fib_nh_gw6, gwaddr))
4281 if (!fib6_info_hold_safe(rt))
4290 static struct fib6_info *rt6_add_route_info(struct net *net,
4291 const struct in6_addr *prefix, int prefixlen,
4292 const struct in6_addr *gwaddr,
4293 struct net_device *dev,
4296 struct fib6_config cfg = {
4297 .fc_metric = IP6_RT_PRIO_USER,
4298 .fc_ifindex = dev->ifindex,
4299 .fc_dst_len = prefixlen,
4300 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
4301 RTF_UP | RTF_PREF(pref),
4302 .fc_protocol = RTPROT_RA,
4303 .fc_type = RTN_UNICAST,
4304 .fc_nlinfo.portid = 0,
4305 .fc_nlinfo.nlh = NULL,
4306 .fc_nlinfo.nl_net = net,
4309 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
4310 cfg.fc_dst = *prefix;
4311 cfg.fc_gateway = *gwaddr;
4313 /* We should treat it as a default route if prefix length is 0. */
4315 cfg.fc_flags |= RTF_DEFAULT;
4317 ip6_route_add(&cfg, GFP_ATOMIC, NULL);
4319 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
4320 }
4321 #endif
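/* rt6_get_route_info()/rt6_add_route_info() above back the Route
 * Information Option of router advertisements (RFC 4191): learned
 * prefixes become RTF_ROUTEINFO routes in RT6_TABLE_INFO, keyed by
 * prefix, gateway and incoming device.
 */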
4323 struct fib6_info *rt6_get_dflt_router(struct net *net,
4324 const struct in6_addr *addr,
4325 struct net_device *dev)
4327 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
4328 struct fib6_info *rt;
4329 struct fib6_table *table;
4331 table = fib6_get_table(net, tb_id);
4336 for_each_fib6_node_rt_rcu(&table->tb6_root) {
4339 /* RA routes do not use nexthops */
4344 if (dev == nh->fib_nh_dev &&
4345 ((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
4346 ipv6_addr_equal(&nh->fib_nh_gw6, addr))
4349 if (rt && !fib6_info_hold_safe(rt))
4355 struct fib6_info *rt6_add_dflt_router(struct net *net,
4356 const struct in6_addr *gwaddr,
4357 struct net_device *dev,
4359 u32 defrtr_usr_metric)
4361 struct fib6_config cfg = {
4362 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
4363 .fc_metric = defrtr_usr_metric,
4364 .fc_ifindex = dev->ifindex,
4365 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
4366 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
4367 .fc_protocol = RTPROT_RA,
4368 .fc_type = RTN_UNICAST,
4369 .fc_nlinfo.portid = 0,
4370 .fc_nlinfo.nlh = NULL,
4371 .fc_nlinfo.nl_net = net,
4374 cfg.fc_gateway = *gwaddr;
4376 if (!ip6_route_add(&cfg, GFP_ATOMIC, NULL)) {
4377 struct fib6_table *table;
4379 table = fib6_get_table(dev_net(dev), cfg.fc_table);
4381 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
4384 return rt6_get_dflt_router(net, gwaddr, dev);
4385 }
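/* Default routers learned from router advertisements carry
 * RTF_ADDRCONF | RTF_DEFAULT; that combination is what
 * rt6_get_dflt_router() above matches on and what
 * __rt6_purge_dflt_routers() below uses to drop them again when the
 * accept_ra configuration no longer allows them.
 */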
4387 static void __rt6_purge_dflt_routers(struct net *net,
4388 struct fib6_table *table)
4390 struct fib6_info *rt;
4394 for_each_fib6_node_rt_rcu(&table->tb6_root) {
4395 struct net_device *dev = fib6_info_nh_dev(rt);
4396 struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
4398 if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
4399 (!idev || idev->cnf.accept_ra != 2) &&
4400 fib6_info_hold_safe(rt)) {
4402 ip6_del_rt(net, rt, false);
4408 table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
4411 void rt6_purge_dflt_routers(struct net *net)
4413 struct fib6_table *table;
4414 struct hlist_head *head;
4419 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
4420 head = &net->ipv6.fib_table_hash[h];
4421 hlist_for_each_entry_rcu(table, head, tb6_hlist) {
4422 if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
4423 __rt6_purge_dflt_routers(net, table);
4430 static void rtmsg_to_fib6_config(struct net *net,
4431 struct in6_rtmsg *rtmsg,
4432 struct fib6_config *cfg)
4434 *cfg = (struct fib6_config){
4435 .fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
4437 .fc_ifindex = rtmsg->rtmsg_ifindex,
4438 .fc_metric = rtmsg->rtmsg_metric ? : IP6_RT_PRIO_USER,
4439 .fc_expires = rtmsg->rtmsg_info,
4440 .fc_dst_len = rtmsg->rtmsg_dst_len,
4441 .fc_src_len = rtmsg->rtmsg_src_len,
4442 .fc_flags = rtmsg->rtmsg_flags,
4443 .fc_type = rtmsg->rtmsg_type,
4445 .fc_nlinfo.nl_net = net,
4447 .fc_dst = rtmsg->rtmsg_dst,
4448 .fc_src = rtmsg->rtmsg_src,
4449 .fc_gateway = rtmsg->rtmsg_gateway,
4453 int ipv6_route_ioctl(struct net *net, unsigned int cmd, struct in6_rtmsg *rtmsg)
4455 struct fib6_config cfg;
4458 if (cmd != SIOCADDRT && cmd != SIOCDELRT)
4460 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
4463 rtmsg_to_fib6_config(net, rtmsg, &cfg);
4468 err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
4471 err = ip6_route_del(&cfg, NULL);
4479 * Drop the packet on the floor
4482 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
4484 struct dst_entry *dst = skb_dst(skb);
4485 struct net *net = dev_net(dst->dev);
4486 struct inet6_dev *idev;
4490 if (netif_is_l3_master(skb->dev) ||
4491 dst->dev == net->loopback_dev)
4492 idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
4494 idev = ip6_dst_idev(dst);
4496 switch (ipstats_mib_noroutes) {
4497 case IPSTATS_MIB_INNOROUTES:
4498 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
4499 if (type == IPV6_ADDR_ANY) {
4500 SKB_DR_SET(reason, IP_INADDRERRORS);
4501 IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
4504 SKB_DR_SET(reason, IP_INNOROUTES);
4506 case IPSTATS_MIB_OUTNOROUTES:
4507 SKB_DR_OR(reason, IP_OUTNOROUTES);
4508 IP6_INC_STATS(net, idev, ipstats_mib_noroutes);
4512 /* Start over by dropping the dst for l3mdev case */
4513 if (netif_is_l3_master(skb->dev))
4516 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
4517 kfree_skb_reason(skb, reason);
4521 static int ip6_pkt_discard(struct sk_buff *skb)
4523 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
4526 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
4528 skb->dev = skb_dst(skb)->dev;
4529 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
4532 static int ip6_pkt_prohibit(struct sk_buff *skb)
4534 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
4537 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
4539 skb->dev = skb_dst(skb)->dev;
4540 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
4544 * Allocate a dst for local (unicast / anycast) address.
4547 struct fib6_info *addrconf_f6i_alloc(struct net *net,
4548 struct inet6_dev *idev,
4549 const struct in6_addr *addr,
4550 bool anycast, gfp_t gfp_flags)
4552 struct fib6_config cfg = {
4553 .fc_table = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL,
4554 .fc_ifindex = idev->dev->ifindex,
4555 .fc_flags = RTF_UP | RTF_NONEXTHOP,
4558 .fc_protocol = RTPROT_KERNEL,
4559 .fc_nlinfo.nl_net = net,
4560 .fc_ignore_dev_down = true,
4562 struct fib6_info *f6i;
4565 cfg.fc_type = RTN_ANYCAST;
4566 cfg.fc_flags |= RTF_ANYCAST;
4568 cfg.fc_type = RTN_LOCAL;
4569 cfg.fc_flags |= RTF_LOCAL;
4572 f6i = ip6_route_info_create(&cfg, gfp_flags, NULL);
4574 f6i->dst_nocount = true;
4577 (net->ipv6.devconf_all->disable_policy ||
4578 idev->cnf.disable_policy))
4579 f6i->dst_nopolicy = true;
4585 /* remove deleted ip from prefsrc entries */
4586 struct arg_dev_net_ip {
4587 struct net_device *dev;
4589 struct in6_addr *addr;
4592 static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg)
4594 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
4595 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
4596 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
4599 ((void *)rt->fib6_nh->fib_nh_dev == dev || !dev) &&
4600 rt != net->ipv6.fib6_null_entry &&
4601 ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr)) {
4602 spin_lock_bh(&rt6_exception_lock);
4603 /* remove prefsrc entry */
4604 rt->fib6_prefsrc.plen = 0;
4605 spin_unlock_bh(&rt6_exception_lock);
4610 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
4612 struct net *net = dev_net(ifp->idev->dev);
4613 struct arg_dev_net_ip adni = {
4614 .dev = ifp->idev->dev,
4618 fib6_clean_all(net, fib6_remove_prefsrc, &adni);
4621 #define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT)
4623 /* Remove routers and update dst entries when gateway turn into host. */
4624 static int fib6_clean_tohost(struct fib6_info *rt, void *arg)
4626 struct in6_addr *gateway = (struct in6_addr *)arg;
4629 /* RA routes do not use nexthops */
4634 if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
4635 nh->fib_nh_gw_family && ipv6_addr_equal(gateway, &nh->fib_nh_gw6))
4638 /* Further clean up cached routes in exception table.
4639 * This is needed because cached route may have a different
4640 * gateway than its 'parent' in the case of an ip redirect.
4641 */
4642 fib6_nh_exceptions_clean_tohost(nh, gateway);
4647 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
4649 fib6_clean_all(net, fib6_clean_tohost, gateway);
4652 struct arg_netdev_event {
4653 const struct net_device *dev;
4655 unsigned char nh_flags;
4656 unsigned long event;
4660 static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
4662 struct fib6_info *iter;
4663 struct fib6_node *fn;
4665 fn = rcu_dereference_protected(rt->fib6_node,
4666 lockdep_is_held(&rt->fib6_table->tb6_lock));
4667 iter = rcu_dereference_protected(fn->leaf,
4668 lockdep_is_held(&rt->fib6_table->tb6_lock));
4670 if (iter->fib6_metric == rt->fib6_metric &&
4671 rt6_qualify_for_ecmp(iter))
4673 iter = rcu_dereference_protected(iter->fib6_next,
4674 lockdep_is_held(&rt->fib6_table->tb6_lock));
4680 /* only called for fib entries with builtin fib6_nh */
4681 static bool rt6_is_dead(const struct fib6_info *rt)
4683 if (rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD ||
4684 (rt->fib6_nh->fib_nh_flags & RTNH_F_LINKDOWN &&
4685 ip6_ignore_linkdown(rt->fib6_nh->fib_nh_dev)))
4691 static int rt6_multipath_total_weight(const struct fib6_info *rt)
4693 struct fib6_info *iter;
4696 if (!rt6_is_dead(rt))
4697 total += rt->fib6_nh->fib_nh_weight;
4699 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
4700 if (!rt6_is_dead(iter))
4701 total += iter->fib6_nh->fib_nh_weight;
4707 static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total)
4709 int upper_bound = -1;
4711 if (!rt6_is_dead(rt)) {
4712 *weight += rt->fib6_nh->fib_nh_weight;
4713 upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
4716 atomic_set(&rt->fib6_nh->fib_nh_upper_bound, upper_bound);
4719 static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total)
4721 struct fib6_info *iter;
4724 rt6_upper_bound_set(rt, &weight, total);
4726 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4727 rt6_upper_bound_set(iter, &weight, total);
4730 void rt6_multipath_rebalance(struct fib6_info *rt)
4732 struct fib6_info *first;
4735 /* In case the entire multipath route was marked for flushing,
4736 * then there is no need to rebalance upon the removal of every
4739 if (!rt->fib6_nsiblings || rt->should_flush)
4742 /* During lookup routes are evaluated in order, so we need to
4743 * make sure upper bounds are assigned from the first sibling
4746 first = rt6_multipath_first_sibling(rt);
4747 if (WARN_ON_ONCE(!first))
4750 total = rt6_multipath_total_weight(first);
4751 rt6_multipath_upper_bound_set(first, total);
4754 static int fib6_ifup(struct fib6_info *rt, void *p_arg)
4756 const struct arg_netdev_event *arg = p_arg;
4757 struct net *net = dev_net(arg->dev);
4759 if (rt != net->ipv6.fib6_null_entry && !rt->nh &&
4760 rt->fib6_nh->fib_nh_dev == arg->dev) {
4761 rt->fib6_nh->fib_nh_flags &= ~arg->nh_flags;
4762 fib6_update_sernum_upto_root(net, rt);
4763 rt6_multipath_rebalance(rt);
4769 void rt6_sync_up(struct net_device *dev, unsigned char nh_flags)
4771 struct arg_netdev_event arg = {
4774 .nh_flags = nh_flags,
4778 if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
4779 arg.nh_flags |= RTNH_F_LINKDOWN;
4781 fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
4784 /* only called for fib entries with inline fib6_nh */
4785 static bool rt6_multipath_uses_dev(const struct fib6_info *rt,
4786 const struct net_device *dev)
4788 struct fib6_info *iter;
4790 if (rt->fib6_nh->fib_nh_dev == dev)
4792 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4793 if (iter->fib6_nh->fib_nh_dev == dev)
4799 static void rt6_multipath_flush(struct fib6_info *rt)
4801 struct fib6_info *iter;
4803 rt->should_flush = 1;
4804 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4805 iter->should_flush = 1;
4808 static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt,
4809 const struct net_device *down_dev)
4811 struct fib6_info *iter;
4812 unsigned int dead = 0;
4814 if (rt->fib6_nh->fib_nh_dev == down_dev ||
4815 rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4817 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4818 if (iter->fib6_nh->fib_nh_dev == down_dev ||
4819 iter->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4825 static void rt6_multipath_nh_flags_set(struct fib6_info *rt,
4826 const struct net_device *dev,
4827 unsigned char nh_flags)
4829 struct fib6_info *iter;
4831 if (rt->fib6_nh->fib_nh_dev == dev)
4832 rt->fib6_nh->fib_nh_flags |= nh_flags;
4833 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4834 if (iter->fib6_nh->fib_nh_dev == dev)
4835 iter->fib6_nh->fib_nh_flags |= nh_flags;
4838 /* called with write lock held for table with rt */
4839 static int fib6_ifdown(struct fib6_info *rt, void *p_arg)
4841 const struct arg_netdev_event *arg = p_arg;
4842 const struct net_device *dev = arg->dev;
4843 struct net *net = dev_net(dev);
4845 if (rt == net->ipv6.fib6_null_entry || rt->nh)
4848 switch (arg->event) {
4849 case NETDEV_UNREGISTER:
4850 return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4852 if (rt->should_flush)
4854 if (!rt->fib6_nsiblings)
4855 return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4856 if (rt6_multipath_uses_dev(rt, dev)) {
4859 count = rt6_multipath_dead_count(rt, dev);
4860 if (rt->fib6_nsiblings + 1 == count) {
4861 rt6_multipath_flush(rt);
4864 rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
4866 fib6_update_sernum(net, rt);
4867 rt6_multipath_rebalance(rt);
4871 if (rt->fib6_nh->fib_nh_dev != dev ||
4872 rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
4874 rt->fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
4875 rt6_multipath_rebalance(rt);
4882 void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
4884 struct arg_netdev_event arg = {
4890 struct net *net = dev_net(dev);
4892 if (net->ipv6.sysctl.skip_notify_on_dev_down)
4893 fib6_clean_all_skip_notify(net, fib6_ifdown, &arg);
4895 fib6_clean_all(net, fib6_ifdown, &arg);
4898 void rt6_disable_ip(struct net_device *dev, unsigned long event)
4900 rt6_sync_down_dev(dev, event);
4901 rt6_uncached_list_flush_dev(dev);
4902 neigh_ifdown(&nd_tbl, dev);
4905 struct rt6_mtu_change_arg {
4906 struct net_device *dev;
4908 struct fib6_info *f6i;
4911 static int fib6_nh_mtu_change(struct fib6_nh *nh, void *_arg)
4913 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *)_arg;
4914 struct fib6_info *f6i = arg->f6i;
4916 /* For administrative MTU increase, there is no way to discover
4917 * IPv6 PMTU increase, so PMTU increase should be updated here.
4918 * Since RFC 1981 doesn't cover administrative MTU increases,
4919 * updating the PMTU on such an increase is a MUST (e.g. jumbo frames).
4920 */
4921 if (nh->fib_nh_dev == arg->dev) {
4922 struct inet6_dev *idev = __in6_dev_get(arg->dev);
4923 u32 mtu = f6i->fib6_pmtu;
4925 if (mtu >= arg->mtu ||
4926 (mtu < arg->mtu && mtu == idev->cnf.mtu6))
4927 fib6_metric_set(f6i, RTAX_MTU, arg->mtu);
4929 spin_lock_bh(&rt6_exception_lock);
4930 rt6_exceptions_update_pmtu(idev, nh, arg->mtu);
4931 spin_unlock_bh(&rt6_exception_lock);
4937 static int rt6_mtu_change_route(struct fib6_info *f6i, void *p_arg)
4939 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
4940 struct inet6_dev *idev;
4942 /* In IPv6 pmtu discovery is not optional,
4943 so that RTAX_MTU lock cannot disable it.
4944 We still use this lock to block changes
4945 caused by addrconf/ndisc.
4946 */
4948 idev = __in6_dev_get(arg->dev);
4952 if (fib6_metric_locked(f6i, RTAX_MTU))
4957 /* fib6_nh_mtu_change only returns 0, so this is safe */
4958 return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_mtu_change,
4962 return fib6_nh_mtu_change(f6i->fib6_nh, arg);
4965 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
4967 struct rt6_mtu_change_arg arg = {
4972 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
4973 }
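/* rt6_mtu_change() is driven from addrconf on NETDEV_CHANGEMTU. An
 * administrative MTU increase cannot be discovered by PMTUD, so the walk
 * above raises stored RTAX_MTU values that still tracked the old device
 * MTU and updates any affected exception routes.
 */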
4975 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
4976 [RTA_UNSPEC] = { .strict_start_type = RTA_DPORT + 1 },
4977 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
4978 [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) },
4979 [RTA_OIF] = { .type = NLA_U32 },
4980 [RTA_IIF] = { .type = NLA_U32 },
4981 [RTA_PRIORITY] = { .type = NLA_U32 },
4982 [RTA_METRICS] = { .type = NLA_NESTED },
4983 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
4984 [RTA_PREF] = { .type = NLA_U8 },
4985 [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
4986 [RTA_ENCAP] = { .type = NLA_NESTED },
4987 [RTA_EXPIRES] = { .type = NLA_U32 },
4988 [RTA_UID] = { .type = NLA_U32 },
4989 [RTA_MARK] = { .type = NLA_U32 },
4990 [RTA_TABLE] = { .type = NLA_U32 },
4991 [RTA_IP_PROTO] = { .type = NLA_U8 },
4992 [RTA_SPORT] = { .type = NLA_U16 },
4993 [RTA_DPORT] = { .type = NLA_U16 },
4994 [RTA_NH_ID] = { .type = NLA_U32 },
4995 };
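/* The policy above validates RTM_NEWROUTE/RTM_DELROUTE attributes. For
 * example, a request like "ip -6 route add 2001:db8::/64 nhid 7"
 * (illustrative command) arrives as RTA_NH_ID, which the parser below
 * rejects when combined with RTA_GATEWAY, RTA_OIF, RTA_MULTIPATH or
 * RTA_ENCAP.
 */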
4997 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
4998 struct fib6_config *cfg,
4999 struct netlink_ext_ack *extack)
5002 struct nlattr *tb[RTA_MAX+1];
5006 err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
5007 rtm_ipv6_policy, extack);
5008 if (err < 0)
5009 goto errout;
5011 err = -EINVAL;
5012 rtm = nlmsg_data(nlh);
5014 if (rtm->rtm_tos) {
5015 NL_SET_ERR_MSG(extack,
5016 "Invalid dsfield (tos): option not available for IPv6");
5017 goto errout;
5018 }
5020 *cfg = (struct fib6_config){
5021 .fc_table = rtm->rtm_table,
5022 .fc_dst_len = rtm->rtm_dst_len,
5023 .fc_src_len = rtm->rtm_src_len,
5024 .fc_flags = RTF_UP,
5025 .fc_protocol = rtm->rtm_protocol,
5026 .fc_type = rtm->rtm_type,
5028 .fc_nlinfo.portid = NETLINK_CB(skb).portid,
5029 .fc_nlinfo.nlh = nlh,
5030 .fc_nlinfo.nl_net = sock_net(skb->sk),
5031 };
5033 if (rtm->rtm_type == RTN_UNREACHABLE ||
5034 rtm->rtm_type == RTN_BLACKHOLE ||
5035 rtm->rtm_type == RTN_PROHIBIT ||
5036 rtm->rtm_type == RTN_THROW)
5037 cfg->fc_flags |= RTF_REJECT;
5039 if (rtm->rtm_type == RTN_LOCAL)
5040 cfg->fc_flags |= RTF_LOCAL;
5042 if (rtm->rtm_flags & RTM_F_CLONED)
5043 cfg->fc_flags |= RTF_CACHE;
5045 cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);
5047 if (tb[RTA_NH_ID]) {
5048 if (tb[RTA_GATEWAY] || tb[RTA_OIF] ||
5049 tb[RTA_MULTIPATH] || tb[RTA_ENCAP]) {
5050 NL_SET_ERR_MSG(extack,
5051 "Nexthop specification and nexthop id are mutually exclusive");
5052 goto errout;
5053 }
5054 cfg->fc_nh_id = nla_get_u32(tb[RTA_NH_ID]);
5055 }
5057 if (tb[RTA_GATEWAY]) {
5058 cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
5059 cfg->fc_flags |= RTF_GATEWAY;
5060 }
5061 if (tb[RTA_VIA]) {
5062 NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
5063 goto errout;
5064 }
5066 if (tb[RTA_DST]) {
5067 int plen = (rtm->rtm_dst_len + 7) >> 3;
5069 if (nla_len(tb[RTA_DST]) < plen)
5070 goto errout;
5072 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
5073 }
5075 if (tb[RTA_SRC]) {
5076 int plen = (rtm->rtm_src_len + 7) >> 3;
5078 if (nla_len(tb[RTA_SRC]) < plen)
5079 goto errout;
5081 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
5082 }
5084 if (tb[RTA_PREFSRC])
5085 cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
5087 if (tb[RTA_OIF])
5088 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
5090 if (tb[RTA_PRIORITY])
5091 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
5093 if (tb[RTA_METRICS]) {
5094 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
5095 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
5096 }
5098 if (tb[RTA_TABLE])
5099 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
5101 if (tb[RTA_MULTIPATH]) {
5102 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
5103 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
5105 err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
5106 cfg->fc_mp_len, extack);
5107 if (err < 0)
5108 goto errout;
5109 }
5111 if (tb[RTA_PREF]) {
5112 pref = nla_get_u8(tb[RTA_PREF]);
5113 if (pref != ICMPV6_ROUTER_PREF_LOW &&
5114 pref != ICMPV6_ROUTER_PREF_HIGH)
5115 pref = ICMPV6_ROUTER_PREF_MEDIUM;
5116 cfg->fc_flags |= RTF_PREF(pref);
5117 }
5119 if (tb[RTA_ENCAP])
5120 cfg->fc_encap = tb[RTA_ENCAP];
5122 if (tb[RTA_ENCAP_TYPE]) {
5123 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
5125 err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
5126 if (err < 0)
5127 goto errout;
5128 }
5130 if (tb[RTA_EXPIRES]) {
5131 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
5133 if (addrconf_finite_timeout(timeout)) {
5134 cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
5135 cfg->fc_flags |= RTF_EXPIRES;
5136 }
5137 }
5139 err = 0;
5140 errout:
5141 return err;
5142 }
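5143 /* One nexthop parsed from RTA_MULTIPATH, queued on rt6_nh_list until insertion. */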
5144 struct rt6_nh {
5145 struct fib6_info *fib6_info;
5146 struct fib6_config r_cfg;
5147 struct list_head next;
5148 };
5150 static int ip6_route_info_append(struct net *net,
5151 struct list_head *rt6_nh_list,
5152 struct fib6_info *rt,
5153 struct fib6_config *r_cfg)
5154 {
5155 struct rt6_nh *nh;
5158 list_for_each_entry(nh, rt6_nh_list, next) {
5159 /* check if fib6_info already exists */
5160 if (rt6_duplicate_nexthop(nh->fib6_info, rt))
5161 return -EEXIST;
5162 }
5164 nh = kzalloc(sizeof(*nh), GFP_KERNEL);
5165 if (!nh)
5166 return -ENOMEM;
5167 nh->fib6_info = rt;
5168 memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
5169 list_add_tail(&nh->next, rt6_nh_list);
5171 return 0;
5172 }
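5173 /* Notify userspace about a multipath add/replace, starting from the first sibling. */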
5174 static void ip6_route_mpath_notify(struct fib6_info *rt,
5175 struct fib6_info *rt_last,
5176 struct nl_info *info,
5177 __u16 nlflags)
5178 {
5179 /* if this is an APPEND route, then rt points to the first route
5180 * inserted and rt_last points to last route inserted. Userspace
5181 * wants a consistent dump of the route which starts at the first
5182 * nexthop. Since sibling routes are always added at the end of
5183 * the list, find the first sibling of the last route appended.
5184 */
5185 if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->fib6_nsiblings) {
5186 rt = list_first_entry(&rt_last->fib6_siblings,
5187 struct fib6_info,
5188 fib6_siblings);
5189 }
5191 if (rt)
5192 inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
5193 }
5195 static bool ip6_route_mpath_should_notify(const struct fib6_info *rt)
5197 bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
5198 bool should_notify = false;
5199 struct fib6_info *leaf;
5200 struct fib6_node *fn;
5202 rcu_read_lock();
5203 fn = rcu_dereference(rt->fib6_node);
5204 if (!fn)
5205 goto out;
5207 leaf = rcu_dereference(fn->leaf);
5208 if (!leaf)
5209 goto out;
5211 if (rt == leaf ||
5212 (rt_can_ecmp && rt->fib6_metric == leaf->fib6_metric &&
5213 rt6_qualify_for_ecmp(leaf)))
5214 should_notify = true;
5216 out:
5217 rcu_read_unlock();
5218 return should_notify;
5219 }
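5220 /* Read and validate an IPv6 gateway address from an RTA_GATEWAY attribute. */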
5221 static int fib6_gw_from_attr(struct in6_addr *gw, struct nlattr *nla,
5222 struct netlink_ext_ack *extack)
5223 {
5224 if (nla_len(nla) < sizeof(*gw)) {
5225 NL_SET_ERR_MSG(extack, "Invalid IPv6 address in RTA_GATEWAY");
5226 return -EINVAL;
5227 }
5229 *gw = nla_get_in6_addr(nla);
5231 return 0;
5232 }
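5233 /* Install one fib6_info per RTA_MULTIPATH nexthop; fib6_add_rt2node() links them as siblings. */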
5234 static int ip6_route_multipath_add(struct fib6_config *cfg,
5235 struct netlink_ext_ack *extack)
5237 struct fib6_info *rt_notif = NULL, *rt_last = NULL;
5238 struct nl_info *info = &cfg->fc_nlinfo;
5239 struct fib6_config r_cfg;
5240 struct rtnexthop *rtnh;
5241 struct fib6_info *rt;
5242 struct rt6_nh *err_nh;
5243 struct rt6_nh *nh, *nh_safe;
5244 __u16 nlflags;
5245 int remaining;
5246 int attrlen;
5247 int err = 1;
5248 int nhn = 0;
5249 int replace = (cfg->fc_nlinfo.nlh &&
5250 (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
5251 LIST_HEAD(rt6_nh_list);
5253 nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
5254 if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
5255 nlflags |= NLM_F_APPEND;
5257 remaining = cfg->fc_mp_len;
5258 rtnh = (struct rtnexthop *)cfg->fc_mp;
5260 /* Parse a Multipath Entry and build a list (rt6_nh_list) of
5261 * fib6_info structs per nexthop
5263 while (rtnh_ok(rtnh, remaining)) {
5264 memcpy(&r_cfg, cfg, sizeof(*cfg));
5265 if (rtnh->rtnh_ifindex)
5266 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
5268 attrlen = rtnh_attrlen(rtnh);
5269 if (attrlen > 0) {
5270 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
5272 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
5273 if (nla) {
5274 err = fib6_gw_from_attr(&r_cfg.fc_gateway, nla,
5275 extack);
5276 if (err)
5277 goto cleanup;
5279 r_cfg.fc_flags |= RTF_GATEWAY;
5280 }
5281 r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
5283 /* RTA_ENCAP_TYPE length checked in
5284 * lwtunnel_valid_encap_type_attr
5285 */
5286 nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
5287 if (nla)
5288 r_cfg.fc_encap_type = nla_get_u16(nla);
5289 }
5291 r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
5292 rt = ip6_route_info_create(&r_cfg, GFP_KERNEL, extack);
5293 if (IS_ERR(rt)) {
5294 err = PTR_ERR(rt);
5295 rt = NULL;
5296 goto cleanup;
5297 }
5298 if (!rt6_qualify_for_ecmp(rt)) {
5299 err = -EINVAL;
5300 NL_SET_ERR_MSG(extack,
5301 "Device only routes can not be added for IPv6 using the multipath API.");
5302 fib6_info_release(rt);
5303 goto cleanup;
5304 }
5306 rt->fib6_nh->fib_nh_weight = rtnh->rtnh_hops + 1;
5308 err = ip6_route_info_append(info->nl_net, &rt6_nh_list,
5309 rt, &r_cfg);
5310 if (err) {
5311 fib6_info_release(rt);
5312 goto cleanup;
5313 }
5315 rtnh = rtnh_next(rtnh, &remaining);
5316 }
5318 if (list_empty(&rt6_nh_list)) {
5319 NL_SET_ERR_MSG(extack,
5320 "Invalid nexthop configuration - no valid nexthops");
5321 err = -EINVAL;
5322 goto cleanup;
5323 }
5324 /* for add and replace send one notification with all nexthops.
5325 * Skip the notification in fib6_add_rt2node and send one with
5326 * the full route when done.
5327 */
5328 info->skip_notify = 1;
5330 /* For add and replace, send one notification with all nexthops. For
5331 * append, send one notification with all appended nexthops.
5332 */
5333 info->skip_notify_kernel = 1;
5336 list_for_each_entry(nh, &rt6_nh_list, next) {
5337 err = __ip6_ins_rt(nh->fib6_info, info, extack);
5339 if (err) {
5340 if (replace && nhn)
5341 NL_SET_ERR_MSG_MOD(extack,
5342 "multipath route replace failed (check consistency of installed routes)");
5343 err_nh = nh;
5344 goto add_errout;
5345 }
5346 /* save reference to last route successfully inserted */
5347 rt_last = nh->fib6_info;
5349 /* save reference to first route for notification */
5350 if (!rt_notif)
5351 rt_notif = nh->fib6_info;
5353 /* Because each route is added like a single route we remove
5354 * these flags after the first nexthop: if there is a collision,
5355 * we have already failed to add the first nexthop:
5356 * fib6_add_rt2node() has rejected it; when replacing, old
5357 * nexthops have been replaced by the first new one; the rest should
5358 * be added to it.
5359 */
5360 if (cfg->fc_nlinfo.nlh) {
5361 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
5362 NLM_F_REPLACE);
5363 cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
5364 }
5365 nhn++;
5366 }
5368 /* An in-kernel notification should only be sent in case the new
5369 * multipath route is added as the first route in the node, or if
5370 * it was appended to it. We pass 'rt_notif' since it is the first
5371 * sibling and might allow us to skip some checks in the replace case.
5372 */
5373 if (ip6_route_mpath_should_notify(rt_notif)) {
5374 enum fib_event_type fib_event;
5376 if (rt_notif->fib6_nsiblings != nhn - 1)
5377 fib_event = FIB_EVENT_ENTRY_APPEND;
5378 else
5379 fib_event = FIB_EVENT_ENTRY_REPLACE;
5381 err = call_fib6_multipath_entry_notifiers(info->nl_net,
5382 fib_event, rt_notif,
5383 nhn - 1, extack);
5384 if (err) {
5385 /* Delete all the siblings that were just added */
5386 err_nh = NULL;
5387 goto add_errout;
5388 }
5389 }
5391 /* success ... tell user about new route */
5392 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
5393 goto cleanup;
5395 add_errout:
5396 /* send notification for routes that were added so that
5397 * the delete notifications sent by ip6_route_del are
5398 * coherent.
5399 */
5400 if (rt_notif)
5401 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
5403 /* Delete routes that were already added */
5404 list_for_each_entry(nh, &rt6_nh_list, next) {
5405 if (err_nh == nh)
5406 break;
5407 ip6_route_del(&nh->r_cfg, extack);
5408 }
5410 cleanup:
5411 list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
5412 fib6_info_release(nh->fib6_info);
5413 list_del(&nh->next);
5414 kfree(nh);
5415 }
5417 return err;
5418 }
5420 static int ip6_route_multipath_del(struct fib6_config *cfg,
5421 struct netlink_ext_ack *extack)
5423 struct fib6_config r_cfg;
5424 struct rtnexthop *rtnh;
5425 int last_err = 0;
5426 int remaining;
5427 int attrlen;
5428 int err;
5430 remaining = cfg->fc_mp_len;
5431 rtnh = (struct rtnexthop *)cfg->fc_mp;
5433 /* Parse a Multipath Entry */
5434 while (rtnh_ok(rtnh, remaining)) {
5435 memcpy(&r_cfg, cfg, sizeof(*cfg));
5436 if (rtnh->rtnh_ifindex)
5437 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
5439 attrlen = rtnh_attrlen(rtnh);
5441 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
5443 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
5445 err = fib6_gw_from_attr(&r_cfg.fc_gateway, nla,
5446 extack);
5447 if (err) {
5448 last_err = err;
5449 goto next_rtnh;
5450 }
5452 r_cfg.fc_flags |= RTF_GATEWAY;
5453 }
5454 }
5455 err = ip6_route_del(&r_cfg, extack);
5456 if (err)
5457 last_err = err;
5459 next_rtnh:
5460 rtnh = rtnh_next(rtnh, &remaining);
5461 }
5463 return last_err;
5464 }
5466 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
5467 struct netlink_ext_ack *extack)
5468 {
5469 struct fib6_config cfg;
5470 int err;
5472 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
5473 if (err < 0)
5474 return err;
5476 if (cfg.fc_nh_id &&
5477 !nexthop_find_by_id(sock_net(skb->sk), cfg.fc_nh_id)) {
5478 NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
5479 return -EINVAL;
5480 }
5482 if (cfg.fc_mp)
5483 return ip6_route_multipath_del(&cfg, extack);
5484 else {
5485 cfg.fc_delete_all_nh = 1;
5486 return ip6_route_del(&cfg, extack);
5487 }
5488 }
5490 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
5491 struct netlink_ext_ack *extack)
5492 {
5493 struct fib6_config cfg;
5494 int err;
5496 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
5497 if (err < 0)
5498 return err;
5500 if (cfg.fc_metric == 0)
5501 cfg.fc_metric = IP6_RT_PRIO_USER;
5503 if (cfg.fc_mp)
5504 return ip6_route_multipath_add(&cfg, extack);
5505 else
5506 return ip6_route_add(&cfg, GFP_KERNEL, extack);
5507 }
5509 /* add the overhead of this fib6_nh to nexthop_len */
5510 static int rt6_nh_nlmsg_size(struct fib6_nh *nh, void *arg)
5512 int *nexthop_len = arg;
5514 *nexthop_len += nla_total_size(0) /* RTA_MULTIPATH */
5515 + NLA_ALIGN(sizeof(struct rtnexthop))
5516 + nla_total_size(16); /* RTA_GATEWAY */
5518 if (nh->fib_nh_lws) {
5519 /* RTA_ENCAP_TYPE */
5520 *nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
5521 /* RTA_ENCAP */
5522 *nexthop_len += nla_total_size(2);
5523 }
5525 return 0;
5526 }
5528 static size_t rt6_nlmsg_size(struct fib6_info *f6i)
5529 {
5530 int nexthop_len;
5532 if (f6i->nh) {
5533 nexthop_len = nla_total_size(4); /* RTA_NH_ID */
5534 nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size,
5535 &nexthop_len);
5536 } else {
5537 struct fib6_info *sibling, *next_sibling;
5538 struct fib6_nh *nh = f6i->fib6_nh;
5540 nexthop_len = 0;
5541 if (f6i->fib6_nsiblings) {
5542 rt6_nh_nlmsg_size(nh, &nexthop_len);
5544 list_for_each_entry_safe(sibling, next_sibling,
5545 &f6i->fib6_siblings, fib6_siblings) {
5546 rt6_nh_nlmsg_size(sibling->fib6_nh, &nexthop_len);
5547 }
5548 }
5549 nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
5550 }
5552 return NLMSG_ALIGN(sizeof(struct rtmsg))
5553 + nla_total_size(16) /* RTA_SRC */
5554 + nla_total_size(16) /* RTA_DST */
5555 + nla_total_size(16) /* RTA_GATEWAY */
5556 + nla_total_size(16) /* RTA_PREFSRC */
5557 + nla_total_size(4) /* RTA_TABLE */
5558 + nla_total_size(4) /* RTA_IIF */
5559 + nla_total_size(4) /* RTA_OIF */
5560 + nla_total_size(4) /* RTA_PRIORITY */
5561 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
5562 + nla_total_size(sizeof(struct rta_cacheinfo))
5563 + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
5564 + nla_total_size(1) /* RTA_PREF */
5565 + nexthop_len;
5566 }
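5567 /* Encode the nexthop object for a route: an RTA_MULTIPATH nest for multipath, inline nexthop info otherwise. */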
5568 static int rt6_fill_node_nexthop(struct sk_buff *skb, struct nexthop *nh,
5569 unsigned char *flags)
5571 if (nexthop_is_multipath(nh)) {
5572 struct nlattr *mp;
5574 mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
5575 if (!mp)
5576 goto nla_put_failure;
5578 if (nexthop_mpath_fill_node(skb, nh, AF_INET6))
5579 goto nla_put_failure;
5581 nla_nest_end(skb, mp);
5582 } else {
5583 struct fib6_nh *fib6_nh;
5585 fib6_nh = nexthop_fib6_nh(nh);
5586 if (fib_nexthop_info(skb, &fib6_nh->nh_common, AF_INET6,
5587 flags, false) < 0)
5588 goto nla_put_failure;
5589 }
5591 return 0;
5593 nla_put_failure:
5594 return -EMSGSIZE;
5595 }
5597 static int rt6_fill_node(struct net *net, struct sk_buff *skb,
5598 struct fib6_info *rt, struct dst_entry *dst,
5599 struct in6_addr *dest, struct in6_addr *src,
5600 int iif, int type, u32 portid, u32 seq,
5603 struct rt6_info *rt6 = (struct rt6_info *)dst;
5604 struct rt6key *rt6_dst, *rt6_src;
5605 u32 *pmetrics, table, rt6_flags;
5606 unsigned char nh_flags = 0;
5607 struct nlmsghdr *nlh;
5608 struct rtmsg *rtm;
5609 long expires = 0;
5611 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
5612 if (!nlh)
5613 return -EMSGSIZE;
5615 if (rt6) {
5616 rt6_dst = &rt6->rt6i_dst;
5617 rt6_src = &rt6->rt6i_src;
5618 rt6_flags = rt6->rt6i_flags;
5619 } else {
5620 rt6_dst = &rt->fib6_dst;
5621 rt6_src = &rt->fib6_src;
5622 rt6_flags = rt->fib6_flags;
5623 }
5625 rtm = nlmsg_data(nlh);
5626 rtm->rtm_family = AF_INET6;
5627 rtm->rtm_dst_len = rt6_dst->plen;
5628 rtm->rtm_src_len = rt6_src->plen;
5630 if (rt->fib6_table)
5631 table = rt->fib6_table->tb6_id;
5632 else
5633 table = RT6_TABLE_UNSPEC;
5634 rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
5635 if (nla_put_u32(skb, RTA_TABLE, table))
5636 goto nla_put_failure;
5638 rtm->rtm_type = rt->fib6_type;
5639 rtm->rtm_flags = 0;
5640 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
5641 rtm->rtm_protocol = rt->fib6_protocol;
5643 if (rt6_flags & RTF_CACHE)
5644 rtm->rtm_flags |= RTM_F_CLONED;
5646 if (dest) {
5647 if (nla_put_in6_addr(skb, RTA_DST, dest))
5648 goto nla_put_failure;
5649 rtm->rtm_dst_len = 128;
5650 } else if (rtm->rtm_dst_len)
5651 if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
5652 goto nla_put_failure;
5653 #ifdef CONFIG_IPV6_SUBTREES
5654 if (src) {
5655 if (nla_put_in6_addr(skb, RTA_SRC, src))
5656 goto nla_put_failure;
5657 rtm->rtm_src_len = 128;
5658 } else if (rtm->rtm_src_len &&
5659 nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr))
5660 goto nla_put_failure;
5661 #endif
5662 if (iif) {
5663 #ifdef CONFIG_IPV6_MROUTE
5664 if (ipv6_addr_is_multicast(&rt6_dst->addr)) {
5665 int err = ip6mr_get_route(net, skb, rtm, portid);
5667 if (err == 0)
5668 return 0;
5669 if (err < 0)
5670 goto nla_put_failure;
5671 } else
5672 #endif
5673 if (nla_put_u32(skb, RTA_IIF, iif))
5674 goto nla_put_failure;
5675 } else if (dest) {
5676 struct in6_addr saddr_buf;
5677 if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 &&
5678 nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
5679 goto nla_put_failure;
5680 }
5682 if (rt->fib6_prefsrc.plen) {
5683 struct in6_addr saddr_buf;
5684 saddr_buf = rt->fib6_prefsrc.addr;
5685 if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
5686 goto nla_put_failure;
5687 }
5689 pmetrics = dst ? dst_metrics_ptr(dst) : rt->fib6_metrics->metrics;
5690 if (rtnetlink_put_metrics(skb, pmetrics) < 0)
5691 goto nla_put_failure;
5693 if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric))
5694 goto nla_put_failure;
5696 /* For multipath routes, walk the siblings list and add
5697 * each as a nexthop within RTA_MULTIPATH.
5698 */
5699 if (rt6) {
5700 if (rt6_flags & RTF_GATEWAY &&
5701 nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
5702 goto nla_put_failure;
5704 if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
5705 goto nla_put_failure;
5707 if (dst->lwtstate &&
5708 lwtunnel_fill_encap(skb, dst->lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
5709 goto nla_put_failure;
5710 } else if (rt->fib6_nsiblings) {
5711 struct fib6_info *sibling, *next_sibling;
5714 mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
5715 if (!mp)
5716 goto nla_put_failure;
5718 if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
5719 rt->fib6_nh->fib_nh_weight, AF_INET6,
5720 0) < 0)
5721 goto nla_put_failure;
5723 list_for_each_entry_safe(sibling, next_sibling,
5724 &rt->fib6_siblings, fib6_siblings) {
5725 if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
5726 sibling->fib6_nh->fib_nh_weight,
5727 AF_INET6, 0) < 0)
5728 goto nla_put_failure;
5729 }
5731 nla_nest_end(skb, mp);
5732 } else if (rt->nh) {
5733 if (nla_put_u32(skb, RTA_NH_ID, rt->nh->id))
5734 goto nla_put_failure;
5736 if (nexthop_is_blackhole(rt->nh))
5737 rtm->rtm_type = RTN_BLACKHOLE;
5739 if (READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode) &&
5740 rt6_fill_node_nexthop(skb, rt->nh, &nh_flags) < 0)
5741 goto nla_put_failure;
5743 rtm->rtm_flags |= nh_flags;
5744 } else {
5745 if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common, AF_INET6,
5746 &nh_flags, false) < 0)
5747 goto nla_put_failure;
5749 rtm->rtm_flags |= nh_flags;
5750 }
5752 if (rt6_flags & RTF_EXPIRES) {
5753 expires = dst ? dst->expires : rt->expires;
5754 expires -= jiffies;
5755 }
5757 if (!dst) {
5758 if (READ_ONCE(rt->offload))
5759 rtm->rtm_flags |= RTM_F_OFFLOAD;
5760 if (READ_ONCE(rt->trap))
5761 rtm->rtm_flags |= RTM_F_TRAP;
5762 if (READ_ONCE(rt->offload_failed))
5763 rtm->rtm_flags |= RTM_F_OFFLOAD_FAILED;
5764 }
5766 if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
5767 goto nla_put_failure;
5769 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags)))
5770 goto nla_put_failure;
5773 nlmsg_end(skb, nlh);
5774 return 0;
5776 nla_put_failure:
5777 nlmsg_cancel(skb, nlh);
5778 return -EMSGSIZE;
5779 }
5781 static int fib6_info_nh_uses_dev(struct fib6_nh *nh, void *arg)
5783 const struct net_device *dev = arg;
5785 if (nh->fib_nh_dev == dev)
5786 return 1;
5788 return 0;
5789 }
5791 static bool fib6_info_uses_dev(const struct fib6_info *f6i,
5792 const struct net_device *dev)
5793 {
5794 if (f6i->nh) {
5795 struct net_device *_dev = (struct net_device *)dev;
5797 return !!nexthop_for_each_fib6_nh(f6i->nh,
5798 fib6_info_nh_uses_dev,
5799 _dev);
5800 }
5802 if (f6i->fib6_nh->fib_nh_dev == dev)
5803 return true;
5805 if (f6i->fib6_nsiblings) {
5806 struct fib6_info *sibling, *next_sibling;
5808 list_for_each_entry_safe(sibling, next_sibling,
5809 &f6i->fib6_siblings, fib6_siblings) {
5810 if (sibling->fib6_nh->fib_nh_dev == dev)
5811 return true;
5812 }
5813 }
5815 return false;
5816 }
5818 struct fib6_nh_exception_dump_walker {
5819 struct rt6_rtnl_dump_arg *dump;
5820 struct fib6_info *rt;
5821 unsigned int flags;
5822 unsigned int skip;
5823 unsigned int count;
5824 };
5826 static int rt6_nh_dump_exceptions(struct fib6_nh *nh, void *arg)
5828 struct fib6_nh_exception_dump_walker *w = arg;
5829 struct rt6_rtnl_dump_arg *dump = w->dump;
5830 struct rt6_exception_bucket *bucket;
5831 struct rt6_exception *rt6_ex;
5832 int i, err;
5834 bucket = fib6_nh_get_excptn_bucket(nh, NULL);
5835 if (!bucket)
5836 return 0;
5838 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
5839 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
5840 if (w->skip) {
5841 w->skip--;
5842 continue;
5843 }
5845 /* Expiration of entries doesn't bump sernum, insertion
5846 * does. Removal is triggered by insertion, so we can
5847 * rely on the fact that if entries change between two
5848 * partial dumps, this node is scanned again completely,
5849 * see rt6_insert_exception() and fib6_dump_table().
5851 * Count expired entries we go through as handled
5852 * entries that we'll skip next time, in case of partial
5853 * node dump. Otherwise, if entries expire meanwhile,
5854 * we'll skip the wrong amount.
5855 */
5856 if (rt6_check_expired(rt6_ex->rt6i)) {
5857 w->count++;
5858 continue;
5859 }
5861 err = rt6_fill_node(dump->net, dump->skb, w->rt,
5862 &rt6_ex->rt6i->dst, NULL, NULL, 0,
5863 RTM_NEWROUTE,
5864 NETLINK_CB(dump->cb->skb).portid,
5865 dump->cb->nlh->nlmsg_seq, w->flags);
5866 if (err)
5867 return err;
5869 w->count++;
5870 }
5871 bucket++;
5872 }
5874 return 0;
5875 }
5877 /* Return -1 if done with node, number of handled routes on partial dump */
5878 int rt6_dump_route(struct fib6_info *rt, void *p_arg, unsigned int skip)
5880 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
5881 struct fib_dump_filter *filter = &arg->filter;
5882 unsigned int flags = NLM_F_MULTI;
5883 struct net *net = arg->net;
5884 int count = 0;
5886 if (rt == net->ipv6.fib6_null_entry)
5887 return -1;
5889 if ((filter->flags & RTM_F_PREFIX) &&
5890 !(rt->fib6_flags & RTF_PREFIX_RT)) {
5891 /* success since this is not a prefix route */
5892 return -1;
5893 }
5894 if (filter->filter_set &&
5895 ((filter->rt_type && rt->fib6_type != filter->rt_type) ||
5896 (filter->dev && !fib6_info_uses_dev(rt, filter->dev)) ||
5897 (filter->protocol && rt->fib6_protocol != filter->protocol))) {
5898 return -1;
5899 }
5901 if (filter->filter_set ||
5902 !filter->dump_routes || !filter->dump_exceptions) {
5903 flags |= NLM_F_DUMP_FILTERED;
5904 }
5906 if (filter->dump_routes) {
5907 if (skip) {
5908 skip--;
5909 } else {
5910 if (rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL,
5911 0, RTM_NEWROUTE,
5912 NETLINK_CB(arg->cb->skb).portid,
5913 arg->cb->nlh->nlmsg_seq, flags)) {
5914 return 0;
5915 }
5916 count++;
5917 }
5918 }
5920 if (filter->dump_exceptions) {
5921 struct fib6_nh_exception_dump_walker w = { .dump = arg,
5922 .rt = rt,
5923 .flags = flags,
5924 .skip = skip,
5925 .count = 0 };
5926 int err;
5928 rcu_read_lock();
5929 if (rt->nh) {
5930 err = nexthop_for_each_fib6_nh(rt->nh,
5931 rt6_nh_dump_exceptions,
5932 &w);
5933 } else {
5934 err = rt6_nh_dump_exceptions(rt->fib6_nh, &w);
5935 }
5936 rcu_read_unlock();
5938 if (err)
5939 return count + w.count;
5940 }
5942 return -1;
5943 }
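5944 /* Strict validation of RTM_GETROUTE request headers and attributes. */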
5945 static int inet6_rtm_valid_getroute_req(struct sk_buff *skb,
5946 const struct nlmsghdr *nlh,
5947 struct nlattr **tb,
5948 struct netlink_ext_ack *extack)
5949 {
5950 struct rtmsg *rtm;
5951 int i, err;
5953 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
5954 NL_SET_ERR_MSG_MOD(extack,
5955 "Invalid header for get route request");
5956 return -EINVAL;
5957 }
5959 if (!netlink_strict_get_check(skb))
5960 return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
5961 rtm_ipv6_policy, extack);
5963 rtm = nlmsg_data(nlh);
5964 if ((rtm->rtm_src_len && rtm->rtm_src_len != 128) ||
5965 (rtm->rtm_dst_len && rtm->rtm_dst_len != 128) ||
5966 rtm->rtm_table || rtm->rtm_protocol || rtm->rtm_scope ||
5967 rtm->rtm_type) {
5968 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get route request");
5969 return -EINVAL;
5970 }
5971 if (rtm->rtm_flags & ~RTM_F_FIB_MATCH) {
5972 NL_SET_ERR_MSG_MOD(extack,
5973 "Invalid flags for get route request");
5974 return -EINVAL;
5975 }
5977 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
5978 rtm_ipv6_policy, extack);
5979 if (err)
5980 return err;
5982 if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
5983 (tb[RTA_DST] && !rtm->rtm_dst_len)) {
5984 NL_SET_ERR_MSG_MOD(extack, "rtm_src_len and rtm_dst_len must be 128 for IPv6");
5985 return -EINVAL;
5986 }
5988 for (i = 0; i <= RTA_MAX; i++) {
5989 if (!tb[i])
5990 continue;
5992 switch (i) {
5993 case RTA_SRC:
5994 case RTA_DST:
5995 case RTA_IIF:
5996 case RTA_OIF:
5997 case RTA_MARK:
5998 case RTA_UID:
5999 case RTA_SPORT:
6000 case RTA_DPORT:
6001 case RTA_IP_PROTO:
6002 break;
6003 default:
6004 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get route request");
6005 return -EINVAL;
6006 }
6007 }
6009 return 0;
6010 }
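6011 /* RTM_GETROUTE handler: resolve the requested flow and return the matching route to the caller. */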
6012 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
6013 struct netlink_ext_ack *extack)
6015 struct net *net = sock_net(in_skb->sk);
6016 struct nlattr *tb[RTA_MAX+1];
6017 int err, iif = 0, oif = 0;
6018 struct fib6_info *from;
6019 struct dst_entry *dst;
6020 struct rt6_info *rt;
6021 struct sk_buff *skb;
6022 struct rtmsg *rtm;
6023 struct flowi6 fl6 = {};
6024 bool fibmatch;
6026 err = inet6_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
6027 if (err < 0)
6028 goto errout;
6030 err = -EINVAL;
6031 rtm = nlmsg_data(nlh);
6032 fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
6033 fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
6035 if (tb[RTA_SRC]) {
6036 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
6037 goto errout;
6039 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
6040 }
6042 if (tb[RTA_DST]) {
6043 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
6044 goto errout;
6046 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
6047 }
6049 if (tb[RTA_IIF])
6050 iif = nla_get_u32(tb[RTA_IIF]);
6052 if (tb[RTA_OIF])
6053 oif = nla_get_u32(tb[RTA_OIF]);
6055 if (tb[RTA_MARK])
6056 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
6058 if (tb[RTA_UID])
6059 fl6.flowi6_uid = make_kuid(current_user_ns(),
6060 nla_get_u32(tb[RTA_UID]));
6061 else
6062 fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
6064 if (tb[RTA_SPORT])
6065 fl6.fl6_sport = nla_get_be16(tb[RTA_SPORT]);
6067 if (tb[RTA_DPORT])
6068 fl6.fl6_dport = nla_get_be16(tb[RTA_DPORT]);
6070 if (tb[RTA_IP_PROTO]) {
6071 err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
6072 &fl6.flowi6_proto, AF_INET6,
6073 extack);
6074 if (err)
6075 goto errout;
6076 }
6078 if (iif) {
6079 struct net_device *dev;
6080 int flags = 0;
6082 rcu_read_lock();
6084 dev = dev_get_by_index_rcu(net, iif);
6085 if (!dev) {
6086 rcu_read_unlock();
6087 err = -ENODEV;
6088 goto errout;
6089 }
6091 fl6.flowi6_iif = iif;
6093 if (!ipv6_addr_any(&fl6.saddr))
6094 flags |= RT6_LOOKUP_F_HAS_SADDR;
6096 dst = ip6_route_input_lookup(net, dev, &fl6, NULL, flags);
6098 rcu_read_unlock();
6099 } else {
6100 fl6.flowi6_oif = oif;
6102 dst = ip6_route_output(net, NULL, &fl6);
6103 }
6106 rt = container_of(dst, struct rt6_info, dst);
6107 if (rt->dst.error) {
6108 err = rt->dst.error;
6109 ip6_rt_put(rt);
6110 goto errout;
6111 }
6113 if (rt == net->ipv6.ip6_null_entry) {
6114 err = rt->dst.error;
6115 ip6_rt_put(rt);
6116 goto errout;
6117 }
6119 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
6120 if (!skb) {
6121 ip6_rt_put(rt);
6122 err = -ENOBUFS;
6123 goto errout;
6124 }
6126 skb_dst_set(skb, &rt->dst);
6128 rcu_read_lock();
6129 from = rcu_dereference(rt->from);
6130 if (from) {
6131 if (fibmatch)
6132 err = rt6_fill_node(net, skb, from, NULL, NULL, NULL,
6133 0, RTM_NEWROUTE,
6134 NETLINK_CB(in_skb).portid,
6135 nlh->nlmsg_seq, 0);
6136 else
6137 err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
6138 &fl6.saddr, iif, RTM_NEWROUTE,
6139 NETLINK_CB(in_skb).portid,
6140 nlh->nlmsg_seq, 0);
6141 } else {
6142 err = -ENETUNREACH;
6143 }
6144 rcu_read_unlock();
6146 if (err < 0) {
6147 kfree_skb(skb);
6148 goto errout;
6149 }
6151 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
6152 errout:
6153 return err;
6154 }
6156 void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
6157 unsigned int nlm_flags)
6159 struct sk_buff *skb;
6160 struct net *net = info->nl_net;
6161 u32 seq;
6162 int err;
6164 err = -ENOBUFS;
6165 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
6167 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
6168 if (!skb)
6169 goto errout;
6171 err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
6172 event, info->portid, seq, nlm_flags);
6173 if (err < 0) {
6174 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
6175 WARN_ON(err == -EMSGSIZE);
6176 kfree_skb(skb);
6177 goto errout;
6178 }
6179 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
6180 info->nlh, gfp_any());
6181 return;
6182 errout:
6183 if (err < 0)
6184 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
6185 }
6187 void fib6_rt_update(struct net *net, struct fib6_info *rt,
6188 struct nl_info *info)
6190 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
6191 struct sk_buff *skb;
6192 int err = -ENOBUFS;
6194 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
6195 if (!skb)
6196 goto errout;
6198 err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
6199 RTM_NEWROUTE, info->portid, seq, NLM_F_REPLACE);
6200 if (err < 0) {
6201 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
6202 WARN_ON(err == -EMSGSIZE);
6203 kfree_skb(skb);
6204 goto errout;
6205 }
6206 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
6207 info->nlh, gfp_any());
6208 return;
6209 errout:
6210 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
6211 }
6214 void fib6_info_hw_flags_set(struct net *net, struct fib6_info *f6i,
6215 bool offload, bool trap, bool offload_failed)
6216 {
6217 struct sk_buff *skb;
6218 int err;
6220 if (READ_ONCE(f6i->offload) == offload &&
6221 READ_ONCE(f6i->trap) == trap &&
6222 READ_ONCE(f6i->offload_failed) == offload_failed)
6223 return;
6225 WRITE_ONCE(f6i->offload, offload);
6226 WRITE_ONCE(f6i->trap, trap);
6228 /* 2 means send notifications only if offload_failed was changed. */
6229 if (net->ipv6.sysctl.fib_notify_on_flag_change == 2 &&
6230 READ_ONCE(f6i->offload_failed) == offload_failed)
6231 return;
6233 WRITE_ONCE(f6i->offload_failed, offload_failed);
6235 if (!rcu_access_pointer(f6i->fib6_node))
6236 /* The route was removed from the tree, do not send
6237 * notification.
6238 */
6239 return;
6241 if (!net->ipv6.sysctl.fib_notify_on_flag_change)
6242 return;
6244 skb = nlmsg_new(rt6_nlmsg_size(f6i), GFP_KERNEL);
6245 if (!skb) {
6246 err = -ENOBUFS;
6247 goto errout;
6248 }
6250 err = rt6_fill_node(net, skb, f6i, NULL, NULL, NULL, 0, RTM_NEWROUTE, 0,
6251 0, 0);
6252 if (err < 0) {
6253 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
6254 WARN_ON(err == -EMSGSIZE);
6255 kfree_skb(skb);
6256 goto errout;
6257 }
6259 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_ROUTE, NULL, GFP_KERNEL);
6260 return;
6262 errout:
6263 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
6264 }
6265 EXPORT_SYMBOL(fib6_info_hw_flags_set);
6267 static int ip6_route_dev_notify(struct notifier_block *this,
6268 unsigned long event, void *ptr)
6270 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6271 struct net *net = dev_net(dev);
6273 if (!(dev->flags & IFF_LOOPBACK))
6274 return NOTIFY_OK;
6276 if (event == NETDEV_REGISTER) {
6277 net->ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = dev;
6278 net->ipv6.ip6_null_entry->dst.dev = dev;
6279 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
6280 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6281 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
6282 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
6283 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
6284 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
6285 #endif
6286 } else if (event == NETDEV_UNREGISTER &&
6287 dev->reg_state != NETREG_UNREGISTERED) {
6288 /* NETDEV_UNREGISTER could be fired multiple times by
6289 * netdev_wait_allrefs(). Make sure we only call this once.
6290 */
6291 in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
6292 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6293 in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
6294 in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
6295 #endif
6296 }
6298 return NOTIFY_OK;
6299 }
6305 #ifdef CONFIG_PROC_FS
6306 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
6307 {
6308 struct net *net = (struct net *)seq->private;
6309 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
6310 net->ipv6.rt6_stats->fib_nodes,
6311 net->ipv6.rt6_stats->fib_route_nodes,
6312 atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
6313 net->ipv6.rt6_stats->fib_rt_entries,
6314 net->ipv6.rt6_stats->fib_rt_cache,
6315 dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
6316 net->ipv6.rt6_stats->fib_discarded_routes);
6318 return 0;
6319 }
6320 #endif /* CONFIG_PROC_FS */
6322 #ifdef CONFIG_SYSCTL
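6323 /* Writing a delay to net.ipv6.route.flush runs the FIB garbage collector. */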
6324 static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
6325 void *buffer, size_t *lenp, loff_t *ppos)
6326 {
6327 struct net *net;
6328 int delay;
6329 int ret;
6331 if (!write)
6332 return -EINVAL;
6333 net = (struct net *)ctl->extra1;
6334 delay = net->ipv6.sysctl.flush_delay;
6335 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
6336 if (ret)
6337 return ret;
6339 fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
6340 return 0;
6341 }
6343 static struct ctl_table ipv6_route_table_template[] = {
6345 .procname = "max_size",
6346 .data = &init_net.ipv6.sysctl.ip6_rt_max_size,
6347 .maxlen = sizeof(int),
6349 .proc_handler = proc_dointvec,
6352 .procname = "gc_thresh",
6353 .data = &ip6_dst_ops_template.gc_thresh,
6354 .maxlen = sizeof(int),
6356 .proc_handler = proc_dointvec,
6359 .procname = "flush",
6360 .data = &init_net.ipv6.sysctl.flush_delay,
6361 .maxlen = sizeof(int),
6363 .proc_handler = ipv6_sysctl_rtcache_flush
6366 .procname = "gc_min_interval",
6367 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
6368 .maxlen = sizeof(int),
6370 .proc_handler = proc_dointvec_jiffies,
6373 .procname = "gc_timeout",
6374 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
6375 .maxlen = sizeof(int),
6377 .proc_handler = proc_dointvec_jiffies,
6380 .procname = "gc_interval",
6381 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
6382 .maxlen = sizeof(int),
6384 .proc_handler = proc_dointvec_jiffies,
6387 .procname = "gc_elasticity",
6388 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
6389 .maxlen = sizeof(int),
6391 .proc_handler = proc_dointvec,
6394 .procname = "mtu_expires",
6395 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
6396 .maxlen = sizeof(int),
6398 .proc_handler = proc_dointvec_jiffies,
6401 .procname = "min_adv_mss",
6402 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
6403 .maxlen = sizeof(int),
6405 .proc_handler = proc_dointvec,
6408 .procname = "gc_min_interval_ms",
6409 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
6410 .maxlen = sizeof(int),
6412 .proc_handler = proc_dointvec_ms_jiffies,
6415 .procname = "skip_notify_on_dev_down",
6416 .data = &init_net.ipv6.sysctl.skip_notify_on_dev_down,
6417 .maxlen = sizeof(int),
6419 .proc_handler = proc_dointvec_minmax,
6420 .extra1 = SYSCTL_ZERO,
6421 .extra2 = SYSCTL_ONE,
6422 },
6423 { }
6424 };
6426 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
6428 struct ctl_table *table;
6430 table = kmemdup(ipv6_route_table_template,
6431 sizeof(ipv6_route_table_template),
6432 GFP_KERNEL);
6434 if (table) {
6435 table[0].data = &net->ipv6.sysctl.ip6_rt_max_size;
6436 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
6437 table[2].data = &net->ipv6.sysctl.flush_delay;
6438 table[2].extra1 = net;
6439 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
6440 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
6441 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
6442 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
6443 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
6444 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
6445 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
6446 table[10].data = &net->ipv6.sysctl.skip_notify_on_dev_down;
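6447 /* The indexes above must match the entry order in ipv6_route_table_template[]. */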
6448 /* Don't export sysctls to unprivileged users */
6449 if (net->user_ns != &init_user_ns)
6450 table[1].procname = NULL;
6451 }
6453 return table;
6454 }
6455 #endif
6457 static int __net_init ip6_route_net_init(struct net *net)
6458 {
6459 int ret = -ENOMEM;
6461 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
6462 sizeof(net->ipv6.ip6_dst_ops));
6464 if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
6465 goto out_ip6_dst_ops;
6467 net->ipv6.fib6_null_entry = fib6_info_alloc(GFP_KERNEL, true);
6468 if (!net->ipv6.fib6_null_entry)
6469 goto out_ip6_dst_entries;
6470 memcpy(net->ipv6.fib6_null_entry, &fib6_null_entry_template,
6471 sizeof(*net->ipv6.fib6_null_entry));
6473 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
6474 sizeof(*net->ipv6.ip6_null_entry),
6475 GFP_KERNEL);
6476 if (!net->ipv6.ip6_null_entry)
6477 goto out_fib6_null_entry;
6478 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6479 dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
6480 ip6_template_metrics, true);
6481 INIT_LIST_HEAD(&net->ipv6.ip6_null_entry->rt6i_uncached);
6483 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6484 net->ipv6.fib6_has_custom_rules = false;
6485 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
6486 sizeof(*net->ipv6.ip6_prohibit_entry),
6487 GFP_KERNEL);
6488 if (!net->ipv6.ip6_prohibit_entry)
6489 goto out_ip6_null_entry;
6490 net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6491 dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
6492 ip6_template_metrics, true);
6493 INIT_LIST_HEAD(&net->ipv6.ip6_prohibit_entry->rt6i_uncached);
6495 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
6496 sizeof(*net->ipv6.ip6_blk_hole_entry),
6497 GFP_KERNEL);
6498 if (!net->ipv6.ip6_blk_hole_entry)
6499 goto out_ip6_prohibit_entry;
6500 net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6501 dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
6502 ip6_template_metrics, true);
6503 INIT_LIST_HEAD(&net->ipv6.ip6_blk_hole_entry->rt6i_uncached);
6504 #ifdef CONFIG_IPV6_SUBTREES
6505 net->ipv6.fib6_routes_require_src = 0;
6506 #endif
6507 #endif
6509 net->ipv6.sysctl.flush_delay = 0;
6510 net->ipv6.sysctl.ip6_rt_max_size = INT_MAX;
6511 net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
6512 net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
6513 net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
6514 net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
6515 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
6516 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
6517 net->ipv6.sysctl.skip_notify_on_dev_down = 0;
6519 atomic_set(&net->ipv6.ip6_rt_gc_expire, 30*HZ);
6521 ret = 0;
6522 out:
6523 return ret;
6525 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6526 out_ip6_prohibit_entry:
6527 kfree(net->ipv6.ip6_prohibit_entry);
6528 out_ip6_null_entry:
6529 kfree(net->ipv6.ip6_null_entry);
6530 #endif
6531 out_fib6_null_entry:
6532 kfree(net->ipv6.fib6_null_entry);
6533 out_ip6_dst_entries:
6534 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
6535 out_ip6_dst_ops:
6536 goto out;
6537 }
6539 static void __net_exit ip6_route_net_exit(struct net *net)
6541 kfree(net->ipv6.fib6_null_entry);
6542 kfree(net->ipv6.ip6_null_entry);
6543 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6544 kfree(net->ipv6.ip6_prohibit_entry);
6545 kfree(net->ipv6.ip6_blk_hole_entry);
6546 #endif
6547 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
6548 }
6550 static int __net_init ip6_route_net_init_late(struct net *net)
6552 #ifdef CONFIG_PROC_FS
6553 if (!proc_create_net("ipv6_route", 0, net->proc_net,
6554 &ipv6_route_seq_ops,
6555 sizeof(struct ipv6_route_iter)))
6556 return -ENOMEM;
6558 if (!proc_create_net_single("rt6_stats", 0444, net->proc_net,
6559 rt6_stats_seq_show, NULL)) {
6560 remove_proc_entry("ipv6_route", net->proc_net);
6561 return -ENOMEM;
6562 }
6563 #endif
6564 return 0;
6565 }
6567 static void __net_exit ip6_route_net_exit_late(struct net *net)
6569 #ifdef CONFIG_PROC_FS
6570 remove_proc_entry("ipv6_route", net->proc_net);
6571 remove_proc_entry("rt6_stats", net->proc_net);
6572 #endif
6573 }
6575 static struct pernet_operations ip6_route_net_ops = {
6576 .init = ip6_route_net_init,
6577 .exit = ip6_route_net_exit,
6578 };
6580 static int __net_init ipv6_inetpeer_init(struct net *net)
6581 {
6582 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
6584 if (!bp)
6585 return -ENOMEM;
6586 inet_peer_base_init(bp);
6587 net->ipv6.peers = bp;
6588 return 0;
6589 }
6591 static void __net_exit ipv6_inetpeer_exit(struct net *net)
6593 struct inet_peer_base *bp = net->ipv6.peers;
6595 net->ipv6.peers = NULL;
6596 inetpeer_invalidate_tree(bp);
6597 kfree(bp);
6598 }
6600 static struct pernet_operations ipv6_inetpeer_ops = {
6601 .init = ipv6_inetpeer_init,
6602 .exit = ipv6_inetpeer_exit,
6603 };
6605 static struct pernet_operations ip6_route_net_late_ops = {
6606 .init = ip6_route_net_init_late,
6607 .exit = ip6_route_net_exit_late,
6608 };
6610 static struct notifier_block ip6_route_dev_notifier = {
6611 .notifier_call = ip6_route_dev_notify,
6612 .priority = ADDRCONF_NOTIFY_PRIORITY - 10,
6613 };
6615 void __init ip6_route_init_special_entries(void)
6616 {
6617 /* Registration of the loopback device is done before this portion of
6618 * code; the loopback reference in rt6_info will not be taken, so do
6619 * it manually for init_net. */
6620 init_net.ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = init_net.loopback_dev;
6621 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
6622 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6623 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6624 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
6625 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6626 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
6627 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6628 #endif
6629 }
6631 #if IS_BUILTIN(CONFIG_IPV6)
6632 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
6633 DEFINE_BPF_ITER_FUNC(ipv6_route, struct bpf_iter_meta *meta, struct fib6_info *rt)
6635 BTF_ID_LIST(btf_fib6_info_id)
6636 BTF_ID(struct, fib6_info)
6638 static const struct bpf_iter_seq_info ipv6_route_seq_info = {
6639 .seq_ops = &ipv6_route_seq_ops,
6640 .init_seq_private = bpf_iter_init_seq_net,
6641 .fini_seq_private = bpf_iter_fini_seq_net,
6642 .seq_priv_size = sizeof(struct ipv6_route_iter),
6643 };
6645 static struct bpf_iter_reg ipv6_route_reg_info = {
6646 .target = "ipv6_route",
6647 .ctx_arg_info_size = 1,
6648 .ctx_arg_info = {
6649 { offsetof(struct bpf_iter__ipv6_route, rt),
6650 PTR_TO_BTF_ID_OR_NULL },
6651 },
6652 .seq_info = &ipv6_route_seq_info,
6653 };
6655 static int __init bpf_iter_register(void)
6656 {
6657 ipv6_route_reg_info.ctx_arg_info[0].btf_id = *btf_fib6_info_id;
6658 return bpf_iter_reg_target(&ipv6_route_reg_info);
6659 }
6661 static void bpf_iter_unregister(void)
6662 {
6663 bpf_iter_unreg_target(&ipv6_route_reg_info);
6664 }
6665 #endif
6666 #endif
6668 int __init ip6_route_init(void)
6669 {
6670 int ret;
6671 int cpu;
6673 ret = -ENOMEM;
6674 ip6_dst_ops_template.kmem_cachep =
6675 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
6676 SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
6677 if (!ip6_dst_ops_template.kmem_cachep)
6678 goto out;
6680 ret = dst_entries_init(&ip6_dst_blackhole_ops);
6681 if (ret)
6682 goto out_kmem_cache;
6684 ret = register_pernet_subsys(&ipv6_inetpeer_ops);
6685 if (ret)
6686 goto out_dst_entries;
6688 ret = register_pernet_subsys(&ip6_route_net_ops);
6689 if (ret)
6690 goto out_register_inetpeer;
6692 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
6694 ret = fib6_init();
6695 if (ret)
6696 goto out_register_subsys;
6698 ret = xfrm6_init();
6699 if (ret)
6700 goto out_fib6_init;
6702 ret = fib6_rules_init();
6703 if (ret)
6704 goto xfrm6_init;
6706 ret = register_pernet_subsys(&ip6_route_net_late_ops);
6708 goto fib6_rules_init;
6710 ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWROUTE,
6711 inet6_rtm_newroute, NULL, 0);
6712 if (ret < 0)
6713 goto out_register_late_subsys;
6715 ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELROUTE,
6716 inet6_rtm_delroute, NULL, 0);
6717 if (ret < 0)
6718 goto out_register_late_subsys;
6720 ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE,
6721 inet6_rtm_getroute, NULL,
6722 RTNL_FLAG_DOIT_UNLOCKED);
6723 if (ret < 0)
6724 goto out_register_late_subsys;
6726 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
6727 if (ret)
6728 goto out_register_late_subsys;
6730 #if IS_BUILTIN(CONFIG_IPV6)
6731 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
6732 ret = bpf_iter_register();
6733 if (ret)
6734 goto out_register_late_subsys;
6735 #endif
6736 #endif
6738 for_each_possible_cpu(cpu) {
6739 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
6741 INIT_LIST_HEAD(&ul->head);
6742 INIT_LIST_HEAD(&ul->quarantine);
6743 spin_lock_init(&ul->lock);
6744 }
6746 out:
6747 return ret;
6749 out_register_late_subsys:
6750 rtnl_unregister_all(PF_INET6);
6751 unregister_pernet_subsys(&ip6_route_net_late_ops);
6752 fib6_rules_init:
6753 fib6_rules_cleanup();
6754 xfrm6_init:
6755 xfrm6_fini();
6756 out_fib6_init:
6757 fib6_gc_cleanup();
6758 out_register_subsys:
6759 unregister_pernet_subsys(&ip6_route_net_ops);
6760 out_register_inetpeer:
6761 unregister_pernet_subsys(&ipv6_inetpeer_ops);
6762 out_dst_entries:
6763 dst_entries_destroy(&ip6_dst_blackhole_ops);
6764 out_kmem_cache:
6765 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
6766 goto out;
6767 }
6769 void ip6_route_cleanup(void)
6770 {
6771 #if IS_BUILTIN(CONFIG_IPV6)
6772 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
6773 bpf_iter_unregister();
6774 #endif
6775 #endif
6776 unregister_netdevice_notifier(&ip6_route_dev_notifier);
6777 unregister_pernet_subsys(&ip6_route_net_late_ops);
6778 fib6_rules_cleanup();
6779 xfrm6_fini();
6780 fib6_gc_cleanup();
6781 unregister_pernet_subsys(&ipv6_inetpeer_ops);
6782 unregister_pernet_subsys(&ip6_route_net_ops);
6783 dst_entries_destroy(&ip6_dst_blackhole_ops);
6784 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
6785 }