// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Linux INET6 implementation
 *	FIB front-end.
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 */

/*	Changes:
 *
 *	YOSHIFUJI Hideaki @USAGI
 *		reworked default router selection.
 *		- respect outgoing interface
 *		- select from (probably) reachable routers (i.e.
 *		routers in REACHABLE, STALE, DELAY or PROBE states).
 *		- always select the same router if it is (probably)
 *		reachable.  otherwise, round-robin the list.
 *	Ville Nuorvala
 *		Fixed routing subtrees.
 */
#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/times.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/mroute6.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <linux/siphash.h>
#include <net/net_namespace.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <linux/rtnetlink.h>
#include <net/dst_metadata.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <net/lwtunnel.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
static int ip6_rt_type_to_error(u8 fib6_type);

#define CREATE_TRACE_POINTS
#include <trace/events/fib6.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(fib6_table_lookup);
#undef CREATE_TRACE_POINTS

enum rt6_nud_state {
	RT6_NUD_FAIL_HARD = -3,
	RT6_NUD_FAIL_PROBE = -2,
	RT6_NUD_FAIL_DO_RR = -1,
	RT6_NUD_SUCCEED = 1
};
static struct dst_entry	*ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int	 ip6_default_advmss(const struct dst_entry *dst);
static unsigned int	 ip6_mtu(const struct dst_entry *dst);
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void		ip6_dst_destroy(struct dst_entry *);
static void		ip6_dst_ifdown(struct dst_entry *,
				       struct net_device *dev, int how);
static int		 ip6_dst_gc(struct dst_ops *ops);

static int		ip6_pkt_discard(struct sk_buff *skb);
static int		ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static int		ip6_pkt_prohibit(struct sk_buff *skb);
static int		ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static void		ip6_link_failure(struct sk_buff *skb);
static void		ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
					   struct sk_buff *skb, u32 mtu,
					   bool confirm_neigh);
static void		rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
					struct sk_buff *skb);
static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
			   int strict);
static size_t rt6_nlmsg_size(struct fib6_info *f6i);
static int rt6_fill_node(struct net *net, struct sk_buff *skb,
			 struct fib6_info *rt, struct dst_entry *dst,
			 struct in6_addr *dest, struct in6_addr *src,
			 int iif, int type, u32 portid, u32 seq,
			 unsigned int flags);
static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr);
#ifdef CONFIG_IPV6_ROUTE_INFO
static struct fib6_info *rt6_add_route_info(struct net *net,
					    const struct in6_addr *prefix, int prefixlen,
					    const struct in6_addr *gwaddr,
					    struct net_device *dev,
					    unsigned int pref);
static struct fib6_info *rt6_get_route_info(struct net *net,
					    const struct in6_addr *prefix, int prefixlen,
					    const struct in6_addr *gwaddr,
					    struct net_device *dev);
#endif
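/* Per-CPU lists of dst entries (e.g. RTF_CACHE clones handed out without
 * being owned by the FIB tree); they are walked on device unregister so the
 * device and inet6_dev references held by each entry can be rehomed.
 */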
struct uncached_list {
	spinlock_t		lock;
	struct list_head	head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
void rt6_uncached_list_add(struct rt6_info *rt)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);

	rt->rt6i_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->rt6i_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}
void rt6_uncached_list_del(struct rt6_info *rt)
{
	if (!list_empty(&rt->rt6i_uncached)) {
		struct uncached_list *ul = rt->rt6i_uncached_list;
		struct net *net = dev_net(rt->dst.dev);

		spin_lock_bh(&ul->lock);
		list_del(&rt->rt6i_uncached);
		atomic_dec(&net->ipv6.rt6_stats->fib_rt_uncache);
		spin_unlock_bh(&ul->lock);
	}
}
static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
{
	struct net_device *loopback_dev = net->loopback_dev;
	int cpu;

	if (dev == loopback_dev)
		return;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
		struct rt6_info *rt;

		spin_lock_bh(&ul->lock);
		list_for_each_entry(rt, &ul->head, rt6i_uncached) {
			struct inet6_dev *rt_idev = rt->rt6i_idev;
			struct net_device *rt_dev = rt->dst.dev;

			if (rt_idev->dev == dev) {
				rt->rt6i_idev = in6_dev_get(loopback_dev);
				in6_dev_put(rt_idev);
			}

			if (rt_dev == dev) {
				rt->dst.dev = blackhole_netdev;
				dev_hold(rt->dst.dev);
				dev_put(rt_dev);
			}
		}
		spin_unlock_bh(&ul->lock);
	}
}
static inline const void *choose_neigh_daddr(const struct in6_addr *p,
					     struct sk_buff *skb,
					     const void *daddr)
{
	if (!ipv6_addr_any(p))
		return (const void *) p;
	else if (skb)
		return &ipv6_hdr(skb)->daddr;
	return daddr;
}
struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
				   struct net_device *dev,
				   struct sk_buff *skb,
				   const void *daddr)
{
	struct neighbour *n;

	daddr = choose_neigh_daddr(gw, skb, daddr);
	n = __ipv6_neigh_lookup(dev, daddr);
	if (n)
		return n;

	n = neigh_create(&nd_tbl, daddr, dev);
	return IS_ERR(n) ? NULL : n;
}

static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
					      struct sk_buff *skb,
					      const void *daddr)
{
	const struct rt6_info *rt = container_of(dst, struct rt6_info, dst);

	return ip6_neigh_lookup(rt6_nexthop(rt, &in6addr_any),
				dst->dev, skb, daddr);
}
static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	struct net_device *dev = dst->dev;
	struct rt6_info *rt = (struct rt6_info *)dst;

	daddr = choose_neigh_daddr(rt6_nexthop(rt, &in6addr_any), NULL, daddr);
	if (!daddr)
		return;
	if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
		return;
	if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
		return;
	__ipv6_confirm_neigh(dev, daddr);
}
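/* dst_ops for regular IPv6 dst entries created via ip6_dst_alloc(); the
 * blackhole variant further down shares most callbacks but is attached to
 * dsts whose input/output handlers simply discard packets.
 */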
242 static struct dst_ops ip6_dst_ops_template = {
246 .check = ip6_dst_check,
247 .default_advmss = ip6_default_advmss,
249 .cow_metrics = dst_cow_metrics_generic,
250 .destroy = ip6_dst_destroy,
251 .ifdown = ip6_dst_ifdown,
252 .negative_advice = ip6_negative_advice,
253 .link_failure = ip6_link_failure,
254 .update_pmtu = ip6_rt_update_pmtu,
255 .redirect = rt6_do_redirect,
256 .local_out = __ip6_local_out,
257 .neigh_lookup = ip6_dst_neigh_lookup,
258 .confirm_neigh = ip6_confirm_neigh,
261 static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
263 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
265 return mtu ? : dst->dev->mtu;
268 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
269 struct sk_buff *skb, u32 mtu,
274 static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
279 static struct dst_ops ip6_dst_blackhole_ops = {
281 .destroy = ip6_dst_destroy,
282 .check = ip6_dst_check,
283 .mtu = ip6_blackhole_mtu,
284 .default_advmss = ip6_default_advmss,
285 .update_pmtu = ip6_rt_blackhole_update_pmtu,
286 .redirect = ip6_rt_blackhole_redirect,
287 .cow_metrics = dst_cow_metrics_generic,
288 .neigh_lookup = ip6_dst_neigh_lookup,
291 static const u32 ip6_template_metrics[RTAX_MAX] = {
292 [RTAX_HOPLIMIT - 1] = 0,
295 static const struct fib6_info fib6_null_entry_template = {
296 .fib6_flags = (RTF_REJECT | RTF_NONEXTHOP),
297 .fib6_protocol = RTPROT_KERNEL,
298 .fib6_metric = ~(u32)0,
299 .fib6_ref = REFCOUNT_INIT(1),
300 .fib6_type = RTN_UNREACHABLE,
301 .fib6_metrics = (struct dst_metrics *)&dst_default_metrics,
304 static const struct rt6_info ip6_null_entry_template = {
306 .__refcnt = ATOMIC_INIT(1),
308 .obsolete = DST_OBSOLETE_FORCE_CHK,
309 .error = -ENETUNREACH,
310 .input = ip6_pkt_discard,
311 .output = ip6_pkt_discard_out,
313 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
316 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
318 static const struct rt6_info ip6_prohibit_entry_template = {
320 .__refcnt = ATOMIC_INIT(1),
322 .obsolete = DST_OBSOLETE_FORCE_CHK,
324 .input = ip6_pkt_prohibit,
325 .output = ip6_pkt_prohibit_out,
327 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
330 static const struct rt6_info ip6_blk_hole_entry_template = {
332 .__refcnt = ATOMIC_INIT(1),
334 .obsolete = DST_OBSOLETE_FORCE_CHK,
336 .input = dst_discard,
337 .output = dst_discard_out,
339 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
static void rt6_info_init(struct rt6_info *rt)
{
	struct dst_entry *dst = &rt->dst;

	/* Clear everything after the embedded dst_entry; dst_alloc() has
	 * already initialised the dst itself.
	 */
	memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
	INIT_LIST_HEAD(&rt->rt6i_uncached);
}

/* allocate dst with ip6_dst_ops */
struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
			       int flags)
{
	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
					1, DST_OBSOLETE_FORCE_CHK, flags);

	if (rt) {
		rt6_info_init(rt);
		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
	}

	return rt;
}
EXPORT_SYMBOL(ip6_dst_alloc);
static void ip6_dst_destroy(struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct fib6_info *from;
	struct inet6_dev *idev;

	ip_dst_metrics_put(dst);
	rt6_uncached_list_del(rt);

	idev = rt->rt6i_idev;
	if (idev) {
		rt->rt6i_idev = NULL;
		in6_dev_put(idev);
	}

	from = xchg((__force struct fib6_info **)&rt->from, NULL);
	fib6_info_release(from);
}
387 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
390 struct rt6_info *rt = (struct rt6_info *)dst;
391 struct inet6_dev *idev = rt->rt6i_idev;
392 struct net_device *loopback_dev =
393 dev_net(dev)->loopback_dev;
395 if (idev && idev->dev != loopback_dev) {
396 struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);
398 rt->rt6i_idev = loopback_idev;
static bool __rt6_check_expired(const struct rt6_info *rt)
{
	if (rt->rt6i_flags & RTF_EXPIRES)
		return time_after(jiffies, rt->dst.expires);
	else
		return false;
}

static bool rt6_check_expired(const struct rt6_info *rt)
{
	struct fib6_info *from;

	from = rcu_dereference(rt->from);

	if (rt->rt6i_flags & RTF_EXPIRES) {
		if (time_after(jiffies, rt->dst.expires))
			return true;
	} else if (from) {
		return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
			fib6_check_expired(from);
	}
	return false;
}
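/* For multipath routes, fib6_select_path() picks the sibling/nexthop whose
 * upper bound covers the flow hash in fl6->mp_hash and records it in
 * res->nh; single-nexthop routes fall through unchanged.
 */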
428 void fib6_select_path(const struct net *net, struct fib6_result *res,
429 struct flowi6 *fl6, int oif, bool have_oif_match,
430 const struct sk_buff *skb, int strict)
432 struct fib6_info *sibling, *next_sibling;
433 struct fib6_info *match = res->f6i;
435 if (!match->nh && (!match->fib6_nsiblings || have_oif_match))
438 if (match->nh && have_oif_match && res->nh)
441 /* We might have already computed the hash for ICMPv6 errors. In such
442 * case it will always be non-zero. Otherwise now is the time to do it.
445 (!match->nh || nexthop_is_multipath(match->nh)))
446 fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);
448 if (unlikely(match->nh)) {
449 nexthop_path_fib6_result(res, fl6->mp_hash);
453 if (fl6->mp_hash <= atomic_read(&match->fib6_nh->fib_nh_upper_bound))
456 list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings,
458 const struct fib6_nh *nh = sibling->fib6_nh;
461 nh_upper_bound = atomic_read(&nh->fib_nh_upper_bound);
462 if (fl6->mp_hash > nh_upper_bound)
464 if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0)
472 res->nh = match->fib6_nh;
476 * Route lookup. rcu_read_lock() should be held.
479 static bool __rt6_device_match(struct net *net, const struct fib6_nh *nh,
480 const struct in6_addr *saddr, int oif, int flags)
482 const struct net_device *dev;
484 if (nh->fib_nh_flags & RTNH_F_DEAD)
487 dev = nh->fib_nh_dev;
489 if (dev->ifindex == oif)
492 if (ipv6_chk_addr(net, saddr, dev,
493 flags & RT6_LOOKUP_F_IFACE))
500 struct fib6_nh_dm_arg {
502 const struct in6_addr *saddr;
508 static int __rt6_nh_dev_match(struct fib6_nh *nh, void *_arg)
510 struct fib6_nh_dm_arg *arg = _arg;
513 return __rt6_device_match(arg->net, nh, arg->saddr, arg->oif,
517 /* returns fib6_nh from nexthop or NULL */
518 static struct fib6_nh *rt6_nh_dev_match(struct net *net, struct nexthop *nh,
519 struct fib6_result *res,
520 const struct in6_addr *saddr,
523 struct fib6_nh_dm_arg arg = {
530 if (nexthop_is_blackhole(nh))
533 if (nexthop_for_each_fib6_nh(nh, __rt6_nh_dev_match, &arg))
539 static void rt6_device_match(struct net *net, struct fib6_result *res,
540 const struct in6_addr *saddr, int oif, int flags)
542 struct fib6_info *f6i = res->f6i;
543 struct fib6_info *spf6i;
546 if (!oif && ipv6_addr_any(saddr)) {
547 if (unlikely(f6i->nh)) {
548 nh = nexthop_fib6_nh(f6i->nh);
549 if (nexthop_is_blackhole(f6i->nh))
554 if (!(nh->fib_nh_flags & RTNH_F_DEAD))
558 for (spf6i = f6i; spf6i; spf6i = rcu_dereference(spf6i->fib6_next)) {
559 bool matched = false;
561 if (unlikely(spf6i->nh)) {
562 nh = rt6_nh_dev_match(net, spf6i->nh, res, saddr,
568 if (__rt6_device_match(net, nh, saddr, oif, flags))
577 if (oif && flags & RT6_LOOKUP_F_IFACE) {
578 res->f6i = net->ipv6.fib6_null_entry;
579 nh = res->f6i->fib6_nh;
583 if (unlikely(f6i->nh)) {
584 nh = nexthop_fib6_nh(f6i->nh);
585 if (nexthop_is_blackhole(f6i->nh))
591 if (nh->fib_nh_flags & RTNH_F_DEAD) {
592 res->f6i = net->ipv6.fib6_null_entry;
593 nh = res->f6i->fib6_nh;
597 res->fib6_type = res->f6i->fib6_type;
598 res->fib6_flags = res->f6i->fib6_flags;
602 res->fib6_flags |= RTF_REJECT;
603 res->fib6_type = RTN_BLACKHOLE;
607 #ifdef CONFIG_IPV6_ROUTER_PREF
608 struct __rt6_probe_work {
609 struct work_struct work;
610 struct in6_addr target;
611 struct net_device *dev;
614 static void rt6_probe_deferred(struct work_struct *w)
616 struct in6_addr mcaddr;
617 struct __rt6_probe_work *work =
618 container_of(w, struct __rt6_probe_work, work);
620 addrconf_addr_solict_mult(&work->target, &mcaddr);
621 ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
626 static void rt6_probe(struct fib6_nh *fib6_nh)
628 struct __rt6_probe_work *work = NULL;
629 const struct in6_addr *nh_gw;
630 unsigned long last_probe;
631 struct neighbour *neigh;
632 struct net_device *dev;
633 struct inet6_dev *idev;
636 * Okay, this does not seem to be appropriate
637 * for now, however, we need to check if it
638 * is really so; aka Router Reachability Probing.
640 * Router Reachability Probe MUST be rate-limited
641 * to no more than one per minute.
643 if (!fib6_nh->fib_nh_gw_family)
646 nh_gw = &fib6_nh->fib_nh_gw6;
647 dev = fib6_nh->fib_nh_dev;
649 last_probe = READ_ONCE(fib6_nh->last_probe);
650 idev = __in6_dev_get(dev);
651 neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
653 if (neigh->nud_state & NUD_VALID)
656 write_lock(&neigh->lock);
657 if (!(neigh->nud_state & NUD_VALID) &&
659 neigh->updated + idev->cnf.rtr_probe_interval)) {
660 work = kmalloc(sizeof(*work), GFP_ATOMIC);
662 __neigh_set_probe_once(neigh);
664 write_unlock(&neigh->lock);
665 } else if (time_after(jiffies, last_probe +
666 idev->cnf.rtr_probe_interval)) {
667 work = kmalloc(sizeof(*work), GFP_ATOMIC);
670 if (!work || cmpxchg(&fib6_nh->last_probe,
671 last_probe, jiffies) != last_probe) {
674 INIT_WORK(&work->work, rt6_probe_deferred);
675 work->target = *nh_gw;
678 schedule_work(&work->work);
	rcu_read_unlock_bh();
}
#else
static inline void rt6_probe(struct fib6_nh *fib6_nh)
{
}
#endif
/*
 * Default Router Selection (RFC 2461 6.3.6)
 */
static enum rt6_nud_state rt6_check_neigh(const struct fib6_nh *fib6_nh)
{
	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
	struct neighbour *neigh;

	rcu_read_lock_bh();
	neigh = __ipv6_neigh_lookup_noref(fib6_nh->fib_nh_dev,
					  &fib6_nh->fib_nh_gw6);
	if (neigh) {
		read_lock(&neigh->lock);
		if (neigh->nud_state & NUD_VALID)
			ret = RT6_NUD_SUCCEED;
#ifdef CONFIG_IPV6_ROUTER_PREF
		else if (!(neigh->nud_state & NUD_FAILED))
			ret = RT6_NUD_SUCCEED;
		else
			ret = RT6_NUD_FAIL_PROBE;
#endif
		read_unlock(&neigh->lock);
	} else {
		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
	}
	rcu_read_unlock_bh();

	return ret;
}
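/* Score a nexthop for router selection: 2 when it matches the requested
 * output interface, RA router-preference bits shifted in above that, and a
 * negative rt6_nud_state when the neighbour check says it is unusable.
 */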
static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
			   int strict)
{
	int m = 0;

	if (!oif || nh->fib_nh_dev->ifindex == oif)
		m = 2;

	if (!m && (strict & RT6_LOOKUP_F_IFACE))
		return RT6_NUD_FAIL_HARD;
#ifdef CONFIG_IPV6_ROUTER_PREF
	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(fib6_flags)) << 2;
#endif
	if ((strict & RT6_LOOKUP_F_REACHABLE) &&
	    !(fib6_flags & RTF_NONEXTHOP) && nh->fib_nh_gw_family) {
		int n = rt6_check_neigh(nh);
		if (n < 0)
			return n;
	}
	return m;
}
743 static bool find_match(struct fib6_nh *nh, u32 fib6_flags,
744 int oif, int strict, int *mpri, bool *do_rr)
746 bool match_do_rr = false;
750 if (nh->fib_nh_flags & RTNH_F_DEAD)
753 if (ip6_ignore_linkdown(nh->fib_nh_dev) &&
754 nh->fib_nh_flags & RTNH_F_LINKDOWN &&
755 !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
758 m = rt6_score_route(nh, fib6_flags, oif, strict);
759 if (m == RT6_NUD_FAIL_DO_RR) {
761 m = 0; /* lowest valid score */
762 } else if (m == RT6_NUD_FAIL_HARD) {
766 if (strict & RT6_LOOKUP_F_REACHABLE)
769 /* note that m can be RT6_NUD_FAIL_PROBE at this point */
771 *do_rr = match_do_rr;
779 struct fib6_nh_frl_arg {
788 static int rt6_nh_find_match(struct fib6_nh *nh, void *_arg)
790 struct fib6_nh_frl_arg *arg = _arg;
793 return find_match(nh, arg->flags, arg->oif, arg->strict,
794 arg->mpri, arg->do_rr);
797 static void __find_rr_leaf(struct fib6_info *f6i_start,
798 struct fib6_info *nomatch, u32 metric,
799 struct fib6_result *res, struct fib6_info **cont,
800 int oif, int strict, bool *do_rr, int *mpri)
802 struct fib6_info *f6i;
804 for (f6i = f6i_start;
805 f6i && f6i != nomatch;
806 f6i = rcu_dereference(f6i->fib6_next)) {
807 bool matched = false;
810 if (cont && f6i->fib6_metric != metric) {
815 if (fib6_check_expired(f6i))
818 if (unlikely(f6i->nh)) {
819 struct fib6_nh_frl_arg arg = {
820 .flags = f6i->fib6_flags,
827 if (nexthop_is_blackhole(f6i->nh)) {
828 res->fib6_flags = RTF_REJECT;
829 res->fib6_type = RTN_BLACKHOLE;
831 res->nh = nexthop_fib6_nh(f6i->nh);
834 if (nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_find_match,
841 if (find_match(nh, f6i->fib6_flags, oif, strict,
848 res->fib6_flags = f6i->fib6_flags;
849 res->fib6_type = f6i->fib6_type;
854 static void find_rr_leaf(struct fib6_node *fn, struct fib6_info *leaf,
855 struct fib6_info *rr_head, int oif, int strict,
856 bool *do_rr, struct fib6_result *res)
858 u32 metric = rr_head->fib6_metric;
859 struct fib6_info *cont = NULL;
862 __find_rr_leaf(rr_head, NULL, metric, res, &cont,
863 oif, strict, do_rr, &mpri);
865 __find_rr_leaf(leaf, rr_head, metric, res, &cont,
866 oif, strict, do_rr, &mpri);
	if (res->f6i || !cont)
		return;

	__find_rr_leaf(cont, NULL, metric, res, NULL,
		       oif, strict, do_rr, &mpri);
}
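/* rt6_select() implements the default router selection described in the
 * header comment: keep using the current rr_ptr entry while it scores as
 * (probably) reachable, otherwise round-robin fn->rr_ptr to the next route
 * of the same metric.
 */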
875 static void rt6_select(struct net *net, struct fib6_node *fn, int oif,
876 struct fib6_result *res, int strict)
878 struct fib6_info *leaf = rcu_dereference(fn->leaf);
879 struct fib6_info *rt0;
883 /* make sure this function or its helpers sets f6i */
886 if (!leaf || leaf == net->ipv6.fib6_null_entry)
889 rt0 = rcu_dereference(fn->rr_ptr);
893 /* Double check to make sure fn is not an intermediate node
894 * and fn->leaf does not points to its child's leaf
895 * (This might happen if all routes under fn are deleted from
896 * the tree and fib6_repair_tree() is called on the node.)
898 key_plen = rt0->fib6_dst.plen;
899 #ifdef CONFIG_IPV6_SUBTREES
900 if (rt0->fib6_src.plen)
901 key_plen = rt0->fib6_src.plen;
903 if (fn->fn_bit != key_plen)
906 find_rr_leaf(fn, leaf, rt0, oif, strict, &do_rr, res);
908 struct fib6_info *next = rcu_dereference(rt0->fib6_next);
910 /* no entries matched; do round-robin */
911 if (!next || next->fib6_metric != rt0->fib6_metric)
915 spin_lock_bh(&leaf->fib6_table->tb6_lock);
916 /* make sure next is not being deleted from the tree */
918 rcu_assign_pointer(fn->rr_ptr, next);
919 spin_unlock_bh(&leaf->fib6_table->tb6_lock);
925 res->f6i = net->ipv6.fib6_null_entry;
926 res->nh = res->f6i->fib6_nh;
927 res->fib6_flags = res->f6i->fib6_flags;
928 res->fib6_type = res->f6i->fib6_type;
static bool rt6_is_gw_or_nonexthop(const struct fib6_result *res)
{
	return (res->f6i->fib6_flags & RTF_NONEXTHOP) ||
	       res->nh->fib_nh_gw_family;
}
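/* rt6_route_rcv() parses a Route Information option (RFC 4191) received in
 * a Router Advertisement and adds, refreshes or removes the matching
 * RTF_ROUTEINFO route accordingly.
 */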
938 #ifdef CONFIG_IPV6_ROUTE_INFO
939 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
940 const struct in6_addr *gwaddr)
942 struct net *net = dev_net(dev);
943 struct route_info *rinfo = (struct route_info *) opt;
944 struct in6_addr prefix_buf, *prefix;
946 unsigned long lifetime;
947 struct fib6_info *rt;
949 if (len < sizeof(struct route_info)) {
953 /* Sanity check for prefix_len and length */
954 if (rinfo->length > 3) {
956 } else if (rinfo->prefix_len > 128) {
958 } else if (rinfo->prefix_len > 64) {
959 if (rinfo->length < 2) {
962 } else if (rinfo->prefix_len > 0) {
963 if (rinfo->length < 1) {
968 pref = rinfo->route_pref;
969 if (pref == ICMPV6_ROUTER_PREF_INVALID)
972 lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
974 if (rinfo->length == 3)
975 prefix = (struct in6_addr *)rinfo->prefix;
977 /* this function is safe */
978 ipv6_addr_prefix(&prefix_buf,
979 (struct in6_addr *)rinfo->prefix,
981 prefix = &prefix_buf;
984 if (rinfo->prefix_len == 0)
985 rt = rt6_get_dflt_router(net, gwaddr, dev);
987 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
990 if (rt && !lifetime) {
996 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
999 rt->fib6_flags = RTF_ROUTEINFO |
1000 (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
1003 if (!addrconf_finite_timeout(lifetime))
1004 fib6_clean_expires(rt);
1006 fib6_set_expires(rt, jiffies + HZ * lifetime);
1008 fib6_info_release(rt);
/*
 *	Misc support functions
 */

/* called with rcu_lock held */
1019 static struct net_device *ip6_rt_get_dev_rcu(const struct fib6_result *res)
1021 struct net_device *dev = res->nh->fib_nh_dev;
1023 if (res->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) {
1024 /* for copies of local routes, dst->dev needs to be the
1025 * device if it is a master device, the master device if
1026 * device is enslaved, and the loopback as the default
1028 if (netif_is_l3_slave(dev) &&
1029 !rt6_need_strict(&res->f6i->fib6_dst.addr))
1030 dev = l3mdev_master_dev_rcu(dev);
1031 else if (!netif_is_l3_master(dev))
1032 dev = dev_net(dev)->loopback_dev;
1033 /* last case is netif_is_l3_master(dev) is true in which
1034 * case we want dev returned to be dev
1041 static const int fib6_prop[RTN_MAX + 1] = {
1045 [RTN_BROADCAST] = 0,
1047 [RTN_MULTICAST] = 0,
1048 [RTN_BLACKHOLE] = -EINVAL,
1049 [RTN_UNREACHABLE] = -EHOSTUNREACH,
1050 [RTN_PROHIBIT] = -EACCES,
1051 [RTN_THROW] = -EAGAIN,
1052 [RTN_NAT] = -EINVAL,
1053 [RTN_XRESOLVE] = -EINVAL,
1056 static int ip6_rt_type_to_error(u8 fib6_type)
1058 return fib6_prop[fib6_type];
static unsigned short fib6_info_dst_flags(struct fib6_info *rt)
{
	unsigned short flags = 0;

	if (rt->dst_nocount)
		flags |= DST_NOCOUNT;
	if (rt->dst_nopolicy)
		flags |= DST_NOPOLICY;
	if (rt->dst_host)
		flags |= DST_HOST;

	return flags;
}
1075 static void ip6_rt_init_dst_reject(struct rt6_info *rt, u8 fib6_type)
1077 rt->dst.error = ip6_rt_type_to_error(fib6_type);
1079 switch (fib6_type) {
1081 rt->dst.output = dst_discard_out;
1082 rt->dst.input = dst_discard;
1085 rt->dst.output = ip6_pkt_prohibit_out;
1086 rt->dst.input = ip6_pkt_prohibit;
1089 case RTN_UNREACHABLE:
1091 rt->dst.output = ip6_pkt_discard_out;
1092 rt->dst.input = ip6_pkt_discard;
1097 static void ip6_rt_init_dst(struct rt6_info *rt, const struct fib6_result *res)
1099 struct fib6_info *f6i = res->f6i;
1101 if (res->fib6_flags & RTF_REJECT) {
1102 ip6_rt_init_dst_reject(rt, res->fib6_type);
1107 rt->dst.output = ip6_output;
1109 if (res->fib6_type == RTN_LOCAL || res->fib6_type == RTN_ANYCAST) {
1110 rt->dst.input = ip6_input;
1111 } else if (ipv6_addr_type(&f6i->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
1112 rt->dst.input = ip6_mc_input;
1114 rt->dst.input = ip6_forward;
1117 if (res->nh->fib_nh_lws) {
1118 rt->dst.lwtstate = lwtstate_get(res->nh->fib_nh_lws);
1119 lwtunnel_set_redirect(&rt->dst);
1122 rt->dst.lastuse = jiffies;
/* Caller must already hold reference to @from */
static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
{
	rt->rt6i_flags &= ~RTF_EXPIRES;
	rcu_assign_pointer(rt->from, from);
	ip_dst_init_metrics(&rt->dst, from->fib6_metrics);
}
1133 /* Caller must already hold reference to f6i in result */
1134 static void ip6_rt_copy_init(struct rt6_info *rt, const struct fib6_result *res)
1136 const struct fib6_nh *nh = res->nh;
1137 const struct net_device *dev = nh->fib_nh_dev;
1138 struct fib6_info *f6i = res->f6i;
1140 ip6_rt_init_dst(rt, res);
1142 rt->rt6i_dst = f6i->fib6_dst;
1143 rt->rt6i_idev = dev ? in6_dev_get(dev) : NULL;
1144 rt->rt6i_flags = res->fib6_flags;
1145 if (nh->fib_nh_gw_family) {
1146 rt->rt6i_gateway = nh->fib_nh_gw6;
1147 rt->rt6i_flags |= RTF_GATEWAY;
1149 rt6_set_from(rt, f6i);
1150 #ifdef CONFIG_IPV6_SUBTREES
1151 rt->rt6i_src = f6i->fib6_src;
1155 static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
1156 struct in6_addr *saddr)
1158 struct fib6_node *pn, *sn;
1160 if (fn->fn_flags & RTN_TL_ROOT)
1162 pn = rcu_dereference(fn->parent);
1163 sn = FIB6_SUBTREE(pn);
1165 fn = fib6_node_lookup(sn, NULL, saddr);
1168 if (fn->fn_flags & RTN_RTINFO)
static bool ip6_hold_safe(struct net *net, struct rt6_info **prt)
{
	struct rt6_info *rt = *prt;

	if (dst_hold_safe(&rt->dst))
		return true;
	if (net) {
		rt = net->ipv6.ip6_null_entry;
		dst_hold(&rt->dst);
	} else {
		rt = NULL;
	}
	*prt = rt;
	return false;
}
1189 /* called with rcu_lock held */
1190 static struct rt6_info *ip6_create_rt_rcu(const struct fib6_result *res)
1192 struct net_device *dev = res->nh->fib_nh_dev;
1193 struct fib6_info *f6i = res->f6i;
1194 unsigned short flags;
1195 struct rt6_info *nrt;
1197 if (!fib6_info_hold_safe(f6i))
1200 flags = fib6_info_dst_flags(f6i);
1201 nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
1203 fib6_info_release(f6i);
1207 ip6_rt_copy_init(nrt, res);
1211 nrt = dev_net(dev)->ipv6.ip6_null_entry;
1212 dst_hold(&nrt->dst);
1216 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
1217 struct fib6_table *table,
1219 const struct sk_buff *skb,
1222 struct fib6_result res = {};
1223 struct fib6_node *fn;
1224 struct rt6_info *rt;
1226 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
1227 flags &= ~RT6_LOOKUP_F_IFACE;
1230 fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1232 res.f6i = rcu_dereference(fn->leaf);
1234 res.f6i = net->ipv6.fib6_null_entry;
1236 rt6_device_match(net, &res, &fl6->saddr, fl6->flowi6_oif,
1239 if (res.f6i == net->ipv6.fib6_null_entry) {
1240 fn = fib6_backtrack(fn, &fl6->saddr);
1244 rt = net->ipv6.ip6_null_entry;
1247 } else if (res.fib6_flags & RTF_REJECT) {
1251 fib6_select_path(net, &res, fl6, fl6->flowi6_oif,
1252 fl6->flowi6_oif != 0, skb, flags);
1254 /* Search through exception table */
1255 rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
1257 if (ip6_hold_safe(net, &rt))
1258 dst_use_noref(&rt->dst, jiffies);
1261 rt = ip6_create_rt_rcu(&res);
1265 trace_fib6_table_lookup(net, &res, table, fl6);
struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
				   const struct sk_buff *skb, int flags)
{
	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup);
}
EXPORT_SYMBOL_GPL(ip6_route_lookup);
1279 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
1280 const struct in6_addr *saddr, int oif,
1281 const struct sk_buff *skb, int strict)
1283 struct flowi6 fl6 = {
1287 struct dst_entry *dst;
1288 int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
1291 memcpy(&fl6.saddr, saddr, sizeof(*saddr));
1292 flags |= RT6_LOOKUP_F_HAS_SADDR;
1295 dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
1296 if (dst->error == 0)
1297 return (struct rt6_info *) dst;
1303 EXPORT_SYMBOL(rt6_lookup);
/* ip6_ins_rt is called with FREE table->tb6_lock.
 * It takes a new route entry; if the addition fails for any reason the
 * route is released.
 * Caller must hold dst before calling it.
 */

static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info,
			struct netlink_ext_ack *extack)
{
	int err;
	struct fib6_table *table;

	table = rt->fib6_table;
	spin_lock_bh(&table->tb6_lock);
	err = fib6_add(&table->tb6_root, rt, info, extack);
	spin_unlock_bh(&table->tb6_lock);

	return err;
}

int ip6_ins_rt(struct net *net, struct fib6_info *rt)
{
	struct nl_info info = { .nl_net = net, };

	return __ip6_ins_rt(rt, &info, NULL);
}
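/* ip6_rt_cache_alloc() creates a host (/128) RTF_CACHE clone of the matched
 * route; such clones carry per-destination exception state (e.g. PMTU) and
 * live in the nexthop's exception hash rather than in the FIB tree.
 */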
1332 static struct rt6_info *ip6_rt_cache_alloc(const struct fib6_result *res,
1333 const struct in6_addr *daddr,
1334 const struct in6_addr *saddr)
1336 struct fib6_info *f6i = res->f6i;
1337 struct net_device *dev;
1338 struct rt6_info *rt;
1344 if (!fib6_info_hold_safe(f6i))
1347 dev = ip6_rt_get_dev_rcu(res);
1348 rt = ip6_dst_alloc(dev_net(dev), dev, 0);
1350 fib6_info_release(f6i);
1354 ip6_rt_copy_init(rt, res);
1355 rt->rt6i_flags |= RTF_CACHE;
1356 rt->dst.flags |= DST_HOST;
1357 rt->rt6i_dst.addr = *daddr;
1358 rt->rt6i_dst.plen = 128;
1360 if (!rt6_is_gw_or_nonexthop(res)) {
1361 if (f6i->fib6_dst.plen != 128 &&
1362 ipv6_addr_equal(&f6i->fib6_dst.addr, daddr))
1363 rt->rt6i_flags |= RTF_ANYCAST;
1364 #ifdef CONFIG_IPV6_SUBTREES
1365 if (rt->rt6i_src.plen && saddr) {
1366 rt->rt6i_src.addr = *saddr;
1367 rt->rt6i_src.plen = 128;
1375 static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res)
1377 struct fib6_info *f6i = res->f6i;
1378 unsigned short flags = fib6_info_dst_flags(f6i);
1379 struct net_device *dev;
1380 struct rt6_info *pcpu_rt;
1382 if (!fib6_info_hold_safe(f6i))
1386 dev = ip6_rt_get_dev_rcu(res);
1387 pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags);
1390 fib6_info_release(f6i);
1393 ip6_rt_copy_init(pcpu_rt, res);
1394 pcpu_rt->rt6i_flags |= RTF_PCPU;
1397 pcpu_rt->sernum = rt_genid_ipv6(dev_net(dev));
static bool rt6_is_valid(const struct rt6_info *rt6)
{
	return rt6->sernum == rt_genid_ipv6(dev_net(rt6->dst.dev));
}
1407 /* It should be called with rcu_read_lock() acquired */
1408 static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
1410 struct rt6_info *pcpu_rt;
1412 pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu);
1414 if (pcpu_rt && pcpu_rt->sernum && !rt6_is_valid(pcpu_rt)) {
1415 struct rt6_info *prev, **p;
1417 p = this_cpu_ptr(res->nh->rt6i_pcpu);
1418 prev = xchg(p, NULL);
1420 dst_dev_put(&prev->dst);
1421 dst_release(&prev->dst);
1430 static struct rt6_info *rt6_make_pcpu_route(struct net *net,
1431 const struct fib6_result *res)
1433 struct rt6_info *pcpu_rt, *prev, **p;
1435 pcpu_rt = ip6_rt_pcpu_alloc(res);
1439 p = this_cpu_ptr(res->nh->rt6i_pcpu);
1440 prev = cmpxchg(p, NULL, pcpu_rt);
1443 if (res->f6i->fib6_destroying) {
1444 struct fib6_info *from;
1446 from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
1447 fib6_info_release(from);
/* exception hash table implementation
 */
static DEFINE_SPINLOCK(rt6_exception_lock);
1457 /* Remove rt6_ex from hash table and free the memory
1458 * Caller must hold rt6_exception_lock
1460 static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
1461 struct rt6_exception *rt6_ex)
1463 struct fib6_info *from;
1466 if (!bucket || !rt6_ex)
1469 net = dev_net(rt6_ex->rt6i->dst.dev);
1470 net->ipv6.rt6_stats->fib_rt_cache--;
1472 /* purge completely the exception to allow releasing the held resources:
1473 * some [sk] cache may keep the dst around for unlimited time
1475 from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL);
1476 fib6_info_release(from);
1477 dst_dev_put(&rt6_ex->rt6i->dst);
1479 hlist_del_rcu(&rt6_ex->hlist);
1480 dst_release(&rt6_ex->rt6i->dst);
1481 kfree_rcu(rt6_ex, rcu);
1482 WARN_ON_ONCE(!bucket->depth);
/* Remove oldest rt6_ex in bucket and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
{
	struct rt6_exception *rt6_ex, *oldest = NULL;

	if (!bucket)
		return;

	hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
		if (!oldest || time_before(rt6_ex->stamp, oldest->stamp))
			oldest = rt6_ex;
	}
	rt6_remove_exception(bucket, oldest);
}
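/* The bucket index is computed with siphash and a boot-time random key so
 * that off-path hosts cannot predict which chain a (daddr, saddr) pair maps
 * to and deliberately overflow a single bucket.
 */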
static u32 rt6_exception_hash(const struct in6_addr *dst,
			      const struct in6_addr *src)
{
	static siphash_key_t rt6_exception_key __read_mostly;
	struct {
		struct in6_addr dst;
		struct in6_addr src;
	} __aligned(SIPHASH_ALIGNMENT) combined = {
		.dst = *dst,
	};
	u64 val;

	net_get_random_once(&rt6_exception_key, sizeof(rt6_exception_key));

#ifdef CONFIG_IPV6_SUBTREES
	if (src)
		combined.src = *src;
#endif
	val = siphash(&combined, sizeof(combined), &rt6_exception_key);

	return hash_64(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
}
1526 /* Helper function to find the cached rt in the hash table
1527 * and update bucket pointer to point to the bucket for this
1528 * (daddr, saddr) pair
1529 * Caller must hold rt6_exception_lock
1531 static struct rt6_exception *
1532 __rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
1533 const struct in6_addr *daddr,
1534 const struct in6_addr *saddr)
1536 struct rt6_exception *rt6_ex;
1539 if (!(*bucket) || !daddr)
1542 hval = rt6_exception_hash(daddr, saddr);
1545 hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
1546 struct rt6_info *rt6 = rt6_ex->rt6i;
1547 bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);
1549 #ifdef CONFIG_IPV6_SUBTREES
1550 if (matched && saddr)
1551 matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
1559 /* Helper function to find the cached rt in the hash table
1560 * and update bucket pointer to point to the bucket for this
1561 * (daddr, saddr) pair
1562 * Caller must hold rcu_read_lock()
1564 static struct rt6_exception *
1565 __rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
1566 const struct in6_addr *daddr,
1567 const struct in6_addr *saddr)
1569 struct rt6_exception *rt6_ex;
1572 WARN_ON_ONCE(!rcu_read_lock_held());
1574 if (!(*bucket) || !daddr)
1577 hval = rt6_exception_hash(daddr, saddr);
1580 hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
1581 struct rt6_info *rt6 = rt6_ex->rt6i;
1582 bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);
1584 #ifdef CONFIG_IPV6_SUBTREES
1585 if (matched && saddr)
1586 matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
1594 static unsigned int fib6_mtu(const struct fib6_result *res)
1596 const struct fib6_nh *nh = res->nh;
1599 if (res->f6i->fib6_pmtu) {
1600 mtu = res->f6i->fib6_pmtu;
1602 struct net_device *dev = nh->fib_nh_dev;
1603 struct inet6_dev *idev;
1606 idev = __in6_dev_get(dev);
1607 mtu = idev->cnf.mtu6;
1611 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
1613 return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
1616 #define FIB6_EXCEPTION_BUCKET_FLUSHED 0x1UL
1618 /* used when the flushed bit is not relevant, only access to the bucket
1619 * (ie., all bucket users except rt6_insert_exception);
1621 * called under rcu lock; sometimes called with rt6_exception_lock held
1624 struct rt6_exception_bucket *fib6_nh_get_excptn_bucket(const struct fib6_nh *nh,
1627 struct rt6_exception_bucket *bucket;
1630 bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1631 lockdep_is_held(lock));
1633 bucket = rcu_dereference(nh->rt6i_exception_bucket);
1635 /* remove bucket flushed bit if set */
1637 unsigned long p = (unsigned long)bucket;
1639 p &= ~FIB6_EXCEPTION_BUCKET_FLUSHED;
1640 bucket = (struct rt6_exception_bucket *)p;
static bool fib6_nh_excptn_bucket_flushed(struct rt6_exception_bucket *bucket)
{
	unsigned long p = (unsigned long)bucket;

	return !!(p & FIB6_EXCEPTION_BUCKET_FLUSHED);
}
1653 /* called with rt6_exception_lock held */
1654 static void fib6_nh_excptn_bucket_set_flushed(struct fib6_nh *nh,
1657 struct rt6_exception_bucket *bucket;
1660 bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1661 lockdep_is_held(lock));
1663 p = (unsigned long)bucket;
1664 p |= FIB6_EXCEPTION_BUCKET_FLUSHED;
1665 bucket = (struct rt6_exception_bucket *)p;
1666 rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
1669 static int rt6_insert_exception(struct rt6_info *nrt,
1670 const struct fib6_result *res)
1672 struct net *net = dev_net(nrt->dst.dev);
1673 struct rt6_exception_bucket *bucket;
1674 struct fib6_info *f6i = res->f6i;
1675 struct in6_addr *src_key = NULL;
1676 struct rt6_exception *rt6_ex;
1677 struct fib6_nh *nh = res->nh;
1681 spin_lock_bh(&rt6_exception_lock);
1683 bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1684 lockdep_is_held(&rt6_exception_lock));
1686 bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
1692 rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
1693 } else if (fib6_nh_excptn_bucket_flushed(bucket)) {
1698 #ifdef CONFIG_IPV6_SUBTREES
1699 /* fib6_src.plen != 0 indicates f6i is in subtree
1700 * and exception table is indexed by a hash of
1701 * both fib6_dst and fib6_src.
1702 * Otherwise, the exception table is indexed by
1703 * a hash of only fib6_dst.
1705 if (f6i->fib6_src.plen)
1706 src_key = &nrt->rt6i_src.addr;
1708 /* rt6_mtu_change() might lower mtu on f6i.
1709 * Only insert this exception route if its mtu
1710 * is less than f6i's mtu value.
1712 if (dst_metric_raw(&nrt->dst, RTAX_MTU) >= fib6_mtu(res)) {
1717 rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
1720 rt6_remove_exception(bucket, rt6_ex);
1722 rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC);
1728 rt6_ex->stamp = jiffies;
1729 hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
1731 net->ipv6.rt6_stats->fib_rt_cache++;
1733 /* Randomize max depth to avoid some side channels attacks. */
1734 max_depth = FIB6_MAX_DEPTH + prandom_u32_max(FIB6_MAX_DEPTH);
1735 while (bucket->depth > max_depth)
1736 rt6_exception_remove_oldest(bucket);
1739 spin_unlock_bh(&rt6_exception_lock);
1741 /* Update fn->fn_sernum to invalidate all cached dst */
1743 spin_lock_bh(&f6i->fib6_table->tb6_lock);
1744 fib6_update_sernum(net, f6i);
1745 spin_unlock_bh(&f6i->fib6_table->tb6_lock);
1746 fib6_force_start_gc(net);
1752 static void fib6_nh_flush_exceptions(struct fib6_nh *nh, struct fib6_info *from)
1754 struct rt6_exception_bucket *bucket;
1755 struct rt6_exception *rt6_ex;
1756 struct hlist_node *tmp;
1759 spin_lock_bh(&rt6_exception_lock);
1761 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1765 /* Prevent rt6_insert_exception() to recreate the bucket list */
1767 fib6_nh_excptn_bucket_set_flushed(nh, &rt6_exception_lock);
1769 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1770 hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist) {
1772 rcu_access_pointer(rt6_ex->rt6i->from) == from)
1773 rt6_remove_exception(bucket, rt6_ex);
1775 WARN_ON_ONCE(!from && bucket->depth);
1779 spin_unlock_bh(&rt6_exception_lock);
1782 static int rt6_nh_flush_exceptions(struct fib6_nh *nh, void *arg)
1784 struct fib6_info *f6i = arg;
1786 fib6_nh_flush_exceptions(nh, f6i);
1791 void rt6_flush_exceptions(struct fib6_info *f6i)
1794 nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_flush_exceptions,
1797 fib6_nh_flush_exceptions(f6i->fib6_nh, f6i);
1800 /* Find cached rt in the hash table inside passed in rt
1801 * Caller has to hold rcu_read_lock()
1803 static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
1804 const struct in6_addr *daddr,
1805 const struct in6_addr *saddr)
1807 const struct in6_addr *src_key = NULL;
1808 struct rt6_exception_bucket *bucket;
1809 struct rt6_exception *rt6_ex;
1810 struct rt6_info *ret = NULL;
1812 #ifdef CONFIG_IPV6_SUBTREES
1813 /* fib6i_src.plen != 0 indicates f6i is in subtree
1814 * and exception table is indexed by a hash of
1815 * both fib6_dst and fib6_src.
1816 * However, the src addr used to create the hash
1817 * might not be exactly the passed in saddr which
1818 * is a /128 addr from the flow.
1819 * So we need to use f6i->fib6_src to redo lookup
1820 * if the passed in saddr does not find anything.
1821 * (See the logic in ip6_rt_cache_alloc() on how
1822 * rt->rt6i_src is updated.)
1824 if (res->f6i->fib6_src.plen)
1828 bucket = fib6_nh_get_excptn_bucket(res->nh, NULL);
1829 rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
1831 if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
1834 #ifdef CONFIG_IPV6_SUBTREES
1835 /* Use fib6_src as src_key and redo lookup */
1836 if (!ret && src_key && src_key != &res->f6i->fib6_src.addr) {
1837 src_key = &res->f6i->fib6_src.addr;
1845 /* Remove the passed in cached rt from the hash table that contains it */
1846 static int fib6_nh_remove_exception(const struct fib6_nh *nh, int plen,
1847 const struct rt6_info *rt)
1849 const struct in6_addr *src_key = NULL;
1850 struct rt6_exception_bucket *bucket;
1851 struct rt6_exception *rt6_ex;
1854 if (!rcu_access_pointer(nh->rt6i_exception_bucket))
1857 spin_lock_bh(&rt6_exception_lock);
1858 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1860 #ifdef CONFIG_IPV6_SUBTREES
1861 /* rt6i_src.plen != 0 indicates 'from' is in subtree
1862 * and exception table is indexed by a hash of
1863 * both rt6i_dst and rt6i_src.
1864 * Otherwise, the exception table is indexed by
1865 * a hash of only rt6i_dst.
1868 src_key = &rt->rt6i_src.addr;
1870 rt6_ex = __rt6_find_exception_spinlock(&bucket,
1874 rt6_remove_exception(bucket, rt6_ex);
1880 spin_unlock_bh(&rt6_exception_lock);
1884 struct fib6_nh_excptn_arg {
1885 struct rt6_info *rt;
1889 static int rt6_nh_remove_exception_rt(struct fib6_nh *nh, void *_arg)
1891 struct fib6_nh_excptn_arg *arg = _arg;
1894 err = fib6_nh_remove_exception(nh, arg->plen, arg->rt);
1901 static int rt6_remove_exception_rt(struct rt6_info *rt)
1903 struct fib6_info *from;
1905 from = rcu_dereference(rt->from);
1906 if (!from || !(rt->rt6i_flags & RTF_CACHE))
1910 struct fib6_nh_excptn_arg arg = {
1912 .plen = from->fib6_src.plen
1916 /* rc = 1 means an entry was found */
1917 rc = nexthop_for_each_fib6_nh(from->nh,
1918 rt6_nh_remove_exception_rt,
1920 return rc ? 0 : -ENOENT;
1923 return fib6_nh_remove_exception(from->fib6_nh,
1924 from->fib6_src.plen, rt);
/* Find rt6_ex which contains the passed in rt cache and
 * refresh its stamp
 */
static void fib6_nh_update_exception(const struct fib6_nh *nh, int plen,
				     const struct rt6_info *rt)
{
1933 const struct in6_addr *src_key = NULL;
1934 struct rt6_exception_bucket *bucket;
1935 struct rt6_exception *rt6_ex;
1937 bucket = fib6_nh_get_excptn_bucket(nh, NULL);
1938 #ifdef CONFIG_IPV6_SUBTREES
1939 /* rt6i_src.plen != 0 indicates 'from' is in subtree
1940 * and exception table is indexed by a hash of
1941 * both rt6i_dst and rt6i_src.
1942 * Otherwise, the exception table is indexed by
1943 * a hash of only rt6i_dst.
1946 src_key = &rt->rt6i_src.addr;
1948 rt6_ex = __rt6_find_exception_rcu(&bucket, &rt->rt6i_dst.addr, src_key);
1950 rt6_ex->stamp = jiffies;
1953 struct fib6_nh_match_arg {
1954 const struct net_device *dev;
1955 const struct in6_addr *gw;
1956 struct fib6_nh *match;
1959 /* determine if fib6_nh has given device and gateway */
1960 static int fib6_nh_find_match(struct fib6_nh *nh, void *_arg)
1962 struct fib6_nh_match_arg *arg = _arg;
1964 if (arg->dev != nh->fib_nh_dev ||
1965 (arg->gw && !nh->fib_nh_gw_family) ||
1966 (!arg->gw && nh->fib_nh_gw_family) ||
1967 (arg->gw && !ipv6_addr_equal(arg->gw, &nh->fib_nh_gw6)))
1972 /* found a match, break the loop */
1976 static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
1978 struct fib6_info *from;
1979 struct fib6_nh *fib6_nh;
1983 from = rcu_dereference(rt->from);
1984 if (!from || !(rt->rt6i_flags & RTF_CACHE))
1988 struct fib6_nh_match_arg arg = {
1990 .gw = &rt->rt6i_gateway,
1993 nexthop_for_each_fib6_nh(from->nh, fib6_nh_find_match, &arg);
1997 fib6_nh = arg.match;
1999 fib6_nh = from->fib6_nh;
2001 fib6_nh_update_exception(fib6_nh, from->fib6_src.plen, rt);
static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
					 struct rt6_info *rt, int mtu)
{
	/* If the new MTU is lower than the route PMTU, this new MTU will be the
	 * lowest MTU in the path: always allow updating the route PMTU to
	 * reflect PMTU decreases.
	 *
	 * If the new MTU is higher, and the route PMTU is equal to the local
	 * MTU, this means the old MTU is the lowest in the path, so allow
	 * updating it: if other nodes now have lower MTUs, PMTU discovery will
	 * handle this.
	 */

	if (dst_mtu(&rt->dst) >= mtu)
		return true;

	if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
		return true;

	return false;
}
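/* Walk a nexthop's exception (RTF_CACHE) routes and update their cached MTU
 * when the device MTU changes; the caller holds rt6_exception_lock.
 */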
2028 static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
2029 const struct fib6_nh *nh, int mtu)
2031 struct rt6_exception_bucket *bucket;
2032 struct rt6_exception *rt6_ex;
2035 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
2039 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
2040 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
2041 struct rt6_info *entry = rt6_ex->rt6i;
2043 /* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
2044 * route), the metrics of its rt->from have already
2047 if (dst_metric_raw(&entry->dst, RTAX_MTU) &&
2048 rt6_mtu_change_route_allowed(idev, entry, mtu))
2049 dst_metric_set(&entry->dst, RTAX_MTU, mtu);
2055 #define RTF_CACHE_GATEWAY (RTF_GATEWAY | RTF_CACHE)
2057 static void fib6_nh_exceptions_clean_tohost(const struct fib6_nh *nh,
2058 const struct in6_addr *gateway)
2060 struct rt6_exception_bucket *bucket;
2061 struct rt6_exception *rt6_ex;
2062 struct hlist_node *tmp;
2065 if (!rcu_access_pointer(nh->rt6i_exception_bucket))
2068 spin_lock_bh(&rt6_exception_lock);
2069 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
2071 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
2072 hlist_for_each_entry_safe(rt6_ex, tmp,
2073 &bucket->chain, hlist) {
2074 struct rt6_info *entry = rt6_ex->rt6i;
2076 if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
2077 RTF_CACHE_GATEWAY &&
2078 ipv6_addr_equal(gateway,
2079 &entry->rt6i_gateway)) {
2080 rt6_remove_exception(bucket, rt6_ex);
2087 spin_unlock_bh(&rt6_exception_lock);
2090 static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
2091 struct rt6_exception *rt6_ex,
2092 struct fib6_gc_args *gc_args,
2095 struct rt6_info *rt = rt6_ex->rt6i;
2097 /* we are pruning and obsoleting aged-out and non gateway exceptions
2098 * even if others have still references to them, so that on next
2099 * dst_check() such references can be dropped.
2100 * EXPIRES exceptions - e.g. pmtu-generated ones are pruned when
2101 * expired, independently from their aging, as per RFC 8201 section 4
2103 if (!(rt->rt6i_flags & RTF_EXPIRES)) {
2104 if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
2105 RT6_TRACE("aging clone %p\n", rt);
2106 rt6_remove_exception(bucket, rt6_ex);
2109 } else if (time_after(jiffies, rt->dst.expires)) {
2110 RT6_TRACE("purging expired route %p\n", rt);
2111 rt6_remove_exception(bucket, rt6_ex);
2115 if (rt->rt6i_flags & RTF_GATEWAY) {
2116 struct neighbour *neigh;
2117 __u8 neigh_flags = 0;
2119 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
2121 neigh_flags = neigh->flags;
2123 if (!(neigh_flags & NTF_ROUTER)) {
2124 RT6_TRACE("purging route %p via non-router but gateway\n",
2126 rt6_remove_exception(bucket, rt6_ex);
2134 static void fib6_nh_age_exceptions(const struct fib6_nh *nh,
2135 struct fib6_gc_args *gc_args,
2138 struct rt6_exception_bucket *bucket;
2139 struct rt6_exception *rt6_ex;
2140 struct hlist_node *tmp;
2143 if (!rcu_access_pointer(nh->rt6i_exception_bucket))
2147 spin_lock(&rt6_exception_lock);
2148 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
2150 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
2151 hlist_for_each_entry_safe(rt6_ex, tmp,
2152 &bucket->chain, hlist) {
2153 rt6_age_examine_exception(bucket, rt6_ex,
2159 spin_unlock(&rt6_exception_lock);
2160 rcu_read_unlock_bh();
2163 struct fib6_nh_age_excptn_arg {
2164 struct fib6_gc_args *gc_args;
2168 static int rt6_nh_age_exceptions(struct fib6_nh *nh, void *_arg)
2170 struct fib6_nh_age_excptn_arg *arg = _arg;
2172 fib6_nh_age_exceptions(nh, arg->gc_args, arg->now);
2176 void rt6_age_exceptions(struct fib6_info *f6i,
2177 struct fib6_gc_args *gc_args,
2181 struct fib6_nh_age_excptn_arg arg = {
2186 nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_age_exceptions,
2189 fib6_nh_age_exceptions(f6i->fib6_nh, gc_args, now);
2193 /* must be called with rcu lock held */
2194 int fib6_table_lookup(struct net *net, struct fib6_table *table, int oif,
2195 struct flowi6 *fl6, struct fib6_result *res, int strict)
2197 struct fib6_node *fn, *saved_fn;
2199 fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
2202 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
2206 rt6_select(net, fn, oif, res, strict);
2207 if (res->f6i == net->ipv6.fib6_null_entry) {
2208 fn = fib6_backtrack(fn, &fl6->saddr);
2210 goto redo_rt6_select;
2211 else if (strict & RT6_LOOKUP_F_REACHABLE) {
2212 /* also consider unreachable route */
2213 strict &= ~RT6_LOOKUP_F_REACHABLE;
2215 goto redo_rt6_select;
2219 trace_fib6_table_lookup(net, res, table, fl6);
2224 struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
2225 int oif, struct flowi6 *fl6,
2226 const struct sk_buff *skb, int flags)
2228 struct fib6_result res = {};
2229 struct rt6_info *rt = NULL;
2232 WARN_ON_ONCE((flags & RT6_LOOKUP_F_DST_NOREF) &&
2233 !rcu_read_lock_held());
2235 strict |= flags & RT6_LOOKUP_F_IFACE;
2236 strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
2237 if (net->ipv6.devconf_all->forwarding == 0)
2238 strict |= RT6_LOOKUP_F_REACHABLE;
2242 fib6_table_lookup(net, table, oif, fl6, &res, strict);
2243 if (res.f6i == net->ipv6.fib6_null_entry)
2246 fib6_select_path(net, &res, fl6, oif, false, skb, strict);
2248 /*Search through exception table */
2249 rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
2252 } else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
2253 !res.nh->fib_nh_gw_family)) {
2254 /* Create a RTF_CACHE clone which will not be
2255 * owned by the fib6 tree. It is for the special case where
2256 * the daddr in the skb during the neighbor look-up is different
2257 * from the fl6->daddr used to look-up route here.
2259 rt = ip6_rt_cache_alloc(&res, &fl6->daddr, NULL);
2262 /* 1 refcnt is taken during ip6_rt_cache_alloc().
2263 * As rt6_uncached_list_add() does not consume refcnt,
2264 * this refcnt is always returned to the caller even
2265 * if caller sets RT6_LOOKUP_F_DST_NOREF flag.
2267 rt6_uncached_list_add(rt);
2268 atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
2274 /* Get a percpu copy */
2276 rt = rt6_get_pcpu_route(&res);
2279 rt = rt6_make_pcpu_route(net, &res);
2285 rt = net->ipv6.ip6_null_entry;
2286 if (!(flags & RT6_LOOKUP_F_DST_NOREF))
2287 ip6_hold_safe(net, &rt);
2292 EXPORT_SYMBOL_GPL(ip6_pol_route);
2294 static struct rt6_info *ip6_pol_route_input(struct net *net,
2295 struct fib6_table *table,
2297 const struct sk_buff *skb,
2300 return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags);
2303 struct dst_entry *ip6_route_input_lookup(struct net *net,
2304 struct net_device *dev,
2306 const struct sk_buff *skb,
2309 if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
2310 flags |= RT6_LOOKUP_F_IFACE;
2312 return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input);
2314 EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
2316 static void ip6_multipath_l3_keys(const struct sk_buff *skb,
2317 struct flow_keys *keys,
2318 struct flow_keys *flkeys)
2320 const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
2321 const struct ipv6hdr *key_iph = outer_iph;
2322 struct flow_keys *_flkeys = flkeys;
2323 const struct ipv6hdr *inner_iph;
2324 const struct icmp6hdr *icmph;
2325 struct ipv6hdr _inner_iph;
2326 struct icmp6hdr _icmph;
2328 if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
2331 icmph = skb_header_pointer(skb, skb_transport_offset(skb),
2332 sizeof(_icmph), &_icmph);
2336 if (icmph->icmp6_type != ICMPV6_DEST_UNREACH &&
2337 icmph->icmp6_type != ICMPV6_PKT_TOOBIG &&
2338 icmph->icmp6_type != ICMPV6_TIME_EXCEED &&
2339 icmph->icmp6_type != ICMPV6_PARAMPROB)
2342 inner_iph = skb_header_pointer(skb,
2343 skb_transport_offset(skb) + sizeof(*icmph),
2344 sizeof(_inner_iph), &_inner_iph);
	key_iph = inner_iph;
	_flkeys = NULL;
out:
	if (_flkeys) {
		keys->addrs.v6addrs.src = _flkeys->addrs.v6addrs.src;
		keys->addrs.v6addrs.dst = _flkeys->addrs.v6addrs.dst;
		keys->tags.flow_label = _flkeys->tags.flow_label;
		keys->basic.ip_proto = _flkeys->basic.ip_proto;
	} else {
		keys->addrs.v6addrs.src = key_iph->saddr;
		keys->addrs.v6addrs.dst = key_iph->daddr;
		keys->tags.flow_label = ip6_flowlabel(key_iph);
		keys->basic.ip_proto = key_iph->nexthdr;
	}
}
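/* For ICMPv6 errors the keys above are taken from the embedded (inner)
 * packet, so the error is hashed onto the same multipath nexthop as the
 * flow that triggered it.
 */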
2364 /* if skb is set it will be used and fl6 can be NULL */
2365 u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
2366 const struct sk_buff *skb, struct flow_keys *flkeys)
2368 struct flow_keys hash_keys;
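	/* net.ipv6.fib_multipath_hash_policy selects what gets hashed:
	 * 0 - L3 addresses and flow label, 1 - L4 five-tuple, 2 - L3 of the
	 * inner packet when an encapsulation is recognised.  For example,
	 * "sysctl -w net.ipv6.fib_multipath_hash_policy=1" switches a host
	 * to L4 hashing.
	 */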
2371 switch (ip6_multipath_hash_policy(net)) {
2373 memset(&hash_keys, 0, sizeof(hash_keys));
2374 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2376 ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
2378 hash_keys.addrs.v6addrs.src = fl6->saddr;
2379 hash_keys.addrs.v6addrs.dst = fl6->daddr;
2380 hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
2381 hash_keys.basic.ip_proto = fl6->flowi6_proto;
2386 unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
2387 struct flow_keys keys;
2389 /* short-circuit if we already have L4 hash present */
2391 return skb_get_hash_raw(skb) >> 1;
2393 memset(&hash_keys, 0, sizeof(hash_keys));
2396 skb_flow_dissect_flow_keys(skb, &keys, flag);
2399 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2400 hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
2401 hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
2402 hash_keys.ports.src = flkeys->ports.src;
2403 hash_keys.ports.dst = flkeys->ports.dst;
2404 hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2406 memset(&hash_keys, 0, sizeof(hash_keys));
2407 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2408 hash_keys.addrs.v6addrs.src = fl6->saddr;
2409 hash_keys.addrs.v6addrs.dst = fl6->daddr;
2410 hash_keys.ports.src = fl6->fl6_sport;
2411 hash_keys.ports.dst = fl6->fl6_dport;
2412 hash_keys.basic.ip_proto = fl6->flowi6_proto;
2416 memset(&hash_keys, 0, sizeof(hash_keys));
2417 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2419 struct flow_keys keys;
2422 skb_flow_dissect_flow_keys(skb, &keys, 0);
2426 /* Inner can be v4 or v6 */
2427 if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2428 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2429 hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
2430 hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
2431 } else if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2432 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2433 hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
2434 hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
2435 hash_keys.tags.flow_label = flkeys->tags.flow_label;
2436 hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2438 /* Same as case 0 */
2439 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2440 ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
2443 /* Same as case 0 */
2444 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2445 hash_keys.addrs.v6addrs.src = fl6->saddr;
2446 hash_keys.addrs.v6addrs.dst = fl6->daddr;
2447 hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
2448 hash_keys.basic.ip_proto = fl6->flowi6_proto;
	mhash = flow_hash_from_keys(&hash_keys);

	return mhash >> 1;
}
2457 /* Called with rcu held */
2458 void ip6_route_input(struct sk_buff *skb)
2460 const struct ipv6hdr *iph = ipv6_hdr(skb);
2461 struct net *net = dev_net(skb->dev);
2462 int flags = RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_DST_NOREF;
2463 struct ip_tunnel_info *tun_info;
2464 struct flowi6 fl6 = {
2465 .flowi6_iif = skb->dev->ifindex,
2466 .daddr = iph->daddr,
2467 .saddr = iph->saddr,
2468 .flowlabel = ip6_flowinfo(iph),
2469 .flowi6_mark = skb->mark,
2470 .flowi6_proto = iph->nexthdr,
2472 struct flow_keys *flkeys = NULL, _flkeys;
2474 tun_info = skb_tunnel_info(skb);
2475 if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
2476 fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
2478 if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys))
2481 if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
2482 fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys);
2484 skb_dst_set_noref(skb, ip6_route_input_lookup(net, skb->dev,
2488 static struct rt6_info *ip6_pol_route_output(struct net *net,
2489 struct fib6_table *table,
2491 const struct sk_buff *skb,
2494 return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags);
2497 struct dst_entry *ip6_route_output_flags_noref(struct net *net,
2498 const struct sock *sk,
2499 struct flowi6 *fl6, int flags)
2503 if (ipv6_addr_type(&fl6->daddr) &
2504 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)) {
2505 struct dst_entry *dst;
2507 /* This function does not take refcnt on the dst */
2508 dst = l3mdev_link_scope_lookup(net, fl6);
2513 fl6->flowi6_iif = LOOPBACK_IFINDEX;
2515 flags |= RT6_LOOKUP_F_DST_NOREF;
2516 any_src = ipv6_addr_any(&fl6->saddr);
2517 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
2518 (fl6->flowi6_oif && any_src))
2519 flags |= RT6_LOOKUP_F_IFACE;
2522 flags |= RT6_LOOKUP_F_HAS_SADDR;
2524 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
2526 return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output);
2528 EXPORT_SYMBOL_GPL(ip6_route_output_flags_noref);
2530 struct dst_entry *ip6_route_output_flags(struct net *net,
2531 const struct sock *sk,
2535 struct dst_entry *dst;
2536 struct rt6_info *rt6;
2539 dst = ip6_route_output_flags_noref(net, sk, fl6, flags);
2540 rt6 = (struct rt6_info *)dst;
2541 /* For dst cached in uncached_list, refcnt is already taken. */
2542 if (list_empty(&rt6->rt6i_uncached) && !dst_hold_safe(dst)) {
2543 dst = &net->ipv6.ip6_null_entry->dst;
2550 EXPORT_SYMBOL_GPL(ip6_route_output_flags);
2552 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2554 struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
2555 struct net_device *loopback_dev = net->loopback_dev;
2556 struct dst_entry *new = NULL;
2558 rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
2559 DST_OBSOLETE_DEAD, 0);
2562 atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
2566 new->input = dst_discard;
2567 new->output = dst_discard_out;
2569 dst_copy_metrics(new, &ort->dst);
2571 rt->rt6i_idev = in6_dev_get(loopback_dev);
2572 rt->rt6i_gateway = ort->rt6i_gateway;
2573 rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
2575 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
2576 #ifdef CONFIG_IPV6_SUBTREES
2577 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
2581 dst_release(dst_orig);
2582 return new ? new : ERR_PTR(-ENOMEM);
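/* Background note: a blackhole route is a metadata-only clone of the
 * original dst whose input/output hooks simply discard packets. It lets
 * callers (e.g. the xfrm layer while a security association is being
 * resolved) keep a dst attached to a flow without allowing any traffic
 * through it.
 */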
2586 * Destination cache support functions
2589 static bool fib6_check(struct fib6_info *f6i, u32 cookie)
2593 if (!fib6_get_cookie_safe(f6i, &rt_cookie) || rt_cookie != cookie)
2596 if (fib6_check_expired(f6i))
2602 static struct dst_entry *rt6_check(struct rt6_info *rt,
2603 struct fib6_info *from,
2608 if (!from || !fib6_get_cookie_safe(from, &rt_cookie) ||
2609 rt_cookie != cookie)
2612 if (rt6_check_expired(rt))
2618 static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt,
2619 struct fib6_info *from,
2622 if (!__rt6_check_expired(rt) &&
2623 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
2624 fib6_check(from, cookie))
2630 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
2632 struct dst_entry *dst_ret;
2633 struct fib6_info *from;
2634 struct rt6_info *rt;
2636 rt = container_of(dst, struct rt6_info, dst);
2639 return rt6_is_valid(rt) ? dst : NULL;
2643 /* All IPV6 dsts are created with ->obsolete set to the value
2644 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
2645 * into this function always.
2648 from = rcu_dereference(rt->from);
2650 if (from && (rt->rt6i_flags & RTF_PCPU ||
2651 unlikely(!list_empty(&rt->rt6i_uncached))))
2652 dst_ret = rt6_dst_from_check(rt, from, cookie);
2654 dst_ret = rt6_check(rt, from, cookie);
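/* Background note: the cookie checked above is the fib6 node sernum
 * captured when the dst was handed out (sockets keep it in
 * inet6_sk(sk)->dst_cookie, as ip6_sk_update_pmtu() below shows). Any
 * change to the FIB bumps the sernum, so a stale cached dst fails
 * ->check() and the caller performs a fresh route lookup.
 */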
2661 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
2663 struct rt6_info *rt = (struct rt6_info *) dst;
2666 if (rt->rt6i_flags & RTF_CACHE) {
2668 if (rt6_check_expired(rt)) {
2669 rt6_remove_exception_rt(rt);
2681 static void ip6_link_failure(struct sk_buff *skb)
2683 struct rt6_info *rt;
2685 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
2687 rt = (struct rt6_info *) skb_dst(skb);
2690 if (rt->rt6i_flags & RTF_CACHE) {
2691 rt6_remove_exception_rt(rt);
2693 struct fib6_info *from;
2694 struct fib6_node *fn;
2696 from = rcu_dereference(rt->from);
2698 fn = rcu_dereference(from->fib6_node);
2699 if (fn && (rt->rt6i_flags & RTF_DEFAULT))
2700 WRITE_ONCE(fn->fn_sernum, -1);
2707 static void rt6_update_expires(struct rt6_info *rt0, int timeout)
2709 if (!(rt0->rt6i_flags & RTF_EXPIRES)) {
2710 struct fib6_info *from;
2713 from = rcu_dereference(rt0->from);
2715 rt0->dst.expires = from->expires;
2719 dst_set_expires(&rt0->dst, timeout);
2720 rt0->rt6i_flags |= RTF_EXPIRES;
2723 static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
2725 struct net *net = dev_net(rt->dst.dev);
2727 dst_metric_set(&rt->dst, RTAX_MTU, mtu);
2728 rt->rt6i_flags |= RTF_MODIFIED;
2729 rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
2732 static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
2734 return !(rt->rt6i_flags & RTF_CACHE) &&
2735 (rt->rt6i_flags & RTF_PCPU || rcu_access_pointer(rt->from));
2738 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
2739 const struct ipv6hdr *iph, u32 mtu,
2742 const struct in6_addr *daddr, *saddr;
2743 struct rt6_info *rt6 = (struct rt6_info *)dst;
2745 /* Note: do *NOT* check dst_metric_locked(dst, RTAX_MTU)
2746 * IPv6 pmtu discovery isn't optional, so 'mtu lock' cannot disable it.
2747 * [see also comment in rt6_mtu_change_route()]
2751 daddr = &iph->daddr;
2752 saddr = &iph->saddr;
2754 daddr = &sk->sk_v6_daddr;
2755 saddr = &inet6_sk(sk)->saddr;
2762 dst_confirm_neigh(dst, daddr);
2764 mtu = max_t(u32, mtu, IPV6_MIN_MTU);
2765 if (mtu >= dst_mtu(dst))
2768 if (!rt6_cache_allowed_for_pmtu(rt6)) {
2769 rt6_do_update_pmtu(rt6, mtu);
2770 /* update rt6_ex->stamp for cache */
2771 if (rt6->rt6i_flags & RTF_CACHE)
2772 rt6_update_exception_stamp_rt(rt6);
2774 struct fib6_result res = {};
2775 struct rt6_info *nrt6;
2778 res.f6i = rcu_dereference(rt6->from);
2782 res.fib6_flags = res.f6i->fib6_flags;
2783 res.fib6_type = res.f6i->fib6_type;
2786 struct fib6_nh_match_arg arg = {
2788 .gw = &rt6->rt6i_gateway,
2791 nexthop_for_each_fib6_nh(res.f6i->nh,
2792 fib6_nh_find_match, &arg);
2794 /* fib6_info uses a nexthop that does not have fib6_nh
2795 * using the dst->dev + gw. Should be impossible.
2802 res.nh = res.f6i->fib6_nh;
2805 nrt6 = ip6_rt_cache_alloc(&res, daddr, saddr);
2807 rt6_do_update_pmtu(nrt6, mtu);
2808 if (rt6_insert_exception(nrt6, &res))
2809 dst_release_immediate(&nrt6->dst);
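/* Background note: when the route being updated is not itself a cached
 * route, the learned path MTU is stored on a freshly allocated
 * exception (RTF_CACHE) entry instead of the shared fib entry, and it
 * ages out after the interval configured via ip6_rt_mtu_expires
 * (net.ipv6.route.mtu_expires).
 */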
2816 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
2817 struct sk_buff *skb, u32 mtu,
2820 __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu,
2824 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
2825 int oif, u32 mark, kuid_t uid)
2827 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
2828 struct dst_entry *dst;
2829 struct flowi6 fl6 = {
2831 .flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark),
2832 .daddr = iph->daddr,
2833 .saddr = iph->saddr,
2834 .flowlabel = ip6_flowinfo(iph),
2838 dst = ip6_route_output(net, NULL, &fl6);
2840 __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu), true);
2843 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
2845 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
2847 int oif = sk->sk_bound_dev_if;
2848 struct dst_entry *dst;
2850 if (!oif && skb->dev)
2851 oif = l3mdev_master_ifindex(skb->dev);
2853 ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);
2855 dst = __sk_dst_get(sk);
2856 if (!dst || !dst->obsolete ||
2857 dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
2861 if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
2862 ip6_datagram_dst_update(sk, false);
2865 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
2867 void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
2868 const struct flowi6 *fl6)
2870 #ifdef CONFIG_IPV6_SUBTREES
2871 struct ipv6_pinfo *np = inet6_sk(sk);
2874 ip6_dst_store(sk, dst,
2875 ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr) ?
2876 &sk->sk_v6_daddr : NULL,
2877 #ifdef CONFIG_IPV6_SUBTREES
2878 ipv6_addr_equal(&fl6->saddr, &np->saddr) ?
2884 static bool ip6_redirect_nh_match(const struct fib6_result *res,
2886 const struct in6_addr *gw,
2887 struct rt6_info **ret)
2889 const struct fib6_nh *nh = res->nh;
2891 if (nh->fib_nh_flags & RTNH_F_DEAD || !nh->fib_nh_gw_family ||
2892 fl6->flowi6_oif != nh->fib_nh_dev->ifindex)
2895 /* rt_cache's gateway might be different from its 'parent'
2896 * in the case of an ip redirect.
2897 * So we keep searching in the exception table if the gateway is different.
2900 if (!ipv6_addr_equal(gw, &nh->fib_nh_gw6)) {
2901 struct rt6_info *rt_cache;
2903 rt_cache = rt6_find_cached_rt(res, &fl6->daddr, &fl6->saddr);
2905 ipv6_addr_equal(gw, &rt_cache->rt6i_gateway)) {
2914 struct fib6_nh_rd_arg {
2915 struct fib6_result *res;
2917 const struct in6_addr *gw;
2918 struct rt6_info **ret;
2921 static int fib6_nh_redirect_match(struct fib6_nh *nh, void *_arg)
2923 struct fib6_nh_rd_arg *arg = _arg;
2926 return ip6_redirect_nh_match(arg->res, arg->fl6, arg->gw, arg->ret);
2929 /* Handle redirects */
2930 struct ip6rd_flowi {
2932 struct in6_addr gateway;
2935 static struct rt6_info *__ip6_route_redirect(struct net *net,
2936 struct fib6_table *table,
2938 const struct sk_buff *skb,
2941 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
2942 struct rt6_info *ret = NULL;
2943 struct fib6_result res = {};
2944 struct fib6_nh_rd_arg arg = {
2947 .gw = &rdfl->gateway,
2950 struct fib6_info *rt;
2951 struct fib6_node *fn;
2953 /* l3mdev_update_flow overrides oif if the device is enslaved; in
2954 * this case we must match on the real ingress device, so reset it
2956 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
2957 fl6->flowi6_oif = skb->dev->ifindex;
2959 /* Get the "current" route for this destination and
2960 * check if the redirect has come from the appropriate router.
2962 * RFC 4861 specifies that redirects should only be
2963 * accepted if they come from the nexthop to the target.
2964 * Due to the way the routes are chosen, this notion
2965 * is a bit fuzzy and one might need to check all possible routers.
2970 fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
2972 for_each_fib6_node_rt_rcu(fn) {
2974 if (fib6_check_expired(rt))
2976 if (rt->fib6_flags & RTF_REJECT)
2978 if (unlikely(rt->nh)) {
2979 if (nexthop_is_blackhole(rt->nh))
2981 /* on match, res->nh is filled in and potentially ret */
2982 if (nexthop_for_each_fib6_nh(rt->nh,
2983 fib6_nh_redirect_match,
2987 res.nh = rt->fib6_nh;
2988 if (ip6_redirect_nh_match(&res, fl6, &rdfl->gateway,
2995 rt = net->ipv6.fib6_null_entry;
2996 else if (rt->fib6_flags & RTF_REJECT) {
2997 ret = net->ipv6.ip6_null_entry;
3001 if (rt == net->ipv6.fib6_null_entry) {
3002 fn = fib6_backtrack(fn, &fl6->saddr);
3008 res.nh = rt->fib6_nh;
3011 ip6_hold_safe(net, &ret);
3013 res.fib6_flags = res.f6i->fib6_flags;
3014 res.fib6_type = res.f6i->fib6_type;
3015 ret = ip6_create_rt_rcu(&res);
3020 trace_fib6_table_lookup(net, &res, table, fl6);
3024 static struct dst_entry *ip6_route_redirect(struct net *net,
3025 const struct flowi6 *fl6,
3026 const struct sk_buff *skb,
3027 const struct in6_addr *gateway)
3029 int flags = RT6_LOOKUP_F_HAS_SADDR;
3030 struct ip6rd_flowi rdfl;
3033 rdfl.gateway = *gateway;
3035 return fib6_rule_lookup(net, &rdfl.fl6, skb,
3036 flags, __ip6_route_redirect);
3039 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
3042 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
3043 struct dst_entry *dst;
3044 struct flowi6 fl6 = {
3045 .flowi6_iif = LOOPBACK_IFINDEX,
3047 .flowi6_mark = mark,
3048 .daddr = iph->daddr,
3049 .saddr = iph->saddr,
3050 .flowlabel = ip6_flowinfo(iph),
3054 dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr);
3055 rt6_do_redirect(dst, NULL, skb);
3058 EXPORT_SYMBOL_GPL(ip6_redirect);
3060 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif)
3062 const struct ipv6hdr *iph = ipv6_hdr(skb);
3063 const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
3064 struct dst_entry *dst;
3065 struct flowi6 fl6 = {
3066 .flowi6_iif = LOOPBACK_IFINDEX,
3069 .saddr = iph->daddr,
3070 .flowi6_uid = sock_net_uid(net, NULL),
3073 dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr);
3074 rt6_do_redirect(dst, NULL, skb);
3078 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
3080 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
3083 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
3085 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
3087 struct net_device *dev = dst->dev;
3088 unsigned int mtu = dst_mtu(dst);
3089 struct net *net = dev_net(dev);
3091 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
3093 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
3094 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
3097 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
3098 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
3099 * IPV6_MAXPLEN is also valid and means: "any MSS,
3100 * rely only on pmtu discovery"
3102 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
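/* Worked example: on a device with a 1500 byte MTU the advertised MSS
 * computed above is 1500 - 40 (IPv6 header) - 20 (TCP header) = 1440,
 * unless net.ipv6.route.min_adv_mss (ip6_rt_min_advmss) requires a
 * larger value.
 */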
3107 static unsigned int ip6_mtu(const struct dst_entry *dst)
3109 struct inet6_dev *idev;
3112 mtu = dst_metric_raw(dst, RTAX_MTU);
3119 idev = __in6_dev_get(dst->dev);
3121 mtu = idev->cnf.mtu6;
3125 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
3127 return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
3131 * 1. mtu on route is locked - use it
3132 * 2. mtu from nexthop exception
3133 * 3. mtu from egress device
3135 * based on ip6_dst_mtu_forward and exception logic of
3136 * rt6_find_cached_rt; called with rcu_read_lock
3138 u32 ip6_mtu_from_fib6(const struct fib6_result *res,
3139 const struct in6_addr *daddr,
3140 const struct in6_addr *saddr)
3142 const struct fib6_nh *nh = res->nh;
3143 struct fib6_info *f6i = res->f6i;
3144 struct inet6_dev *idev;
3145 struct rt6_info *rt;
3148 if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) {
3149 mtu = f6i->fib6_pmtu;
3154 rt = rt6_find_cached_rt(res, daddr, saddr);
3156 mtu = dst_metric_raw(&rt->dst, RTAX_MTU);
3158 struct net_device *dev = nh->fib_nh_dev;
3161 idev = __in6_dev_get(dev);
3162 if (idev && idev->cnf.mtu6 > mtu)
3163 mtu = idev->cnf.mtu6;
3166 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
3168 return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
3171 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
3174 struct dst_entry *dst;
3175 struct rt6_info *rt;
3176 struct inet6_dev *idev = in6_dev_get(dev);
3177 struct net *net = dev_net(dev);
3179 if (unlikely(!idev))
3180 return ERR_PTR(-ENODEV);
3182 rt = ip6_dst_alloc(net, dev, 0);
3183 if (unlikely(!rt)) {
3185 dst = ERR_PTR(-ENOMEM);
3189 rt->dst.flags |= DST_HOST;
3190 rt->dst.input = ip6_input;
3191 rt->dst.output = ip6_output;
3192 rt->rt6i_gateway = fl6->daddr;
3193 rt->rt6i_dst.addr = fl6->daddr;
3194 rt->rt6i_dst.plen = 128;
3195 rt->rt6i_idev = idev;
3196 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
3198 /* Add this dst into uncached_list so that rt6_disable_ip() can
3199 * do proper release of the net_device
3201 rt6_uncached_list_add(rt);
3202 atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
3204 dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
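/* Background note: icmp6_dst_alloc() builds a dst without a FIB lookup
 * for locally generated control traffic; neighbour discovery and MLD
 * are typical callers. The dst is placed on the uncached list above so
 * rt6_disable_ip() can release the net_device reference if the
 * interface disappears.
 */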
3210 static int ip6_dst_gc(struct dst_ops *ops)
3212 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
3213 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
3214 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
3215 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
3216 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
3217 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
3220 entries = dst_entries_get_fast(ops);
3221 if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
3222 entries <= rt_max_size)
3225 net->ipv6.ip6_rt_gc_expire++;
3226 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
3227 entries = dst_entries_get_slow(ops);
3228 if (entries < ops->gc_thresh)
3229 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
3231 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
3232 return entries > rt_max_size;
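/* Background note: this is the dst_ops garbage-collection hook, invoked
 * from dst_alloc() once the number of IPv6 dst entries crosses
 * ops->gc_thresh. The knobs consulted above are exposed under
 * /proc/sys/net/ipv6/route/ as gc_min_interval, max_size, gc_timeout
 * and gc_elasticity.
 */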
3235 static int ip6_nh_lookup_table(struct net *net, struct fib6_config *cfg,
3236 const struct in6_addr *gw_addr, u32 tbid,
3237 int flags, struct fib6_result *res)
3239 struct flowi6 fl6 = {
3240 .flowi6_oif = cfg->fc_ifindex,
3242 .saddr = cfg->fc_prefsrc,
3244 struct fib6_table *table;
3247 table = fib6_get_table(net, tbid);
3251 if (!ipv6_addr_any(&cfg->fc_prefsrc))
3252 flags |= RT6_LOOKUP_F_HAS_SADDR;
3254 flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE;
3256 err = fib6_table_lookup(net, table, cfg->fc_ifindex, &fl6, res, flags);
3257 if (!err && res->f6i != net->ipv6.fib6_null_entry)
3258 fib6_select_path(net, res, &fl6, cfg->fc_ifindex,
3259 cfg->fc_ifindex != 0, NULL, flags);
3264 static int ip6_route_check_nh_onlink(struct net *net,
3265 struct fib6_config *cfg,
3266 const struct net_device *dev,
3267 struct netlink_ext_ack *extack)
3269 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
3270 const struct in6_addr *gw_addr = &cfg->fc_gateway;
3271 struct fib6_result res = {};
3274 err = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0, &res);
3275 if (!err && !(res.fib6_flags & RTF_REJECT) &&
3276 /* ignore match if it is the default route */
3277 !ipv6_addr_any(&res.f6i->fib6_dst.addr) &&
3278 (res.fib6_type != RTN_UNICAST || dev != res.nh->fib_nh_dev)) {
3279 NL_SET_ERR_MSG(extack,
3280 "Nexthop has invalid gateway or device mismatch");
3287 static int ip6_route_check_nh(struct net *net,
3288 struct fib6_config *cfg,
3289 struct net_device **_dev,
3290 struct inet6_dev **idev)
3292 const struct in6_addr *gw_addr = &cfg->fc_gateway;
3293 struct net_device *dev = _dev ? *_dev : NULL;
3294 int flags = RT6_LOOKUP_F_IFACE;
3295 struct fib6_result res = {};
3296 int err = -EHOSTUNREACH;
3298 if (cfg->fc_table) {
3299 err = ip6_nh_lookup_table(net, cfg, gw_addr,
3300 cfg->fc_table, flags, &res);
3301 /* gw_addr must not itself require a gateway or resolve to a reject
3302 * route. If a device is given, it must match the result.
3304 if (err || res.fib6_flags & RTF_REJECT ||
3305 res.nh->fib_nh_gw_family ||
3306 (dev && dev != res.nh->fib_nh_dev))
3307 err = -EHOSTUNREACH;
3311 struct flowi6 fl6 = {
3312 .flowi6_oif = cfg->fc_ifindex,
3316 err = fib6_lookup(net, cfg->fc_ifindex, &fl6, &res, flags);
3317 if (err || res.fib6_flags & RTF_REJECT ||
3318 res.nh->fib_nh_gw_family)
3319 err = -EHOSTUNREACH;
3324 fib6_select_path(net, &res, &fl6, cfg->fc_ifindex,
3325 cfg->fc_ifindex != 0, NULL, flags);
3330 if (dev != res.nh->fib_nh_dev)
3331 err = -EHOSTUNREACH;
3333 *_dev = dev = res.nh->fib_nh_dev;
3335 *idev = in6_dev_get(dev);
3341 static int ip6_validate_gw(struct net *net, struct fib6_config *cfg,
3342 struct net_device **_dev, struct inet6_dev **idev,
3343 struct netlink_ext_ack *extack)
3345 const struct in6_addr *gw_addr = &cfg->fc_gateway;
3346 int gwa_type = ipv6_addr_type(gw_addr);
3347 bool skip_dev = gwa_type & IPV6_ADDR_LINKLOCAL ? false : true;
3348 const struct net_device *dev = *_dev;
3349 bool need_addr_check = !dev;
3352 /* if gw_addr is local we will fail to detect this in case
3353 * address is still TENTATIVE (DAD in progress). rt6_lookup()
3354 * will return already-added prefix route via interface that
3355 * prefix route was assigned to, which might be non-loopback.
3358 ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
3359 NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
3363 if (gwa_type != (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST)) {
3364 /* IPv6 strictly inhibits using non-link-local
3365 * addresses as the nexthop address.
3366 * Otherwise, a router will not be able to send redirects.
3367 * It is very good, but in some (rare!) circumstances
3368 * (SIT, PtP, NBMA NOARP links) it is handy to allow
3369 * some exceptions. --ANK
3370 * We allow IPv4-mapped nexthops to support RFC4798-type
3373 if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) {
3374 NL_SET_ERR_MSG(extack, "Invalid gateway address");
3380 if (cfg->fc_flags & RTNH_F_ONLINK)
3381 err = ip6_route_check_nh_onlink(net, cfg, dev, extack);
3383 err = ip6_route_check_nh(net, cfg, _dev, idev);
3391 /* reload in case device was changed */
3396 NL_SET_ERR_MSG(extack, "Egress device not specified");
3398 } else if (dev->flags & IFF_LOOPBACK) {
3399 NL_SET_ERR_MSG(extack,
3400 "Egress device can not be loopback device for this route");
3404 /* if we did not check gw_addr above, do so now that the
3405 * egress device has been resolved.
3407 if (need_addr_check &&
3408 ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
3409 NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
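/* Usage sketch (illustrative, addresses are examples): a gateway that no
 * connected prefix covers is only accepted through the RTNH_F_ONLINK
 * path above (ip6_route_check_nh_onlink), which is what the iproute2
 * "onlink" flag requests:
 *   ip -6 route add 2001:db8:1::/64 via 2001:db8:ff::1 dev eth0 onlink
 */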
3418 static bool fib6_is_reject(u32 flags, struct net_device *dev, int addr_type)
3420 if ((flags & RTF_REJECT) ||
3421 (dev && (dev->flags & IFF_LOOPBACK) &&
3422 !(addr_type & IPV6_ADDR_LOOPBACK) &&
3423 !(flags & (RTF_ANYCAST | RTF_LOCAL))))
3429 int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
3430 struct fib6_config *cfg, gfp_t gfp_flags,
3431 struct netlink_ext_ack *extack)
3433 struct net_device *dev = NULL;
3434 struct inet6_dev *idev = NULL;
3438 fib6_nh->fib_nh_family = AF_INET6;
3439 #ifdef CONFIG_IPV6_ROUTER_PREF
3440 fib6_nh->last_probe = jiffies;
3444 if (cfg->fc_ifindex) {
3445 dev = dev_get_by_index(net, cfg->fc_ifindex);
3448 idev = in6_dev_get(dev);
3453 if (cfg->fc_flags & RTNH_F_ONLINK) {
3455 NL_SET_ERR_MSG(extack,
3456 "Nexthop device required for onlink");
3460 if (!(dev->flags & IFF_UP)) {
3461 NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3466 fib6_nh->fib_nh_flags |= RTNH_F_ONLINK;
3469 fib6_nh->fib_nh_weight = 1;
3471 /* We cannot add true routes via loopback here,
3472 * they would result in kernel looping; promote them to reject routes
3474 addr_type = ipv6_addr_type(&cfg->fc_dst);
3475 if (fib6_is_reject(cfg->fc_flags, dev, addr_type)) {
3476 /* hold loopback dev/idev if we haven't done so. */
3477 if (dev != net->loopback_dev) {
3482 dev = net->loopback_dev;
3484 idev = in6_dev_get(dev);
3493 if (cfg->fc_flags & RTF_GATEWAY) {
3494 err = ip6_validate_gw(net, cfg, &dev, &idev, extack);
3498 fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
3499 fib6_nh->fib_nh_gw_family = AF_INET6;
3506 if (idev->cnf.disable_ipv6) {
3507 NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
3512 if (!(dev->flags & IFF_UP) && !cfg->fc_ignore_dev_down) {
3513 NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3518 if (!(cfg->fc_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
3519 !netif_carrier_ok(dev))
3520 fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
3522 err = fib_nh_common_init(&fib6_nh->nh_common, cfg->fc_encap,
3523 cfg->fc_encap_type, cfg, gfp_flags, extack);
3528 fib6_nh->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, gfp_flags);
3529 if (!fib6_nh->rt6i_pcpu) {
3534 fib6_nh->fib_nh_dev = dev;
3535 fib6_nh->fib_nh_oif = dev->ifindex;
3542 lwtstate_put(fib6_nh->fib_nh_lws);
3543 fib6_nh->fib_nh_lws = NULL;
3551 void fib6_nh_release(struct fib6_nh *fib6_nh)
3553 struct rt6_exception_bucket *bucket;
3557 fib6_nh_flush_exceptions(fib6_nh, NULL);
3558 bucket = fib6_nh_get_excptn_bucket(fib6_nh, NULL);
3560 rcu_assign_pointer(fib6_nh->rt6i_exception_bucket, NULL);
3566 if (fib6_nh->rt6i_pcpu) {
3569 for_each_possible_cpu(cpu) {
3570 struct rt6_info **ppcpu_rt;
3571 struct rt6_info *pcpu_rt;
3573 ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
3574 pcpu_rt = *ppcpu_rt;
3576 dst_dev_put(&pcpu_rt->dst);
3577 dst_release(&pcpu_rt->dst);
3582 free_percpu(fib6_nh->rt6i_pcpu);
3585 fib_nh_common_release(&fib6_nh->nh_common);
3588 void fib6_nh_release_dsts(struct fib6_nh *fib6_nh)
3592 if (!fib6_nh->rt6i_pcpu)
3595 for_each_possible_cpu(cpu) {
3596 struct rt6_info *pcpu_rt, **ppcpu_rt;
3598 ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
3599 pcpu_rt = xchg(ppcpu_rt, NULL);
3601 dst_dev_put(&pcpu_rt->dst);
3602 dst_release(&pcpu_rt->dst);
3607 static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
3609 struct netlink_ext_ack *extack)
3611 struct net *net = cfg->fc_nlinfo.nl_net;
3612 struct fib6_info *rt = NULL;
3613 struct nexthop *nh = NULL;
3614 struct fib6_table *table;
3615 struct fib6_nh *fib6_nh;
3619 /* RTF_PCPU is an internal flag; can not be set by userspace */
3620 if (cfg->fc_flags & RTF_PCPU) {
3621 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
3625 /* RTF_CACHE is an internal flag; can not be set by userspace */
3626 if (cfg->fc_flags & RTF_CACHE) {
3627 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE");
3631 if (cfg->fc_type > RTN_MAX) {
3632 NL_SET_ERR_MSG(extack, "Invalid route type");
3636 if (cfg->fc_dst_len > 128) {
3637 NL_SET_ERR_MSG(extack, "Invalid prefix length");
3640 if (cfg->fc_src_len > 128) {
3641 NL_SET_ERR_MSG(extack, "Invalid source address length");
3644 #ifndef CONFIG_IPV6_SUBTREES
3645 if (cfg->fc_src_len) {
3646 NL_SET_ERR_MSG(extack,
3647 "Specifying source address requires IPV6_SUBTREES to be enabled");
3651 if (cfg->fc_nh_id) {
3652 nh = nexthop_find_by_id(net, cfg->fc_nh_id);
3654 NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
3657 err = fib6_check_nexthop(nh, cfg, extack);
3663 if (cfg->fc_nlinfo.nlh &&
3664 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
3665 table = fib6_get_table(net, cfg->fc_table);
3667 pr_warn("NLM_F_CREATE should be specified when creating new route\n");
3668 table = fib6_new_table(net, cfg->fc_table);
3671 table = fib6_new_table(net, cfg->fc_table);
3678 rt = fib6_info_alloc(gfp_flags, !nh);
3682 rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len,
3684 if (IS_ERR(rt->fib6_metrics)) {
3685 err = PTR_ERR(rt->fib6_metrics);
3686 /* Do not leave garbage there. */
3687 rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics;
3691 if (cfg->fc_flags & RTF_ADDRCONF)
3692 rt->dst_nocount = true;
3694 if (cfg->fc_flags & RTF_EXPIRES)
3695 fib6_set_expires(rt, jiffies +
3696 clock_t_to_jiffies(cfg->fc_expires));
3698 fib6_clean_expires(rt);
3700 if (cfg->fc_protocol == RTPROT_UNSPEC)
3701 cfg->fc_protocol = RTPROT_BOOT;
3702 rt->fib6_protocol = cfg->fc_protocol;
3704 rt->fib6_table = table;
3705 rt->fib6_metric = cfg->fc_metric;
3706 rt->fib6_type = cfg->fc_type ? : RTN_UNICAST;
3707 rt->fib6_flags = cfg->fc_flags & ~RTF_GATEWAY;
3709 ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
3710 rt->fib6_dst.plen = cfg->fc_dst_len;
3711 if (rt->fib6_dst.plen == 128)
3712 rt->dst_host = true;
3714 #ifdef CONFIG_IPV6_SUBTREES
3715 ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len);
3716 rt->fib6_src.plen = cfg->fc_src_len;
3719 if (rt->fib6_src.plen) {
3720 NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
3723 if (!nexthop_get(nh)) {
3724 NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
3728 fib6_nh = nexthop_fib6_nh(rt->nh);
3730 err = fib6_nh_init(net, rt->fib6_nh, cfg, gfp_flags, extack);
3734 fib6_nh = rt->fib6_nh;
3736 /* We cannot add true routes via loopback here, they would
3737 * result in kernel looping; promote them to reject routes
3739 addr_type = ipv6_addr_type(&cfg->fc_dst);
3740 if (fib6_is_reject(cfg->fc_flags, rt->fib6_nh->fib_nh_dev,
3742 rt->fib6_flags = RTF_REJECT | RTF_NONEXTHOP;
3745 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
3746 struct net_device *dev = fib6_nh->fib_nh_dev;
3748 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
3749 NL_SET_ERR_MSG(extack, "Invalid source address");
3753 rt->fib6_prefsrc.addr = cfg->fc_prefsrc;
3754 rt->fib6_prefsrc.plen = 128;
3756 rt->fib6_prefsrc.plen = 0;
3760 fib6_info_release(rt);
3761 return ERR_PTR(err);
3763 ip_fib_metrics_put(rt->fib6_metrics);
3765 return ERR_PTR(err);
3768 int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
3769 struct netlink_ext_ack *extack)
3771 struct fib6_info *rt;
3774 rt = ip6_route_info_create(cfg, gfp_flags, extack);
3778 err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
3779 fib6_info_release(rt);
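/* Background note: ip6_route_info_create() only builds and validates the
 * fib6_info; insertion into the FIB tree and the netlink notification
 * happen in __ip6_ins_rt() above. Both the rtnetlink RTM_NEWROUTE
 * handler and the legacy ioctl path funnel into these helpers.
 *
 * Usage sketch (illustrative, addresses are examples):
 *   ip -6 route add 2001:db8:1::/64 via fe80::1 dev eth0 metric 1024
 */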
3784 static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info)
3786 struct net *net = info->nl_net;
3787 struct fib6_table *table;
3790 if (rt == net->ipv6.fib6_null_entry) {
3795 table = rt->fib6_table;
3796 spin_lock_bh(&table->tb6_lock);
3797 err = fib6_del(rt, info);
3798 spin_unlock_bh(&table->tb6_lock);
3801 fib6_info_release(rt);
3805 int ip6_del_rt(struct net *net, struct fib6_info *rt)
3807 struct nl_info info = { .nl_net = net };
3809 return __ip6_del_rt(rt, &info);
3812 static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg)
3814 struct nl_info *info = &cfg->fc_nlinfo;
3815 struct net *net = info->nl_net;
3816 struct sk_buff *skb = NULL;
3817 struct fib6_table *table;
3820 if (rt == net->ipv6.fib6_null_entry)
3822 table = rt->fib6_table;
3823 spin_lock_bh(&table->tb6_lock);
3825 if (rt->fib6_nsiblings && cfg->fc_delete_all_nh) {
3826 struct fib6_info *sibling, *next_sibling;
3828 /* prefer to send a single notification with all hops */
3829 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
3831 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
3833 if (rt6_fill_node(net, skb, rt, NULL,
3834 NULL, NULL, 0, RTM_DELROUTE,
3835 info->portid, seq, 0) < 0) {
3839 info->skip_notify = 1;
3842 info->skip_notify_kernel = 1;
3843 call_fib6_multipath_entry_notifiers(net,
3844 FIB_EVENT_ENTRY_DEL,
3848 list_for_each_entry_safe(sibling, next_sibling,
3851 err = fib6_del(sibling, info);
3857 err = fib6_del(rt, info);
3859 spin_unlock_bh(&table->tb6_lock);
3861 fib6_info_release(rt);
3864 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
3865 info->nlh, gfp_any());
3870 static int __ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
3874 if (cfg->fc_ifindex && rt->dst.dev->ifindex != cfg->fc_ifindex)
3877 if (cfg->fc_flags & RTF_GATEWAY &&
3878 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
3881 rc = rt6_remove_exception_rt(rt);
3886 static int ip6_del_cached_rt(struct fib6_config *cfg, struct fib6_info *rt,
3889 struct fib6_result res = {
3893 struct rt6_info *rt_cache;
3895 rt_cache = rt6_find_cached_rt(&res, &cfg->fc_dst, &cfg->fc_src);
3897 return __ip6_del_cached_rt(rt_cache, cfg);
3902 struct fib6_nh_del_cached_rt_arg {
3903 struct fib6_config *cfg;
3904 struct fib6_info *f6i;
3907 static int fib6_nh_del_cached_rt(struct fib6_nh *nh, void *_arg)
3909 struct fib6_nh_del_cached_rt_arg *arg = _arg;
3912 rc = ip6_del_cached_rt(arg->cfg, arg->f6i, nh);
3913 return rc != -ESRCH ? rc : 0;
3916 static int ip6_del_cached_rt_nh(struct fib6_config *cfg, struct fib6_info *f6i)
3918 struct fib6_nh_del_cached_rt_arg arg = {
3923 return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_del_cached_rt, &arg);
3926 static int ip6_route_del(struct fib6_config *cfg,
3927 struct netlink_ext_ack *extack)
3929 struct fib6_table *table;
3930 struct fib6_info *rt;
3931 struct fib6_node *fn;
3934 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
3936 NL_SET_ERR_MSG(extack, "FIB table does not exist");
3942 fn = fib6_locate(&table->tb6_root,
3943 &cfg->fc_dst, cfg->fc_dst_len,
3944 &cfg->fc_src, cfg->fc_src_len,
3945 !(cfg->fc_flags & RTF_CACHE));
3948 for_each_fib6_node_rt_rcu(fn) {
3951 if (rt->nh && cfg->fc_nh_id &&
3952 rt->nh->id != cfg->fc_nh_id)
3955 if (cfg->fc_flags & RTF_CACHE) {
3959 rc = ip6_del_cached_rt_nh(cfg, rt);
3960 } else if (cfg->fc_nh_id) {
3964 rc = ip6_del_cached_rt(cfg, rt, nh);
3973 if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric)
3975 if (cfg->fc_protocol &&
3976 cfg->fc_protocol != rt->fib6_protocol)
3980 if (!fib6_info_hold_safe(rt))
3984 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
3990 if (cfg->fc_ifindex &&
3992 nh->fib_nh_dev->ifindex != cfg->fc_ifindex))
3994 if (cfg->fc_flags & RTF_GATEWAY &&
3995 !ipv6_addr_equal(&cfg->fc_gateway, &nh->fib_nh_gw6))
3997 if (!fib6_info_hold_safe(rt))
4001 /* if gateway was specified only delete the one hop */
4002 if (cfg->fc_flags & RTF_GATEWAY)
4003 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
4005 return __ip6_del_rt_siblings(rt, cfg);
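/* Background note: a delete request only matches a FIB entry when every
 * attribute it specified agrees with the entry - nexthop id, metric,
 * protocol, device and gateway are all compared above. If a gateway was
 * given, just that single hop is removed; otherwise the whole ECMP
 * route, including all siblings, is torn down via
 * __ip6_del_rt_siblings().
 */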
4013 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
4015 struct netevent_redirect netevent;
4016 struct rt6_info *rt, *nrt = NULL;
4017 struct fib6_result res = {};
4018 struct ndisc_options ndopts;
4019 struct inet6_dev *in6_dev;
4020 struct neighbour *neigh;
4022 int optlen, on_link;
4025 optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
4026 optlen -= sizeof(*msg);
4029 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
4033 msg = (struct rd_msg *)icmp6_hdr(skb);
4035 if (ipv6_addr_is_multicast(&msg->dest)) {
4036 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
4041 if (ipv6_addr_equal(&msg->dest, &msg->target)) {
4043 } else if (ipv6_addr_type(&msg->target) !=
4044 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
4045 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
4049 in6_dev = __in6_dev_get(skb->dev);
4052 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
4056 * The IP source address of the Redirect MUST be the same as the current
4057 * first-hop router for the specified ICMP Destination Address.
4060 if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
4061 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
4066 if (ndopts.nd_opts_tgt_lladdr) {
4067 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
4070 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
4075 rt = (struct rt6_info *) dst;
4076 if (rt->rt6i_flags & RTF_REJECT) {
4077 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
4081 /* Redirect received -> path was valid.
4082 * Look, redirects are sent only in response to data packets,
4083 * so that this nexthop apparently is reachable. --ANK
4085 dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
4087 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
4092 * We have finally decided to accept it.
4095 ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
4096 NEIGH_UPDATE_F_WEAK_OVERRIDE|
4097 NEIGH_UPDATE_F_OVERRIDE|
4098 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
4099 NEIGH_UPDATE_F_ISROUTER)),
4100 NDISC_REDIRECT, &ndopts);
4103 res.f6i = rcu_dereference(rt->from);
4108 struct fib6_nh_match_arg arg = {
4110 .gw = &rt->rt6i_gateway,
4113 nexthop_for_each_fib6_nh(res.f6i->nh,
4114 fib6_nh_find_match, &arg);
4116 /* fib6_info uses a nexthop that does not have fib6_nh
4117 * using the dst->dev. Should be impossible
4123 res.nh = res.f6i->fib6_nh;
4126 res.fib6_flags = res.f6i->fib6_flags;
4127 res.fib6_type = res.f6i->fib6_type;
4128 nrt = ip6_rt_cache_alloc(&res, &msg->dest, NULL);
4132 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
4134 nrt->rt6i_flags &= ~RTF_GATEWAY;
4136 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
4138 /* rt6_insert_exception() will take care of duplicated exceptions */
4139 if (rt6_insert_exception(nrt, &res)) {
4140 dst_release_immediate(&nrt->dst);
4144 netevent.old = &rt->dst;
4145 netevent.new = &nrt->dst;
4146 netevent.daddr = &msg->dest;
4147 netevent.neigh = neigh;
4148 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
4152 neigh_release(neigh);
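/* Background note: an accepted redirect never rewrites the FIB entry
 * itself. The neighbour cache is updated for the new first hop and an
 * RTF_CACHE exception route carrying the new gateway is inserted, so
 * only the redirected destination changes paths; NETEVENT_REDIRECT then
 * lets interested subsystems (e.g. offload drivers) react.
 */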
4155 #ifdef CONFIG_IPV6_ROUTE_INFO
4156 static struct fib6_info *rt6_get_route_info(struct net *net,
4157 const struct in6_addr *prefix, int prefixlen,
4158 const struct in6_addr *gwaddr,
4159 struct net_device *dev)
4161 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
4162 int ifindex = dev->ifindex;
4163 struct fib6_node *fn;
4164 struct fib6_info *rt = NULL;
4165 struct fib6_table *table;
4167 table = fib6_get_table(net, tb_id);
4172 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
4176 for_each_fib6_node_rt_rcu(fn) {
4177 /* these routes do not use nexthops */
4180 if (rt->fib6_nh->fib_nh_dev->ifindex != ifindex)
4182 if (!(rt->fib6_flags & RTF_ROUTEINFO) ||
4183 !rt->fib6_nh->fib_nh_gw_family)
4185 if (!ipv6_addr_equal(&rt->fib6_nh->fib_nh_gw6, gwaddr))
4187 if (!fib6_info_hold_safe(rt))
4196 static struct fib6_info *rt6_add_route_info(struct net *net,
4197 const struct in6_addr *prefix, int prefixlen,
4198 const struct in6_addr *gwaddr,
4199 struct net_device *dev,
4202 struct fib6_config cfg = {
4203 .fc_metric = IP6_RT_PRIO_USER,
4204 .fc_ifindex = dev->ifindex,
4205 .fc_dst_len = prefixlen,
4206 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
4207 RTF_UP | RTF_PREF(pref),
4208 .fc_protocol = RTPROT_RA,
4209 .fc_type = RTN_UNICAST,
4210 .fc_nlinfo.portid = 0,
4211 .fc_nlinfo.nlh = NULL,
4212 .fc_nlinfo.nl_net = net,
4215 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
4216 cfg.fc_dst = *prefix;
4217 cfg.fc_gateway = *gwaddr;
4219 /* We should treat it as a default route if prefix length is 0. */
4221 cfg.fc_flags |= RTF_DEFAULT;
4223 ip6_route_add(&cfg, GFP_ATOMIC, NULL);
4225 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
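/* Background note: these CONFIG_IPV6_ROUTE_INFO helpers back the Route
 * Information option of router advertisements (RFC 4191). Such routes
 * are flagged RTF_ROUTEINFO | RTF_ADDRCONF so a later RA can find and
 * refresh or withdraw them; lifetime handling is done by the ndisc code
 * that calls these functions.
 */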
4229 struct fib6_info *rt6_get_dflt_router(struct net *net,
4230 const struct in6_addr *addr,
4231 struct net_device *dev)
4233 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
4234 struct fib6_info *rt;
4235 struct fib6_table *table;
4237 table = fib6_get_table(net, tb_id);
4242 for_each_fib6_node_rt_rcu(&table->tb6_root) {
4245 /* RA routes do not use nexthops */
4250 if (dev == nh->fib_nh_dev &&
4251 ((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
4252 ipv6_addr_equal(&nh->fib_nh_gw6, addr))
4255 if (rt && !fib6_info_hold_safe(rt))
4261 struct fib6_info *rt6_add_dflt_router(struct net *net,
4262 const struct in6_addr *gwaddr,
4263 struct net_device *dev,
4266 struct fib6_config cfg = {
4267 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
4268 .fc_metric = IP6_RT_PRIO_USER,
4269 .fc_ifindex = dev->ifindex,
4270 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
4271 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
4272 .fc_protocol = RTPROT_RA,
4273 .fc_type = RTN_UNICAST,
4274 .fc_nlinfo.portid = 0,
4275 .fc_nlinfo.nlh = NULL,
4276 .fc_nlinfo.nl_net = net,
4279 cfg.fc_gateway = *gwaddr;
4281 if (!ip6_route_add(&cfg, GFP_ATOMIC, NULL)) {
4282 struct fib6_table *table;
4284 table = fib6_get_table(dev_net(dev), cfg.fc_table);
4286 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
4289 return rt6_get_dflt_router(net, gwaddr, dev);
4292 static void __rt6_purge_dflt_routers(struct net *net,
4293 struct fib6_table *table)
4295 struct fib6_info *rt;
4299 for_each_fib6_node_rt_rcu(&table->tb6_root) {
4300 struct net_device *dev = fib6_info_nh_dev(rt);
4301 struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
4303 if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
4304 (!idev || idev->cnf.accept_ra != 2) &&
4305 fib6_info_hold_safe(rt)) {
4307 ip6_del_rt(net, rt);
4313 table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
4316 void rt6_purge_dflt_routers(struct net *net)
4318 struct fib6_table *table;
4319 struct hlist_head *head;
4324 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
4325 head = &net->ipv6.fib_table_hash[h];
4326 hlist_for_each_entry_rcu(table, head, tb6_hlist) {
4327 if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
4328 __rt6_purge_dflt_routers(net, table);
4335 static void rtmsg_to_fib6_config(struct net *net,
4336 struct in6_rtmsg *rtmsg,
4337 struct fib6_config *cfg)
4339 *cfg = (struct fib6_config){
4340 .fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
4342 .fc_ifindex = rtmsg->rtmsg_ifindex,
4343 .fc_metric = rtmsg->rtmsg_metric ? : IP6_RT_PRIO_USER,
4344 .fc_expires = rtmsg->rtmsg_info,
4345 .fc_dst_len = rtmsg->rtmsg_dst_len,
4346 .fc_src_len = rtmsg->rtmsg_src_len,
4347 .fc_flags = rtmsg->rtmsg_flags,
4348 .fc_type = rtmsg->rtmsg_type,
4350 .fc_nlinfo.nl_net = net,
4352 .fc_dst = rtmsg->rtmsg_dst,
4353 .fc_src = rtmsg->rtmsg_src,
4354 .fc_gateway = rtmsg->rtmsg_gateway,
4358 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
4360 struct fib6_config cfg;
4361 struct in6_rtmsg rtmsg;
4365 case SIOCADDRT: /* Add a route */
4366 case SIOCDELRT: /* Delete a route */
4367 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
4369 err = copy_from_user(&rtmsg, arg,
4370 sizeof(struct in6_rtmsg));
4374 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
4379 err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
4382 err = ip6_route_del(&cfg, NULL);
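/* Usage sketch (illustrative): this is the legacy route ioctl; new code
 * should prefer rtnetlink (RTM_NEWROUTE/RTM_DELROUTE). A minimal
 * CAP_NET_ADMIN caller looks roughly like:
 *
 *   struct in6_rtmsg rtm = { 0 };
 *   int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *   inet_pton(AF_INET6, "2001:db8:1::", &rtm.rtmsg_dst);
 *   rtm.rtmsg_dst_len = 64;
 *   rtm.rtmsg_ifindex = if_nametoindex("eth0");
 *   rtm.rtmsg_metric  = 1024;
 *   rtm.rtmsg_flags   = RTF_UP;
 *   ioctl(fd, SIOCADDRT, &rtm);
 */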
4396 * Drop the packet on the floor
4399 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
4401 struct dst_entry *dst = skb_dst(skb);
4402 struct net *net = dev_net(dst->dev);
4403 struct inet6_dev *idev;
4406 if (netif_is_l3_master(skb->dev) ||
4407 dst->dev == net->loopback_dev)
4408 idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
4410 idev = ip6_dst_idev(dst);
4412 switch (ipstats_mib_noroutes) {
4413 case IPSTATS_MIB_INNOROUTES:
4414 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
4415 if (type == IPV6_ADDR_ANY) {
4416 IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
4420 case IPSTATS_MIB_OUTNOROUTES:
4421 IP6_INC_STATS(net, idev, ipstats_mib_noroutes);
4425 /* Start over by dropping the dst for l3mdev case */
4426 if (netif_is_l3_master(skb->dev))
4429 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
4434 static int ip6_pkt_discard(struct sk_buff *skb)
4436 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
4439 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
4441 skb->dev = skb_dst(skb)->dev;
4442 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
4445 static int ip6_pkt_prohibit(struct sk_buff *skb)
4447 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
4450 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
4452 skb->dev = skb_dst(skb)->dev;
4453 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
4457 * Allocate a dst for local (unicast / anycast) address.
4460 struct fib6_info *addrconf_f6i_alloc(struct net *net,
4461 struct inet6_dev *idev,
4462 const struct in6_addr *addr,
4463 bool anycast, gfp_t gfp_flags)
4465 struct fib6_config cfg = {
4466 .fc_table = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL,
4467 .fc_ifindex = idev->dev->ifindex,
4468 .fc_flags = RTF_UP | RTF_NONEXTHOP,
4471 .fc_protocol = RTPROT_KERNEL,
4472 .fc_nlinfo.nl_net = net,
4473 .fc_ignore_dev_down = true,
4475 struct fib6_info *f6i;
4478 cfg.fc_type = RTN_ANYCAST;
4479 cfg.fc_flags |= RTF_ANYCAST;
4481 cfg.fc_type = RTN_LOCAL;
4482 cfg.fc_flags |= RTF_LOCAL;
4485 f6i = ip6_route_info_create(&cfg, gfp_flags, NULL);
4487 f6i->dst_nocount = true;
4490 (net->ipv6.devconf_all->disable_policy ||
4491 idev->cnf.disable_policy))
4492 f6i->dst_nopolicy = true;
4498 /* remove deleted ip from prefsrc entries */
4499 struct arg_dev_net_ip {
4500 struct net_device *dev;
4502 struct in6_addr *addr;
4505 static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg)
4507 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
4508 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
4509 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
4512 ((void *)rt->fib6_nh->fib_nh_dev == dev || !dev) &&
4513 rt != net->ipv6.fib6_null_entry &&
4514 ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr)) {
4515 spin_lock_bh(&rt6_exception_lock);
4516 /* remove prefsrc entry */
4517 rt->fib6_prefsrc.plen = 0;
4518 spin_unlock_bh(&rt6_exception_lock);
4523 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
4525 struct net *net = dev_net(ifp->idev->dev);
4526 struct arg_dev_net_ip adni = {
4527 .dev = ifp->idev->dev,
4531 fib6_clean_all(net, fib6_remove_prefsrc, &adni);
4534 #define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT)
4536 /* Remove routers and update dst entries when a gateway turns into a host. */
4537 static int fib6_clean_tohost(struct fib6_info *rt, void *arg)
4539 struct in6_addr *gateway = (struct in6_addr *)arg;
4542 /* RA routes do not use nexthops */
4547 if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
4548 nh->fib_nh_gw_family && ipv6_addr_equal(gateway, &nh->fib_nh_gw6))
4551 /* Further clean up cached routes in exception table.
4552 * This is needed because a cached route may have a different
4553 * gateway than its 'parent' in the case of an ip redirect.
4555 fib6_nh_exceptions_clean_tohost(nh, gateway);
4560 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
4562 fib6_clean_all(net, fib6_clean_tohost, gateway);
4565 struct arg_netdev_event {
4566 const struct net_device *dev;
4568 unsigned char nh_flags;
4569 unsigned long event;
4573 static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
4575 struct fib6_info *iter;
4576 struct fib6_node *fn;
4578 fn = rcu_dereference_protected(rt->fib6_node,
4579 lockdep_is_held(&rt->fib6_table->tb6_lock));
4580 iter = rcu_dereference_protected(fn->leaf,
4581 lockdep_is_held(&rt->fib6_table->tb6_lock));
4583 if (iter->fib6_metric == rt->fib6_metric &&
4584 rt6_qualify_for_ecmp(iter))
4586 iter = rcu_dereference_protected(iter->fib6_next,
4587 lockdep_is_held(&rt->fib6_table->tb6_lock));
4593 /* only called for fib entries with builtin fib6_nh */
4594 static bool rt6_is_dead(const struct fib6_info *rt)
4596 if (rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD ||
4597 (rt->fib6_nh->fib_nh_flags & RTNH_F_LINKDOWN &&
4598 ip6_ignore_linkdown(rt->fib6_nh->fib_nh_dev)))
4604 static int rt6_multipath_total_weight(const struct fib6_info *rt)
4606 struct fib6_info *iter;
4609 if (!rt6_is_dead(rt))
4610 total += rt->fib6_nh->fib_nh_weight;
4612 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
4613 if (!rt6_is_dead(iter))
4614 total += iter->fib6_nh->fib_nh_weight;
4620 static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total)
4622 int upper_bound = -1;
4624 if (!rt6_is_dead(rt)) {
4625 *weight += rt->fib6_nh->fib_nh_weight;
4626 upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
4629 atomic_set(&rt->fib6_nh->fib_nh_upper_bound, upper_bound);
4632 static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total)
4634 struct fib6_info *iter;
4637 rt6_upper_bound_set(rt, &weight, total);
4639 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4640 rt6_upper_bound_set(iter, &weight, total);
4643 void rt6_multipath_rebalance(struct fib6_info *rt)
4645 struct fib6_info *first;
4648 /* In case the entire multipath route was marked for flushing,
4649 * then there is no need to rebalance upon the removal of every sibling route.
4652 if (!rt->fib6_nsiblings || rt->should_flush)
4655 /* During lookup routes are evaluated in order, so we need to
4656 * make sure upper bounds are assigned from the first sibling onwards.
4659 first = rt6_multipath_first_sibling(rt);
4660 if (WARN_ON_ONCE(!first))
4663 total = rt6_multipath_total_weight(first);
4664 rt6_multipath_upper_bound_set(first, total);
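/* Worked example: with two live siblings of weight 1 each (total = 2),
 * the pass above assigns upper bounds of 2^30 - 1 and 2^31 - 1; the
 * multipath hash (at most 2^31 - 1) then selects the first sibling for
 * roughly half of the flows and the second for the rest. A nexthop that
 * rt6_is_dead() reports as dead contributes no weight and keeps the
 * initial upper bound of -1.
 */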
4667 static int fib6_ifup(struct fib6_info *rt, void *p_arg)
4669 const struct arg_netdev_event *arg = p_arg;
4670 struct net *net = dev_net(arg->dev);
4672 if (rt != net->ipv6.fib6_null_entry && !rt->nh &&
4673 rt->fib6_nh->fib_nh_dev == arg->dev) {
4674 rt->fib6_nh->fib_nh_flags &= ~arg->nh_flags;
4675 fib6_update_sernum_upto_root(net, rt);
4676 rt6_multipath_rebalance(rt);
4682 void rt6_sync_up(struct net_device *dev, unsigned char nh_flags)
4684 struct arg_netdev_event arg = {
4687 .nh_flags = nh_flags,
4691 if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
4692 arg.nh_flags |= RTNH_F_LINKDOWN;
4694 fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
4697 /* only called for fib entries with inline fib6_nh */
4698 static bool rt6_multipath_uses_dev(const struct fib6_info *rt,
4699 const struct net_device *dev)
4701 struct fib6_info *iter;
4703 if (rt->fib6_nh->fib_nh_dev == dev)
4705 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4706 if (iter->fib6_nh->fib_nh_dev == dev)
4712 static void rt6_multipath_flush(struct fib6_info *rt)
4714 struct fib6_info *iter;
4716 rt->should_flush = 1;
4717 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4718 iter->should_flush = 1;
4721 static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt,
4722 const struct net_device *down_dev)
4724 struct fib6_info *iter;
4725 unsigned int dead = 0;
4727 if (rt->fib6_nh->fib_nh_dev == down_dev ||
4728 rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4730 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4731 if (iter->fib6_nh->fib_nh_dev == down_dev ||
4732 iter->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4738 static void rt6_multipath_nh_flags_set(struct fib6_info *rt,
4739 const struct net_device *dev,
4740 unsigned char nh_flags)
4742 struct fib6_info *iter;
4744 if (rt->fib6_nh->fib_nh_dev == dev)
4745 rt->fib6_nh->fib_nh_flags |= nh_flags;
4746 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4747 if (iter->fib6_nh->fib_nh_dev == dev)
4748 iter->fib6_nh->fib_nh_flags |= nh_flags;
4751 /* called with write lock held for table with rt */
4752 static int fib6_ifdown(struct fib6_info *rt, void *p_arg)
4754 const struct arg_netdev_event *arg = p_arg;
4755 const struct net_device *dev = arg->dev;
4756 struct net *net = dev_net(dev);
4758 if (rt == net->ipv6.fib6_null_entry || rt->nh)
4761 switch (arg->event) {
4762 case NETDEV_UNREGISTER:
4763 return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4765 if (rt->should_flush)
4767 if (!rt->fib6_nsiblings)
4768 return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4769 if (rt6_multipath_uses_dev(rt, dev)) {
4772 count = rt6_multipath_dead_count(rt, dev);
4773 if (rt->fib6_nsiblings + 1 == count) {
4774 rt6_multipath_flush(rt);
4777 rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
4779 fib6_update_sernum(net, rt);
4780 rt6_multipath_rebalance(rt);
4784 if (rt->fib6_nh->fib_nh_dev != dev ||
4785 rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
4787 rt->fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
4788 rt6_multipath_rebalance(rt);
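/* Background note: the walker above implements the per-event policy for
 * device state changes. NETDEV_UNREGISTER removes every route whose
 * nexthop uses the device; when a device goes down, a multipath route
 * is flushed only if all of its sibling nexthops are dead, otherwise
 * the affected nexthops are marked dead/linkdown and the weights are
 * rebalanced; a NETDEV_CHANGE (e.g. carrier loss) just sets
 * RTNH_F_LINKDOWN and rebalances.
 */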
4795 void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
4797 struct arg_netdev_event arg = {
4803 struct net *net = dev_net(dev);
4805 if (net->ipv6.sysctl.skip_notify_on_dev_down)
4806 fib6_clean_all_skip_notify(net, fib6_ifdown, &arg);
4808 fib6_clean_all(net, fib6_ifdown, &arg);
4811 void rt6_disable_ip(struct net_device *dev, unsigned long event)
4813 rt6_sync_down_dev(dev, event);
4814 rt6_uncached_list_flush_dev(dev_net(dev), dev);
4815 neigh_ifdown(&nd_tbl, dev);
4818 struct rt6_mtu_change_arg {
4819 struct net_device *dev;
4821 struct fib6_info *f6i;
4824 static int fib6_nh_mtu_change(struct fib6_nh *nh, void *_arg)
4826 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *)_arg;
4827 struct fib6_info *f6i = arg->f6i;
4829 /* For administrative MTU increase, there is no way to discover
4830 * IPv6 PMTU increase, so PMTU increase should be updated here.
4831 * Since RFC 1981 doesn't cover administrative MTU increases,
4832 * updating the PMTU here on such an increase is a MUST. (i.e. jumbo frame)
4834 if (nh->fib_nh_dev == arg->dev) {
4835 struct inet6_dev *idev = __in6_dev_get(arg->dev);
4836 u32 mtu = f6i->fib6_pmtu;
4838 if (mtu >= arg->mtu ||
4839 (mtu < arg->mtu && mtu == idev->cnf.mtu6))
4840 fib6_metric_set(f6i, RTAX_MTU, arg->mtu);
4842 spin_lock_bh(&rt6_exception_lock);
4843 rt6_exceptions_update_pmtu(idev, nh, arg->mtu);
4844 spin_unlock_bh(&rt6_exception_lock);
4850 static int rt6_mtu_change_route(struct fib6_info *f6i, void *p_arg)
4852 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
4853 struct inet6_dev *idev;
4855 /* In IPv6, PMTU discovery is not optional,
4856 so the RTAX_MTU lock cannot disable it.
4857 We still use this lock to block changes
4858 caused by addrconf/ndisc.
4861 idev = __in6_dev_get(arg->dev);
4865 if (fib6_metric_locked(f6i, RTAX_MTU))
4870 /* fib6_nh_mtu_change only returns 0, so this is safe */
4871 return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_mtu_change,
4875 return fib6_nh_mtu_change(f6i->fib6_nh, arg);
4878 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
4880 struct rt6_mtu_change_arg arg = {
4885 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
4888 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
4889 [RTA_UNSPEC] = { .strict_start_type = RTA_DPORT + 1 },
4890 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
4891 [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) },
4892 [RTA_OIF] = { .type = NLA_U32 },
4893 [RTA_IIF] = { .type = NLA_U32 },
4894 [RTA_PRIORITY] = { .type = NLA_U32 },
4895 [RTA_METRICS] = { .type = NLA_NESTED },
4896 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
4897 [RTA_PREF] = { .type = NLA_U8 },
4898 [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
4899 [RTA_ENCAP] = { .type = NLA_NESTED },
4900 [RTA_EXPIRES] = { .type = NLA_U32 },
4901 [RTA_UID] = { .type = NLA_U32 },
4902 [RTA_MARK] = { .type = NLA_U32 },
4903 [RTA_TABLE] = { .type = NLA_U32 },
4904 [RTA_IP_PROTO] = { .type = NLA_U8 },
4905 [RTA_SPORT] = { .type = NLA_U16 },
4906 [RTA_DPORT] = { .type = NLA_U16 },
4907 [RTA_NH_ID] = { .type = NLA_U32 },
4910 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
4911 struct fib6_config *cfg,
4912 struct netlink_ext_ack *extack)
4915 struct nlattr *tb[RTA_MAX+1];
4919 err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
4920 rtm_ipv6_policy, extack);
4925 rtm = nlmsg_data(nlh);
4927 *cfg = (struct fib6_config){
4928 .fc_table = rtm->rtm_table,
4929 .fc_dst_len = rtm->rtm_dst_len,
4930 .fc_src_len = rtm->rtm_src_len,
4932 .fc_protocol = rtm->rtm_protocol,
4933 .fc_type = rtm->rtm_type,
4935 .fc_nlinfo.portid = NETLINK_CB(skb).portid,
4936 .fc_nlinfo.nlh = nlh,
4937 .fc_nlinfo.nl_net = sock_net(skb->sk),
4940 if (rtm->rtm_type == RTN_UNREACHABLE ||
4941 rtm->rtm_type == RTN_BLACKHOLE ||
4942 rtm->rtm_type == RTN_PROHIBIT ||
4943 rtm->rtm_type == RTN_THROW)
4944 cfg->fc_flags |= RTF_REJECT;
4946 if (rtm->rtm_type == RTN_LOCAL)
4947 cfg->fc_flags |= RTF_LOCAL;
4949 if (rtm->rtm_flags & RTM_F_CLONED)
4950 cfg->fc_flags |= RTF_CACHE;
4952 cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);
4954 if (tb[RTA_NH_ID]) {
4955 if (tb[RTA_GATEWAY] || tb[RTA_OIF] ||
4956 tb[RTA_MULTIPATH] || tb[RTA_ENCAP]) {
4957 NL_SET_ERR_MSG(extack,
4958 "Nexthop specification and nexthop id are mutually exclusive");
4961 cfg->fc_nh_id = nla_get_u32(tb[RTA_NH_ID]);
4964 if (tb[RTA_GATEWAY]) {
4965 cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
4966 cfg->fc_flags |= RTF_GATEWAY;
4969 NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
4974 int plen = (rtm->rtm_dst_len + 7) >> 3;
4976 if (nla_len(tb[RTA_DST]) < plen)
4979 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
4983 int plen = (rtm->rtm_src_len + 7) >> 3;
4985 if (nla_len(tb[RTA_SRC]) < plen)
4988 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
4991 if (tb[RTA_PREFSRC])
4992 cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
4995 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
4997 if (tb[RTA_PRIORITY])
4998 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
5000 if (tb[RTA_METRICS]) {
5001 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
5002 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
5006 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
5008 if (tb[RTA_MULTIPATH]) {
5009 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
5010 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
5012 err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
5013 cfg->fc_mp_len, extack);
5019 pref = nla_get_u8(tb[RTA_PREF]);
5020 if (pref != ICMPV6_ROUTER_PREF_LOW &&
5021 pref != ICMPV6_ROUTER_PREF_HIGH)
5022 pref = ICMPV6_ROUTER_PREF_MEDIUM;
5023 cfg->fc_flags |= RTF_PREF(pref);
5027 cfg->fc_encap = tb[RTA_ENCAP];
5029 if (tb[RTA_ENCAP_TYPE]) {
5030 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
5032 err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
5037 if (tb[RTA_EXPIRES]) {
5038 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
5040 if (addrconf_finite_timeout(timeout)) {
5041 cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
5042 cfg->fc_flags |= RTF_EXPIRES;
5052 struct fib6_info *fib6_info;
5053 struct fib6_config r_cfg;
5054 struct list_head next;
5057 static int ip6_route_info_append(struct net *net,
5058 struct list_head *rt6_nh_list,
5059 struct fib6_info *rt,
5060 struct fib6_config *r_cfg)
5065 list_for_each_entry(nh, rt6_nh_list, next) {
5066 /* check if fib6_info already exists */
5067 if (rt6_duplicate_nexthop(nh->fib6_info, rt))
5071 nh = kzalloc(sizeof(*nh), GFP_KERNEL);
5075 memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
5076 list_add_tail(&nh->next, rt6_nh_list);
5081 static void ip6_route_mpath_notify(struct fib6_info *rt,
5082 struct fib6_info *rt_last,
5083 struct nl_info *info,
5086 /* if this is an APPEND route, then rt points to the first route
5087 * inserted and rt_last points to last route inserted. Userspace
5088 * wants a consistent dump of the route which starts at the first
5089 * nexthop. Since sibling routes are always added at the end of
5090 * the list, find the first sibling of the last route appended
5092 if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->fib6_nsiblings) {
5093 rt = list_first_entry(&rt_last->fib6_siblings,
5099 inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
5102 static int fib6_gw_from_attr(struct in6_addr *gw, struct nlattr *nla,
5103 struct netlink_ext_ack *extack)
5105 if (nla_len(nla) < sizeof(*gw)) {
5106 NL_SET_ERR_MSG(extack, "Invalid IPv6 address in RTA_GATEWAY");
5110 *gw = nla_get_in6_addr(nla);
5115 static int ip6_route_multipath_add(struct fib6_config *cfg,
5116 struct netlink_ext_ack *extack)
5118 struct fib6_info *rt_notif = NULL, *rt_last = NULL;
5119 struct nl_info *info = &cfg->fc_nlinfo;
5120 enum fib_event_type event_type;
5121 struct fib6_config r_cfg;
5122 struct rtnexthop *rtnh;
5123 struct fib6_info *rt;
5124 struct rt6_nh *err_nh;
5125 struct rt6_nh *nh, *nh_safe;
5131 int replace = (cfg->fc_nlinfo.nlh &&
5132 (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
5133 LIST_HEAD(rt6_nh_list);
5135 nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
5136 if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
5137 nlflags |= NLM_F_APPEND;
5139 remaining = cfg->fc_mp_len;
5140 rtnh = (struct rtnexthop *)cfg->fc_mp;
5142 /* Parse a Multipath Entry and build a list (rt6_nh_list) of
5143 * fib6_info structs per nexthop
5145 while (rtnh_ok(rtnh, remaining)) {
5146 memcpy(&r_cfg, cfg, sizeof(*cfg));
5147 if (rtnh->rtnh_ifindex)
5148 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
5150 attrlen = rtnh_attrlen(rtnh);
5152 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
5154 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
5156 err = fib6_gw_from_attr(&r_cfg.fc_gateway, nla,
5161 r_cfg.fc_flags |= RTF_GATEWAY;
5163 r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
5165 /* RTA_ENCAP_TYPE length checked in
5166 * lwtunnel_valid_encap_type_attr
5168 nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
5170 r_cfg.fc_encap_type = nla_get_u16(nla);
5173 r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
5174 rt = ip6_route_info_create(&r_cfg, GFP_KERNEL, extack);
5180 if (!rt6_qualify_for_ecmp(rt)) {
5182 NL_SET_ERR_MSG(extack,
5183 "Device only routes can not be added for IPv6 using the multipath API.");
5184 fib6_info_release(rt);
5188 rt->fib6_nh->fib_nh_weight = rtnh->rtnh_hops + 1;
5190 err = ip6_route_info_append(info->nl_net, &rt6_nh_list,
5193 fib6_info_release(rt);
5197 rtnh = rtnh_next(rtnh, &remaining);
5200 if (list_empty(&rt6_nh_list)) {
5201 NL_SET_ERR_MSG(extack,
5202 "Invalid nexthop configuration - no valid nexthops");
5206 /* For add and replace, send one notification with all nexthops.
5207 * Skip the notification in fib6_add_rt2node and send one with
5208 * the full route when done
5210 info->skip_notify = 1;
5212 /* For add and replace, send one notification with all nexthops. For
5213 * append, send one notification with all appended nexthops.
5215 info->skip_notify_kernel = 1;
5218 list_for_each_entry(nh, &rt6_nh_list, next) {
5219 err = __ip6_ins_rt(nh->fib6_info, info, extack);
5220 fib6_info_release(nh->fib6_info);
5223 /* save reference to last route successfully inserted */
5224 rt_last = nh->fib6_info;
5226 /* save reference to first route for notification */
5228 rt_notif = nh->fib6_info;
5231 /* nh->fib6_info is used or freed at this point, reset to NULL */
5232 nh->fib6_info = NULL;
5235 NL_SET_ERR_MSG_MOD(extack,
5236 "multipath route replace failed (check consistency of installed routes)");
5241 /* Because each route is added like a single route we remove
5242 * these flags after the first nexthop: if there is a collision,
5243 * we have already failed to add the first nexthop:
5244 * fib6_add_rt2node() has rejected it; when replacing, old
5245 * nexthops have been replaced by first new, the rest should
5248 if (cfg->fc_nlinfo.nlh) {
5249 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
5251 cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
5256 event_type = replace ? FIB_EVENT_ENTRY_REPLACE : FIB_EVENT_ENTRY_ADD;
5257 err = call_fib6_multipath_entry_notifiers(info->nl_net, event_type,
5258 rt_notif, nhn - 1, extack);
5260 /* Delete all the siblings that were just added */
5265 /* success ... tell user about new route */
5266 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
5270 /* send notification for routes that were added so that
5271 * the delete notifications sent by ip6_route_del are
5275 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
5277 /* Delete routes that were already added */
5278 list_for_each_entry(nh, &rt6_nh_list, next) {
5281 ip6_route_del(&nh->r_cfg, extack);
5285 list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
5287 fib6_info_release(nh->fib6_info);
5288 list_del(&nh->next);
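/* Delete a route carrying an RTA_MULTIPATH attribute: walk the rtnexthop
 * entries and call ip6_route_del() for each one, remembering the last
 * error so a partial failure is still reported.
 */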
5295 static int ip6_route_multipath_del(struct fib6_config *cfg,
5296 struct netlink_ext_ack *extack)
5298 struct fib6_config r_cfg;
5299 struct rtnexthop *rtnh;
5302 int err = 1, last_err = 0;
5304 remaining = cfg->fc_mp_len;
5305 rtnh = (struct rtnexthop *)cfg->fc_mp;
5307 /* Parse a Multipath Entry */
5308 while (rtnh_ok(rtnh, remaining)) {
5309 memcpy(&r_cfg, cfg, sizeof(*cfg));
5310 if (rtnh->rtnh_ifindex)
5311 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
5313 attrlen = rtnh_attrlen(rtnh);
5315 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
5317 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
5319 err = fib6_gw_from_attr(&r_cfg.fc_gateway, nla,
5326 r_cfg.fc_flags |= RTF_GATEWAY;
5329 err = ip6_route_del(&r_cfg, extack);
5334 rtnh = rtnh_next(rtnh, &remaining);
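/* RTM_DELROUTE handler: convert the netlink request to a fib6_config,
 * verify a referenced nexthop id exists, then delete either the multipath
 * set or the single route it describes.
 */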
5340 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
5341 struct netlink_ext_ack *extack)
5343 struct fib6_config cfg;
5346 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
5351 !nexthop_find_by_id(sock_net(skb->sk), cfg.fc_nh_id)) {
5352 NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
5357 return ip6_route_multipath_del(&cfg, extack);
5359 cfg.fc_delete_all_nh = 1;
5360 return ip6_route_del(&cfg, extack);
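/* RTM_NEWROUTE handler: convert the netlink request to a fib6_config,
 * defaulting the metric to IP6_RT_PRIO_USER, then add either a multipath
 * set or a single route.
 */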
5364 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
5365 struct netlink_ext_ack *extack)
5367 struct fib6_config cfg;
5370 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
5374 if (cfg.fc_metric == 0)
5375 cfg.fc_metric = IP6_RT_PRIO_USER;
5378 return ip6_route_multipath_add(&cfg, extack);
5380 return ip6_route_add(&cfg, GFP_KERNEL, extack);
5383 /* add the overhead of this fib6_nh to nexthop_len */
5384 static int rt6_nh_nlmsg_size(struct fib6_nh *nh, void *arg)
5386 int *nexthop_len = arg;
5388 *nexthop_len += nla_total_size(0) /* RTA_MULTIPATH */
5389 + NLA_ALIGN(sizeof(struct rtnexthop))
5390 + nla_total_size(16); /* RTA_GATEWAY */
5392 if (nh->fib_nh_lws) {
5393 /* RTA_ENCAP_TYPE */
5394 *nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
5396 *nexthop_len += nla_total_size(2);
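/* Worst-case message size for a route notification: fixed per-route
 * attributes plus the per-nexthop overhead, whether the nexthops come
 * from a nexthop object or from the fib6_nh/siblings of this fib6_info.
 */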
5402 static size_t rt6_nlmsg_size(struct fib6_info *f6i)
5407 nexthop_len = nla_total_size(4); /* RTA_NH_ID */
5408 nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size,
5411 struct fib6_nh *nh = f6i->fib6_nh;
5414 if (f6i->fib6_nsiblings) {
5415 nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */
5416 + NLA_ALIGN(sizeof(struct rtnexthop))
5417 + nla_total_size(16) /* RTA_GATEWAY */
5418 + lwtunnel_get_encap_size(nh->fib_nh_lws);
5420 nexthop_len *= f6i->fib6_nsiblings;
5422 nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
5425 return NLMSG_ALIGN(sizeof(struct rtmsg))
5426 + nla_total_size(16) /* RTA_SRC */
5427 + nla_total_size(16) /* RTA_DST */
5428 + nla_total_size(16) /* RTA_GATEWAY */
5429 + nla_total_size(16) /* RTA_PREFSRC */
5430 + nla_total_size(4) /* RTA_TABLE */
5431 + nla_total_size(4) /* RTA_IIF */
5432 + nla_total_size(4) /* RTA_OIF */
5433 + nla_total_size(4) /* RTA_PRIORITY */
5434 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
5435 + nla_total_size(sizeof(struct rta_cacheinfo))
5436 + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
5437 + nla_total_size(1) /* RTA_PREF */
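/* Dump the nexthop object of a route: a multipath group becomes an
 * RTA_MULTIPATH nest, a single nexthop is emitted via fib_nexthop_info(),
 * with its flags merged into the caller's rtm_flags.
 */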
5441 static int rt6_fill_node_nexthop(struct sk_buff *skb, struct nexthop *nh,
5442 unsigned char *flags)
5444 if (nexthop_is_multipath(nh)) {
5447 mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
5449 goto nla_put_failure;
5451 if (nexthop_mpath_fill_node(skb, nh, AF_INET6))
5452 goto nla_put_failure;
5454 nla_nest_end(skb, mp);
5456 struct fib6_nh *fib6_nh;
5458 fib6_nh = nexthop_fib6_nh(nh);
5459 if (fib_nexthop_info(skb, &fib6_nh->nh_common, AF_INET6,
5461 goto nla_put_failure;
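/* Fill one route message.  Cached clones (RTF_CACHE) are described by
 * their rt6_info/dst, FIB entries by the fib6_info; nexthops are emitted
 * inline, as an RTA_MULTIPATH nest for sibling routes, or as RTA_NH_ID
 * for nexthop objects.
 */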
5470 static int rt6_fill_node(struct net *net, struct sk_buff *skb,
5471 struct fib6_info *rt, struct dst_entry *dst,
5472 struct in6_addr *dest, struct in6_addr *src,
5473 int iif, int type, u32 portid, u32 seq,
5476 struct rt6_info *rt6 = (struct rt6_info *)dst;
5477 struct rt6key *rt6_dst, *rt6_src;
5478 u32 *pmetrics, table, rt6_flags;
5479 unsigned char nh_flags = 0;
5480 struct nlmsghdr *nlh;
5484 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
5489 rt6_dst = &rt6->rt6i_dst;
5490 rt6_src = &rt6->rt6i_src;
5491 rt6_flags = rt6->rt6i_flags;
5493 rt6_dst = &rt->fib6_dst;
5494 rt6_src = &rt->fib6_src;
5495 rt6_flags = rt->fib6_flags;
5498 rtm = nlmsg_data(nlh);
5499 rtm->rtm_family = AF_INET6;
5500 rtm->rtm_dst_len = rt6_dst->plen;
5501 rtm->rtm_src_len = rt6_src->plen;
5504 table = rt->fib6_table->tb6_id;
5506 table = RT6_TABLE_UNSPEC;
5507 rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
5508 if (nla_put_u32(skb, RTA_TABLE, table))
5509 goto nla_put_failure;
5511 rtm->rtm_type = rt->fib6_type;
5513 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
5514 rtm->rtm_protocol = rt->fib6_protocol;
5516 if (rt6_flags & RTF_CACHE)
5517 rtm->rtm_flags |= RTM_F_CLONED;
5520 if (nla_put_in6_addr(skb, RTA_DST, dest))
5521 goto nla_put_failure;
5522 rtm->rtm_dst_len = 128;
5523 } else if (rtm->rtm_dst_len)
5524 if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
5525 goto nla_put_failure;
5526 #ifdef CONFIG_IPV6_SUBTREES
5528 if (nla_put_in6_addr(skb, RTA_SRC, src))
5529 goto nla_put_failure;
5530 rtm->rtm_src_len = 128;
5531 } else if (rtm->rtm_src_len &&
5532 nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr))
5533 goto nla_put_failure;
5536 #ifdef CONFIG_IPV6_MROUTE
5537 if (ipv6_addr_is_multicast(&rt6_dst->addr)) {
5538 int err = ip6mr_get_route(net, skb, rtm, portid);
5543 goto nla_put_failure;
5546 if (nla_put_u32(skb, RTA_IIF, iif))
5547 goto nla_put_failure;
5549 struct in6_addr saddr_buf;
5550 if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 &&
5551 nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
5552 goto nla_put_failure;
5555 if (rt->fib6_prefsrc.plen) {
5556 struct in6_addr saddr_buf;
5557 saddr_buf = rt->fib6_prefsrc.addr;
5558 if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
5559 goto nla_put_failure;
5562 pmetrics = dst ? dst_metrics_ptr(dst) : rt->fib6_metrics->metrics;
5563 if (rtnetlink_put_metrics(skb, pmetrics) < 0)
5564 goto nla_put_failure;
5566 if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric))
5567 goto nla_put_failure;
5569 /* For multipath routes, walk the siblings list and add
5570 * each as a nexthop within RTA_MULTIPATH.
5573 if (rt6_flags & RTF_GATEWAY &&
5574 nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
5575 goto nla_put_failure;
5577 if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
5578 goto nla_put_failure;
5579 } else if (rt->fib6_nsiblings) {
5580 struct fib6_info *sibling, *next_sibling;
5583 mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
5585 goto nla_put_failure;
5587 if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
5588 rt->fib6_nh->fib_nh_weight, AF_INET6,
5590 goto nla_put_failure;
5592 list_for_each_entry_safe(sibling, next_sibling,
5593 &rt->fib6_siblings, fib6_siblings) {
5594 if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
5595 sibling->fib6_nh->fib_nh_weight,
5597 goto nla_put_failure;
5600 nla_nest_end(skb, mp);
5601 } else if (rt->nh) {
5602 if (nla_put_u32(skb, RTA_NH_ID, rt->nh->id))
5603 goto nla_put_failure;
5605 if (nexthop_is_blackhole(rt->nh))
5606 rtm->rtm_type = RTN_BLACKHOLE;
5608 if (rt6_fill_node_nexthop(skb, rt->nh, &nh_flags) < 0)
5609 goto nla_put_failure;
5611 rtm->rtm_flags |= nh_flags;
5613 if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common, AF_INET6,
5614 &nh_flags, false) < 0)
5615 goto nla_put_failure;
5617 rtm->rtm_flags |= nh_flags;
5620 if (rt6_flags & RTF_EXPIRES) {
5621 expires = dst ? dst->expires : rt->expires;
5625 if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
5626 goto nla_put_failure;
5628 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags)))
5629 goto nla_put_failure;
5632 nlmsg_end(skb, nlh);
5636 nlmsg_cancel(skb, nlh);
5640 static int fib6_info_nh_uses_dev(struct fib6_nh *nh, void *arg)
5642 const struct net_device *dev = arg;
5644 if (nh->fib_nh_dev == dev)
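/* Return true if any nexthop of this fib6_info (nexthop object, its own
 * fib6_nh, or one of its siblings) uses the given device.
 */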
5650 static bool fib6_info_uses_dev(const struct fib6_info *f6i,
5651 const struct net_device *dev)
5654 struct net_device *_dev = (struct net_device *)dev;
5656 return !!nexthop_for_each_fib6_nh(f6i->nh,
5657 fib6_info_nh_uses_dev,
5661 if (f6i->fib6_nh->fib_nh_dev == dev)
5664 if (f6i->fib6_nsiblings) {
5665 struct fib6_info *sibling, *next_sibling;
5667 list_for_each_entry_safe(sibling, next_sibling,
5668 &f6i->fib6_siblings, fib6_siblings) {
5669 if (sibling->fib6_nh->fib_nh_dev == dev)
5677 struct fib6_nh_exception_dump_walker {
5678 struct rt6_rtnl_dump_arg *dump;
5679 struct fib6_info *rt;
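/* Dump the cached exception routes hanging off one fib6_nh.  Expired
 * entries are counted as handled rather than emitted; see the comment
 * below on why that keeps partial dumps consistent.
 */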
5685 static int rt6_nh_dump_exceptions(struct fib6_nh *nh, void *arg)
5687 struct fib6_nh_exception_dump_walker *w = arg;
5688 struct rt6_rtnl_dump_arg *dump = w->dump;
5689 struct rt6_exception_bucket *bucket;
5690 struct rt6_exception *rt6_ex;
5693 bucket = fib6_nh_get_excptn_bucket(nh, NULL);
5697 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
5698 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
5704 /* Expiration of entries doesn't bump sernum, insertion
5705 * does. Removal is triggered by insertion, so we can
5706 * rely on the fact that if entries change between two
5707 * partial dumps, this node is scanned again completely,
5708 * see rt6_insert_exception() and fib6_dump_table().
5710 * Count expired entries we go through as handled
5711 * entries that we'll skip next time, in case of partial
5712 * node dump. Otherwise, if entries expire meanwhile,
5713 * we'll skip the wrong amount.
5715 if (rt6_check_expired(rt6_ex->rt6i)) {
5720 err = rt6_fill_node(dump->net, dump->skb, w->rt,
5721 &rt6_ex->rt6i->dst, NULL, NULL, 0,
5723 NETLINK_CB(dump->cb->skb).portid,
5724 dump->cb->nlh->nlmsg_seq, w->flags);
5736 /* Return -1 if done with the node, or the number of handled routes on a partial dump */
5737 int rt6_dump_route(struct fib6_info *rt, void *p_arg, unsigned int skip)
5739 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
5740 struct fib_dump_filter *filter = &arg->filter;
5741 unsigned int flags = NLM_F_MULTI;
5742 struct net *net = arg->net;
5745 if (rt == net->ipv6.fib6_null_entry)
5748 if ((filter->flags & RTM_F_PREFIX) &&
5749 !(rt->fib6_flags & RTF_PREFIX_RT)) {
5750 /* success since this is not a prefix route */
5753 if (filter->filter_set &&
5754 ((filter->rt_type && rt->fib6_type != filter->rt_type) ||
5755 (filter->dev && !fib6_info_uses_dev(rt, filter->dev)) ||
5756 (filter->protocol && rt->fib6_protocol != filter->protocol))) {
5760 if (filter->filter_set ||
5761 !filter->dump_routes || !filter->dump_exceptions) {
5762 flags |= NLM_F_DUMP_FILTERED;
5765 if (filter->dump_routes) {
5769 if (rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL,
5771 NETLINK_CB(arg->cb->skb).portid,
5772 arg->cb->nlh->nlmsg_seq, flags)) {
5779 if (filter->dump_exceptions) {
5780 struct fib6_nh_exception_dump_walker w = { .dump = arg,
5789 err = nexthop_for_each_fib6_nh(rt->nh,
5790 rt6_nh_dump_exceptions,
5793 err = rt6_nh_dump_exceptions(rt->fib6_nh, &w);
5798 return count += w.count;
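/* Strict validation of RTM_GETROUTE requests: sane header length and
 * flags, prefix lengths of 0 or 128 only, and a whitelist of attributes;
 * non-strict sockets fall back to plain nlmsg_parse_deprecated().
 */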
5804 static int inet6_rtm_valid_getroute_req(struct sk_buff *skb,
5805 const struct nlmsghdr *nlh,
5807 struct netlink_ext_ack *extack)
5812 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
5813 NL_SET_ERR_MSG_MOD(extack,
5814 "Invalid header for get route request");
5818 if (!netlink_strict_get_check(skb))
5819 return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
5820 rtm_ipv6_policy, extack);
5822 rtm = nlmsg_data(nlh);
5823 if ((rtm->rtm_src_len && rtm->rtm_src_len != 128) ||
5824 (rtm->rtm_dst_len && rtm->rtm_dst_len != 128) ||
5825 rtm->rtm_table || rtm->rtm_protocol || rtm->rtm_scope ||
5827 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get route request");
5830 if (rtm->rtm_flags & ~RTM_F_FIB_MATCH) {
5831 NL_SET_ERR_MSG_MOD(extack,
5832 "Invalid flags for get route request");
5836 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
5837 rtm_ipv6_policy, extack);
5841 if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
5842 (tb[RTA_DST] && !rtm->rtm_dst_len)) {
5843 NL_SET_ERR_MSG_MOD(extack, "rtm_src_len and rtm_dst_len must be 128 for IPv6");
5847 for (i = 0; i <= RTA_MAX; i++) {
5863 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get route request");
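/* RTM_GETROUTE handler: build a flowi6 from the request attributes, do an
 * input lookup (RTA_IIF) or output lookup (RTA_OIF) and return the
 * resulting route; with RTM_F_FIB_MATCH the matched FIB entry is returned
 * instead of the full lookup result.
 */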
5871 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
5872 struct netlink_ext_ack *extack)
5874 struct net *net = sock_net(in_skb->sk);
5875 struct nlattr *tb[RTA_MAX+1];
5876 int err, iif = 0, oif = 0;
5877 struct fib6_info *from;
5878 struct dst_entry *dst;
5879 struct rt6_info *rt;
5880 struct sk_buff *skb;
5882 struct flowi6 fl6 = {};
5885 err = inet6_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
5890 rtm = nlmsg_data(nlh);
5891 fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
5892 fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
5895 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
5898 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
5902 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
5905 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
5909 iif = nla_get_u32(tb[RTA_IIF]);
5912 oif = nla_get_u32(tb[RTA_OIF]);
5915 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
5918 fl6.flowi6_uid = make_kuid(current_user_ns(),
5919 nla_get_u32(tb[RTA_UID]));
5921 fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
5924 fl6.fl6_sport = nla_get_be16(tb[RTA_SPORT]);
5927 fl6.fl6_dport = nla_get_be16(tb[RTA_DPORT]);
5929 if (tb[RTA_IP_PROTO]) {
5930 err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
5931 &fl6.flowi6_proto, AF_INET6,
5938 struct net_device *dev;
5943 dev = dev_get_by_index_rcu(net, iif);
5950 fl6.flowi6_iif = iif;
5952 if (!ipv6_addr_any(&fl6.saddr))
5953 flags |= RT6_LOOKUP_F_HAS_SADDR;
5955 dst = ip6_route_input_lookup(net, dev, &fl6, NULL, flags);
5959 fl6.flowi6_oif = oif;
5961 dst = ip6_route_output(net, NULL, &fl6);
5965 rt = container_of(dst, struct rt6_info, dst);
5966 if (rt->dst.error) {
5967 err = rt->dst.error;
5972 if (rt == net->ipv6.ip6_null_entry) {
5973 err = rt->dst.error;
5978 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
5985 skb_dst_set(skb, &rt->dst);
5988 from = rcu_dereference(rt->from);
5991 err = rt6_fill_node(net, skb, from, NULL, NULL, NULL,
5993 NETLINK_CB(in_skb).portid,
5996 err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
5997 &fl6.saddr, iif, RTM_NEWROUTE,
5998 NETLINK_CB(in_skb).portid,
6010 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
6015 void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
6016 unsigned int nlm_flags)
6018 struct sk_buff *skb;
6019 struct net *net = info->nl_net;
6024 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
6026 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
6030 err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
6031 event, info->portid, seq, nlm_flags);
6033 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
6034 WARN_ON(err == -EMSGSIZE);
6038 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
6039 info->nlh, gfp_any());
6043 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
6046 void fib6_rt_update(struct net *net, struct fib6_info *rt,
6047 struct nl_info *info)
6049 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
6050 struct sk_buff *skb;
6053 /* call_fib6_entry_notifiers will be removed when in-kernel notifier
6054 * is implemented and supported for nexthop objects
6056 call_fib6_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, rt, NULL);
6058 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
6062 err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
6063 RTM_NEWROUTE, info->portid, seq, NLM_F_REPLACE);
6065 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
6066 WARN_ON(err == -EMSGSIZE);
6070 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
6071 info->nlh, gfp_any());
6075 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
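/* Netdevice notifier: the special null/prohibit/blackhole dst entries are
 * bound to the loopback device when it registers, and their idev
 * references are dropped again when it unregisters.
 */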
6078 static int ip6_route_dev_notify(struct notifier_block *this,
6079 unsigned long event, void *ptr)
6081 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6082 struct net *net = dev_net(dev);
6084 if (!(dev->flags & IFF_LOOPBACK))
6087 if (event == NETDEV_REGISTER) {
6088 net->ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = dev;
6089 net->ipv6.ip6_null_entry->dst.dev = dev;
6090 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
6091 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6092 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
6093 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
6094 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
6095 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
6097 } else if (event == NETDEV_UNREGISTER &&
6098 dev->reg_state != NETREG_UNREGISTERED) {
6099 /* NETDEV_UNREGISTER can be fired multiple times by
6100 * netdev_wait_allrefs(). Make sure we only call this once.
6102 in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
6103 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6104 in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
6105 in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
6116 #ifdef CONFIG_PROC_FS
6117 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
6119 struct net *net = (struct net *)seq->private;
6120 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
6121 net->ipv6.rt6_stats->fib_nodes,
6122 net->ipv6.rt6_stats->fib_route_nodes,
6123 atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
6124 net->ipv6.rt6_stats->fib_rt_entries,
6125 net->ipv6.rt6_stats->fib_rt_cache,
6126 dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
6127 net->ipv6.rt6_stats->fib_discarded_routes);
6131 #endif /* CONFIG_PROC_FS */
6133 #ifdef CONFIG_SYSCTL
6136 int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
6137 void __user *buffer, size_t *lenp, loff_t *ppos)
6145 net = (struct net *)ctl->extra1;
6146 delay = net->ipv6.sysctl.flush_delay;
6147 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
6151 fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
6155 static struct ctl_table ipv6_route_table_template[] = {
6157 .procname = "flush",
6158 .data = &init_net.ipv6.sysctl.flush_delay,
6159 .maxlen = sizeof(int),
6161 .proc_handler = ipv6_sysctl_rtcache_flush
6164 .procname = "gc_thresh",
6165 .data = &ip6_dst_ops_template.gc_thresh,
6166 .maxlen = sizeof(int),
6168 .proc_handler = proc_dointvec,
6171 .procname = "max_size",
6172 .data = &init_net.ipv6.sysctl.ip6_rt_max_size,
6173 .maxlen = sizeof(int),
6175 .proc_handler = proc_dointvec,
6178 .procname = "gc_min_interval",
6179 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
6180 .maxlen = sizeof(int),
6182 .proc_handler = proc_dointvec_jiffies,
6185 .procname = "gc_timeout",
6186 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
6187 .maxlen = sizeof(int),
6189 .proc_handler = proc_dointvec_jiffies,
6192 .procname = "gc_interval",
6193 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
6194 .maxlen = sizeof(int),
6196 .proc_handler = proc_dointvec_jiffies,
6199 .procname = "gc_elasticity",
6200 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
6201 .maxlen = sizeof(int),
6203 .proc_handler = proc_dointvec,
6206 .procname = "mtu_expires",
6207 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
6208 .maxlen = sizeof(int),
6210 .proc_handler = proc_dointvec_jiffies,
6213 .procname = "min_adv_mss",
6214 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
6215 .maxlen = sizeof(int),
6217 .proc_handler = proc_dointvec,
6220 .procname = "gc_min_interval_ms",
6221 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
6222 .maxlen = sizeof(int),
6224 .proc_handler = proc_dointvec_ms_jiffies,
6227 .procname = "skip_notify_on_dev_down",
6228 .data = &init_net.ipv6.sysctl.skip_notify_on_dev_down,
6229 .maxlen = sizeof(int),
6231 .proc_handler = proc_dointvec_minmax,
6232 .extra1 = SYSCTL_ZERO,
6233 .extra2 = SYSCTL_ONE,
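/* Per-netns copy of the sysctl template, with each entry's data pointer
 * rewired to this netns.  The table is cut short for namespaces not owned
 * by init_user_ns so the route sysctls are not exported to unprivileged
 * users.
 */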
6238 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
6240 struct ctl_table *table;
6242 table = kmemdup(ipv6_route_table_template,
6243 sizeof(ipv6_route_table_template),
6247 table[0].data = &net->ipv6.sysctl.flush_delay;
6248 table[0].extra1 = net;
6249 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
6250 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
6251 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
6252 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
6253 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
6254 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
6255 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
6256 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
6257 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
6258 table[10].data = &net->ipv6.sysctl.skip_notify_on_dev_down;
6260 /* Don't export sysctls to unprivileged users */
6261 if (net->user_ns != &init_user_ns)
6262 table[0].procname = NULL;
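/* Per-netns setup: clone the dst_ops template, allocate the fib6 null
 * entry and the null/prohibit/blackhole dst templates, and seed the
 * default sysctl values (gc intervals, max_size, min advmss, ...).
 */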
6269 static int __net_init ip6_route_net_init(struct net *net)
6273 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
6274 sizeof(net->ipv6.ip6_dst_ops));
6276 if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
6277 goto out_ip6_dst_ops;
6279 net->ipv6.fib6_null_entry = fib6_info_alloc(GFP_KERNEL, true);
6280 if (!net->ipv6.fib6_null_entry)
6281 goto out_ip6_dst_entries;
6282 memcpy(net->ipv6.fib6_null_entry, &fib6_null_entry_template,
6283 sizeof(*net->ipv6.fib6_null_entry));
6285 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
6286 sizeof(*net->ipv6.ip6_null_entry),
6288 if (!net->ipv6.ip6_null_entry)
6289 goto out_fib6_null_entry;
6290 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6291 dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
6292 ip6_template_metrics, true);
6293 INIT_LIST_HEAD(&net->ipv6.ip6_null_entry->rt6i_uncached);
6295 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6296 net->ipv6.fib6_has_custom_rules = false;
6297 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
6298 sizeof(*net->ipv6.ip6_prohibit_entry),
6300 if (!net->ipv6.ip6_prohibit_entry)
6301 goto out_ip6_null_entry;
6302 net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6303 dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
6304 ip6_template_metrics, true);
6305 INIT_LIST_HEAD(&net->ipv6.ip6_prohibit_entry->rt6i_uncached);
6307 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
6308 sizeof(*net->ipv6.ip6_blk_hole_entry),
6310 if (!net->ipv6.ip6_blk_hole_entry)
6311 goto out_ip6_prohibit_entry;
6312 net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6313 dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
6314 ip6_template_metrics, true);
6315 INIT_LIST_HEAD(&net->ipv6.ip6_blk_hole_entry->rt6i_uncached);
6318 net->ipv6.sysctl.flush_delay = 0;
6319 net->ipv6.sysctl.ip6_rt_max_size = 4096;
6320 net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
6321 net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
6322 net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
6323 net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
6324 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
6325 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
6326 net->ipv6.sysctl.skip_notify_on_dev_down = 0;
6328 net->ipv6.ip6_rt_gc_expire = 30*HZ;
6334 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6335 out_ip6_prohibit_entry:
6336 kfree(net->ipv6.ip6_prohibit_entry);
6338 kfree(net->ipv6.ip6_null_entry);
6340 out_fib6_null_entry:
6341 kfree(net->ipv6.fib6_null_entry);
6342 out_ip6_dst_entries:
6343 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
6348 static void __net_exit ip6_route_net_exit(struct net *net)
6350 kfree(net->ipv6.fib6_null_entry);
6351 kfree(net->ipv6.ip6_null_entry);
6352 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6353 kfree(net->ipv6.ip6_prohibit_entry);
6354 kfree(net->ipv6.ip6_blk_hole_entry);
6356 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
6359 static int __net_init ip6_route_net_init_late(struct net *net)
6361 #ifdef CONFIG_PROC_FS
6362 proc_create_net("ipv6_route", 0, net->proc_net, &ipv6_route_seq_ops,
6363 sizeof(struct ipv6_route_iter));
6364 proc_create_net_single("rt6_stats", 0444, net->proc_net,
6365 rt6_stats_seq_show, NULL);
6370 static void __net_exit ip6_route_net_exit_late(struct net *net)
6372 #ifdef CONFIG_PROC_FS
6373 remove_proc_entry("ipv6_route", net->proc_net);
6374 remove_proc_entry("rt6_stats", net->proc_net);
6378 static struct pernet_operations ip6_route_net_ops = {
6379 .init = ip6_route_net_init,
6380 .exit = ip6_route_net_exit,
6383 static int __net_init ipv6_inetpeer_init(struct net *net)
6385 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
6389 inet_peer_base_init(bp);
6390 net->ipv6.peers = bp;
6394 static void __net_exit ipv6_inetpeer_exit(struct net *net)
6396 struct inet_peer_base *bp = net->ipv6.peers;
6398 net->ipv6.peers = NULL;
6399 inetpeer_invalidate_tree(bp);
6403 static struct pernet_operations ipv6_inetpeer_ops = {
6404 .init = ipv6_inetpeer_init,
6405 .exit = ipv6_inetpeer_exit,
6408 static struct pernet_operations ip6_route_net_late_ops = {
6409 .init = ip6_route_net_init_late,
6410 .exit = ip6_route_net_exit_late,
6413 static struct notifier_block ip6_route_dev_notifier = {
6414 .notifier_call = ip6_route_dev_notify,
6415 .priority = ADDRCONF_NOTIFY_PRIORITY - 10,
6418 void __init ip6_route_init_special_entries(void)
6420 /* The loopback device is registered before this code runs, so the
6421 * loopback reference in rt6_info will not have been taken; do it
6422 * manually for init_net */
6423 init_net.ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = init_net.loopback_dev;
6424 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
6425 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6426 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6427 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
6428 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6429 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
6430 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
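/* Module init: create the rt6_info slab cache, register the pernet
 * subsystems and rtnetlink handlers, hook the netdevice notifier and set
 * up the per-cpu uncached route lists, tearing everything down in reverse
 * order on failure.
 */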
6434 int __init ip6_route_init(void)
6440 ip6_dst_ops_template.kmem_cachep =
6441 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
6442 SLAB_HWCACHE_ALIGN, NULL);
6443 if (!ip6_dst_ops_template.kmem_cachep)
6446 ret = dst_entries_init(&ip6_dst_blackhole_ops);
6448 goto out_kmem_cache;
6450 ret = register_pernet_subsys(&ipv6_inetpeer_ops);
6452 goto out_dst_entries;
6454 ret = register_pernet_subsys(&ip6_route_net_ops);
6456 goto out_register_inetpeer;
6458 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
6462 goto out_register_subsys;
6468 ret = fib6_rules_init();
6472 ret = register_pernet_subsys(&ip6_route_net_late_ops);
6474 goto fib6_rules_init;
6476 ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWROUTE,
6477 inet6_rtm_newroute, NULL, 0);
6479 goto out_register_late_subsys;
6481 ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELROUTE,
6482 inet6_rtm_delroute, NULL, 0);
6484 goto out_register_late_subsys;
6486 ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE,
6487 inet6_rtm_getroute, NULL,
6488 RTNL_FLAG_DOIT_UNLOCKED);
6490 goto out_register_late_subsys;
6492 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
6494 goto out_register_late_subsys;
6496 for_each_possible_cpu(cpu) {
6497 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
6499 INIT_LIST_HEAD(&ul->head);
6500 spin_lock_init(&ul->lock);
6506 out_register_late_subsys:
6507 rtnl_unregister_all(PF_INET6);
6508 unregister_pernet_subsys(&ip6_route_net_late_ops);
6510 fib6_rules_cleanup();
6515 out_register_subsys:
6516 unregister_pernet_subsys(&ip6_route_net_ops);
6517 out_register_inetpeer:
6518 unregister_pernet_subsys(&ipv6_inetpeer_ops);
6520 dst_entries_destroy(&ip6_dst_blackhole_ops);
6522 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
6526 void ip6_route_cleanup(void)
6528 unregister_netdevice_notifier(&ip6_route_dev_notifier);
6529 unregister_pernet_subsys(&ip6_route_net_late_ops);
6530 fib6_rules_cleanup();
6533 unregister_pernet_subsys(&ipv6_inetpeer_ops);
6534 unregister_pernet_subsys(&ip6_route_net_ops);
6535 dst_entries_destroy(&ip6_dst_blackhole_ops);
6536 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);