// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Linux INET6 implementation
 *
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	YOSHIFUJI Hideaki @USAGI
 *		reworked default router selection.
 *		- respect outgoing interface
 *		- select from (probably) reachable routers (i.e.
 *		  routers in REACHABLE, STALE, DELAY or PROBE states).
 *		- always select the same router if it is (probably)
 *		  reachable.  otherwise, round-robin the list.
 *	Fixed routing subtrees.
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/times.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/mroute6.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <linux/siphash.h>
#include <net/net_namespace.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <linux/rtnetlink.h>
#include <net/dst_metadata.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <net/lwtunnel.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <linux/uaccess.h>
#include <linux/btf_ids.h>
#include <linux/sysctl.h>

static int ip6_rt_type_to_error(u8 fib6_type);

#define CREATE_TRACE_POINTS
#include <trace/events/fib6.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(fib6_table_lookup);
#undef CREATE_TRACE_POINTS
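
/*
 * Note: wrapping exactly one inclusion of the trace header in
 * CREATE_TRACE_POINTS is the usual kernel pattern for instantiating the
 * tracepoint bodies once; every other inclusion of <trace/events/fib6.h>
 * sees only the declarations.
 */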
enum rt6_nud_state {
	RT6_NUD_FAIL_HARD = -3,
	RT6_NUD_FAIL_PROBE = -2,
	RT6_NUD_FAIL_DO_RR = -1,
	RT6_NUD_SUCCEED = 1
};

static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ip6_default_advmss(const struct dst_entry *dst);
static unsigned int ip6_mtu(const struct dst_entry *dst);
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void ip6_dst_destroy(struct dst_entry *);
static void ip6_dst_ifdown(struct dst_entry *,
			   struct net_device *dev, int how);
static void ip6_dst_gc(struct dst_ops *ops);

static int ip6_pkt_discard(struct sk_buff *skb);
static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static int ip6_pkt_prohibit(struct sk_buff *skb);
static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static void ip6_link_failure(struct sk_buff *skb);
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu,
			       bool confirm_neigh);
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
			    struct sk_buff *skb);
static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
			   int strict);
static size_t rt6_nlmsg_size(struct fib6_info *f6i);
static int rt6_fill_node(struct net *net, struct sk_buff *skb,
			 struct fib6_info *rt, struct dst_entry *dst,
			 struct in6_addr *dest, struct in6_addr *src,
			 int iif, int type, u32 portid, u32 seq,
			 unsigned int flags);
static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr);

#ifdef CONFIG_IPV6_ROUTE_INFO
static struct fib6_info *rt6_add_route_info(struct net *net,
					    const struct in6_addr *prefix, int prefixlen,
					    const struct in6_addr *gwaddr,
					    struct net_device *dev,
					    unsigned int pref);
static struct fib6_info *rt6_get_route_info(struct net *net,
					    const struct in6_addr *prefix, int prefixlen,
					    const struct in6_addr *gwaddr,
					    struct net_device *dev);
#endif

struct uncached_list {
	spinlock_t		lock;
	struct list_head	head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);

void rt6_uncached_list_add(struct rt6_info *rt)
	struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);

	rt->rt6i_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->rt6i_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
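
/*
 * Why the uncached list exists (sketch): RTF_CACHE clones created outside
 * the fib6 tree -- see the FLOWI_FLAG_KNOWN_NH path in ip6_pol_route()
 * below -- are not reachable through any fib6_node, so they are tracked
 * on this per-CPU list; rt6_uncached_list_flush_dev() can then rehome
 * them to the blackhole/loopback device when their device goes away.
 */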
void rt6_uncached_list_del(struct rt6_info *rt)
	if (!list_empty(&rt->rt6i_uncached)) {
		struct uncached_list *ul = rt->rt6i_uncached_list;
		struct net *net = dev_net(rt->dst.dev);

		spin_lock_bh(&ul->lock);
		list_del(&rt->rt6i_uncached);
		atomic_dec(&net->ipv6.rt6_stats->fib_rt_uncache);
		spin_unlock_bh(&ul->lock);

static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
	struct net_device *loopback_dev = net->loopback_dev;

	if (dev == loopback_dev)

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);

		spin_lock_bh(&ul->lock);
		list_for_each_entry(rt, &ul->head, rt6i_uncached) {
			struct inet6_dev *rt_idev = rt->rt6i_idev;
			struct net_device *rt_dev = rt->dst.dev;

			if (rt_idev->dev == dev) {
				rt->rt6i_idev = in6_dev_get(loopback_dev);
				in6_dev_put(rt_idev);

			rt->dst.dev = blackhole_netdev;
			dev_hold(rt->dst.dev);

		spin_unlock_bh(&ul->lock);

static inline const void *choose_neigh_daddr(const struct in6_addr *p,
					     struct sk_buff *skb,
					     const void *daddr)
	if (!ipv6_addr_any(p))
		return (const void *) p;

		return &ipv6_hdr(skb)->daddr;

struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
				   struct net_device *dev,
				   struct sk_buff *skb,
				   const void *daddr)
	daddr = choose_neigh_daddr(gw, skb, daddr);
	n = __ipv6_neigh_lookup(dev, daddr);

	n = neigh_create(&nd_tbl, daddr, dev);
	return IS_ERR(n) ? NULL : n;

static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
					      struct sk_buff *skb,
					      const void *daddr)
	const struct rt6_info *rt = container_of(dst, struct rt6_info, dst);

	return ip6_neigh_lookup(rt6_nexthop(rt, &in6addr_any),
				dst->dev, skb, daddr);

static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
	struct net_device *dev = dst->dev;
	struct rt6_info *rt = (struct rt6_info *)dst;

	daddr = choose_neigh_daddr(rt6_nexthop(rt, &in6addr_any), NULL, daddr);

	if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))

	if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))

	__ipv6_confirm_neigh(dev, daddr);

static struct dst_ops ip6_dst_ops_template = {
	.check			= ip6_dst_check,
	.default_advmss		= ip6_default_advmss,
	.cow_metrics		= dst_cow_metrics_generic,
	.destroy		= ip6_dst_destroy,
	.ifdown			= ip6_dst_ifdown,
	.negative_advice	= ip6_negative_advice,
	.link_failure		= ip6_link_failure,
	.update_pmtu		= ip6_rt_update_pmtu,
	.redirect		= rt6_do_redirect,
	.local_out		= __ip6_local_out,
	.neigh_lookup		= ip6_dst_neigh_lookup,
	.confirm_neigh		= ip6_confirm_neigh,
};

static struct dst_ops ip6_dst_blackhole_ops = {
	.default_advmss		= ip6_default_advmss,
	.neigh_lookup		= ip6_dst_neigh_lookup,
	.check			= ip6_dst_check,
	.destroy		= ip6_dst_destroy,
	.cow_metrics		= dst_cow_metrics_generic,
	.update_pmtu		= dst_blackhole_update_pmtu,
	.redirect		= dst_blackhole_redirect,
	.mtu			= dst_blackhole_mtu,
};

static const u32 ip6_template_metrics[RTAX_MAX] = {
	[RTAX_HOPLIMIT - 1] = 0,
};

static const struct fib6_info fib6_null_entry_template = {
	.fib6_flags	= (RTF_REJECT | RTF_NONEXTHOP),
	.fib6_protocol	= RTPROT_KERNEL,
	.fib6_metric	= ~(u32)0,
	.fib6_ref	= REFCOUNT_INIT(1),
	.fib6_type	= RTN_UNREACHABLE,
	.fib6_metrics	= (struct dst_metrics *)&dst_default_metrics,
};

static const struct rt6_info ip6_null_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -ENETUNREACH,
		.input		= ip6_pkt_discard,
		.output		= ip6_pkt_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

#ifdef CONFIG_IPV6_MULTIPLE_TABLES

static const struct rt6_info ip6_prohibit_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.input		= ip6_pkt_prohibit,
		.output		= ip6_pkt_prohibit_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

static const struct rt6_info ip6_blk_hole_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.input		= dst_discard,
		.output		= dst_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

#endif

static void rt6_info_init(struct rt6_info *rt)
	struct dst_entry *dst = &rt->dst;

	memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
	INIT_LIST_HEAD(&rt->rt6i_uncached);

/* allocate dst with ip6_dst_ops */
struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
			       int flags)
	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
					1, DST_OBSOLETE_FORCE_CHK, flags);

	atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);

EXPORT_SYMBOL(ip6_dst_alloc);

static void ip6_dst_destroy(struct dst_entry *dst)
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct fib6_info *from;
	struct inet6_dev *idev;

	ip_dst_metrics_put(dst);
	rt6_uncached_list_del(rt);

	idev = rt->rt6i_idev;
	rt->rt6i_idev = NULL;

	from = xchg((__force struct fib6_info **)&rt->from, NULL);
	fib6_info_release(from);

static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			   int how)
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct inet6_dev *idev = rt->rt6i_idev;
	struct net_device *loopback_dev =
		dev_net(dev)->loopback_dev;

	if (idev && idev->dev != loopback_dev) {
		struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);

		rt->rt6i_idev = loopback_idev;

static bool __rt6_check_expired(const struct rt6_info *rt)
	if (rt->rt6i_flags & RTF_EXPIRES)
		return time_after(jiffies, rt->dst.expires);

static bool rt6_check_expired(const struct rt6_info *rt)
	struct fib6_info *from;

	from = rcu_dereference(rt->from);

	if (rt->rt6i_flags & RTF_EXPIRES) {
		if (time_after(jiffies, rt->dst.expires))

	return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
	       fib6_check_expired(from);

void fib6_select_path(const struct net *net, struct fib6_result *res,
		      struct flowi6 *fl6, int oif, bool have_oif_match,
		      const struct sk_buff *skb, int strict)
	struct fib6_info *sibling, *next_sibling;
	struct fib6_info *match = res->f6i;

	if (!match->nh && (!match->fib6_nsiblings || have_oif_match))

	if (match->nh && have_oif_match && res->nh)

	/* We might have already computed the hash for ICMPv6 errors. In such
	 * case it will always be non-zero. Otherwise now is the time to do it.
	 */
	    (!match->nh || nexthop_is_multipath(match->nh)))
		fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);

	if (unlikely(match->nh)) {
		nexthop_path_fib6_result(res, fl6->mp_hash);

	if (fl6->mp_hash <= atomic_read(&match->fib6_nh->fib_nh_upper_bound))

	list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings,
				 fib6_siblings) {
		const struct fib6_nh *nh = sibling->fib6_nh;

		nh_upper_bound = atomic_read(&nh->fib_nh_upper_bound);
		if (fl6->mp_hash > nh_upper_bound)

		if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0)

	res->nh = match->fib6_nh;
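
/*
 * Worked example for the threshold walk above (illustrative numbers, not
 * taken from this file): with two siblings weighted 1 and 3,
 * fib6_rebalance() sets the first nexthop's fib_nh_upper_bound to roughly
 * 2^31/4 - 1 and the second to 2^31 - 1.  rt6_multipath_hash() returns a
 * 31-bit value, so the selection is, roughly:
 *
 *	if (fl6->mp_hash <= nh_upper_bound)	// ~25% of flows here
 *		use this nexthop;
 *
 * and everything else falls through the sibling walk to the next hop.
 */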
/*
 *	Route lookup. rcu_read_lock() should be held.
 */

static bool __rt6_device_match(struct net *net, const struct fib6_nh *nh,
			       const struct in6_addr *saddr, int oif, int flags)
	const struct net_device *dev;

	if (nh->fib_nh_flags & RTNH_F_DEAD)

	dev = nh->fib_nh_dev;
	if (dev->ifindex == oif)

	if (ipv6_chk_addr(net, saddr, dev,
			  flags & RT6_LOOKUP_F_IFACE))

struct fib6_nh_dm_arg {
	struct net *net;
	const struct in6_addr *saddr;
	int oif;
	int flags;
};

static int __rt6_nh_dev_match(struct fib6_nh *nh, void *_arg)
	struct fib6_nh_dm_arg *arg = _arg;

	return __rt6_device_match(arg->net, nh, arg->saddr, arg->oif,
				  arg->flags);

/* returns fib6_nh from nexthop or NULL */
static struct fib6_nh *rt6_nh_dev_match(struct net *net, struct nexthop *nh,
					struct fib6_result *res,
					const struct in6_addr *saddr,
					int oif, int flags)
	struct fib6_nh_dm_arg arg = {

	if (nexthop_is_blackhole(nh))

	if (nexthop_for_each_fib6_nh(nh, __rt6_nh_dev_match, &arg))

static void rt6_device_match(struct net *net, struct fib6_result *res,
			     const struct in6_addr *saddr, int oif, int flags)
	struct fib6_info *f6i = res->f6i;
	struct fib6_info *spf6i;
	struct fib6_nh *nh;

	if (!oif && ipv6_addr_any(saddr)) {
		if (unlikely(f6i->nh)) {
			nh = nexthop_fib6_nh(f6i->nh);
			if (nexthop_is_blackhole(f6i->nh))

		if (!(nh->fib_nh_flags & RTNH_F_DEAD))

	for (spf6i = f6i; spf6i; spf6i = rcu_dereference(spf6i->fib6_next)) {
		bool matched = false;

		if (unlikely(spf6i->nh)) {
			nh = rt6_nh_dev_match(net, spf6i->nh, res, saddr,
					      oif, flags);

		if (__rt6_device_match(net, nh, saddr, oif, flags))

	if (oif && flags & RT6_LOOKUP_F_IFACE) {
		res->f6i = net->ipv6.fib6_null_entry;
		nh = res->f6i->fib6_nh;

	if (unlikely(f6i->nh)) {
		nh = nexthop_fib6_nh(f6i->nh);
		if (nexthop_is_blackhole(f6i->nh))

	if (nh->fib_nh_flags & RTNH_F_DEAD) {
		res->f6i = net->ipv6.fib6_null_entry;
		nh = res->f6i->fib6_nh;

	res->fib6_type = res->f6i->fib6_type;
	res->fib6_flags = res->f6i->fib6_flags;

	res->fib6_flags |= RTF_REJECT;
	res->fib6_type = RTN_BLACKHOLE;

#ifdef CONFIG_IPV6_ROUTER_PREF
struct __rt6_probe_work {
	struct work_struct work;
	struct in6_addr target;
	struct net_device *dev;
};

static void rt6_probe_deferred(struct work_struct *w)
	struct in6_addr mcaddr;
	struct __rt6_probe_work *work =
		container_of(w, struct __rt6_probe_work, work);

	addrconf_addr_solict_mult(&work->target, &mcaddr);
	ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
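
/*
 * Rationale, as we read it: rt6_probe() below runs in the packet fast
 * path, so the actual neighbour solicitation is punted to the system
 * workqueue, where ndisc_send_ns() can allocate and transmit in process
 * context.
 */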
static void rt6_probe(struct fib6_nh *fib6_nh)
	struct __rt6_probe_work *work = NULL;
	const struct in6_addr *nh_gw;
	unsigned long last_probe;
	struct neighbour *neigh;
	struct net_device *dev;
	struct inet6_dev *idev;

	/*
	 * Okay, this does not seem to be appropriate
	 * for now, however, we need to check if it
	 * is really so; aka Router Reachability Probing.
	 *
	 * Router Reachability Probe MUST be rate-limited
	 * to no more than one per minute.
	 */
	if (!fib6_nh->fib_nh_gw_family)

	nh_gw = &fib6_nh->fib_nh_gw6;
	dev = fib6_nh->fib_nh_dev;

	last_probe = READ_ONCE(fib6_nh->last_probe);
	idev = __in6_dev_get(dev);
	neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);

		if (neigh->nud_state & NUD_VALID)

		write_lock(&neigh->lock);
		if (!(neigh->nud_state & NUD_VALID) &&
		    time_after(jiffies,
			       neigh->updated + idev->cnf.rtr_probe_interval)) {
			work = kmalloc(sizeof(*work), GFP_ATOMIC);
				__neigh_set_probe_once(neigh);
		write_unlock(&neigh->lock);
	} else if (time_after(jiffies, last_probe +
			      idev->cnf.rtr_probe_interval)) {
		work = kmalloc(sizeof(*work), GFP_ATOMIC);

	if (!work || cmpxchg(&fib6_nh->last_probe,
			     last_probe, jiffies) != last_probe) {

		INIT_WORK(&work->work, rt6_probe_deferred);
		work->target = *nh_gw;

		schedule_work(&work->work);

	rcu_read_unlock_bh();
#else
static inline void rt6_probe(struct fib6_nh *fib6_nh)
{
}
#endif

/*
 * Default Router Selection (RFC 2461 6.3.6)
 */
static enum rt6_nud_state rt6_check_neigh(const struct fib6_nh *fib6_nh)
	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
	struct neighbour *neigh;

	neigh = __ipv6_neigh_lookup_noref(fib6_nh->fib_nh_dev,
					  &fib6_nh->fib_nh_gw6);

		read_lock(&neigh->lock);
		if (neigh->nud_state & NUD_VALID)
			ret = RT6_NUD_SUCCEED;
#ifdef CONFIG_IPV6_ROUTER_PREF
		else if (!(neigh->nud_state & NUD_FAILED))
			ret = RT6_NUD_SUCCEED;
		else
			ret = RT6_NUD_FAIL_PROBE;
#endif
		read_unlock(&neigh->lock);

		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;

	rcu_read_unlock_bh();

static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
			   int strict)
	if (!oif || nh->fib_nh_dev->ifindex == oif)

	if (!m && (strict & RT6_LOOKUP_F_IFACE))
		return RT6_NUD_FAIL_HARD;
#ifdef CONFIG_IPV6_ROUTER_PREF
	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(fib6_flags)) << 2;
#endif
	if ((strict & RT6_LOOKUP_F_REACHABLE) &&
	    !(fib6_flags & RTF_NONEXTHOP) && nh->fib_nh_gw_family) {
		int n = rt6_check_neigh(nh);

static bool find_match(struct fib6_nh *nh, u32 fib6_flags,
		       int oif, int strict, int *mpri, bool *do_rr)
	bool match_do_rr = false;

	if (nh->fib_nh_flags & RTNH_F_DEAD)

	if (ip6_ignore_linkdown(nh->fib_nh_dev) &&
	    nh->fib_nh_flags & RTNH_F_LINKDOWN &&
	    !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))

	m = rt6_score_route(nh, fib6_flags, oif, strict);
	if (m == RT6_NUD_FAIL_DO_RR) {
		match_do_rr = true;
		m = 0; /* lowest valid score */
	} else if (m == RT6_NUD_FAIL_HARD) {

	if (strict & RT6_LOOKUP_F_REACHABLE)

	/* note that m can be RT6_NUD_FAIL_PROBE at this point */
		*do_rr = match_do_rr;

struct fib6_nh_frl_arg {
	u32 flags;
	int oif;
	int strict;
	int *mpri;
	bool *do_rr;
};

static int rt6_nh_find_match(struct fib6_nh *nh, void *_arg)
	struct fib6_nh_frl_arg *arg = _arg;

	return find_match(nh, arg->flags, arg->oif, arg->strict,
			  arg->mpri, arg->do_rr);

static void __find_rr_leaf(struct fib6_info *f6i_start,
			   struct fib6_info *nomatch, u32 metric,
			   struct fib6_result *res, struct fib6_info **cont,
			   int oif, int strict, bool *do_rr, int *mpri)
	struct fib6_info *f6i;

	for (f6i = f6i_start;
	     f6i && f6i != nomatch;
	     f6i = rcu_dereference(f6i->fib6_next)) {
		bool matched = false;

		if (cont && f6i->fib6_metric != metric) {

		if (fib6_check_expired(f6i))

		if (unlikely(f6i->nh)) {
			struct fib6_nh_frl_arg arg = {
				.flags = f6i->fib6_flags,
			};

			if (nexthop_is_blackhole(f6i->nh)) {
				res->fib6_flags = RTF_REJECT;
				res->fib6_type = RTN_BLACKHOLE;

				res->nh = nexthop_fib6_nh(f6i->nh);

			if (nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_find_match,
						     &arg))

			if (find_match(nh, f6i->fib6_flags, oif, strict,
				       mpri, do_rr))

			res->fib6_flags = f6i->fib6_flags;
			res->fib6_type = f6i->fib6_type;

static void find_rr_leaf(struct fib6_node *fn, struct fib6_info *leaf,
			 struct fib6_info *rr_head, int oif, int strict,
			 bool *do_rr, struct fib6_result *res)
	u32 metric = rr_head->fib6_metric;
	struct fib6_info *cont = NULL;

	__find_rr_leaf(rr_head, NULL, metric, res, &cont,
		       oif, strict, do_rr, &mpri);

	__find_rr_leaf(leaf, rr_head, metric, res, &cont,
		       oif, strict, do_rr, &mpri);

	if (res->f6i || !cont)

	__find_rr_leaf(cont, NULL, metric, res, NULL,
		       oif, strict, do_rr, &mpri);

static void rt6_select(struct net *net, struct fib6_node *fn, int oif,
		       struct fib6_result *res, int strict)
	struct fib6_info *leaf = rcu_dereference(fn->leaf);
	struct fib6_info *rt0;

	/* make sure this function or its helpers sets f6i */

	if (!leaf || leaf == net->ipv6.fib6_null_entry)

	rt0 = rcu_dereference(fn->rr_ptr);

	/* Double check to make sure fn is not an intermediate node
	 * and fn->leaf does not point to its child's leaf
	 * (This might happen if all routes under fn are deleted from
	 * the tree and fib6_repair_tree() is called on the node.)
	 */
	key_plen = rt0->fib6_dst.plen;
#ifdef CONFIG_IPV6_SUBTREES
	if (rt0->fib6_src.plen)
		key_plen = rt0->fib6_src.plen;
#endif
	if (fn->fn_bit != key_plen)

	find_rr_leaf(fn, leaf, rt0, oif, strict, &do_rr, res);
		struct fib6_info *next = rcu_dereference(rt0->fib6_next);

		/* no entries matched; do round-robin */
		if (!next || next->fib6_metric != rt0->fib6_metric)

		spin_lock_bh(&leaf->fib6_table->tb6_lock);
		/* make sure next is not being deleted from the tree */
		rcu_assign_pointer(fn->rr_ptr, next);
		spin_unlock_bh(&leaf->fib6_table->tb6_lock);

	res->f6i = net->ipv6.fib6_null_entry;
	res->nh = res->f6i->fib6_nh;
	res->fib6_flags = res->f6i->fib6_flags;
	res->fib6_type = res->f6i->fib6_type;

static bool rt6_is_gw_or_nonexthop(const struct fib6_result *res)
	return (res->f6i->fib6_flags & RTF_NONEXTHOP) ||
	       res->nh->fib_nh_gw_family;

#ifdef CONFIG_IPV6_ROUTE_INFO
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
		  const struct in6_addr *gwaddr)
	struct net *net = dev_net(dev);
	struct route_info *rinfo = (struct route_info *) opt;
	struct in6_addr prefix_buf, *prefix;
	unsigned long lifetime;
	struct fib6_info *rt;

	if (len < sizeof(struct route_info)) {

	/* Sanity check for prefix_len and length */
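	/* The option's length field is in units of 8 octets and includes
	 * the 8-byte header: length 1 leaves no room for prefix bits,
	 * length 2 carries 8 prefix octets (enough for a /64), and length
	 * 3 carries all 16 (up to /128) -- hence the pairings checked
	 * below (RFC 4191, section 2.3).
	 */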
	if (rinfo->length > 3) {
	} else if (rinfo->prefix_len > 128) {
	} else if (rinfo->prefix_len > 64) {
		if (rinfo->length < 2) {
	} else if (rinfo->prefix_len > 0) {
		if (rinfo->length < 1) {

	pref = rinfo->route_pref;
	if (pref == ICMPV6_ROUTER_PREF_INVALID)

	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);

	if (rinfo->length == 3)
		prefix = (struct in6_addr *)rinfo->prefix;

		/* this function is safe */
		ipv6_addr_prefix(&prefix_buf,
				 (struct in6_addr *)rinfo->prefix,
				 rinfo->prefix_len);
		prefix = &prefix_buf;

	if (rinfo->prefix_len == 0)
		rt = rt6_get_dflt_router(net, gwaddr, dev);
	else
		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
					gwaddr, dev);

	if (rt && !lifetime) {
		ip6_del_rt(net, rt, false);

	rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
				dev, pref);

	rt->fib6_flags = RTF_ROUTEINFO |
			 (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);

	if (!addrconf_finite_timeout(lifetime))
		fib6_clean_expires(rt);
	else
		fib6_set_expires(rt, jiffies + HZ * lifetime);

	fib6_info_release(rt);
#endif

/*
 *	Misc support functions
 */

/* called with rcu_lock held */
static struct net_device *ip6_rt_get_dev_rcu(const struct fib6_result *res)
	struct net_device *dev = res->nh->fib_nh_dev;

	if (res->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) {
		/* for copies of local routes, dst->dev needs to be the
		 * device if it is a master device, the master device if
		 * device is enslaved, and the loopback as the default
		 */
		if (netif_is_l3_slave(dev) &&
		    !rt6_need_strict(&res->f6i->fib6_dst.addr))
			dev = l3mdev_master_dev_rcu(dev);
		else if (!netif_is_l3_master(dev))
			dev = dev_net(dev)->loopback_dev;
		/* last case is netif_is_l3_master(dev) is true in which
		 * case we want dev returned to be dev
		 */

static const int fib6_prop[RTN_MAX + 1] = {
	[RTN_BROADCAST]	  = 0,
	[RTN_MULTICAST]	  = 0,
	[RTN_BLACKHOLE]	  = -EINVAL,
	[RTN_UNREACHABLE] = -EHOSTUNREACH,
	[RTN_PROHIBIT]	  = -EACCES,
	[RTN_THROW]	  = -EAGAIN,
	[RTN_NAT]	  = -EINVAL,
	[RTN_XRESOLVE]	  = -EINVAL,
};

static int ip6_rt_type_to_error(u8 fib6_type)
	return fib6_prop[fib6_type];

static unsigned short fib6_info_dst_flags(struct fib6_info *rt)
	unsigned short flags = 0;

	if (rt->dst_nocount)
		flags |= DST_NOCOUNT;
	if (rt->dst_nopolicy)
		flags |= DST_NOPOLICY;

static void ip6_rt_init_dst_reject(struct rt6_info *rt, u8 fib6_type)
	rt->dst.error = ip6_rt_type_to_error(fib6_type);

	switch (fib6_type) {
		rt->dst.output = dst_discard_out;
		rt->dst.input = dst_discard;

		rt->dst.output = ip6_pkt_prohibit_out;
		rt->dst.input = ip6_pkt_prohibit;

	case RTN_UNREACHABLE:
		rt->dst.output = ip6_pkt_discard_out;
		rt->dst.input = ip6_pkt_discard;

static void ip6_rt_init_dst(struct rt6_info *rt, const struct fib6_result *res)
	struct fib6_info *f6i = res->f6i;

	if (res->fib6_flags & RTF_REJECT) {
		ip6_rt_init_dst_reject(rt, res->fib6_type);

	rt->dst.output = ip6_output;

	if (res->fib6_type == RTN_LOCAL || res->fib6_type == RTN_ANYCAST) {
		rt->dst.input = ip6_input;
	} else if (ipv6_addr_type(&f6i->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
		rt->dst.input = ip6_mc_input;
	} else {
		rt->dst.input = ip6_forward;
	}

	if (res->nh->fib_nh_lws) {
		rt->dst.lwtstate = lwtstate_get(res->nh->fib_nh_lws);
		lwtunnel_set_redirect(&rt->dst);

	rt->dst.lastuse = jiffies;

/* Caller must already hold reference to @from */
static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
	rt->rt6i_flags &= ~RTF_EXPIRES;
	rcu_assign_pointer(rt->from, from);
	ip_dst_init_metrics(&rt->dst, from->fib6_metrics);

/* Caller must already hold reference to f6i in result */
static void ip6_rt_copy_init(struct rt6_info *rt, const struct fib6_result *res)
	const struct fib6_nh *nh = res->nh;
	const struct net_device *dev = nh->fib_nh_dev;
	struct fib6_info *f6i = res->f6i;

	ip6_rt_init_dst(rt, res);

	rt->rt6i_dst = f6i->fib6_dst;
	rt->rt6i_idev = dev ? in6_dev_get(dev) : NULL;
	rt->rt6i_flags = res->fib6_flags;
	if (nh->fib_nh_gw_family) {
		rt->rt6i_gateway = nh->fib_nh_gw6;
		rt->rt6i_flags |= RTF_GATEWAY;

	rt6_set_from(rt, f6i);
#ifdef CONFIG_IPV6_SUBTREES
	rt->rt6i_src = f6i->fib6_src;
#endif

static struct fib6_node *fib6_backtrack(struct fib6_node *fn,
					struct in6_addr *saddr)
	struct fib6_node *pn, *sn;

	if (fn->fn_flags & RTN_TL_ROOT)

	pn = rcu_dereference(fn->parent);
	sn = FIB6_SUBTREE(pn);

	fn = fib6_node_lookup(sn, NULL, saddr);

	if (fn->fn_flags & RTN_RTINFO)

static bool ip6_hold_safe(struct net *net, struct rt6_info **prt)
	struct rt6_info *rt = *prt;

	if (dst_hold_safe(&rt->dst))

	rt = net->ipv6.ip6_null_entry;

/* called with rcu_lock held */
static struct rt6_info *ip6_create_rt_rcu(const struct fib6_result *res)
	struct net_device *dev = res->nh->fib_nh_dev;
	struct fib6_info *f6i = res->f6i;
	unsigned short flags;
	struct rt6_info *nrt;

	if (!fib6_info_hold_safe(f6i))

	flags = fib6_info_dst_flags(f6i);
	nrt = ip6_dst_alloc(dev_net(dev), dev, flags);

		fib6_info_release(f6i);

	ip6_rt_copy_init(nrt, res);

	nrt = dev_net(dev)->ipv6.ip6_null_entry;
	dst_hold(&nrt->dst);

INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_lookup(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
	struct fib6_result res = {};
	struct fib6_node *fn;
	struct rt6_info *rt;

	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		flags &= ~RT6_LOOKUP_F_IFACE;

	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
	res.f6i = rcu_dereference(fn->leaf);
		res.f6i = net->ipv6.fib6_null_entry;

	rt6_device_match(net, &res, &fl6->saddr, fl6->flowi6_oif,
			 flags);

	if (res.f6i == net->ipv6.fib6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);

		rt = net->ipv6.ip6_null_entry;

	} else if (res.fib6_flags & RTF_REJECT) {

	fib6_select_path(net, &res, fl6, fl6->flowi6_oif,
			 fl6->flowi6_oif != 0, skb, flags);

	/* Search through exception table */
	rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);

	if (ip6_hold_safe(net, &rt))
		dst_use_noref(&rt->dst, jiffies);

		rt = ip6_create_rt_rcu(&res);

	trace_fib6_table_lookup(net, &res, table, fl6);

struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
				   const struct sk_buff *skb, int flags)
	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup);
EXPORT_SYMBOL_GPL(ip6_route_lookup);

struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
			    const struct in6_addr *saddr, int oif,
			    const struct sk_buff *skb, int strict)
	struct flowi6 fl6 = {
		.flowi6_oif = oif,
		.daddr = *daddr,
	};
	struct dst_entry *dst;
	int flags = strict ? RT6_LOOKUP_F_IFACE : 0;

	memcpy(&fl6.saddr, saddr, sizeof(*saddr));
	flags |= RT6_LOOKUP_F_HAS_SADDR;

	dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
	if (dst->error == 0)
		return (struct rt6_info *) dst;

EXPORT_SYMBOL(rt6_lookup);

/* ip6_ins_rt is called with FREE table->tb6_lock.
 * It takes a new route entry; if the addition fails for any reason, the
 * route is released.
 * Caller must hold dst before calling it.
 */

static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info,
			struct netlink_ext_ack *extack)
	struct fib6_table *table;

	table = rt->fib6_table;
	spin_lock_bh(&table->tb6_lock);
	err = fib6_add(&table->tb6_root, rt, info, extack);
	spin_unlock_bh(&table->tb6_lock);

int ip6_ins_rt(struct net *net, struct fib6_info *rt)
	struct nl_info info = { .nl_net = net, };

	return __ip6_ins_rt(rt, &info, NULL);

static struct rt6_info *ip6_rt_cache_alloc(const struct fib6_result *res,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr)
	struct fib6_info *f6i = res->f6i;
	struct net_device *dev;
	struct rt6_info *rt;

	if (!fib6_info_hold_safe(f6i))

	dev = ip6_rt_get_dev_rcu(res);
	rt = ip6_dst_alloc(dev_net(dev), dev, 0);

		fib6_info_release(f6i);

	ip6_rt_copy_init(rt, res);
	rt->rt6i_flags |= RTF_CACHE;
	rt->rt6i_dst.addr = *daddr;
	rt->rt6i_dst.plen = 128;

	if (!rt6_is_gw_or_nonexthop(res)) {
		if (f6i->fib6_dst.plen != 128 &&
		    ipv6_addr_equal(&f6i->fib6_dst.addr, daddr))
			rt->rt6i_flags |= RTF_ANYCAST;
#ifdef CONFIG_IPV6_SUBTREES
		if (rt->rt6i_src.plen && saddr) {
			rt->rt6i_src.addr = *saddr;
			rt->rt6i_src.plen = 128;
#endif

static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res)
	struct fib6_info *f6i = res->f6i;
	unsigned short flags = fib6_info_dst_flags(f6i);
	struct net_device *dev;
	struct rt6_info *pcpu_rt;

	if (!fib6_info_hold_safe(f6i))

	dev = ip6_rt_get_dev_rcu(res);
	pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags | DST_NOCOUNT);

		fib6_info_release(f6i);

	ip6_rt_copy_init(pcpu_rt, res);
	pcpu_rt->rt6i_flags |= RTF_PCPU;

	pcpu_rt->sernum = rt_genid_ipv6(dev_net(dev));

static bool rt6_is_valid(const struct rt6_info *rt6)
	return rt6->sernum == rt_genid_ipv6(dev_net(rt6->dst.dev));

/* It should be called with rcu_read_lock() acquired */
static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
	struct rt6_info *pcpu_rt;

	pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu);

	if (pcpu_rt && pcpu_rt->sernum && !rt6_is_valid(pcpu_rt)) {
		struct rt6_info *prev, **p;

		p = this_cpu_ptr(res->nh->rt6i_pcpu);
		prev = xchg(p, NULL);

			dst_dev_put(&prev->dst);
			dst_release(&prev->dst);
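
/* A pcpu copy whose sernum no longer matches the namespace's route genid
 * (see rt6_is_valid() above) is dropped lazily here, by the next lookup
 * that trips over it, rather than being flushed synchronously when the
 * genid is bumped.
 */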
static struct rt6_info *rt6_make_pcpu_route(struct net *net,
					    const struct fib6_result *res)
	struct rt6_info *pcpu_rt, *prev, **p;

	pcpu_rt = ip6_rt_pcpu_alloc(res);

	p = this_cpu_ptr(res->nh->rt6i_pcpu);
	prev = cmpxchg(p, NULL, pcpu_rt);

	if (res->f6i->fib6_destroying) {
		struct fib6_info *from;

		from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
		fib6_info_release(from);

/* exception hash table implementation
 */
static DEFINE_SPINLOCK(rt6_exception_lock);

/* Remove rt6_ex from hash table and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
				 struct rt6_exception *rt6_ex)
	struct fib6_info *from;

	if (!bucket || !rt6_ex)

	net = dev_net(rt6_ex->rt6i->dst.dev);
	net->ipv6.rt6_stats->fib_rt_cache--;

	/* purge completely the exception to allow releasing the held resources:
	 * some [sk] cache may keep the dst around for unlimited time
	 */
	from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL);
	fib6_info_release(from);
	dst_dev_put(&rt6_ex->rt6i->dst);

	hlist_del_rcu(&rt6_ex->hlist);
	dst_release(&rt6_ex->rt6i->dst);
	kfree_rcu(rt6_ex, rcu);
	WARN_ON_ONCE(!bucket->depth);

/* Remove oldest rt6_ex in bucket and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
	struct rt6_exception *rt6_ex, *oldest = NULL;

	hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
		if (!oldest || time_before(rt6_ex->stamp, oldest->stamp))

	rt6_remove_exception(bucket, oldest);

static u32 rt6_exception_hash(const struct in6_addr *dst,
			      const struct in6_addr *src)
	static siphash_key_t rt6_exception_key __read_mostly;
	struct {
		struct in6_addr dst;
		struct in6_addr src;
	} __aligned(SIPHASH_ALIGNMENT) combined = {

	net_get_random_once(&rt6_exception_key, sizeof(rt6_exception_key));

#ifdef CONFIG_IPV6_SUBTREES
		combined.src = *src;
#endif
	val = siphash(&combined, sizeof(combined), &rt6_exception_key);

	return hash_64(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
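
/* The lazily seeded random siphash key keeps the bucket index
 * unpredictable to off-path senders, and hash_64() folds the 64-bit
 * result down to FIB6_EXCEPTION_BUCKET_SIZE_SHIFT bits -- e.g. an
 * exception for 2001:db8::1 always lands in one fixed but
 * attacker-unknown bucket.
 */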
/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rt6_exception_lock
 */
static struct rt6_exception *
__rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
			      const struct in6_addr *daddr,
			      const struct in6_addr *saddr)
	struct rt6_exception *rt6_ex;

	if (!(*bucket) || !daddr)

	hval = rt6_exception_hash(daddr, saddr);

	hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
		struct rt6_info *rt6 = rt6_ex->rt6i;
		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
		if (matched && saddr)
			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif

/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rcu_read_lock()
 */
static struct rt6_exception *
__rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
			 const struct in6_addr *daddr,
			 const struct in6_addr *saddr)
	struct rt6_exception *rt6_ex;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (!(*bucket) || !daddr)

	hval = rt6_exception_hash(daddr, saddr);

	hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
		struct rt6_info *rt6 = rt6_ex->rt6i;
		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
		if (matched && saddr)
			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif

static unsigned int fib6_mtu(const struct fib6_result *res)
	const struct fib6_nh *nh = res->nh;

	if (res->f6i->fib6_pmtu) {
		mtu = res->f6i->fib6_pmtu;
	} else {
		struct net_device *dev = nh->fib_nh_dev;
		struct inet6_dev *idev;

		idev = __in6_dev_get(dev);
		mtu = idev->cnf.mtu6;
	}

	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

	return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);

#define FIB6_EXCEPTION_BUCKET_FLUSHED  0x1UL

/* used when the flushed bit is not relevant, only access to the bucket
 * (i.e., all bucket users except rt6_insert_exception);
 *
 * called under rcu lock; sometimes called with rt6_exception_lock held
 */
struct rt6_exception_bucket *fib6_nh_get_excptn_bucket(const struct fib6_nh *nh,
						       spinlock_t *lock)
	struct rt6_exception_bucket *bucket;

	if (lock)
		bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
						   lockdep_is_held(lock));
	else
		bucket = rcu_dereference(nh->rt6i_exception_bucket);

	/* remove bucket flushed bit if set */
		unsigned long p = (unsigned long)bucket;

		p &= ~FIB6_EXCEPTION_BUCKET_FLUSHED;
		bucket = (struct rt6_exception_bucket *)p;

static bool fib6_nh_excptn_bucket_flushed(struct rt6_exception_bucket *bucket)
	unsigned long p = (unsigned long)bucket;

	return !!(p & FIB6_EXCEPTION_BUCKET_FLUSHED);

/* called with rt6_exception_lock held */
static void fib6_nh_excptn_bucket_set_flushed(struct fib6_nh *nh,
					      spinlock_t *lock)
	struct rt6_exception_bucket *bucket;

	bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
					   lockdep_is_held(lock));

	p = (unsigned long)bucket;
	p |= FIB6_EXCEPTION_BUCKET_FLUSHED;
	bucket = (struct rt6_exception_bucket *)p;
	rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);

static int rt6_insert_exception(struct rt6_info *nrt,
				const struct fib6_result *res)
	struct net *net = dev_net(nrt->dst.dev);
	struct rt6_exception_bucket *bucket;
	struct fib6_info *f6i = res->f6i;
	struct in6_addr *src_key = NULL;
	struct rt6_exception *rt6_ex;
	struct fib6_nh *nh = res->nh;

	spin_lock_bh(&rt6_exception_lock);

	bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
					   lockdep_is_held(&rt6_exception_lock));

		bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
				 GFP_ATOMIC);

		rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
	} else if (fib6_nh_excptn_bucket_flushed(bucket)) {

#ifdef CONFIG_IPV6_SUBTREES
	/* fib6_src.plen != 0 indicates f6i is in subtree
	 * and exception table is indexed by a hash of
	 * both fib6_dst and fib6_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only fib6_dst.
	 */
	if (f6i->fib6_src.plen)
		src_key = &nrt->rt6i_src.addr;
#endif
	/* rt6_mtu_change() might lower mtu on f6i.
	 * Only insert this exception route if its mtu
	 * is less than f6i's mtu value.
	 */
	if (dst_metric_raw(&nrt->dst, RTAX_MTU) >= fib6_mtu(res)) {

	rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
					       src_key);

		rt6_remove_exception(bucket, rt6_ex);

	rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC);

	rt6_ex->stamp = jiffies;
	hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);

	net->ipv6.rt6_stats->fib_rt_cache++;

	/* Randomize max depth to avoid side-channel attacks. */
	max_depth = FIB6_MAX_DEPTH + prandom_u32_max(FIB6_MAX_DEPTH);
	while (bucket->depth > max_depth)
		rt6_exception_remove_oldest(bucket);
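
	/* Our reading of the note above: with a fixed cap, a sender able
	 * to force exception insertions could tell when evictions begin
	 * and learn something about the hash; drawing max_depth from
	 * [FIB6_MAX_DEPTH, 2 * FIB6_MAX_DEPTH) blunts that signal.
	 */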
	spin_unlock_bh(&rt6_exception_lock);

	/* Update fn->fn_sernum to invalidate all cached dst */
	spin_lock_bh(&f6i->fib6_table->tb6_lock);
	fib6_update_sernum(net, f6i);
	spin_unlock_bh(&f6i->fib6_table->tb6_lock);
	fib6_force_start_gc(net);

static void fib6_nh_flush_exceptions(struct fib6_nh *nh, struct fib6_info *from)
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;

	spin_lock_bh(&rt6_exception_lock);

	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);

	/* Prevent rt6_insert_exception() from recreating the bucket list */
	fib6_nh_excptn_bucket_set_flushed(nh, &rt6_exception_lock);

	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
		hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist) {
			if (!from ||
			    rcu_access_pointer(rt6_ex->rt6i->from) == from)
				rt6_remove_exception(bucket, rt6_ex);
		}
		WARN_ON_ONCE(!from && bucket->depth);

	spin_unlock_bh(&rt6_exception_lock);

static int rt6_nh_flush_exceptions(struct fib6_nh *nh, void *arg)
	struct fib6_info *f6i = arg;

	fib6_nh_flush_exceptions(nh, f6i);

void rt6_flush_exceptions(struct fib6_info *f6i)
	if (f6i->nh)
		nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_flush_exceptions,
					 f6i);
	else
		fib6_nh_flush_exceptions(f6i->fib6_nh, f6i);

/* Find cached rt in the hash table inside passed in rt
 * Caller has to hold rcu_read_lock()
 */
static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr)
	const struct in6_addr *src_key = NULL;
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct rt6_info *ret = NULL;

#ifdef CONFIG_IPV6_SUBTREES
	/* fib6_src.plen != 0 indicates f6i is in subtree
	 * and exception table is indexed by a hash of
	 * both fib6_dst and fib6_src.
	 * However, the src addr used to create the hash
	 * might not be exactly the passed in saddr which
	 * is a /128 addr from the flow.
	 * So we need to use f6i->fib6_src to redo lookup
	 * if the passed in saddr does not find anything.
	 * (See the logic in ip6_rt_cache_alloc() on how
	 * rt->rt6i_src is updated.)
	 */
	if (res->f6i->fib6_src.plen)
#endif
	bucket = fib6_nh_get_excptn_bucket(res->nh, NULL);
	rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);

	if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))

#ifdef CONFIG_IPV6_SUBTREES
	/* Use fib6_src as src_key and redo lookup */
	if (!ret && src_key && src_key != &res->f6i->fib6_src.addr) {
		src_key = &res->f6i->fib6_src.addr;
#endif

/* Remove the passed-in cached rt from the hash table that contains it */
static int fib6_nh_remove_exception(const struct fib6_nh *nh, int plen,
				    const struct rt6_info *rt)
	const struct in6_addr *src_key = NULL;
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;

	if (!rcu_access_pointer(nh->rt6i_exception_bucket))

	spin_lock_bh(&rt6_exception_lock);
	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);

#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates 'from' is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
	if (plen)
		src_key = &rt->rt6i_src.addr;
#endif
	rt6_ex = __rt6_find_exception_spinlock(&bucket,
					       &rt->rt6i_dst.addr,
					       src_key);

		rt6_remove_exception(bucket, rt6_ex);

	spin_unlock_bh(&rt6_exception_lock);

struct fib6_nh_excptn_arg {
	struct rt6_info *rt;
	int plen;
};

static int rt6_nh_remove_exception_rt(struct fib6_nh *nh, void *_arg)
	struct fib6_nh_excptn_arg *arg = _arg;

	err = fib6_nh_remove_exception(nh, arg->plen, arg->rt);

static int rt6_remove_exception_rt(struct rt6_info *rt)
	struct fib6_info *from;

	from = rcu_dereference(rt->from);
	if (!from || !(rt->rt6i_flags & RTF_CACHE))

		struct fib6_nh_excptn_arg arg = {
			.plen = from->fib6_src.plen
		};

		/* rc = 1 means an entry was found */
		rc = nexthop_for_each_fib6_nh(from->nh,
					      rt6_nh_remove_exception_rt,
					      &arg);
		return rc ? 0 : -ENOENT;

	return fib6_nh_remove_exception(from->fib6_nh,
					from->fib6_src.plen, rt);

/* Find rt6_ex which contains the passed in rt cache and
 * refresh its stamp
 */
static void fib6_nh_update_exception(const struct fib6_nh *nh, int plen,
				     const struct rt6_info *rt)
	const struct in6_addr *src_key = NULL;
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;

	bucket = fib6_nh_get_excptn_bucket(nh, NULL);
#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates 'from' is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
	if (plen)
		src_key = &rt->rt6i_src.addr;
#endif
	rt6_ex = __rt6_find_exception_rcu(&bucket, &rt->rt6i_dst.addr, src_key);

		rt6_ex->stamp = jiffies;

struct fib6_nh_match_arg {
	const struct net_device *dev;
	const struct in6_addr	*gw;
	struct fib6_nh		*match;
};

/* determine if fib6_nh has given device and gateway */
static int fib6_nh_find_match(struct fib6_nh *nh, void *_arg)
	struct fib6_nh_match_arg *arg = _arg;

	if (arg->dev != nh->fib_nh_dev ||
	    (arg->gw && !nh->fib_nh_gw_family) ||
	    (!arg->gw && nh->fib_nh_gw_family) ||
	    (arg->gw && !ipv6_addr_equal(arg->gw, &nh->fib_nh_gw6)))

	/* found a match, break the loop */

static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
	struct fib6_info *from;
	struct fib6_nh *fib6_nh;

	from = rcu_dereference(rt->from);
	if (!from || !(rt->rt6i_flags & RTF_CACHE))

		struct fib6_nh_match_arg arg = {
			.gw = &rt->rt6i_gateway,
		};

		nexthop_for_each_fib6_nh(from->nh, fib6_nh_find_match, &arg);

		fib6_nh = arg.match;

	fib6_nh = from->fib6_nh;

	fib6_nh_update_exception(fib6_nh, from->fib6_src.plen, rt);

static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
					 struct rt6_info *rt, int mtu)
	/* If the new MTU is lower than the route PMTU, this new MTU will be the
	 * lowest MTU in the path: always allow updating the route PMTU to
	 * reflect PMTU decreases.
	 *
	 * If the new MTU is higher, and the route PMTU is equal to the local
	 * MTU, this means the old MTU is the lowest in the path, so allow
	 * updating it: if other nodes now have lower MTUs, PMTU discovery will
	 * lower it again as needed.
	 */
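	/* Example: if the link MTU drops from 1500 to 1400, any cached
	 * PMTU of 1400 or more may be lowered (first test below); if the
	 * link later returns to 1500, only entries whose PMTU equals the
	 * local MTU are raised (second test), since a smaller value was
	 * learned from a remote hop and must stand.
	 */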
	if (dst_mtu(&rt->dst) >= mtu)

	if (dst_mtu(&rt->dst) == idev->cnf.mtu6)

static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
				       const struct fib6_nh *nh, int mtu)
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;

	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);

	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
		hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
			struct rt6_info *entry = rt6_ex->rt6i;

			/* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
			 * route), the metrics of its rt->from have already
			 * been updated.
			 */
			if (dst_metric_raw(&entry->dst, RTAX_MTU) &&
			    rt6_mtu_change_route_allowed(idev, entry, mtu))
				dst_metric_set(&entry->dst, RTAX_MTU, mtu);

#define RTF_CACHE_GATEWAY	(RTF_GATEWAY | RTF_CACHE)

static void fib6_nh_exceptions_clean_tohost(const struct fib6_nh *nh,
					    const struct in6_addr *gateway)
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;

	if (!rcu_access_pointer(nh->rt6i_exception_bucket))

	spin_lock_bh(&rt6_exception_lock);
	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);

	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
		hlist_for_each_entry_safe(rt6_ex, tmp,
					  &bucket->chain, hlist) {
			struct rt6_info *entry = rt6_ex->rt6i;

			if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
			    RTF_CACHE_GATEWAY &&
			    ipv6_addr_equal(gateway,
					    &entry->rt6i_gateway)) {
				rt6_remove_exception(bucket, rt6_ex);

	spin_unlock_bh(&rt6_exception_lock);

static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
				      struct rt6_exception *rt6_ex,
				      struct fib6_gc_args *gc_args,
				      unsigned long now)
	struct rt6_info *rt = rt6_ex->rt6i;

	/* we are pruning and obsoleting aged-out and non-gateway exceptions
	 * even if others have still references to them, so that on next
	 * dst_check() such references can be dropped.
	 * EXPIRES exceptions - e.g. pmtu-generated ones are pruned when
	 * expired, independently from their aging, as per RFC 8201 section 4
	 */
	if (!(rt->rt6i_flags & RTF_EXPIRES)) {
		if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
			RT6_TRACE("aging clone %p\n", rt);
			rt6_remove_exception(bucket, rt6_ex);
	} else if (time_after(jiffies, rt->dst.expires)) {
		RT6_TRACE("purging expired route %p\n", rt);
		rt6_remove_exception(bucket, rt6_ex);

	if (rt->rt6i_flags & RTF_GATEWAY) {
		struct neighbour *neigh;
		__u8 neigh_flags = 0;

		neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
			neigh_flags = neigh->flags;

		if (!(neigh_flags & NTF_ROUTER)) {
			RT6_TRACE("purging route %p via non-router but gateway\n",
				  rt);
			rt6_remove_exception(bucket, rt6_ex);

static void fib6_nh_age_exceptions(const struct fib6_nh *nh,
				   struct fib6_gc_args *gc_args,
				   unsigned long now)
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;

	if (!rcu_access_pointer(nh->rt6i_exception_bucket))

	spin_lock(&rt6_exception_lock);
	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);

	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
		hlist_for_each_entry_safe(rt6_ex, tmp,
					  &bucket->chain, hlist) {
			rt6_age_examine_exception(bucket, rt6_ex,
						  gc_args, now);

	spin_unlock(&rt6_exception_lock);
	rcu_read_unlock_bh();

struct fib6_nh_age_excptn_arg {
	struct fib6_gc_args	*gc_args;
	unsigned long		now;
};

static int rt6_nh_age_exceptions(struct fib6_nh *nh, void *_arg)
	struct fib6_nh_age_excptn_arg *arg = _arg;

	fib6_nh_age_exceptions(nh, arg->gc_args, arg->now);

void rt6_age_exceptions(struct fib6_info *f6i,
			struct fib6_gc_args *gc_args,
			unsigned long now)
	struct fib6_nh_age_excptn_arg arg = {

	nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_age_exceptions,
				 &arg);

	fib6_nh_age_exceptions(f6i->fib6_nh, gc_args, now);

/* must be called with rcu lock held */
int fib6_table_lookup(struct net *net, struct fib6_table *table, int oif,
		      struct flowi6 *fl6, struct fib6_result *res, int strict)
	struct fib6_node *fn, *saved_fn;

	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);

	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)

redo_rt6_select:
	rt6_select(net, fn, oif, res, strict);
	if (res->f6i == net->ipv6.fib6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto redo_rt6_select;
		else if (strict & RT6_LOOKUP_F_REACHABLE) {
			/* also consider unreachable route */
			strict &= ~RT6_LOOKUP_F_REACHABLE;
			goto redo_rt6_select;

	trace_fib6_table_lookup(net, res, table, fl6);
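
/* ip6_pol_route() below hands back one of three dst flavours, in order of
 * preference: a hit in the nexthop's exception table, an uncached
 * RTF_CACHE clone for the FLOWI_FLAG_KNOWN_NH case, or the per-CPU copy
 * of the fib6 entry (created on demand by rt6_make_pcpu_route()).
 */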
struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
			       int oif, struct flowi6 *fl6,
			       const struct sk_buff *skb, int flags)
	struct fib6_result res = {};
	struct rt6_info *rt = NULL;

	WARN_ON_ONCE((flags & RT6_LOOKUP_F_DST_NOREF) &&
		     !rcu_read_lock_held());

	strict |= flags & RT6_LOOKUP_F_IFACE;
	strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
	if (net->ipv6.devconf_all->forwarding == 0)
		strict |= RT6_LOOKUP_F_REACHABLE;

	fib6_table_lookup(net, table, oif, fl6, &res, strict);
	if (res.f6i == net->ipv6.fib6_null_entry)

	fib6_select_path(net, &res, fl6, oif, false, skb, strict);

	/* Search through exception table */
	rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);

	} else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
			    !res.nh->fib_nh_gw_family)) {
		/* Create an RTF_CACHE clone which will not be
		 * owned by the fib6 tree.  It is for the special case where
		 * the daddr in the skb during the neighbor look-up is different
		 * from the fl6->daddr used to look-up route here.
		 */
		rt = ip6_rt_cache_alloc(&res, &fl6->daddr, NULL);

			/* 1 refcnt is taken during ip6_rt_cache_alloc().
			 * As rt6_uncached_list_add() does not consume refcnt,
			 * this refcnt is always returned to the caller even
			 * if caller sets RT6_LOOKUP_F_DST_NOREF flag.
			 */
			rt6_uncached_list_add(rt);
			atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);

		/* Get a percpu copy */
		rt = rt6_get_pcpu_route(&res);

			rt = rt6_make_pcpu_route(net, &res);

	rt = net->ipv6.ip6_null_entry;
	if (!(flags & RT6_LOOKUP_F_DST_NOREF))
		ip6_hold_safe(net, &rt);

EXPORT_SYMBOL_GPL(ip6_pol_route);

INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_input(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
	return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags);

struct dst_entry *ip6_route_input_lookup(struct net *net,
					 struct net_device *dev,
					 struct flowi6 *fl6,
					 const struct sk_buff *skb,
					 int flags)
	if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
		flags |= RT6_LOOKUP_F_IFACE;

	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input);
EXPORT_SYMBOL_GPL(ip6_route_input_lookup);

static void ip6_multipath_l3_keys(const struct sk_buff *skb,
				  struct flow_keys *keys,
				  struct flow_keys *flkeys)
	const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
	const struct ipv6hdr *key_iph = outer_iph;
	struct flow_keys *_flkeys = flkeys;
	const struct ipv6hdr *inner_iph;
	const struct icmp6hdr *icmph;
	struct ipv6hdr _inner_iph;
	struct icmp6hdr _icmph;

	if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))

	icmph = skb_header_pointer(skb, skb_transport_offset(skb),
				   sizeof(_icmph), &_icmph);

	if (!icmpv6_is_err(icmph->icmp6_type))

	inner_iph = skb_header_pointer(skb,
				       skb_transport_offset(skb) + sizeof(*icmph),
				       sizeof(_inner_iph), &_inner_iph);

	key_iph = inner_iph;

		keys->addrs.v6addrs.src = _flkeys->addrs.v6addrs.src;
		keys->addrs.v6addrs.dst = _flkeys->addrs.v6addrs.dst;
		keys->tags.flow_label = _flkeys->tags.flow_label;
		keys->basic.ip_proto = _flkeys->basic.ip_proto;

		keys->addrs.v6addrs.src = key_iph->saddr;
		keys->addrs.v6addrs.dst = key_iph->daddr;
		keys->tags.flow_label = ip6_flowlabel(key_iph);
		keys->basic.ip_proto = key_iph->nexthdr;

/* if skb is set it will be used and fl6 can be NULL */
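/* Policy selection (the net.ipv6.fib_multipath_hash_policy sysctl):
 * 0 hashes the outer L3 fields (addresses, flow label, next header),
 * 1 hashes the L4 five-tuple, and 2 prefers the flow-dissected inner
 * headers of encapsulated packets, falling back to outer L3.  E.g.:
 *
 *	sysctl -w net.ipv6.fib_multipath_hash_policy=1
 */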
2342 u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
2343 const struct sk_buff *skb, struct flow_keys *flkeys)
2345 struct flow_keys hash_keys;
2348 switch (ip6_multipath_hash_policy(net)) {
2350 memset(&hash_keys, 0, sizeof(hash_keys));
2351 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2353 ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
2355 hash_keys.addrs.v6addrs.src = fl6->saddr;
2356 hash_keys.addrs.v6addrs.dst = fl6->daddr;
2357 hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
2358 hash_keys.basic.ip_proto = fl6->flowi6_proto;
2363 unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
2364 struct flow_keys keys;
2366 /* short-circuit if we already have L4 hash present */
2368 return skb_get_hash_raw(skb) >> 1;
2370 memset(&hash_keys, 0, sizeof(hash_keys));
2373 skb_flow_dissect_flow_keys(skb, &keys, flag);
2376 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2377 hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
2378 hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
2379 hash_keys.ports.src = flkeys->ports.src;
2380 hash_keys.ports.dst = flkeys->ports.dst;
2381 hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2383 memset(&hash_keys, 0, sizeof(hash_keys));
2384 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2385 hash_keys.addrs.v6addrs.src = fl6->saddr;
2386 hash_keys.addrs.v6addrs.dst = fl6->daddr;
2387 hash_keys.ports.src = fl6->fl6_sport;
2388 hash_keys.ports.dst = fl6->fl6_dport;
2389 hash_keys.basic.ip_proto = fl6->flowi6_proto;
2393 memset(&hash_keys, 0, sizeof(hash_keys));
2394 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2396 struct flow_keys keys;
2399 skb_flow_dissect_flow_keys(skb, &keys, 0);
2403 /* Inner can be v4 or v6 */
2404 if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2405 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2406 hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
2407 hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
2408 } else if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2409 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2410 hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
2411 hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
2412 hash_keys.tags.flow_label = flkeys->tags.flow_label;
2413 hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2415 /* Same as case 0 */
2416 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2417 ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
2420 /* Same as case 0 */
2421 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2422 hash_keys.addrs.v6addrs.src = fl6->saddr;
2423 hash_keys.addrs.v6addrs.dst = fl6->daddr;
2424 hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
2425 hash_keys.basic.ip_proto = fl6->flowi6_proto;
2429 mhash = flow_hash_from_keys(&hash_keys);
2434 /* Called with rcu held */
2435 void ip6_route_input(struct sk_buff *skb)
2437 const struct ipv6hdr *iph = ipv6_hdr(skb);
2438 struct net *net = dev_net(skb->dev);
2439 int flags = RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_DST_NOREF;
2440 struct ip_tunnel_info *tun_info;
2441 struct flowi6 fl6 = {
2442 .flowi6_iif = skb->dev->ifindex,
2443 .daddr = iph->daddr,
2444 .saddr = iph->saddr,
2445 .flowlabel = ip6_flowinfo(iph),
2446 .flowi6_mark = skb->mark,
2447 .flowi6_proto = iph->nexthdr,
2449 struct flow_keys *flkeys = NULL, _flkeys;
2451 tun_info = skb_tunnel_info(skb);
2452 if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
2453 fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
2455 if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys))
2458 if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
2459 fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys);
2461 skb_dst_set_noref(skb, ip6_route_input_lookup(net, skb->dev,
2465 INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_output(struct net *net,
2466 struct fib6_table *table,
2468 const struct sk_buff *skb,
2471 return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags);
2474 struct dst_entry *ip6_route_output_flags_noref(struct net *net,
2475 const struct sock *sk,
2476 struct flowi6 *fl6, int flags)
2480 if (ipv6_addr_type(&fl6->daddr) &
2481 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)) {
2482 struct dst_entry *dst;
2484 /* This function does not take refcnt on the dst */
2485 dst = l3mdev_link_scope_lookup(net, fl6);
2490 fl6->flowi6_iif = LOOPBACK_IFINDEX;
2492 flags |= RT6_LOOKUP_F_DST_NOREF;
2493 any_src = ipv6_addr_any(&fl6->saddr);
2494 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
2495 (fl6->flowi6_oif && any_src))
2496 flags |= RT6_LOOKUP_F_IFACE;
2499 flags |= RT6_LOOKUP_F_HAS_SADDR;
2501 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
2503 return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output);
2505 EXPORT_SYMBOL_GPL(ip6_route_output_flags_noref);
2507 struct dst_entry *ip6_route_output_flags(struct net *net,
2508 const struct sock *sk,
2512 struct dst_entry *dst;
2513 struct rt6_info *rt6;
2516 dst = ip6_route_output_flags_noref(net, sk, fl6, flags);
2517 rt6 = (struct rt6_info *)dst;
2518 /* For dst cached in uncached_list, refcnt is already taken. */
2519 if (list_empty(&rt6->rt6i_uncached) && !dst_hold_safe(dst)) {
2520 dst = &net->ipv6.ip6_null_entry->dst;
2527 EXPORT_SYMBOL_GPL(ip6_route_output_flags);
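/* Clone @dst_orig into a blackhole entry on the loopback device:
 * metrics and addresses are copied from the original, but input and
 * output are pointed at dst_discard*. The reference on @dst_orig is
 * always released.
 */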
2529 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2531 struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
2532 struct net_device *loopback_dev = net->loopback_dev;
2533 struct dst_entry *new = NULL;
2535 rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
2536 DST_OBSOLETE_DEAD, 0);
2539 atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
2543 new->input = dst_discard;
2544 new->output = dst_discard_out;
2546 dst_copy_metrics(new, &ort->dst);
2548 rt->rt6i_idev = in6_dev_get(loopback_dev);
2549 rt->rt6i_gateway = ort->rt6i_gateway;
2550 rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
2552 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
2553 #ifdef CONFIG_IPV6_SUBTREES
2554 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
2558 dst_release(dst_orig);
2559 return new ? new : ERR_PTR(-ENOMEM);
2563 * Destination cache support functions
2566 static bool fib6_check(struct fib6_info *f6i, u32 cookie)
2570 if (!fib6_get_cookie_safe(f6i, &rt_cookie) || rt_cookie != cookie)
2573 if (fib6_check_expired(f6i))
2579 static struct dst_entry *rt6_check(struct rt6_info *rt,
2580 struct fib6_info *from,
2585 if (!from || !fib6_get_cookie_safe(from, &rt_cookie) ||
2586 rt_cookie != cookie)
2589 if (rt6_check_expired(rt))
2595 static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt,
2596 struct fib6_info *from,
2599 if (!__rt6_check_expired(rt) &&
2600 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
2601 fib6_check(from, cookie))
2607 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
2609 struct dst_entry *dst_ret;
2610 struct fib6_info *from;
2611 struct rt6_info *rt;
2613 rt = container_of(dst, struct rt6_info, dst);
2616 return rt6_is_valid(rt) ? dst : NULL;
2620 /* All IPv6 dsts are created with ->obsolete set to the value
2621 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
2622 * into this function always.
2625 from = rcu_dereference(rt->from);
2627 if (from && (rt->rt6i_flags & RTF_PCPU ||
2628 unlikely(!list_empty(&rt->rt6i_uncached))))
2629 dst_ret = rt6_dst_from_check(rt, from, cookie);
2631 dst_ret = rt6_check(rt, from, cookie);
2638 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
2640 struct rt6_info *rt = (struct rt6_info *) dst;
2643 if (rt->rt6i_flags & RTF_CACHE) {
2645 if (rt6_check_expired(rt)) {
2646 rt6_remove_exception_rt(rt);
2658 static void ip6_link_failure(struct sk_buff *skb)
2660 struct rt6_info *rt;
2662 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
2664 rt = (struct rt6_info *) skb_dst(skb);
2667 if (rt->rt6i_flags & RTF_CACHE) {
2668 rt6_remove_exception_rt(rt);
2670 struct fib6_info *from;
2671 struct fib6_node *fn;
2673 from = rcu_dereference(rt->from);
2675 fn = rcu_dereference(from->fib6_node);
2676 if (fn && (rt->rt6i_flags & RTF_DEFAULT))
2677 WRITE_ONCE(fn->fn_sernum, -1);
2684 static void rt6_update_expires(struct rt6_info *rt0, int timeout)
2686 if (!(rt0->rt6i_flags & RTF_EXPIRES)) {
2687 struct fib6_info *from;
2690 from = rcu_dereference(rt0->from);
2692 rt0->dst.expires = from->expires;
2696 dst_set_expires(&rt0->dst, timeout);
2697 rt0->rt6i_flags |= RTF_EXPIRES;
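/* Record a freshly learned path MTU on a (cached) route and arm its
 * RTF_EXPIRES timer using the ip6_rt_mtu_expires sysctl.
 */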
2700 static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
2702 struct net *net = dev_net(rt->dst.dev);
2704 dst_metric_set(&rt->dst, RTAX_MTU, mtu);
2705 rt->rt6i_flags |= RTF_MODIFIED;
2706 rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
2709 static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
2711 return !(rt->rt6i_flags & RTF_CACHE) &&
2712 (rt->rt6i_flags & RTF_PCPU || rcu_access_pointer(rt->from));
2715 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
2716 const struct ipv6hdr *iph, u32 mtu,
2719 const struct in6_addr *daddr, *saddr;
2720 struct rt6_info *rt6 = (struct rt6_info *)dst;
2722 /* Note: do *NOT* check dst_metric_locked(dst, RTAX_MTU)
2723 * IPv6 pmtu discovery isn't optional, so 'mtu lock' cannot disable it.
2724 * [see also comment in rt6_mtu_change_route()]
2728 daddr = &iph->daddr;
2729 saddr = &iph->saddr;
2731 daddr = &sk->sk_v6_daddr;
2732 saddr = &inet6_sk(sk)->saddr;
2739 dst_confirm_neigh(dst, daddr);
2741 if (mtu < IPV6_MIN_MTU)
2743 if (mtu >= dst_mtu(dst))
2746 if (!rt6_cache_allowed_for_pmtu(rt6)) {
2747 rt6_do_update_pmtu(rt6, mtu);
2748 /* update rt6_ex->stamp for cache */
2749 if (rt6->rt6i_flags & RTF_CACHE)
2750 rt6_update_exception_stamp_rt(rt6);
2752 struct fib6_result res = {};
2753 struct rt6_info *nrt6;
2756 res.f6i = rcu_dereference(rt6->from);
2760 res.fib6_flags = res.f6i->fib6_flags;
2761 res.fib6_type = res.f6i->fib6_type;
2764 struct fib6_nh_match_arg arg = {
2766 .gw = &rt6->rt6i_gateway,
2769 nexthop_for_each_fib6_nh(res.f6i->nh,
2770 fib6_nh_find_match, &arg);
2772 /* fib6_info uses a nexthop that does not have a fib6_nh
2773 * using the dst->dev + gw. Should be impossible.
2780 res.nh = res.f6i->fib6_nh;
2783 nrt6 = ip6_rt_cache_alloc(&res, daddr, saddr);
2785 rt6_do_update_pmtu(nrt6, mtu);
2786 if (rt6_insert_exception(nrt6, &res))
2787 dst_release_immediate(&nrt6->dst);
2794 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
2795 struct sk_buff *skb, u32 mtu,
2798 __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu,
2802 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
2803 int oif, u32 mark, kuid_t uid)
2805 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
2806 struct dst_entry *dst;
2807 struct flowi6 fl6 = {
2809 .flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark),
2810 .daddr = iph->daddr,
2811 .saddr = iph->saddr,
2812 .flowlabel = ip6_flowinfo(iph),
2816 dst = ip6_route_output(net, NULL, &fl6);
2818 __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu), true);
2821 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
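/* Socket variant of ip6_update_pmtu(): derives oif, mark and uid from
 * @sk and re-validates the socket's cached dst afterwards.
 */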
2823 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
2825 int oif = sk->sk_bound_dev_if;
2826 struct dst_entry *dst;
2828 if (!oif && skb->dev)
2829 oif = l3mdev_master_ifindex(skb->dev);
2831 ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);
2833 dst = __sk_dst_get(sk);
2834 if (!dst || !dst->obsolete ||
2835 dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
2839 if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
2840 ip6_datagram_dst_update(sk, false);
2843 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
2845 void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
2846 const struct flowi6 *fl6)
2848 #ifdef CONFIG_IPV6_SUBTREES
2849 struct ipv6_pinfo *np = inet6_sk(sk);
2852 ip6_dst_store(sk, dst,
2853 ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr) ?
2854 &sk->sk_v6_daddr : NULL,
2855 #ifdef CONFIG_IPV6_SUBTREES
2856 ipv6_addr_equal(&fl6->saddr, &np->saddr) ?
2862 static bool ip6_redirect_nh_match(const struct fib6_result *res,
2864 const struct in6_addr *gw,
2865 struct rt6_info **ret)
2867 const struct fib6_nh *nh = res->nh;
2869 if (nh->fib_nh_flags & RTNH_F_DEAD || !nh->fib_nh_gw_family ||
2870 fl6->flowi6_oif != nh->fib_nh_dev->ifindex)
2873 /* rt_cache's gateway might be different from its 'parent'
2874 * in the case of an IP redirect.
2875 * So we keep searching in the exception table if the gateway
2876 * is different.
2878 if (!ipv6_addr_equal(gw, &nh->fib_nh_gw6)) {
2879 struct rt6_info *rt_cache;
2881 rt_cache = rt6_find_cached_rt(res, &fl6->daddr, &fl6->saddr);
2883 ipv6_addr_equal(gw, &rt_cache->rt6i_gateway)) {
2892 struct fib6_nh_rd_arg {
2893 struct fib6_result *res;
2895 const struct in6_addr *gw;
2896 struct rt6_info **ret;
2899 static int fib6_nh_redirect_match(struct fib6_nh *nh, void *_arg)
2901 struct fib6_nh_rd_arg *arg = _arg;
2904 return ip6_redirect_nh_match(arg->res, arg->fl6, arg->gw, arg->ret);
2907 /* Handle redirects */
2908 struct ip6rd_flowi {
2910 struct in6_addr gateway;
2913 INDIRECT_CALLABLE_SCOPE struct rt6_info *__ip6_route_redirect(struct net *net,
2914 struct fib6_table *table,
2916 const struct sk_buff *skb,
2919 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
2920 struct rt6_info *ret = NULL;
2921 struct fib6_result res = {};
2922 struct fib6_nh_rd_arg arg = {
2925 .gw = &rdfl->gateway,
2928 struct fib6_info *rt;
2929 struct fib6_node *fn;
2931 /* l3mdev_update_flow overrides oif if the device is enslaved; in
2932 * this case we must match on the real ingress device, so reset it
2934 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
2935 fl6->flowi6_oif = skb->dev->ifindex;
2937 /* Get the "current" route for this destination and
2938 * check if the redirect has come from an appropriate router.
2940 * RFC 4861 specifies that redirects should only be
2941 * accepted if they come from the nexthop to the target.
2942 * Due to the way the routes are chosen, this notion
2943 * is a bit fuzzy and one might need to check all possible
2944 * routers.
2948 fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
2950 for_each_fib6_node_rt_rcu(fn) {
2952 if (fib6_check_expired(rt))
2954 if (rt->fib6_flags & RTF_REJECT)
2956 if (unlikely(rt->nh)) {
2957 if (nexthop_is_blackhole(rt->nh))
2959 /* on match, res->nh is filled in and potentially ret */
2960 if (nexthop_for_each_fib6_nh(rt->nh,
2961 fib6_nh_redirect_match,
2965 res.nh = rt->fib6_nh;
2966 if (ip6_redirect_nh_match(&res, fl6, &rdfl->gateway,
2973 rt = net->ipv6.fib6_null_entry;
2974 else if (rt->fib6_flags & RTF_REJECT) {
2975 ret = net->ipv6.ip6_null_entry;
2979 if (rt == net->ipv6.fib6_null_entry) {
2980 fn = fib6_backtrack(fn, &fl6->saddr);
2986 res.nh = rt->fib6_nh;
2989 ip6_hold_safe(net, &ret);
2991 res.fib6_flags = res.f6i->fib6_flags;
2992 res.fib6_type = res.f6i->fib6_type;
2993 ret = ip6_create_rt_rcu(&res);
2998 trace_fib6_table_lookup(net, &res, table, fl6);
3002 static struct dst_entry *ip6_route_redirect(struct net *net,
3003 const struct flowi6 *fl6,
3004 const struct sk_buff *skb,
3005 const struct in6_addr *gateway)
3007 int flags = RT6_LOOKUP_F_HAS_SADDR;
3008 struct ip6rd_flowi rdfl;
3011 rdfl.gateway = *gateway;
3013 return fib6_rule_lookup(net, &rdfl.fl6, skb,
3014 flags, __ip6_route_redirect);
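/* Apply a received Redirect to the flow described by the packet in
 * @skb: look up the affected route and update it via rt6_do_redirect().
 */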
3017 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
3020 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
3021 struct dst_entry *dst;
3022 struct flowi6 fl6 = {
3023 .flowi6_iif = LOOPBACK_IFINDEX,
3025 .flowi6_mark = mark,
3026 .daddr = iph->daddr,
3027 .saddr = iph->saddr,
3028 .flowlabel = ip6_flowinfo(iph),
3032 dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr);
3033 rt6_do_redirect(dst, NULL, skb);
3036 EXPORT_SYMBOL_GPL(ip6_redirect);
3038 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif)
3040 const struct ipv6hdr *iph = ipv6_hdr(skb);
3041 const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
3042 struct dst_entry *dst;
3043 struct flowi6 fl6 = {
3044 .flowi6_iif = LOOPBACK_IFINDEX,
3047 .saddr = iph->daddr,
3048 .flowi6_uid = sock_net_uid(net, NULL),
3051 dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr);
3052 rt6_do_redirect(dst, NULL, skb);
3056 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
3058 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
3061 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
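/* Advertised MSS for this dst: path MTU minus IPv6 and TCP headers,
 * clamped from below by the ip6_rt_min_advmss sysctl and from above
 * by the maximal non-jumbo payload.
 */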
3063 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
3065 struct net_device *dev = dst->dev;
3066 unsigned int mtu = dst_mtu(dst);
3067 struct net *net = dev_net(dev);
3069 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
3071 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
3072 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
3075 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
3076 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
3077 * IPV6_MAXPLEN is also valid and means: "any MSS,
3078 * rely only on pmtu discovery"
3080 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
3085 static unsigned int ip6_mtu(const struct dst_entry *dst)
3087 struct inet6_dev *idev;
3090 mtu = dst_metric_raw(dst, RTAX_MTU);
3097 idev = __in6_dev_get(dst->dev);
3099 mtu = idev->cnf.mtu6;
3103 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
3105 return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
3108 /* MTU selection:
3109 * 1. mtu on route is locked - use it
3110 * 2. mtu from nexthop exception
3111 * 3. mtu from egress device
3113 * based on ip6_dst_mtu_forward and exception logic of
3114 * rt6_find_cached_rt; called with rcu_read_lock
3116 u32 ip6_mtu_from_fib6(const struct fib6_result *res,
3117 const struct in6_addr *daddr,
3118 const struct in6_addr *saddr)
3120 const struct fib6_nh *nh = res->nh;
3121 struct fib6_info *f6i = res->f6i;
3122 struct inet6_dev *idev;
3123 struct rt6_info *rt;
3126 if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) {
3127 mtu = f6i->fib6_pmtu;
3132 rt = rt6_find_cached_rt(res, daddr, saddr);
3134 mtu = dst_metric_raw(&rt->dst, RTAX_MTU);
3136 struct net_device *dev = nh->fib_nh_dev;
3139 idev = __in6_dev_get(dev);
3140 if (idev && idev->cnf.mtu6 > mtu)
3141 mtu = idev->cnf.mtu6;
3144 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
3146 return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
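/* Allocate an uncached dst for an outgoing ICMPv6 packet. The route
 * is put on the uncached list so rt6_disable_ip() can release the
 * device reference, then passed through xfrm_lookup().
 */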
3149 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
3152 struct dst_entry *dst;
3153 struct rt6_info *rt;
3154 struct inet6_dev *idev = in6_dev_get(dev);
3155 struct net *net = dev_net(dev);
3157 if (unlikely(!idev))
3158 return ERR_PTR(-ENODEV);
3160 rt = ip6_dst_alloc(net, dev, 0);
3161 if (unlikely(!rt)) {
3163 dst = ERR_PTR(-ENOMEM);
3167 rt->dst.input = ip6_input;
3168 rt->dst.output = ip6_output;
3169 rt->rt6i_gateway = fl6->daddr;
3170 rt->rt6i_dst.addr = fl6->daddr;
3171 rt->rt6i_dst.plen = 128;
3172 rt->rt6i_idev = idev;
3173 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
3175 /* Add this dst into uncached_list so that rt6_disable_ip() can
3176 * do proper release of the net_device
3178 rt6_uncached_list_add(rt);
3179 atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
3181 dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
3187 static void ip6_dst_gc(struct dst_ops *ops)
3189 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
3190 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
3191 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
3192 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
3193 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
3197 entries = dst_entries_get_fast(ops);
3198 if (entries > ops->gc_thresh)
3199 entries = dst_entries_get_slow(ops);
3201 if (time_after(rt_last_gc + rt_min_interval, jiffies))
3204 fib6_run_gc(atomic_inc_return(&net->ipv6.ip6_rt_gc_expire), net, true);
3205 entries = dst_entries_get_slow(ops);
3206 if (entries < ops->gc_thresh)
3207 atomic_set(&net->ipv6.ip6_rt_gc_expire, rt_gc_timeout >> 1);
3209 val = atomic_read(&net->ipv6.ip6_rt_gc_expire);
3210 atomic_set(&net->ipv6.ip6_rt_gc_expire, val - (val >> rt_elasticity));
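/* Look up the gateway of a new route in table @tbid; used when
 * validating a nexthop to ensure the gateway itself is reachable.
 */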
3213 static int ip6_nh_lookup_table(struct net *net, struct fib6_config *cfg,
3214 const struct in6_addr *gw_addr, u32 tbid,
3215 int flags, struct fib6_result *res)
3217 struct flowi6 fl6 = {
3218 .flowi6_oif = cfg->fc_ifindex,
3220 .saddr = cfg->fc_prefsrc,
3222 struct fib6_table *table;
3225 table = fib6_get_table(net, tbid);
3229 if (!ipv6_addr_any(&cfg->fc_prefsrc))
3230 flags |= RT6_LOOKUP_F_HAS_SADDR;
3232 flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE;
3234 err = fib6_table_lookup(net, table, cfg->fc_ifindex, &fl6, res, flags);
3235 if (!err && res->f6i != net->ipv6.fib6_null_entry)
3236 fib6_select_path(net, res, &fl6, cfg->fc_ifindex,
3237 cfg->fc_ifindex != 0, NULL, flags);
3242 static int ip6_route_check_nh_onlink(struct net *net,
3243 struct fib6_config *cfg,
3244 const struct net_device *dev,
3245 struct netlink_ext_ack *extack)
3247 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
3248 const struct in6_addr *gw_addr = &cfg->fc_gateway;
3249 struct fib6_result res = {};
3252 err = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0, &res);
3253 if (!err && !(res.fib6_flags & RTF_REJECT) &&
3254 /* ignore match if it is the default route */
3255 !ipv6_addr_any(&res.f6i->fib6_dst.addr) &&
3256 (res.fib6_type != RTN_UNICAST || dev != res.nh->fib_nh_dev)) {
3257 NL_SET_ERR_MSG(extack,
3258 "Nexthop has invalid gateway or device mismatch");
3265 static int ip6_route_check_nh(struct net *net,
3266 struct fib6_config *cfg,
3267 struct net_device **_dev,
3268 struct inet6_dev **idev)
3270 const struct in6_addr *gw_addr = &cfg->fc_gateway;
3271 struct net_device *dev = _dev ? *_dev : NULL;
3272 int flags = RT6_LOOKUP_F_IFACE;
3273 struct fib6_result res = {};
3274 int err = -EHOSTUNREACH;
3276 if (cfg->fc_table) {
3277 err = ip6_nh_lookup_table(net, cfg, gw_addr,
3278 cfg->fc_table, flags, &res);
3279 /* The route to gw_addr must not itself require a gateway or
3280 * resolve to a reject route. If a device is given, it must match the result.
3282 if (err || res.fib6_flags & RTF_REJECT ||
3283 res.nh->fib_nh_gw_family ||
3284 (dev && dev != res.nh->fib_nh_dev))
3285 err = -EHOSTUNREACH;
3289 struct flowi6 fl6 = {
3290 .flowi6_oif = cfg->fc_ifindex,
3294 err = fib6_lookup(net, cfg->fc_ifindex, &fl6, &res, flags);
3295 if (err || res.fib6_flags & RTF_REJECT ||
3296 res.nh->fib_nh_gw_family)
3297 err = -EHOSTUNREACH;
3302 fib6_select_path(net, &res, &fl6, cfg->fc_ifindex,
3303 cfg->fc_ifindex != 0, NULL, flags);
3308 if (dev != res.nh->fib_nh_dev)
3309 err = -EHOSTUNREACH;
3311 *_dev = dev = res.nh->fib_nh_dev;
3313 *idev = in6_dev_get(dev);
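/* Validate the gateway of a new route: it may not be a local address,
 * must normally be link-local (IPv4-mapped nexthops excepted), and the
 * egress device must resolve to a usable, non-loopback device.
 */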
3319 static int ip6_validate_gw(struct net *net, struct fib6_config *cfg,
3320 struct net_device **_dev, struct inet6_dev **idev,
3321 struct netlink_ext_ack *extack)
3323 const struct in6_addr *gw_addr = &cfg->fc_gateway;
3324 int gwa_type = ipv6_addr_type(gw_addr);
3325 bool skip_dev = gwa_type & IPV6_ADDR_LINKLOCAL ? false : true;
3326 const struct net_device *dev = *_dev;
3327 bool need_addr_check = !dev;
3330 /* if gw_addr is local we will fail to detect this in case
3331 * address is still TENTATIVE (DAD in progress). rt6_lookup()
3332 * will return already-added prefix route via interface that
3333 * prefix route was assigned to, which might be non-loopback.
3336 ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
3337 NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
3341 if (gwa_type != (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST)) {
3342 /* IPv6 strictly prohibits using non-link-local
3343 * addresses as nexthop addresses.
3344 * Otherwise, the router will not be able to send redirects.
3345 * It is very good, but in some (rare!) circumstances
3346 * (SIT, PtP, NBMA NOARP links) it is handy to allow
3347 * some exceptions. --ANK
3348 * We allow IPv4-mapped nexthops to support RFC4798-type
3351 if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) {
3352 NL_SET_ERR_MSG(extack, "Invalid gateway address");
3358 if (cfg->fc_flags & RTNH_F_ONLINK)
3359 err = ip6_route_check_nh_onlink(net, cfg, dev, extack);
3361 err = ip6_route_check_nh(net, cfg, _dev, idev);
3369 /* reload in case device was changed */
3374 NL_SET_ERR_MSG(extack, "Egress device not specified");
3376 } else if (dev->flags & IFF_LOOPBACK) {
3377 NL_SET_ERR_MSG(extack,
3378 "Egress device can not be loopback device for this route");
3382 /* if we did not check gw_addr above, do so now that the
3383 * egress device has been resolved.
3385 if (need_addr_check &&
3386 ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
3387 NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
3396 static bool fib6_is_reject(u32 flags, struct net_device *dev, int addr_type)
3398 if ((flags & RTF_REJECT) ||
3399 (dev && (dev->flags & IFF_LOOPBACK) &&
3400 !(addr_type & IPV6_ADDR_LOOPBACK) &&
3401 !(flags & (RTF_ANYCAST | RTF_LOCAL))))
3407 int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
3408 struct fib6_config *cfg, gfp_t gfp_flags,
3409 struct netlink_ext_ack *extack)
3411 struct net_device *dev = NULL;
3412 struct inet6_dev *idev = NULL;
3416 fib6_nh->fib_nh_family = AF_INET6;
3417 #ifdef CONFIG_IPV6_ROUTER_PREF
3418 fib6_nh->last_probe = jiffies;
3420 if (cfg->fc_is_fdb) {
3421 fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
3422 fib6_nh->fib_nh_gw_family = AF_INET6;
3427 if (cfg->fc_ifindex) {
3428 dev = dev_get_by_index(net, cfg->fc_ifindex);
3431 idev = in6_dev_get(dev);
3436 if (cfg->fc_flags & RTNH_F_ONLINK) {
3438 NL_SET_ERR_MSG(extack,
3439 "Nexthop device required for onlink");
3443 if (!(dev->flags & IFF_UP)) {
3444 NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3449 fib6_nh->fib_nh_flags |= RTNH_F_ONLINK;
3452 fib6_nh->fib_nh_weight = 1;
3454 /* We cannot add true routes via loopback here,
3455 * they would result in kernel looping; promote them to reject routes
3457 addr_type = ipv6_addr_type(&cfg->fc_dst);
3458 if (fib6_is_reject(cfg->fc_flags, dev, addr_type)) {
3459 /* hold loopback dev/idev if we haven't done so. */
3460 if (dev != net->loopback_dev) {
3465 dev = net->loopback_dev;
3467 idev = in6_dev_get(dev);
3476 if (cfg->fc_flags & RTF_GATEWAY) {
3477 err = ip6_validate_gw(net, cfg, &dev, &idev, extack);
3481 fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
3482 fib6_nh->fib_nh_gw_family = AF_INET6;
3489 if (idev->cnf.disable_ipv6) {
3490 NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
3495 if (!(dev->flags & IFF_UP) && !cfg->fc_ignore_dev_down) {
3496 NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3501 if (!(cfg->fc_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
3502 !netif_carrier_ok(dev))
3503 fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
3505 err = fib_nh_common_init(net, &fib6_nh->nh_common, cfg->fc_encap,
3506 cfg->fc_encap_type, cfg, gfp_flags, extack);
3511 fib6_nh->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, gfp_flags);
3512 if (!fib6_nh->rt6i_pcpu) {
3517 fib6_nh->fib_nh_dev = dev;
3518 fib6_nh->fib_nh_oif = dev->ifindex;
3525 lwtstate_put(fib6_nh->fib_nh_lws);
3526 fib6_nh->fib_nh_lws = NULL;
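/* Tear down a fib6_nh built by fib6_nh_init(): flush and free its
 * exception bucket, release the per-cpu cached routes and drop the
 * common nexthop state.
 */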
3534 void fib6_nh_release(struct fib6_nh *fib6_nh)
3536 struct rt6_exception_bucket *bucket;
3540 fib6_nh_flush_exceptions(fib6_nh, NULL);
3541 bucket = fib6_nh_get_excptn_bucket(fib6_nh, NULL);
3543 rcu_assign_pointer(fib6_nh->rt6i_exception_bucket, NULL);
3549 if (fib6_nh->rt6i_pcpu) {
3552 for_each_possible_cpu(cpu) {
3553 struct rt6_info **ppcpu_rt;
3554 struct rt6_info *pcpu_rt;
3556 ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
3557 pcpu_rt = *ppcpu_rt;
3559 dst_dev_put(&pcpu_rt->dst);
3560 dst_release(&pcpu_rt->dst);
3565 free_percpu(fib6_nh->rt6i_pcpu);
3568 fib_nh_common_release(&fib6_nh->nh_common);
3571 void fib6_nh_release_dsts(struct fib6_nh *fib6_nh)
3575 if (!fib6_nh->rt6i_pcpu)
3578 for_each_possible_cpu(cpu) {
3579 struct rt6_info *pcpu_rt, **ppcpu_rt;
3581 ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
3582 pcpu_rt = xchg(ppcpu_rt, NULL);
3584 dst_dev_put(&pcpu_rt->dst);
3585 dst_release(&pcpu_rt->dst);
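/* Build a fib6_info from a fib6_config: validate the request, resolve
 * the table and nexthop, and return the new (not yet inserted) entry
 * or an ERR_PTR on failure.
 */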
3590 static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
3592 struct netlink_ext_ack *extack)
3594 struct net *net = cfg->fc_nlinfo.nl_net;
3595 struct fib6_info *rt = NULL;
3596 struct nexthop *nh = NULL;
3597 struct fib6_table *table;
3598 struct fib6_nh *fib6_nh;
3602 /* RTF_PCPU is an internal flag; cannot be set by userspace */
3603 if (cfg->fc_flags & RTF_PCPU) {
3604 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
3608 /* RTF_CACHE is an internal flag; cannot be set by userspace */
3609 if (cfg->fc_flags & RTF_CACHE) {
3610 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE");
3614 if (cfg->fc_type > RTN_MAX) {
3615 NL_SET_ERR_MSG(extack, "Invalid route type");
3619 if (cfg->fc_dst_len > 128) {
3620 NL_SET_ERR_MSG(extack, "Invalid prefix length");
3623 if (cfg->fc_src_len > 128) {
3624 NL_SET_ERR_MSG(extack, "Invalid source address length");
3627 #ifndef CONFIG_IPV6_SUBTREES
3628 if (cfg->fc_src_len) {
3629 NL_SET_ERR_MSG(extack,
3630 "Specifying source address requires IPV6_SUBTREES to be enabled");
3634 if (cfg->fc_nh_id) {
3635 nh = nexthop_find_by_id(net, cfg->fc_nh_id);
3637 NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
3640 err = fib6_check_nexthop(nh, cfg, extack);
3646 if (cfg->fc_nlinfo.nlh &&
3647 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
3648 table = fib6_get_table(net, cfg->fc_table);
3650 pr_warn("NLM_F_CREATE should be specified when creating new route\n");
3651 table = fib6_new_table(net, cfg->fc_table);
3654 table = fib6_new_table(net, cfg->fc_table);
3661 rt = fib6_info_alloc(gfp_flags, !nh);
3665 rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len,
3667 if (IS_ERR(rt->fib6_metrics)) {
3668 err = PTR_ERR(rt->fib6_metrics);
3669 /* Do not leave garbage there. */
3670 rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics;
3674 if (cfg->fc_flags & RTF_ADDRCONF)
3675 rt->dst_nocount = true;
3677 if (cfg->fc_flags & RTF_EXPIRES)
3678 fib6_set_expires(rt, jiffies +
3679 clock_t_to_jiffies(cfg->fc_expires));
3681 fib6_clean_expires(rt);
3683 if (cfg->fc_protocol == RTPROT_UNSPEC)
3684 cfg->fc_protocol = RTPROT_BOOT;
3685 rt->fib6_protocol = cfg->fc_protocol;
3687 rt->fib6_table = table;
3688 rt->fib6_metric = cfg->fc_metric;
3689 rt->fib6_type = cfg->fc_type ? : RTN_UNICAST;
3690 rt->fib6_flags = cfg->fc_flags & ~RTF_GATEWAY;
3692 ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
3693 rt->fib6_dst.plen = cfg->fc_dst_len;
3695 #ifdef CONFIG_IPV6_SUBTREES
3696 ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len);
3697 rt->fib6_src.plen = cfg->fc_src_len;
3700 if (rt->fib6_src.plen) {
3701 NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
3704 if (!nexthop_get(nh)) {
3705 NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
3709 fib6_nh = nexthop_fib6_nh(rt->nh);
3711 err = fib6_nh_init(net, rt->fib6_nh, cfg, gfp_flags, extack);
3715 fib6_nh = rt->fib6_nh;
3717 /* We cannot add true routes via loopback here, they would
3718 * result in kernel looping; promote them to reject routes
3720 addr_type = ipv6_addr_type(&cfg->fc_dst);
3721 if (fib6_is_reject(cfg->fc_flags, rt->fib6_nh->fib_nh_dev,
3723 rt->fib6_flags = RTF_REJECT | RTF_NONEXTHOP;
3726 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
3727 struct net_device *dev = fib6_nh->fib_nh_dev;
3729 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
3730 NL_SET_ERR_MSG(extack, "Invalid source address");
3734 rt->fib6_prefsrc.addr = cfg->fc_prefsrc;
3735 rt->fib6_prefsrc.plen = 128;
3737 rt->fib6_prefsrc.plen = 0;
3741 fib6_info_release(rt);
3742 return ERR_PTR(err);
3744 ip_fib_metrics_put(rt->fib6_metrics);
3746 return ERR_PTR(err);
3749 int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
3750 struct netlink_ext_ack *extack)
3752 struct fib6_info *rt;
3755 rt = ip6_route_info_create(cfg, gfp_flags, extack);
3759 err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
3760 fib6_info_release(rt);
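/* Remove @rt from its table under tb6_lock and drop the caller's
 * reference; deleting the null entry is refused.
 */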
3765 static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info)
3767 struct net *net = info->nl_net;
3768 struct fib6_table *table;
3771 if (rt == net->ipv6.fib6_null_entry) {
3776 table = rt->fib6_table;
3777 spin_lock_bh(&table->tb6_lock);
3778 err = fib6_del(rt, info);
3779 spin_unlock_bh(&table->tb6_lock);
3782 fib6_info_release(rt);
3786 int ip6_del_rt(struct net *net, struct fib6_info *rt, bool skip_notify)
3788 struct nl_info info = {
3790 .skip_notify = skip_notify
3793 return __ip6_del_rt(rt, &info);
3796 static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg)
3798 struct nl_info *info = &cfg->fc_nlinfo;
3799 struct net *net = info->nl_net;
3800 struct sk_buff *skb = NULL;
3801 struct fib6_table *table;
3804 if (rt == net->ipv6.fib6_null_entry)
3806 table = rt->fib6_table;
3807 spin_lock_bh(&table->tb6_lock);
3809 if (rt->fib6_nsiblings && cfg->fc_delete_all_nh) {
3810 struct fib6_info *sibling, *next_sibling;
3811 struct fib6_node *fn;
3813 /* prefer to send a single notification with all hops */
3814 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
3816 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
3818 if (rt6_fill_node(net, skb, rt, NULL,
3819 NULL, NULL, 0, RTM_DELROUTE,
3820 info->portid, seq, 0) < 0) {
3824 info->skip_notify = 1;
3827 /* 'rt' points to the first sibling route. If it is not the
3828 * leaf, then we do not need to send a notification. Otherwise,
3829 * we need to check if the last sibling has a next route or not
3830 * and emit a replace or delete notification, respectively.
3832 info->skip_notify_kernel = 1;
3833 fn = rcu_dereference_protected(rt->fib6_node,
3834 lockdep_is_held(&table->tb6_lock));
3835 if (rcu_access_pointer(fn->leaf) == rt) {
3836 struct fib6_info *last_sibling, *replace_rt;
3838 last_sibling = list_last_entry(&rt->fib6_siblings,
3841 replace_rt = rcu_dereference_protected(
3842 last_sibling->fib6_next,
3843 lockdep_is_held(&table->tb6_lock));
3845 call_fib6_entry_notifiers_replace(net,
3848 call_fib6_multipath_entry_notifiers(net,
3849 FIB_EVENT_ENTRY_DEL,
3850 rt, rt->fib6_nsiblings,
3853 list_for_each_entry_safe(sibling, next_sibling,
3856 err = fib6_del(sibling, info);
3862 err = fib6_del(rt, info);
3864 spin_unlock_bh(&table->tb6_lock);
3866 fib6_info_release(rt);
3869 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
3870 info->nlh, gfp_any());
3875 static int __ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
3879 if (cfg->fc_ifindex && rt->dst.dev->ifindex != cfg->fc_ifindex)
3882 if (cfg->fc_flags & RTF_GATEWAY &&
3883 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
3886 rc = rt6_remove_exception_rt(rt);
3891 static int ip6_del_cached_rt(struct fib6_config *cfg, struct fib6_info *rt,
3894 struct fib6_result res = {
3898 struct rt6_info *rt_cache;
3900 rt_cache = rt6_find_cached_rt(&res, &cfg->fc_dst, &cfg->fc_src);
3902 return __ip6_del_cached_rt(rt_cache, cfg);
3907 struct fib6_nh_del_cached_rt_arg {
3908 struct fib6_config *cfg;
3909 struct fib6_info *f6i;
3912 static int fib6_nh_del_cached_rt(struct fib6_nh *nh, void *_arg)
3914 struct fib6_nh_del_cached_rt_arg *arg = _arg;
3917 rc = ip6_del_cached_rt(arg->cfg, arg->f6i, nh);
3918 return rc != -ESRCH ? rc : 0;
3921 static int ip6_del_cached_rt_nh(struct fib6_config *cfg, struct fib6_info *f6i)
3923 struct fib6_nh_del_cached_rt_arg arg = {
3928 return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_del_cached_rt, &arg);
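/* Core RTM_DELROUTE handler: locate the matching fib6_info (or the
 * cached exception when RTF_CACHE is set) and delete it.
 */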
3931 static int ip6_route_del(struct fib6_config *cfg,
3932 struct netlink_ext_ack *extack)
3934 struct fib6_table *table;
3935 struct fib6_info *rt;
3936 struct fib6_node *fn;
3939 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
3941 NL_SET_ERR_MSG(extack, "FIB table does not exist");
3947 fn = fib6_locate(&table->tb6_root,
3948 &cfg->fc_dst, cfg->fc_dst_len,
3949 &cfg->fc_src, cfg->fc_src_len,
3950 !(cfg->fc_flags & RTF_CACHE));
3953 for_each_fib6_node_rt_rcu(fn) {
3956 if (rt->nh && cfg->fc_nh_id &&
3957 rt->nh->id != cfg->fc_nh_id)
3960 if (cfg->fc_flags & RTF_CACHE) {
3964 rc = ip6_del_cached_rt_nh(cfg, rt);
3965 } else if (cfg->fc_nh_id) {
3969 rc = ip6_del_cached_rt(cfg, rt, nh);
3978 if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric)
3980 if (cfg->fc_protocol &&
3981 cfg->fc_protocol != rt->fib6_protocol)
3985 if (!fib6_info_hold_safe(rt))
3989 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
3995 if (cfg->fc_ifindex &&
3997 nh->fib_nh_dev->ifindex != cfg->fc_ifindex))
3999 if (cfg->fc_flags & RTF_GATEWAY &&
4000 !ipv6_addr_equal(&cfg->fc_gateway, &nh->fib_nh_gw6))
4002 if (!fib6_info_hold_safe(rt))
4006 /* if a gateway was specified, only delete the one hop */
4007 if (cfg->fc_flags & RTF_GATEWAY)
4008 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
4010 return __ip6_del_rt_siblings(rt, cfg);
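/* Process an ICMPv6 Redirect: validate it per RFC 4861, update the
 * neighbour cache, and install an RTF_CACHE exception route pointing
 * at the new gateway.
 */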
4018 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
4020 struct netevent_redirect netevent;
4021 struct rt6_info *rt, *nrt = NULL;
4022 struct fib6_result res = {};
4023 struct ndisc_options ndopts;
4024 struct inet6_dev *in6_dev;
4025 struct neighbour *neigh;
4027 int optlen, on_link;
4030 optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
4031 optlen -= sizeof(*msg);
4034 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
4038 msg = (struct rd_msg *)icmp6_hdr(skb);
4040 if (ipv6_addr_is_multicast(&msg->dest)) {
4041 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
4046 if (ipv6_addr_equal(&msg->dest, &msg->target)) {
4048 } else if (ipv6_addr_type(&msg->target) !=
4049 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
4050 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
4054 in6_dev = __in6_dev_get(skb->dev);
4057 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
4061 * The IP source address of the Redirect MUST be the same as the current
4062 * first-hop router for the specified ICMP Destination Address.
4065 if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
4066 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
4071 if (ndopts.nd_opts_tgt_lladdr) {
4072 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
4075 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
4080 rt = (struct rt6_info *) dst;
4081 if (rt->rt6i_flags & RTF_REJECT) {
4082 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
4086 /* Redirect received -> path was valid.
4087 * Look, redirects are sent only in response to data packets,
4088 * so this nexthop is apparently reachable. --ANK
4090 dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
4092 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
4097 * We have finally decided to accept it.
4100 ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
4101 NEIGH_UPDATE_F_WEAK_OVERRIDE|
4102 NEIGH_UPDATE_F_OVERRIDE|
4103 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
4104 NEIGH_UPDATE_F_ISROUTER)),
4105 NDISC_REDIRECT, &ndopts);
4108 res.f6i = rcu_dereference(rt->from);
4113 struct fib6_nh_match_arg arg = {
4115 .gw = &rt->rt6i_gateway,
4118 nexthop_for_each_fib6_nh(res.f6i->nh,
4119 fib6_nh_find_match, &arg);
4121 /* fib6_info uses a nexthop that does not have a fib6_nh
4122 * using the dst->dev. Should be impossible.
4128 res.nh = res.f6i->fib6_nh;
4131 res.fib6_flags = res.f6i->fib6_flags;
4132 res.fib6_type = res.f6i->fib6_type;
4133 nrt = ip6_rt_cache_alloc(&res, &msg->dest, NULL);
4137 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
4139 nrt->rt6i_flags &= ~RTF_GATEWAY;
4141 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
4143 /* rt6_insert_exception() will take care of duplicated exceptions */
4144 if (rt6_insert_exception(nrt, &res)) {
4145 dst_release_immediate(&nrt->dst);
4149 netevent.old = &rt->dst;
4150 netevent.new = &nrt->dst;
4151 netevent.daddr = &msg->dest;
4152 netevent.neigh = neigh;
4153 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
4157 neigh_release(neigh);
4160 #ifdef CONFIG_IPV6_ROUTE_INFO
4161 static struct fib6_info *rt6_get_route_info(struct net *net,
4162 const struct in6_addr *prefix, int prefixlen,
4163 const struct in6_addr *gwaddr,
4164 struct net_device *dev)
4166 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
4167 int ifindex = dev->ifindex;
4168 struct fib6_node *fn;
4169 struct fib6_info *rt = NULL;
4170 struct fib6_table *table;
4172 table = fib6_get_table(net, tb_id);
4177 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
4181 for_each_fib6_node_rt_rcu(fn) {
4182 /* these routes do not use nexthops */
4185 if (rt->fib6_nh->fib_nh_dev->ifindex != ifindex)
4187 if (!(rt->fib6_flags & RTF_ROUTEINFO) ||
4188 !rt->fib6_nh->fib_nh_gw_family)
4190 if (!ipv6_addr_equal(&rt->fib6_nh->fib_nh_gw6, gwaddr))
4192 if (!fib6_info_hold_safe(rt))
4201 static struct fib6_info *rt6_add_route_info(struct net *net,
4202 const struct in6_addr *prefix, int prefixlen,
4203 const struct in6_addr *gwaddr,
4204 struct net_device *dev,
4207 struct fib6_config cfg = {
4208 .fc_metric = IP6_RT_PRIO_USER,
4209 .fc_ifindex = dev->ifindex,
4210 .fc_dst_len = prefixlen,
4211 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
4212 RTF_UP | RTF_PREF(pref),
4213 .fc_protocol = RTPROT_RA,
4214 .fc_type = RTN_UNICAST,
4215 .fc_nlinfo.portid = 0,
4216 .fc_nlinfo.nlh = NULL,
4217 .fc_nlinfo.nl_net = net,
4220 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
4221 cfg.fc_dst = *prefix;
4222 cfg.fc_gateway = *gwaddr;
4224 /* We should treat it as a default route if prefix length is 0. */
4226 cfg.fc_flags |= RTF_DEFAULT;
4228 ip6_route_add(&cfg, GFP_ATOMIC, NULL);
4230 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
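/* Find the RA-learned (RTF_ADDRCONF | RTF_DEFAULT) default route via
 * @addr on @dev, taking a reference on the entry if found.
 */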
4234 struct fib6_info *rt6_get_dflt_router(struct net *net,
4235 const struct in6_addr *addr,
4236 struct net_device *dev)
4238 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
4239 struct fib6_info *rt;
4240 struct fib6_table *table;
4242 table = fib6_get_table(net, tb_id);
4247 for_each_fib6_node_rt_rcu(&table->tb6_root) {
4250 /* RA routes do not use nexthops */
4255 if (dev == nh->fib_nh_dev &&
4256 ((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
4257 ipv6_addr_equal(&nh->fib_nh_gw6, addr))
4260 if (rt && !fib6_info_hold_safe(rt))
4266 struct fib6_info *rt6_add_dflt_router(struct net *net,
4267 const struct in6_addr *gwaddr,
4268 struct net_device *dev,
4271 struct fib6_config cfg = {
4272 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
4273 .fc_metric = IP6_RT_PRIO_USER,
4274 .fc_ifindex = dev->ifindex,
4275 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
4276 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
4277 .fc_protocol = RTPROT_RA,
4278 .fc_type = RTN_UNICAST,
4279 .fc_nlinfo.portid = 0,
4280 .fc_nlinfo.nlh = NULL,
4281 .fc_nlinfo.nl_net = net,
4284 cfg.fc_gateway = *gwaddr;
4286 if (!ip6_route_add(&cfg, GFP_ATOMIC, NULL)) {
4287 struct fib6_table *table;
4289 table = fib6_get_table(dev_net(dev), cfg.fc_table);
4291 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
4294 return rt6_get_dflt_router(net, gwaddr, dev);
4297 static void __rt6_purge_dflt_routers(struct net *net,
4298 struct fib6_table *table)
4300 struct fib6_info *rt;
4304 for_each_fib6_node_rt_rcu(&table->tb6_root) {
4305 struct net_device *dev = fib6_info_nh_dev(rt);
4306 struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
4308 if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
4309 (!idev || idev->cnf.accept_ra != 2) &&
4310 fib6_info_hold_safe(rt)) {
4312 ip6_del_rt(net, rt, false);
4318 table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
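/* Walk all FIB tables flagged as holding a default router and remove
 * RA-learned default routes, except on interfaces with accept_ra == 2.
 */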
4321 void rt6_purge_dflt_routers(struct net *net)
4323 struct fib6_table *table;
4324 struct hlist_head *head;
4329 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
4330 head = &net->ipv6.fib_table_hash[h];
4331 hlist_for_each_entry_rcu(table, head, tb6_hlist) {
4332 if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
4333 __rt6_purge_dflt_routers(net, table);
4340 static void rtmsg_to_fib6_config(struct net *net,
4341 struct in6_rtmsg *rtmsg,
4342 struct fib6_config *cfg)
4344 *cfg = (struct fib6_config){
4345 .fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
4347 .fc_ifindex = rtmsg->rtmsg_ifindex,
4348 .fc_metric = rtmsg->rtmsg_metric ? : IP6_RT_PRIO_USER,
4349 .fc_expires = rtmsg->rtmsg_info,
4350 .fc_dst_len = rtmsg->rtmsg_dst_len,
4351 .fc_src_len = rtmsg->rtmsg_src_len,
4352 .fc_flags = rtmsg->rtmsg_flags,
4353 .fc_type = rtmsg->rtmsg_type,
4355 .fc_nlinfo.nl_net = net,
4357 .fc_dst = rtmsg->rtmsg_dst,
4358 .fc_src = rtmsg->rtmsg_src,
4359 .fc_gateway = rtmsg->rtmsg_gateway,
4363 int ipv6_route_ioctl(struct net *net, unsigned int cmd, struct in6_rtmsg *rtmsg)
4365 struct fib6_config cfg;
4368 if (cmd != SIOCADDRT && cmd != SIOCDELRT)
4370 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
4373 rtmsg_to_fib6_config(net, rtmsg, &cfg);
4378 err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
4381 err = ip6_route_del(&cfg, NULL);
4389 * Drop the packet on the floor
4392 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
4394 struct dst_entry *dst = skb_dst(skb);
4395 struct net *net = dev_net(dst->dev);
4396 struct inet6_dev *idev;
4399 if (netif_is_l3_master(skb->dev) ||
4400 dst->dev == net->loopback_dev)
4401 idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
4403 idev = ip6_dst_idev(dst);
4405 switch (ipstats_mib_noroutes) {
4406 case IPSTATS_MIB_INNOROUTES:
4407 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
4408 if (type == IPV6_ADDR_ANY) {
4409 IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
4413 case IPSTATS_MIB_OUTNOROUTES:
4414 IP6_INC_STATS(net, idev, ipstats_mib_noroutes);
4418 /* Start over by dropping the dst for l3mdev case */
4419 if (netif_is_l3_master(skb->dev))
4422 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
4427 static int ip6_pkt_discard(struct sk_buff *skb)
4429 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
4432 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
4434 skb->dev = skb_dst(skb)->dev;
4435 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
4438 static int ip6_pkt_prohibit(struct sk_buff *skb)
4440 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
4443 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
4445 skb->dev = skb_dst(skb)->dev;
4446 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
4450 * Allocate a dst for local (unicast / anycast) address.
4453 struct fib6_info *addrconf_f6i_alloc(struct net *net,
4454 struct inet6_dev *idev,
4455 const struct in6_addr *addr,
4456 bool anycast, gfp_t gfp_flags)
4458 struct fib6_config cfg = {
4459 .fc_table = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL,
4460 .fc_ifindex = idev->dev->ifindex,
4461 .fc_flags = RTF_UP | RTF_NONEXTHOP,
4464 .fc_protocol = RTPROT_KERNEL,
4465 .fc_nlinfo.nl_net = net,
4466 .fc_ignore_dev_down = true,
4468 struct fib6_info *f6i;
4471 cfg.fc_type = RTN_ANYCAST;
4472 cfg.fc_flags |= RTF_ANYCAST;
4474 cfg.fc_type = RTN_LOCAL;
4475 cfg.fc_flags |= RTF_LOCAL;
4478 f6i = ip6_route_info_create(&cfg, gfp_flags, NULL);
4480 f6i->dst_nocount = true;
4483 (net->ipv6.devconf_all->disable_policy ||
4484 idev->cnf.disable_policy))
4485 f6i->dst_nopolicy = true;
4491 /* remove deleted ip from prefsrc entries */
4492 struct arg_dev_net_ip {
4493 struct net_device *dev;
4495 struct in6_addr *addr;
4498 static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg)
4500 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
4501 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
4502 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
4505 ((void *)rt->fib6_nh->fib_nh_dev == dev || !dev) &&
4506 rt != net->ipv6.fib6_null_entry &&
4507 ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr)) {
4508 spin_lock_bh(&rt6_exception_lock);
4509 /* remove prefsrc entry */
4510 rt->fib6_prefsrc.plen = 0;
4511 spin_unlock_bh(&rt6_exception_lock);
4516 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
4518 struct net *net = dev_net(ifp->idev->dev);
4519 struct arg_dev_net_ip adni = {
4520 .dev = ifp->idev->dev,
4524 fib6_clean_all(net, fib6_remove_prefsrc, &adni);
4527 #define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT)
4529 /* Remove routers and update dst entries when a gateway turns into a host. */
4530 static int fib6_clean_tohost(struct fib6_info *rt, void *arg)
4532 struct in6_addr *gateway = (struct in6_addr *)arg;
4535 /* RA routes do not use nexthops */
4540 if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
4541 nh->fib_nh_gw_family && ipv6_addr_equal(gateway, &nh->fib_nh_gw6))
4544 /* Further clean up cached routes in exception table.
4545 * This is needed because a cached route may have a different
4546 * gateway than its 'parent' in the case of an IP redirect.
4548 fib6_nh_exceptions_clean_tohost(nh, gateway);
4553 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
4555 fib6_clean_all(net, fib6_clean_tohost, gateway);
4558 struct arg_netdev_event {
4559 const struct net_device *dev;
4561 unsigned char nh_flags;
4562 unsigned long event;
4566 static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
4568 struct fib6_info *iter;
4569 struct fib6_node *fn;
4571 fn = rcu_dereference_protected(rt->fib6_node,
4572 lockdep_is_held(&rt->fib6_table->tb6_lock));
4573 iter = rcu_dereference_protected(fn->leaf,
4574 lockdep_is_held(&rt->fib6_table->tb6_lock));
4576 if (iter->fib6_metric == rt->fib6_metric &&
4577 rt6_qualify_for_ecmp(iter))
4579 iter = rcu_dereference_protected(iter->fib6_next,
4580 lockdep_is_held(&rt->fib6_table->tb6_lock));
4586 /* only called for fib entries with builtin fib6_nh */
4587 static bool rt6_is_dead(const struct fib6_info *rt)
4589 if (rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD ||
4590 (rt->fib6_nh->fib_nh_flags & RTNH_F_LINKDOWN &&
4591 ip6_ignore_linkdown(rt->fib6_nh->fib_nh_dev)))
4597 static int rt6_multipath_total_weight(const struct fib6_info *rt)
4599 struct fib6_info *iter;
4602 if (!rt6_is_dead(rt))
4603 total += rt->fib6_nh->fib_nh_weight;
4605 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
4606 if (!rt6_is_dead(iter))
4607 total += iter->fib6_nh->fib_nh_weight;
4613 static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total)
4615 int upper_bound = -1;
4617 if (!rt6_is_dead(rt)) {
4618 *weight += rt->fib6_nh->fib_nh_weight;
4619 upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
4622 atomic_set(&rt->fib6_nh->fib_nh_upper_bound, upper_bound);
4625 static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total)
4627 struct fib6_info *iter;
4630 rt6_upper_bound_set(rt, &weight, total);
4632 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4633 rt6_upper_bound_set(iter, &weight, total);
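/* Recompute the hash upper bounds of a multipath route after a sibling
 * changes state, so traffic is redistributed over the live nexthops
 * according to their weights.
 */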
4636 void rt6_multipath_rebalance(struct fib6_info *rt)
4638 struct fib6_info *first;
4641 /* In case the entire multipath route was marked for flushing,
4642 * then there is no need to rebalance upon the removal of every
4643 * sibling route.
4645 if (!rt->fib6_nsiblings || rt->should_flush)
4648 /* During lookup routes are evaluated in order, so we need to
4649 * make sure upper bounds are assigned from the first sibling
4650 * onwards.
4652 first = rt6_multipath_first_sibling(rt);
4653 if (WARN_ON_ONCE(!first))
4656 total = rt6_multipath_total_weight(first);
4657 rt6_multipath_upper_bound_set(first, total);
4660 static int fib6_ifup(struct fib6_info *rt, void *p_arg)
4662 const struct arg_netdev_event *arg = p_arg;
4663 struct net *net = dev_net(arg->dev);
4665 if (rt != net->ipv6.fib6_null_entry && !rt->nh &&
4666 rt->fib6_nh->fib_nh_dev == arg->dev) {
4667 rt->fib6_nh->fib_nh_flags &= ~arg->nh_flags;
4668 fib6_update_sernum_upto_root(net, rt);
4669 rt6_multipath_rebalance(rt);
4675 void rt6_sync_up(struct net_device *dev, unsigned char nh_flags)
4677 struct arg_netdev_event arg = {
4680 .nh_flags = nh_flags,
4684 if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
4685 arg.nh_flags |= RTNH_F_LINKDOWN;
4687 fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
4690 /* only called for fib entries with inline fib6_nh */
4691 static bool rt6_multipath_uses_dev(const struct fib6_info *rt,
4692 const struct net_device *dev)
4694 struct fib6_info *iter;
4696 if (rt->fib6_nh->fib_nh_dev == dev)
4698 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4699 if (iter->fib6_nh->fib_nh_dev == dev)
4705 static void rt6_multipath_flush(struct fib6_info *rt)
4707 struct fib6_info *iter;
4709 rt->should_flush = 1;
4710 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4711 iter->should_flush = 1;
4714 static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt,
4715 const struct net_device *down_dev)
4717 struct fib6_info *iter;
4718 unsigned int dead = 0;
4720 if (rt->fib6_nh->fib_nh_dev == down_dev ||
4721 rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4723 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4724 if (iter->fib6_nh->fib_nh_dev == down_dev ||
4725 iter->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4731 static void rt6_multipath_nh_flags_set(struct fib6_info *rt,
4732 const struct net_device *dev,
4733 unsigned char nh_flags)
4735 struct fib6_info *iter;
4737 if (rt->fib6_nh->fib_nh_dev == dev)
4738 rt->fib6_nh->fib_nh_flags |= nh_flags;
4739 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4740 if (iter->fib6_nh->fib_nh_dev == dev)
4741 iter->fib6_nh->fib_nh_flags |= nh_flags;
4744 /* called with write lock held for table with rt */
4745 static int fib6_ifdown(struct fib6_info *rt, void *p_arg)
4747 const struct arg_netdev_event *arg = p_arg;
4748 const struct net_device *dev = arg->dev;
4749 struct net *net = dev_net(dev);
4751 if (rt == net->ipv6.fib6_null_entry || rt->nh)
4754 switch (arg->event) {
4755 case NETDEV_UNREGISTER:
4756 return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4758 if (rt->should_flush)
4760 if (!rt->fib6_nsiblings)
4761 return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4762 if (rt6_multipath_uses_dev(rt, dev)) {
4765 count = rt6_multipath_dead_count(rt, dev);
4766 if (rt->fib6_nsiblings + 1 == count) {
4767 rt6_multipath_flush(rt);
4770 rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
4772 fib6_update_sernum(net, rt);
4773 rt6_multipath_rebalance(rt);
4777 if (rt->fib6_nh->fib_nh_dev != dev ||
4778 rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
4780 rt->fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
4781 rt6_multipath_rebalance(rt);
4788 void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
4790 struct arg_netdev_event arg = {
4796 struct net *net = dev_net(dev);
4798 if (net->ipv6.sysctl.skip_notify_on_dev_down)
4799 fib6_clean_all_skip_notify(net, fib6_ifdown, &arg);
4801 fib6_clean_all(net, fib6_ifdown, &arg);
4804 void rt6_disable_ip(struct net_device *dev, unsigned long event)
4806 rt6_sync_down_dev(dev, event);
4807 rt6_uncached_list_flush_dev(dev_net(dev), dev);
4808 neigh_ifdown(&nd_tbl, dev);
4811 struct rt6_mtu_change_arg {
4812 struct net_device *dev;
4814 struct fib6_info *f6i;
4817 static int fib6_nh_mtu_change(struct fib6_nh *nh, void *_arg)
4819 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *)_arg;
4820 struct fib6_info *f6i = arg->f6i;
4822 /* For an administrative MTU increase, there is no way to discover
4823 * an IPv6 PMTU increase, so the PMTU should be updated here.
4824 * Since RFC 1981 doesn't cover administrative MTU increases,
4825 * updating the PMTU on such an increase is a MUST (e.g. jumbo frames).
4827 if (nh->fib_nh_dev == arg->dev) {
4828 struct inet6_dev *idev = __in6_dev_get(arg->dev);
4829 u32 mtu = f6i->fib6_pmtu;
4831 if (mtu >= arg->mtu ||
4832 (mtu < arg->mtu && mtu == idev->cnf.mtu6))
4833 fib6_metric_set(f6i, RTAX_MTU, arg->mtu);
4835 spin_lock_bh(&rt6_exception_lock);
4836 rt6_exceptions_update_pmtu(idev, nh, arg->mtu);
4837 spin_unlock_bh(&rt6_exception_lock);
4843 static int rt6_mtu_change_route(struct fib6_info *f6i, void *p_arg)
4845 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
4846 struct inet6_dev *idev;
4848 /* In IPv6, PMTU discovery is not optional,
4849 so the RTAX_MTU lock cannot disable it.
4850 We still use this lock to block changes
4851 caused by addrconf/ndisc.
4854 idev = __in6_dev_get(arg->dev);
4858 if (fib6_metric_locked(f6i, RTAX_MTU))
4863 /* fib6_nh_mtu_change only returns 0, so this is safe */
4864 return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_mtu_change,
4868 return fib6_nh_mtu_change(f6i->fib6_nh, arg);
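/* Propagate a device MTU change to every route and cached exception
 * using that device.
 */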
4871 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
4873 struct rt6_mtu_change_arg arg = {
4878 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
4881 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
4882 [RTA_UNSPEC] = { .strict_start_type = RTA_DPORT + 1 },
4883 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
4884 [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) },
4885 [RTA_OIF] = { .type = NLA_U32 },
4886 [RTA_IIF] = { .type = NLA_U32 },
4887 [RTA_PRIORITY] = { .type = NLA_U32 },
4888 [RTA_METRICS] = { .type = NLA_NESTED },
4889 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
4890 [RTA_PREF] = { .type = NLA_U8 },
4891 [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
4892 [RTA_ENCAP] = { .type = NLA_NESTED },
4893 [RTA_EXPIRES] = { .type = NLA_U32 },
4894 [RTA_UID] = { .type = NLA_U32 },
4895 [RTA_MARK] = { .type = NLA_U32 },
4896 [RTA_TABLE] = { .type = NLA_U32 },
4897 [RTA_IP_PROTO] = { .type = NLA_U8 },
4898 [RTA_SPORT] = { .type = NLA_U16 },
4899 [RTA_DPORT] = { .type = NLA_U16 },
4900 [RTA_NH_ID] = { .type = NLA_U32 },
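/* Translate an RTM_{NEW,DEL}ROUTE request into a fib6_config,
 * validating attributes against rtm_ipv6_policy above.
 */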
4903 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
4904 struct fib6_config *cfg,
4905 struct netlink_ext_ack *extack)
4908 struct nlattr *tb[RTA_MAX+1];
4912 err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
4913 rtm_ipv6_policy, extack);
4918 rtm = nlmsg_data(nlh);
4920 *cfg = (struct fib6_config){
4921 .fc_table = rtm->rtm_table,
4922 .fc_dst_len = rtm->rtm_dst_len,
4923 .fc_src_len = rtm->rtm_src_len,
4925 .fc_protocol = rtm->rtm_protocol,
4926 .fc_type = rtm->rtm_type,
4928 .fc_nlinfo.portid = NETLINK_CB(skb).portid,
4929 .fc_nlinfo.nlh = nlh,
4930 .fc_nlinfo.nl_net = sock_net(skb->sk),
4933 if (rtm->rtm_type == RTN_UNREACHABLE ||
4934 rtm->rtm_type == RTN_BLACKHOLE ||
4935 rtm->rtm_type == RTN_PROHIBIT ||
4936 rtm->rtm_type == RTN_THROW)
4937 cfg->fc_flags |= RTF_REJECT;
4939 if (rtm->rtm_type == RTN_LOCAL)
4940 cfg->fc_flags |= RTF_LOCAL;
4942 if (rtm->rtm_flags & RTM_F_CLONED)
4943 cfg->fc_flags |= RTF_CACHE;
4945 cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);
4947 if (tb[RTA_NH_ID]) {
4948 if (tb[RTA_GATEWAY] || tb[RTA_OIF] ||
4949 tb[RTA_MULTIPATH] || tb[RTA_ENCAP]) {
4950 NL_SET_ERR_MSG(extack,
4951 "Nexthop specification and nexthop id are mutually exclusive");
4954 cfg->fc_nh_id = nla_get_u32(tb[RTA_NH_ID]);
4957 if (tb[RTA_GATEWAY]) {
4958 cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
4959 cfg->fc_flags |= RTF_GATEWAY;
4962 NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
4967 int plen = (rtm->rtm_dst_len + 7) >> 3;
4969 if (nla_len(tb[RTA_DST]) < plen)
4972 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
4976 int plen = (rtm->rtm_src_len + 7) >> 3;
4978 if (nla_len(tb[RTA_SRC]) < plen)
4981 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
4984 if (tb[RTA_PREFSRC])
4985 cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
4988 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
4990 if (tb[RTA_PRIORITY])
4991 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
4993 if (tb[RTA_METRICS]) {
4994 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
4995 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
4999 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
5001 if (tb[RTA_MULTIPATH]) {
5002 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
5003 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
5005 err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
5006 cfg->fc_mp_len, extack);
5012 pref = nla_get_u8(tb[RTA_PREF]);
5013 if (pref != ICMPV6_ROUTER_PREF_LOW &&
5014 pref != ICMPV6_ROUTER_PREF_HIGH)
5015 pref = ICMPV6_ROUTER_PREF_MEDIUM;
5016 cfg->fc_flags |= RTF_PREF(pref);
5020 cfg->fc_encap = tb[RTA_ENCAP];
5022 if (tb[RTA_ENCAP_TYPE]) {
5023 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
5025 err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
5030 if (tb[RTA_EXPIRES]) {
5031 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
5033 if (addrconf_finite_timeout(timeout)) {
5034 cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
5035 cfg->fc_flags |= RTF_EXPIRES;
5045 struct fib6_info *fib6_info;
5046 struct fib6_config r_cfg;
5047 struct list_head next;
5050 static int ip6_route_info_append(struct net *net,
5051 struct list_head *rt6_nh_list,
5052 struct fib6_info *rt,
5053 struct fib6_config *r_cfg)
5058 list_for_each_entry(nh, rt6_nh_list, next) {
5059 /* check if fib6_info already exists */
5060 if (rt6_duplicate_nexthop(nh->fib6_info, rt))
5064 nh = kzalloc(sizeof(*nh), GFP_KERNEL);
5068 memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
5069 list_add_tail(&nh->next, rt6_nh_list);
5074 static void ip6_route_mpath_notify(struct fib6_info *rt,
5075 struct fib6_info *rt_last,
5076 struct nl_info *info,
5079 /* if this is an APPEND route, then rt points to the first route
5080 * inserted and rt_last points to last route inserted. Userspace
5081 * wants a consistent dump of the route which starts at the first
5082 * nexthop. Since sibling routes are always added at the end of
5083 * the list, find the first sibling of the last route appended
5085 if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->fib6_nsiblings) {
5086 rt = list_first_entry(&rt_last->fib6_siblings,
5092 inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
5095 static bool ip6_route_mpath_should_notify(const struct fib6_info *rt)
5097 bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
5098 bool should_notify = false;
5099 struct fib6_info *leaf;
5100 struct fib6_node *fn;
5103 fn = rcu_dereference(rt->fib6_node);
5107 leaf = rcu_dereference(fn->leaf);
5112 (rt_can_ecmp && rt->fib6_metric == leaf->fib6_metric &&
5113 rt6_qualify_for_ecmp(leaf)))
5114 should_notify = true;
5118 return should_notify;
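/* Summary of the checks above (added for clarity): should_notify ends
 * up true in two cases, namely the new multipath route became the
 * node's leaf (it is the first route in the node), or it was appended
 * as an ECMP sibling of the leaf (both ECMP-qualified, with equal
 * fib6_metric).
 */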
5121 static int fib6_gw_from_attr(struct in6_addr *gw, struct nlattr *nla,
5122 struct netlink_ext_ack *extack)
5124 if (nla_len(nla) < sizeof(*gw)) {
5125 NL_SET_ERR_MSG(extack, "Invalid IPv6 address in RTA_GATEWAY");
5129 *gw = nla_get_in6_addr(nla);
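/* Note (added for clarity): the length check above matters because
 * nla_get_in6_addr() copies a full sizeof(struct in6_addr) == 16
 * bytes, so a shorter RTA_GATEWAY payload would be over-read.
 */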
5134 static int ip6_route_multipath_add(struct fib6_config *cfg,
5135 struct netlink_ext_ack *extack)
5137 struct fib6_info *rt_notif = NULL, *rt_last = NULL;
5138 struct nl_info *info = &cfg->fc_nlinfo;
5139 struct fib6_config r_cfg;
5140 struct rtnexthop *rtnh;
5141 struct fib6_info *rt;
5142 struct rt6_nh *err_nh;
5143 struct rt6_nh *nh, *nh_safe;
5149 int replace = (cfg->fc_nlinfo.nlh &&
5150 (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
5151 LIST_HEAD(rt6_nh_list);
5153 nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
5154 if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
5155 nlflags |= NLM_F_APPEND;
5157 remaining = cfg->fc_mp_len;
5158 rtnh = (struct rtnexthop *)cfg->fc_mp;
5160 /* Parse a Multipath Entry and build a list (rt6_nh_list) of
5161 * fib6_info structs per nexthop
5163 while (rtnh_ok(rtnh, remaining)) {
5164 memcpy(&r_cfg, cfg, sizeof(*cfg));
5165 if (rtnh->rtnh_ifindex)
5166 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
5168 attrlen = rtnh_attrlen(rtnh);
5170 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
5172 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
5174 err = fib6_gw_from_attr(&r_cfg.fc_gateway, nla,
5179 r_cfg.fc_flags |= RTF_GATEWAY;
5181 r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
5183 /* RTA_ENCAP_TYPE length checked in
5184 * lwtunnel_valid_encap_type_attr
5186 nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
5188 r_cfg.fc_encap_type = nla_get_u16(nla);
5191 r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
5192 rt = ip6_route_info_create(&r_cfg, GFP_KERNEL, extack);
5198 if (!rt6_qualify_for_ecmp(rt)) {
5200 NL_SET_ERR_MSG(extack,
5201 "Device-only routes cannot be added for IPv6 using the multipath API.");
5202 fib6_info_release(rt);
5206 rt->fib6_nh->fib_nh_weight = rtnh->rtnh_hops + 1;
5208 err = ip6_route_info_append(info->nl_net, &rt6_nh_list,
5211 fib6_info_release(rt);
5215 rtnh = rtnh_next(rtnh, &remaining);
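/* Illustrative sketch (not from the original file): layout of the
 * RTA_MULTIPATH payload consumed by the loop above, one stanza per
 * nexthop:
 *
 *	struct rtnexthop { rtnh_len, rtnh_flags, rtnh_hops, rtnh_ifindex }
 *	  nested attrs:  RTA_GATEWAY, RTA_ENCAP, RTA_ENCAP_TYPE, ...
 *	struct rtnexthop ...
 *
 * rtnh_ok()/rtnh_next() walk the stride; rtnh_attrs()/rtnh_attrlen()
 * expose the nested per-nexthop attributes.
 */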
5218 if (list_empty(&rt6_nh_list)) {
5219 NL_SET_ERR_MSG(extack,
5220 "Invalid nexthop configuration - no valid nexthops");
5224 /* For add and replace, send one notification with all nexthops.
5225  * Skip the notification in fib6_add_rt2node() and send one with
5226  * the full route when done.
5228 info->skip_notify = 1;
5230 /* For add and replace, send one notification with all nexthops. For
5231 * append, send one notification with all appended nexthops.
5233 info->skip_notify_kernel = 1;
5236 list_for_each_entry(nh, &rt6_nh_list, next) {
5237 err = __ip6_ins_rt(nh->fib6_info, info, extack);
5241 NL_SET_ERR_MSG_MOD(extack,
5242 "multipath route replace failed (check consistency of installed routes)");
5246 /* save reference to last route successfully inserted */
5247 rt_last = nh->fib6_info;
5249 /* save reference to first route for notification */
5251 rt_notif = nh->fib6_info;
5253 /* Because each route is added like a single route, we remove
5254  * these flags after the first nexthop: if there is a collision,
5255  * we have already failed to add the first nexthop, since
5256  * fib6_add_rt2node() has rejected it; when replacing, the old
5257  * nexthops have been replaced by the first new one, and the rest get added to it.
5260 if (cfg->fc_nlinfo.nlh) {
5261 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
5263 cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
5268 /* An in-kernel notification should only be sent in case the new
5269 * multipath route is added as the first route in the node, or if
5270 * it was appended to it. We pass 'rt_notif' since it is the first
5271 * sibling and might allow us to skip some checks in the replace case.
5273 if (ip6_route_mpath_should_notify(rt_notif)) {
5274 enum fib_event_type fib_event;
5276 if (rt_notif->fib6_nsiblings != nhn - 1)
5277 fib_event = FIB_EVENT_ENTRY_APPEND;
5279 fib_event = FIB_EVENT_ENTRY_REPLACE;
5281 err = call_fib6_multipath_entry_notifiers(info->nl_net,
5282 fib_event, rt_notif,
5285 /* Delete all the siblings that were just added */
5291 /* success ... tell user about new route */
5292 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
5296 /* send notifications for the routes that were added, so that
5297  * the delete notifications sent by ip6_route_del are coherent.
5301 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
5303 /* Delete routes that were already added */
5304 list_for_each_entry(nh, &rt6_nh_list, next) {
5307 ip6_route_del(&nh->r_cfg, extack);
5311 list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
5312 fib6_info_release(nh->fib6_info);
5313 list_del(&nh->next);
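/* Illustrative example (not from the original file) of a request this
 * function serves:
 *
 *	ip -6 route add 2001:db8::/64 \
 *		nexthop via fe80::1 dev eth0 weight 1 \
 *		nexthop via fe80::2 dev eth1 weight 2
 *
 * Each "nexthop" stanza arrives as one rtnexthop inside RTA_MULTIPATH;
 * "weight N" is carried as rtnh_hops = N - 1, hence the rtnh_hops + 1
 * when fib_nh_weight is set above.
 */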
5320 static int ip6_route_multipath_del(struct fib6_config *cfg,
5321 struct netlink_ext_ack *extack)
5323 struct fib6_config r_cfg;
5324 struct rtnexthop *rtnh;
5330 remaining = cfg->fc_mp_len;
5331 rtnh = (struct rtnexthop *)cfg->fc_mp;
5333 /* Parse a Multipath Entry */
5334 while (rtnh_ok(rtnh, remaining)) {
5335 memcpy(&r_cfg, cfg, sizeof(*cfg));
5336 if (rtnh->rtnh_ifindex)
5337 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
5339 attrlen = rtnh_attrlen(rtnh);
5341 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
5343 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
5345 err = fib6_gw_from_attr(&r_cfg.fc_gateway, nla,
5352 r_cfg.fc_flags |= RTF_GATEWAY;
5355 err = ip6_route_del(&r_cfg, extack);
5360 rtnh = rtnh_next(rtnh, &remaining);
5366 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
5367 struct netlink_ext_ack *extack)
5369 struct fib6_config cfg;
5372 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
5377 !nexthop_find_by_id(sock_net(skb->sk), cfg.fc_nh_id)) {
5378 NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
5383 return ip6_route_multipath_del(&cfg, extack);
5385 cfg.fc_delete_all_nh = 1;
5386 return ip6_route_del(&cfg, extack);
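/* Note (added for clarity, my reading of the code): when the delete
 * request carries no RTA_MULTIPATH, fc_delete_all_nh = 1 asks
 * ip6_route_del() to treat the route as a whole, dropping all of its
 * sibling nexthops, rather than matching individual nexthops as the
 * multipath path above does.
 */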
5390 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
5391 struct netlink_ext_ack *extack)
5393 struct fib6_config cfg;
5396 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
5400 if (cfg.fc_metric == 0)
5401 cfg.fc_metric = IP6_RT_PRIO_USER;
5404 return ip6_route_multipath_add(&cfg, extack);
5406 return ip6_route_add(&cfg, GFP_KERNEL, extack);
5409 /* add the overhead of this fib6_nh to nexthop_len */
5410 static int rt6_nh_nlmsg_size(struct fib6_nh *nh, void *arg)
5412 int *nexthop_len = arg;
5414 *nexthop_len += nla_total_size(0) /* RTA_MULTIPATH */
5415 + NLA_ALIGN(sizeof(struct rtnexthop))
5416 + nla_total_size(16); /* RTA_GATEWAY */
5418 if (nh->fib_nh_lws) {
5420 *nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws); /* RTA_ENCAP */
5422 *nexthop_len += nla_total_size(2); /* RTA_ENCAP_TYPE */
5428 static size_t rt6_nlmsg_size(struct fib6_info *f6i)
5433 nexthop_len = nla_total_size(4); /* RTA_NH_ID */
5434 nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size,
5437 struct fib6_info *sibling, *next_sibling;
5438 struct fib6_nh *nh = f6i->fib6_nh;
5441 if (f6i->fib6_nsiblings) {
5442 rt6_nh_nlmsg_size(nh, &nexthop_len);
5444 list_for_each_entry_safe(sibling, next_sibling,
5445 &f6i->fib6_siblings, fib6_siblings) {
5446 rt6_nh_nlmsg_size(sibling->fib6_nh, &nexthop_len);
5449 nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
5452 return NLMSG_ALIGN(sizeof(struct rtmsg))
5453 + nla_total_size(16) /* RTA_SRC */
5454 + nla_total_size(16) /* RTA_DST */
5455 + nla_total_size(16) /* RTA_GATEWAY */
5456 + nla_total_size(16) /* RTA_PREFSRC */
5457 + nla_total_size(4) /* RTA_TABLE */
5458 + nla_total_size(4) /* RTA_IIF */
5459 + nla_total_size(4) /* RTA_OIF */
5460 + nla_total_size(4) /* RTA_PRIORITY */
5461 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
5462 + nla_total_size(sizeof(struct rta_cacheinfo))
5463 + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
5464 + nla_total_size(1) /* RTA_PREF */
5468 static int rt6_fill_node_nexthop(struct sk_buff *skb, struct nexthop *nh,
5469 unsigned char *flags)
5471 if (nexthop_is_multipath(nh)) {
5474 mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
5476 goto nla_put_failure;
5478 if (nexthop_mpath_fill_node(skb, nh, AF_INET6))
5479 goto nla_put_failure;
5481 nla_nest_end(skb, mp);
5483 struct fib6_nh *fib6_nh;
5485 fib6_nh = nexthop_fib6_nh(nh);
5486 if (fib_nexthop_info(skb, &fib6_nh->nh_common, AF_INET6,
5488 goto nla_put_failure;
5497 static int rt6_fill_node(struct net *net, struct sk_buff *skb,
5498 struct fib6_info *rt, struct dst_entry *dst,
5499 struct in6_addr *dest, struct in6_addr *src,
5500 int iif, int type, u32 portid, u32 seq,
5503 struct rt6_info *rt6 = (struct rt6_info *)dst;
5504 struct rt6key *rt6_dst, *rt6_src;
5505 u32 *pmetrics, table, rt6_flags;
5506 unsigned char nh_flags = 0;
5507 struct nlmsghdr *nlh;
5511 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
5516 rt6_dst = &rt6->rt6i_dst;
5517 rt6_src = &rt6->rt6i_src;
5518 rt6_flags = rt6->rt6i_flags;
5520 rt6_dst = &rt->fib6_dst;
5521 rt6_src = &rt->fib6_src;
5522 rt6_flags = rt->fib6_flags;
5525 rtm = nlmsg_data(nlh);
5526 rtm->rtm_family = AF_INET6;
5527 rtm->rtm_dst_len = rt6_dst->plen;
5528 rtm->rtm_src_len = rt6_src->plen;
5531 table = rt->fib6_table->tb6_id;
5533 table = RT6_TABLE_UNSPEC;
5534 rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
5535 if (nla_put_u32(skb, RTA_TABLE, table))
5536 goto nla_put_failure;
5538 rtm->rtm_type = rt->fib6_type;
5540 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
5541 rtm->rtm_protocol = rt->fib6_protocol;
5543 if (rt6_flags & RTF_CACHE)
5544 rtm->rtm_flags |= RTM_F_CLONED;
5547 if (nla_put_in6_addr(skb, RTA_DST, dest))
5548 goto nla_put_failure;
5549 rtm->rtm_dst_len = 128;
5550 } else if (rtm->rtm_dst_len)
5551 if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
5552 goto nla_put_failure;
5553 #ifdef CONFIG_IPV6_SUBTREES
5555 if (nla_put_in6_addr(skb, RTA_SRC, src))
5556 goto nla_put_failure;
5557 rtm->rtm_src_len = 128;
5558 } else if (rtm->rtm_src_len &&
5559 nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr))
5560 goto nla_put_failure;
5563 #ifdef CONFIG_IPV6_MROUTE
5564 if (ipv6_addr_is_multicast(&rt6_dst->addr)) {
5565 int err = ip6mr_get_route(net, skb, rtm, portid);
5570 goto nla_put_failure;
5573 if (nla_put_u32(skb, RTA_IIF, iif))
5574 goto nla_put_failure;
5576 struct in6_addr saddr_buf;
5577 if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 &&
5578 nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
5579 goto nla_put_failure;
5582 if (rt->fib6_prefsrc.plen) {
5583 struct in6_addr saddr_buf;
5584 saddr_buf = rt->fib6_prefsrc.addr;
5585 if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
5586 goto nla_put_failure;
5589 pmetrics = dst ? dst_metrics_ptr(dst) : rt->fib6_metrics->metrics;
5590 if (rtnetlink_put_metrics(skb, pmetrics) < 0)
5591 goto nla_put_failure;
5593 if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric))
5594 goto nla_put_failure;
5596 /* For multipath routes, walk the siblings list and add
5597 * each as a nexthop within RTA_MULTIPATH.
5600 if (rt6_flags & RTF_GATEWAY &&
5601 nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
5602 goto nla_put_failure;
5604 if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
5605 goto nla_put_failure;
5606 } else if (rt->fib6_nsiblings) {
5607 struct fib6_info *sibling, *next_sibling;
5610 mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
5612 goto nla_put_failure;
5614 if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
5615 rt->fib6_nh->fib_nh_weight, AF_INET6,
5617 goto nla_put_failure;
5619 list_for_each_entry_safe(sibling, next_sibling,
5620 &rt->fib6_siblings, fib6_siblings) {
5621 if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
5622 sibling->fib6_nh->fib_nh_weight,
5624 goto nla_put_failure;
5627 nla_nest_end(skb, mp);
5628 } else if (rt->nh) {
5629 if (nla_put_u32(skb, RTA_NH_ID, rt->nh->id))
5630 goto nla_put_failure;
5632 if (nexthop_is_blackhole(rt->nh))
5633 rtm->rtm_type = RTN_BLACKHOLE;
5635 if (READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode) &&
5636 rt6_fill_node_nexthop(skb, rt->nh, &nh_flags) < 0)
5637 goto nla_put_failure;
5639 rtm->rtm_flags |= nh_flags;
5641 if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common, AF_INET6,
5642 &nh_flags, false) < 0)
5643 goto nla_put_failure;
5645 rtm->rtm_flags |= nh_flags;
5648 if (rt6_flags & RTF_EXPIRES) {
5649 expires = dst ? dst->expires : rt->expires;
5655 rtm->rtm_flags |= RTM_F_OFFLOAD;
5657 rtm->rtm_flags |= RTM_F_TRAP;
5660 if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
5661 goto nla_put_failure;
5663 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags)))
5664 goto nla_put_failure;
5667 nlmsg_end(skb, nlh);
5671 nlmsg_cancel(skb, nlh);
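/* Illustrative sketch (not from the original file): shape of a typical
 * RTM_NEWROUTE message built above for a plain gateway route; the
 * attribute order follows the fill sequence and several attributes
 * are conditional:
 *
 *	struct rtmsg { rtm_family = AF_INET6, rtm_dst_len, ... }
 *	RTA_TABLE, RTA_DST, RTA_PREFSRC, RTA_METRICS, RTA_PRIORITY,
 *	RTA_GATEWAY (or RTA_MULTIPATH / RTA_NH_ID), RTA_OIF,
 *	RTA_CACHEINFO, RTA_PREF
 */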
5675 static int fib6_info_nh_uses_dev(struct fib6_nh *nh, void *arg)
5677 const struct net_device *dev = arg;
5679 if (nh->fib_nh_dev == dev)
5685 static bool fib6_info_uses_dev(const struct fib6_info *f6i,
5686 const struct net_device *dev)
5689 struct net_device *_dev = (struct net_device *)dev;
5691 return !!nexthop_for_each_fib6_nh(f6i->nh,
5692 fib6_info_nh_uses_dev,
5696 if (f6i->fib6_nh->fib_nh_dev == dev)
5699 if (f6i->fib6_nsiblings) {
5700 struct fib6_info *sibling, *next_sibling;
5702 list_for_each_entry_safe(sibling, next_sibling,
5703 &f6i->fib6_siblings, fib6_siblings) {
5704 if (sibling->fib6_nh->fib_nh_dev == dev)
5712 struct fib6_nh_exception_dump_walker {
5713 struct rt6_rtnl_dump_arg *dump;
5714 struct fib6_info *rt;
5720 static int rt6_nh_dump_exceptions(struct fib6_nh *nh, void *arg)
5722 struct fib6_nh_exception_dump_walker *w = arg;
5723 struct rt6_rtnl_dump_arg *dump = w->dump;
5724 struct rt6_exception_bucket *bucket;
5725 struct rt6_exception *rt6_ex;
5728 bucket = fib6_nh_get_excptn_bucket(nh, NULL);
5732 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
5733 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
5739 /* Expiration of entries doesn't bump sernum, insertion
5740 * does. Removal is triggered by insertion, so we can
5741 * rely on the fact that if entries change between two
5742 * partial dumps, this node is scanned again completely,
5743 * see rt6_insert_exception() and fib6_dump_table().
5745 * Count expired entries we go through as handled
5746 * entries that we'll skip next time, in case of a partial
5747 * node dump. Otherwise, if entries expire meanwhile,
5748 * we'll skip the wrong amount.
5750 if (rt6_check_expired(rt6_ex->rt6i)) {
5755 err = rt6_fill_node(dump->net, dump->skb, w->rt,
5756 &rt6_ex->rt6i->dst, NULL, NULL, 0,
5758 NETLINK_CB(dump->cb->skb).portid,
5759 dump->cb->nlh->nlmsg_seq, w->flags);
5771 /* Return -1 when done with the node, or the number of handled routes on a partial dump */
5772 int rt6_dump_route(struct fib6_info *rt, void *p_arg, unsigned int skip)
5774 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
5775 struct fib_dump_filter *filter = &arg->filter;
5776 unsigned int flags = NLM_F_MULTI;
5777 struct net *net = arg->net;
5780 if (rt == net->ipv6.fib6_null_entry)
5783 if ((filter->flags & RTM_F_PREFIX) &&
5784 !(rt->fib6_flags & RTF_PREFIX_RT)) {
5785 /* success since this is not a prefix route */
5788 if (filter->filter_set &&
5789 ((filter->rt_type && rt->fib6_type != filter->rt_type) ||
5790 (filter->dev && !fib6_info_uses_dev(rt, filter->dev)) ||
5791 (filter->protocol && rt->fib6_protocol != filter->protocol))) {
5795 if (filter->filter_set ||
5796 !filter->dump_routes || !filter->dump_exceptions) {
5797 flags |= NLM_F_DUMP_FILTERED;
5800 if (filter->dump_routes) {
5804 if (rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL,
5806 NETLINK_CB(arg->cb->skb).portid,
5807 arg->cb->nlh->nlmsg_seq, flags)) {
5814 if (filter->dump_exceptions) {
5815 struct fib6_nh_exception_dump_walker w = { .dump = arg,
5824 err = nexthop_for_each_fib6_nh(rt->nh,
5825 rt6_nh_dump_exceptions,
5828 err = rt6_nh_dump_exceptions(rt->fib6_nh, &w);
5833 return count += w.count;
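/* Note (added for clarity): on a partial dump the returned count tells
 * fib6_dump_table() how many routes/exceptions of this fib6_info were
 * already emitted, so the next pass can resume through the "skip"
 * argument; -1 means the node was dumped completely.
 */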
5839 static int inet6_rtm_valid_getroute_req(struct sk_buff *skb,
5840 const struct nlmsghdr *nlh,
5842 struct netlink_ext_ack *extack)
5847 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
5848 NL_SET_ERR_MSG_MOD(extack,
5849 "Invalid header for get route request");
5853 if (!netlink_strict_get_check(skb))
5854 return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
5855 rtm_ipv6_policy, extack);
5857 rtm = nlmsg_data(nlh);
5858 if ((rtm->rtm_src_len && rtm->rtm_src_len != 128) ||
5859 (rtm->rtm_dst_len && rtm->rtm_dst_len != 128) ||
5860 rtm->rtm_table || rtm->rtm_protocol || rtm->rtm_scope ||
5862 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get route request");
5865 if (rtm->rtm_flags & ~RTM_F_FIB_MATCH) {
5866 NL_SET_ERR_MSG_MOD(extack,
5867 "Invalid flags for get route request");
5871 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
5872 rtm_ipv6_policy, extack);
5876 if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
5877 (tb[RTA_DST] && !rtm->rtm_dst_len)) {
5878 NL_SET_ERR_MSG_MOD(extack, "rtm_src_len and rtm_dst_len must be 128 for IPv6");
5882 for (i = 0; i <= RTA_MAX; i++) {
5898 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get route request");
5906 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
5907 struct netlink_ext_ack *extack)
5909 struct net *net = sock_net(in_skb->sk);
5910 struct nlattr *tb[RTA_MAX+1];
5911 int err, iif = 0, oif = 0;
5912 struct fib6_info *from;
5913 struct dst_entry *dst;
5914 struct rt6_info *rt;
5915 struct sk_buff *skb;
5917 struct flowi6 fl6 = {};
5920 err = inet6_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
5925 rtm = nlmsg_data(nlh);
5926 fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
5927 fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
5930 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
5933 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
5937 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
5940 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
5944 iif = nla_get_u32(tb[RTA_IIF]);
5947 oif = nla_get_u32(tb[RTA_OIF]);
5950 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
5953 fl6.flowi6_uid = make_kuid(current_user_ns(),
5954 nla_get_u32(tb[RTA_UID]));
5956 fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
5959 fl6.fl6_sport = nla_get_be16(tb[RTA_SPORT]);
5962 fl6.fl6_dport = nla_get_be16(tb[RTA_DPORT]);
5964 if (tb[RTA_IP_PROTO]) {
5965 err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
5966 &fl6.flowi6_proto, AF_INET6,
5973 struct net_device *dev;
5978 dev = dev_get_by_index_rcu(net, iif);
5985 fl6.flowi6_iif = iif;
5987 if (!ipv6_addr_any(&fl6.saddr))
5988 flags |= RT6_LOOKUP_F_HAS_SADDR;
5990 dst = ip6_route_input_lookup(net, dev, &fl6, NULL, flags);
5994 fl6.flowi6_oif = oif;
5996 dst = ip6_route_output(net, NULL, &fl6);
6000 rt = container_of(dst, struct rt6_info, dst);
6001 if (rt->dst.error) {
6002 err = rt->dst.error;
6007 if (rt == net->ipv6.ip6_null_entry) {
6008 err = rt->dst.error;
6013 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
6020 skb_dst_set(skb, &rt->dst);
6023 from = rcu_dereference(rt->from);
6026 err = rt6_fill_node(net, skb, from, NULL, NULL, NULL,
6028 NETLINK_CB(in_skb).portid,
6031 err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
6032 &fl6.saddr, iif, RTM_NEWROUTE,
6033 NETLINK_CB(in_skb).portid,
6045 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
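/* Illustrative examples (not from the original file) of queries this
 * handler answers:
 *
 *	ip -6 route get 2001:db8::1 from 2001:db8::2 iif eth0
 *	ip -6 route get fibmatch 2001:db8::1
 *
 * With RTM_F_FIB_MATCH set (the "fibmatch" form), the matched FIB
 * entry is returned instead of the fully resolved dst entry.
 */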
6050 void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
6051 unsigned int nlm_flags)
6053 struct sk_buff *skb;
6054 struct net *net = info->nl_net;
6059 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
6061 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
6065 err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
6066 event, info->portid, seq, nlm_flags);
6068 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
6069 WARN_ON(err == -EMSGSIZE);
6073 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
6074 info->nlh, gfp_any());
6078 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
6081 void fib6_rt_update(struct net *net, struct fib6_info *rt,
6082 struct nl_info *info)
6084 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
6085 struct sk_buff *skb;
6088 /* call_fib6_entry_notifiers will be removed when in-kernel notifier
6089 * is implemented and supported for nexthop objects
6091 call_fib6_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, rt, NULL);
6093 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
6097 err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
6098 RTM_NEWROUTE, info->portid, seq, NLM_F_REPLACE);
6100 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
6101 WARN_ON(err == -EMSGSIZE);
6105 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
6106 info->nlh, gfp_any());
6110 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
6113 static int ip6_route_dev_notify(struct notifier_block *this,
6114 unsigned long event, void *ptr)
6116 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6117 struct net *net = dev_net(dev);
6119 if (!(dev->flags & IFF_LOOPBACK))
6122 if (event == NETDEV_REGISTER) {
6123 net->ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = dev;
6124 net->ipv6.ip6_null_entry->dst.dev = dev;
6125 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
6126 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6127 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
6128 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
6129 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
6130 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
6132 } else if (event == NETDEV_UNREGISTER &&
6133 dev->reg_state != NETREG_UNREGISTERED) {
6134 /* NETDEV_UNREGISTER can be fired multiple times by
6135  * netdev_wait_allrefs(). Make sure we only do this once.
6137 in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
6138 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6139 in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
6140 in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
6151 #ifdef CONFIG_PROC_FS
6152 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
6154 struct net *net = (struct net *)seq->private;
6155 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
6156 net->ipv6.rt6_stats->fib_nodes,
6157 net->ipv6.rt6_stats->fib_route_nodes,
6158 atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
6159 net->ipv6.rt6_stats->fib_rt_entries,
6160 net->ipv6.rt6_stats->fib_rt_cache,
6161 dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
6162 net->ipv6.rt6_stats->fib_discarded_routes);
6166 #endif /* CONFIG_PROC_FS */
6168 #ifdef CONFIG_SYSCTL
6170 static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
6171 void *buffer, size_t *lenp, loff_t *ppos)
6179 net = (struct net *)ctl->extra1;
6180 delay = net->ipv6.sysctl.flush_delay;
6181 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
6185 fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
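/* Illustrative example (not from the original file): writing this
 * sysctl triggers an immediate garbage-collection pass, e.g.:
 *
 *	sysctl -w net.ipv6.route.flush=1
 */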
6189 static struct ctl_table ipv6_route_table_template[] = {
6191 .procname = "flush",
6192 .data = &init_net.ipv6.sysctl.flush_delay,
6193 .maxlen = sizeof(int),
6195 .proc_handler = ipv6_sysctl_rtcache_flush
6198 .procname = "gc_thresh",
6199 .data = &ip6_dst_ops_template.gc_thresh,
6200 .maxlen = sizeof(int),
6202 .proc_handler = proc_dointvec,
6205 .procname = "max_size",
6206 .data = &init_net.ipv6.sysctl.ip6_rt_max_size,
6207 .maxlen = sizeof(int),
6209 .proc_handler = proc_dointvec,
6212 .procname = "gc_min_interval",
6213 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
6214 .maxlen = sizeof(int),
6216 .proc_handler = proc_dointvec_jiffies,
6219 .procname = "gc_timeout",
6220 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
6221 .maxlen = sizeof(int),
6223 .proc_handler = proc_dointvec_jiffies,
6226 .procname = "gc_interval",
6227 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
6228 .maxlen = sizeof(int),
6230 .proc_handler = proc_dointvec_jiffies,
6233 .procname = "gc_elasticity",
6234 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
6235 .maxlen = sizeof(int),
6237 .proc_handler = proc_dointvec,
6240 .procname = "mtu_expires",
6241 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
6242 .maxlen = sizeof(int),
6244 .proc_handler = proc_dointvec_jiffies,
6247 .procname = "min_adv_mss",
6248 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
6249 .maxlen = sizeof(int),
6251 .proc_handler = proc_dointvec,
6254 .procname = "gc_min_interval_ms",
6255 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
6256 .maxlen = sizeof(int),
6258 .proc_handler = proc_dointvec_ms_jiffies,
6261 .procname = "skip_notify_on_dev_down",
6262 .data = &init_net.ipv6.sysctl.skip_notify_on_dev_down,
6263 .maxlen = sizeof(int),
6265 .proc_handler = proc_dointvec_minmax,
6266 .extra1 = SYSCTL_ZERO,
6267 .extra2 = SYSCTL_ONE,
6272 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
6274 struct ctl_table *table;
6276 table = kmemdup(ipv6_route_table_template,
6277 sizeof(ipv6_route_table_template),
6281 table[0].data = &net->ipv6.sysctl.flush_delay;
6282 table[0].extra1 = net;
6283 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
6284 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
6285 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
6286 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
6287 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
6288 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
6289 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
6290 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
6291 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
6292 table[10].data = &net->ipv6.sysctl.skip_notify_on_dev_down;
6294 /* Don't export sysctls to unprivileged users */
6295 if (net->user_ns != &init_user_ns)
6296 table[0].procname = NULL;
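/* Note (added for clarity): the kmemdup'd template still points at
 * init_net's fields, so the table[N].data assignments above rebind
 * each entry to this netns; the indices must track the template
 * order exactly.
 */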
6303 static int __net_init ip6_route_net_init(struct net *net)
6307 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
6308 sizeof(net->ipv6.ip6_dst_ops));
6310 if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
6311 goto out_ip6_dst_ops;
6313 net->ipv6.fib6_null_entry = fib6_info_alloc(GFP_KERNEL, true);
6314 if (!net->ipv6.fib6_null_entry)
6315 goto out_ip6_dst_entries;
6316 memcpy(net->ipv6.fib6_null_entry, &fib6_null_entry_template,
6317 sizeof(*net->ipv6.fib6_null_entry));
6319 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
6320 sizeof(*net->ipv6.ip6_null_entry),
6322 if (!net->ipv6.ip6_null_entry)
6323 goto out_fib6_null_entry;
6324 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6325 dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
6326 ip6_template_metrics, true);
6327 INIT_LIST_HEAD(&net->ipv6.ip6_null_entry->rt6i_uncached);
6329 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6330 net->ipv6.fib6_has_custom_rules = false;
6331 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
6332 sizeof(*net->ipv6.ip6_prohibit_entry),
6334 if (!net->ipv6.ip6_prohibit_entry)
6335 goto out_ip6_null_entry;
6336 net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6337 dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
6338 ip6_template_metrics, true);
6339 INIT_LIST_HEAD(&net->ipv6.ip6_prohibit_entry->rt6i_uncached);
6341 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
6342 sizeof(*net->ipv6.ip6_blk_hole_entry),
6344 if (!net->ipv6.ip6_blk_hole_entry)
6345 goto out_ip6_prohibit_entry;
6346 net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6347 dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
6348 ip6_template_metrics, true);
6349 INIT_LIST_HEAD(&net->ipv6.ip6_blk_hole_entry->rt6i_uncached);
6350 #ifdef CONFIG_IPV6_SUBTREES
6351 net->ipv6.fib6_routes_require_src = 0;
6355 net->ipv6.sysctl.flush_delay = 0;
6356 net->ipv6.sysctl.ip6_rt_max_size = INT_MAX;
6357 net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
6358 net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
6359 net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
6360 net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
6361 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
6362 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
6363 net->ipv6.sysctl.skip_notify_on_dev_down = 0;
6365 atomic_set(&net->ipv6.ip6_rt_gc_expire, 30*HZ);
6371 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6372 out_ip6_prohibit_entry:
6373 kfree(net->ipv6.ip6_prohibit_entry);
6375 kfree(net->ipv6.ip6_null_entry);
6377 out_fib6_null_entry:
6378 kfree(net->ipv6.fib6_null_entry);
6379 out_ip6_dst_entries:
6380 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
6385 static void __net_exit ip6_route_net_exit(struct net *net)
6387 kfree(net->ipv6.fib6_null_entry);
6388 kfree(net->ipv6.ip6_null_entry);
6389 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6390 kfree(net->ipv6.ip6_prohibit_entry);
6391 kfree(net->ipv6.ip6_blk_hole_entry);
6393 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
6396 static int __net_init ip6_route_net_init_late(struct net *net)
6398 #ifdef CONFIG_PROC_FS
6399 if (!proc_create_net("ipv6_route", 0, net->proc_net,
6400 &ipv6_route_seq_ops,
6401 sizeof(struct ipv6_route_iter)))
6404 if (!proc_create_net_single("rt6_stats", 0444, net->proc_net,
6405 rt6_stats_seq_show, NULL)) {
6406 remove_proc_entry("ipv6_route", net->proc_net);
6413 static void __net_exit ip6_route_net_exit_late(struct net *net)
6415 #ifdef CONFIG_PROC_FS
6416 remove_proc_entry("ipv6_route", net->proc_net);
6417 remove_proc_entry("rt6_stats", net->proc_net);
6421 static struct pernet_operations ip6_route_net_ops = {
6422 .init = ip6_route_net_init,
6423 .exit = ip6_route_net_exit,
6426 static int __net_init ipv6_inetpeer_init(struct net *net)
6428 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
6432 inet_peer_base_init(bp);
6433 net->ipv6.peers = bp;
6437 static void __net_exit ipv6_inetpeer_exit(struct net *net)
6439 struct inet_peer_base *bp = net->ipv6.peers;
6441 net->ipv6.peers = NULL;
6442 inetpeer_invalidate_tree(bp);
6446 static struct pernet_operations ipv6_inetpeer_ops = {
6447 .init = ipv6_inetpeer_init,
6448 .exit = ipv6_inetpeer_exit,
6451 static struct pernet_operations ip6_route_net_late_ops = {
6452 .init = ip6_route_net_init_late,
6453 .exit = ip6_route_net_exit_late,
6456 static struct notifier_block ip6_route_dev_notifier = {
6457 .notifier_call = ip6_route_dev_notify,
6458 .priority = ADDRCONF_NOTIFY_PRIORITY - 10,
6461 void __init ip6_route_init_special_entries(void)
6463 /* Registration of the loopback device happens before this portion
6464  * of code runs, so the loopback reference in rt6_info is not taken
6465  * automatically; take it manually for init_net. */
6466 init_net.ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = init_net.loopback_dev;
6467 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
6468 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6469 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6470 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
6471 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6472 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
6473 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6477 #if IS_BUILTIN(CONFIG_IPV6)
6478 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
6479 DEFINE_BPF_ITER_FUNC(ipv6_route, struct bpf_iter_meta *meta, struct fib6_info *rt)
6481 BTF_ID_LIST(btf_fib6_info_id)
6482 BTF_ID(struct, fib6_info)
6484 static const struct bpf_iter_seq_info ipv6_route_seq_info = {
6485 .seq_ops = &ipv6_route_seq_ops,
6486 .init_seq_private = bpf_iter_init_seq_net,
6487 .fini_seq_private = bpf_iter_fini_seq_net,
6488 .seq_priv_size = sizeof(struct ipv6_route_iter),
6491 static struct bpf_iter_reg ipv6_route_reg_info = {
6492 .target = "ipv6_route",
6493 .ctx_arg_info_size = 1,
6495 { offsetof(struct bpf_iter__ipv6_route, rt),
6496 PTR_TO_BTF_ID_OR_NULL },
6498 .seq_info = &ipv6_route_seq_info,
6501 static int __init bpf_iter_register(void)
6503 ipv6_route_reg_info.ctx_arg_info[0].btf_id = *btf_fib6_info_id;
6504 return bpf_iter_reg_target(&ipv6_route_reg_info);
6507 static void bpf_iter_unregister(void)
6509 bpf_iter_unreg_target(&ipv6_route_reg_info);
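/* Illustrative example (not from the original file): with the
 * "ipv6_route" target registered, a bpf_iter program can be pinned
 * and read like a file:
 *
 *	bpftool iter pin ipv6_route_iter.o /sys/fs/bpf/ipv6_route
 *	cat /sys/fs/bpf/ipv6_route
 *
 * The object file name here is an assumption for the example.
 */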
6514 int __init ip6_route_init(void)
6520 ip6_dst_ops_template.kmem_cachep =
6521 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
6522 SLAB_HWCACHE_ALIGN, NULL);
6523 if (!ip6_dst_ops_template.kmem_cachep)
6526 ret = dst_entries_init(&ip6_dst_blackhole_ops);
6528 goto out_kmem_cache;
6530 ret = register_pernet_subsys(&ipv6_inetpeer_ops);
6532 goto out_dst_entries;
6534 ret = register_pernet_subsys(&ip6_route_net_ops);
6536 goto out_register_inetpeer;
6538 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
6542 goto out_register_subsys;
6548 ret = fib6_rules_init();
6552 ret = register_pernet_subsys(&ip6_route_net_late_ops);
6554 goto fib6_rules_init;
6556 ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWROUTE,
6557 inet6_rtm_newroute, NULL, 0);
6559 goto out_register_late_subsys;
6561 ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELROUTE,
6562 inet6_rtm_delroute, NULL, 0);
6564 goto out_register_late_subsys;
6566 ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE,
6567 inet6_rtm_getroute, NULL,
6568 RTNL_FLAG_DOIT_UNLOCKED);
6570 goto out_register_late_subsys;
6572 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
6574 goto out_register_late_subsys;
6576 #if IS_BUILTIN(CONFIG_IPV6)
6577 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
6578 ret = bpf_iter_register();
6580 goto out_register_late_subsys;
6584 for_each_possible_cpu(cpu) {
6585 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
6587 INIT_LIST_HEAD(&ul->head);
6588 spin_lock_init(&ul->lock);
6594 out_register_late_subsys:
6595 rtnl_unregister_all(PF_INET6);
6596 unregister_pernet_subsys(&ip6_route_net_late_ops);
6598 fib6_rules_cleanup();
6603 out_register_subsys:
6604 unregister_pernet_subsys(&ip6_route_net_ops);
6605 out_register_inetpeer:
6606 unregister_pernet_subsys(&ipv6_inetpeer_ops);
6608 dst_entries_destroy(&ip6_dst_blackhole_ops);
6610 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
6614 void ip6_route_cleanup(void)
6616 #if IS_BUILTIN(CONFIG_IPV6)
6617 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
6618 bpf_iter_unregister();
6621 unregister_netdevice_notifier(&ip6_route_dev_notifier);
6622 unregister_pernet_subsys(&ip6_route_net_late_ops);
6623 fib6_rules_cleanup();
6626 unregister_pernet_subsys(&ipv6_inetpeer_ops);
6627 unregister_pernet_subsys(&ip6_route_net_ops);
6628 dst_entries_destroy(&ip6_dst_blackhole_ops);
6629 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);