1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Linux IPv6 multicast routing support for BSD pim6sd
4 * Based on net/ipv4/ipmr.c.
6 * (c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
7 * LSIIT Laboratory, Strasbourg, France
8 * (c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
10 * Copyright (C)2007,2008 USAGI/WIDE Project
11 * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
14 #include <linux/uaccess.h>
15 #include <linux/types.h>
16 #include <linux/sched.h>
17 #include <linux/errno.h>
19 #include <linux/kernel.h>
20 #include <linux/fcntl.h>
21 #include <linux/stat.h>
22 #include <linux/socket.h>
23 #include <linux/inet.h>
24 #include <linux/netdevice.h>
25 #include <linux/inetdevice.h>
26 #include <linux/proc_fs.h>
27 #include <linux/seq_file.h>
28 #include <linux/init.h>
29 #include <linux/compat.h>
30 #include <linux/rhashtable.h>
31 #include <net/protocol.h>
32 #include <linux/skbuff.h>
34 #include <linux/notifier.h>
35 #include <linux/if_arp.h>
36 #include <net/checksum.h>
37 #include <net/netlink.h>
38 #include <net/fib_rules.h>
41 #include <net/ip6_route.h>
42 #include <linux/mroute6.h>
43 #include <linux/pim.h>
44 #include <net/addrconf.h>
45 #include <linux/netfilter_ipv6.h>
46 #include <linux/export.h>
47 #include <net/ip6_checksum.h>
48 #include <linux/netconf.h>
49 #include <net/ip_tunnels.h>
51 #include <linux/nospec.h>
54 struct fib_rule common;
61 /* Big lock, protecting the vif table, the mrt cache and the mroute socket state.
62    Note that changes are serialized via rtnl_lock.
65 static DEFINE_RWLOCK(mrt_lock);
67 /* Multicast router control variables */
69 /* Special spinlock for queue of unresolved entries */
70 static DEFINE_SPINLOCK(mfc_unres_lock);
72 /* We return to Alan's original scheme. The hash table of resolved
73    entries is changed only in process context and protected
74    by the weak lock mrt_lock. The queue of unresolved entries is
75    protected by the strong spinlock mfc_unres_lock.
77    This way the data path is entirely free of exclusive locks.
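/* Concretely (a summary of the scheme above, not new behaviour): control-plane
 * updates - table creation, MIF and MFC changes - run under RTNL and take
 * write_lock_bh(&mrt_lock), while the forwarding path only ever takes
 * read_lock(&mrt_lock) plus RCU for the resolved-entry hash, so packet
 * lookups never contend with each other.
 */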
80 static struct kmem_cache *mrt_cachep __read_mostly;
82 static struct mr_table *ip6mr_new_table(struct net *net, u32 id);
83 static void ip6mr_free_table(struct mr_table *mrt);
85 static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
86 struct net_device *dev, struct sk_buff *skb,
87 struct mfc6_cache *cache);
88 static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
89 mifi_t mifi, int assert);
90 static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
92 static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
93 static int ip6mr_rtm_dumproute(struct sk_buff *skb,
94 struct netlink_callback *cb);
95 static void mroute_clean_tables(struct mr_table *mrt, int flags);
96 static void ipmr_expire_process(struct timer_list *t);
98 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
99 #define ip6mr_for_each_table(mrt, net) \
100 list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)
102 static struct mr_table *ip6mr_mr_table_iter(struct net *net,
103 struct mr_table *mrt)
105 struct mr_table *ret;
108 ret = list_entry_rcu(net->ipv6.mr6_tables.next,
109 struct mr_table, list);
111 ret = list_entry_rcu(mrt->list.next,
112 struct mr_table, list);
114 if (&ret->list == &net->ipv6.mr6_tables)
119 static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
121 struct mr_table *mrt;
123 ip6mr_for_each_table(mrt, net) {
130 static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
131 struct mr_table **mrt)
134 struct ip6mr_result res;
135 struct fib_lookup_arg arg = {
137 .flags = FIB_LOOKUP_NOREF,
140 /* update flow if oif or iif point to device enslaved to l3mdev */
141 l3mdev_update_flow(net, flowi6_to_flowi(flp6));
143 err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
144 flowi6_to_flowi(flp6), 0, &arg);
151 static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
152 int flags, struct fib_lookup_arg *arg)
154 struct ip6mr_result *res = arg->result;
155 struct mr_table *mrt;
157 switch (rule->action) {
160 case FR_ACT_UNREACHABLE:
162 case FR_ACT_PROHIBIT:
164 case FR_ACT_BLACKHOLE:
169 arg->table = fib_rule_get_table(rule, arg);
171 mrt = ip6mr_get_table(rule->fr_net, arg->table);
178 static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
183 static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
187 static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
188 struct fib_rule_hdr *frh, struct nlattr **tb,
189 struct netlink_ext_ack *extack)
194 static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
200 static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
201 struct fib_rule_hdr *frh)
209 static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
210 .family = RTNL_FAMILY_IP6MR,
211 .rule_size = sizeof(struct ip6mr_rule),
212 .addr_size = sizeof(struct in6_addr),
213 .action = ip6mr_rule_action,
214 .match = ip6mr_rule_match,
215 .configure = ip6mr_rule_configure,
216 .compare = ip6mr_rule_compare,
217 .fill = ip6mr_rule_fill,
218 .nlgroup = RTNLGRP_IPV6_RULE,
219 .policy = ip6mr_rule_policy,
220 .owner = THIS_MODULE,
223 static int __net_init ip6mr_rules_init(struct net *net)
225 struct fib_rules_ops *ops;
226 struct mr_table *mrt;
229 ops = fib_rules_register(&ip6mr_rules_ops_template, net);
233 INIT_LIST_HEAD(&net->ipv6.mr6_tables);
235 mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
241 err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
245 net->ipv6.mr6_rules_ops = ops;
250 ip6mr_free_table(mrt);
253 fib_rules_unregister(ops);
257 static void __net_exit ip6mr_rules_exit(struct net *net)
259 struct mr_table *mrt, *next;
262 list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
263 list_del(&mrt->list);
264 ip6mr_free_table(mrt);
266 fib_rules_unregister(net->ipv6.mr6_rules_ops);
270 static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb)
272 return fib_rules_dump(net, nb, RTNL_FAMILY_IP6MR);
275 static unsigned int ip6mr_rules_seq_read(struct net *net)
277 return fib_rules_seq_read(net, RTNL_FAMILY_IP6MR);
280 bool ip6mr_rule_default(const struct fib_rule *rule)
282 return fib_rule_matchall(rule) && rule->action == FR_ACT_TO_TBL &&
283 rule->table == RT6_TABLE_DFLT && !rule->l3mdev;
285 EXPORT_SYMBOL(ip6mr_rule_default);
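/* When CONFIG_IPV6_MROUTE_MULTIPLE_TABLES is not set, the helpers below
 * collapse to the single per-netns table net->ipv6.mrt6: every lookup
 * returns that table and the fib-rules hooks become no-ops.
 */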
287 #define ip6mr_for_each_table(mrt, net) \
288 for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)
290 static struct mr_table *ip6mr_mr_table_iter(struct net *net,
291 struct mr_table *mrt)
294 return net->ipv6.mrt6;
298 static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
300 return net->ipv6.mrt6;
303 static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
304 struct mr_table **mrt)
306 *mrt = net->ipv6.mrt6;
310 static int __net_init ip6mr_rules_init(struct net *net)
312 struct mr_table *mrt;
314 mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
317 net->ipv6.mrt6 = mrt;
321 static void __net_exit ip6mr_rules_exit(struct net *net)
324 ip6mr_free_table(net->ipv6.mrt6);
325 net->ipv6.mrt6 = NULL;
329 static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb)
334 static unsigned int ip6mr_rules_seq_read(struct net *net)
340 static int ip6mr_hash_cmp(struct rhashtable_compare_arg *arg,
343 const struct mfc6_cache_cmp_arg *cmparg = arg->key;
344 struct mfc6_cache *c = (struct mfc6_cache *)ptr;
346 return !ipv6_addr_equal(&c->mf6c_mcastgrp, &cmparg->mf6c_mcastgrp) ||
347 !ipv6_addr_equal(&c->mf6c_origin, &cmparg->mf6c_origin);
350 static const struct rhashtable_params ip6mr_rht_params = {
351 .head_offset = offsetof(struct mr_mfc, mnode),
352 .key_offset = offsetof(struct mfc6_cache, cmparg),
353 .key_len = sizeof(struct mfc6_cache_cmp_arg),
355 .obj_cmpfn = ip6mr_hash_cmp,
356 .automatic_shrinking = true,
359 static void ip6mr_new_table_set(struct mr_table *mrt,
362 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
363 list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
367 static struct mfc6_cache_cmp_arg ip6mr_mr_table_ops_cmparg_any = {
368 .mf6c_origin = IN6ADDR_ANY_INIT,
369 .mf6c_mcastgrp = IN6ADDR_ANY_INIT,
372 static struct mr_table_ops ip6mr_mr_table_ops = {
373 .rht_params = &ip6mr_rht_params,
374 .cmparg_any = &ip6mr_mr_table_ops_cmparg_any,
377 static struct mr_table *ip6mr_new_table(struct net *net, u32 id)
379 struct mr_table *mrt;
381 mrt = ip6mr_get_table(net, id);
385 return mr_table_alloc(net, id, &ip6mr_mr_table_ops,
386 ipmr_expire_process, ip6mr_new_table_set);
389 static void ip6mr_free_table(struct mr_table *mrt)
391 del_timer_sync(&mrt->ipmr_expire_timer);
392 mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC |
393 MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC);
394 rhltable_destroy(&mrt->mfc_hash);
398 #ifdef CONFIG_PROC_FS
399 /* The /proc interfaces to multicast routing
400 * /proc/ip6_mr_cache /proc/ip6_mr_vif
403 static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
406 struct mr_vif_iter *iter = seq->private;
407 struct net *net = seq_file_net(seq);
408 struct mr_table *mrt;
410 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
412 return ERR_PTR(-ENOENT);
416 read_lock(&mrt_lock);
417 return mr_vif_seq_start(seq, pos);
420 static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
423 read_unlock(&mrt_lock);
426 static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
428 struct mr_vif_iter *iter = seq->private;
429 struct mr_table *mrt = iter->mrt;
431 if (v == SEQ_START_TOKEN) {
433 "Interface BytesIn PktsIn BytesOut PktsOut Flags\n");
435 const struct vif_device *vif = v;
436 const char *name = vif->dev ? vif->dev->name : "none";
439 "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
440 vif - mrt->vif_table,
441 name, vif->bytes_in, vif->pkt_in,
442 vif->bytes_out, vif->pkt_out,
448 static const struct seq_operations ip6mr_vif_seq_ops = {
449 .start = ip6mr_vif_seq_start,
450 .next = mr_vif_seq_next,
451 .stop = ip6mr_vif_seq_stop,
452 .show = ip6mr_vif_seq_show,
455 static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
457 struct net *net = seq_file_net(seq);
458 struct mr_table *mrt;
460 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
462 return ERR_PTR(-ENOENT);
464 return mr_mfc_seq_start(seq, pos, mrt, &mfc_unres_lock);
467 static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
471 if (v == SEQ_START_TOKEN) {
475 "Iif Pkts Bytes Wrong Oifs\n");
477 const struct mfc6_cache *mfc = v;
478 const struct mr_mfc_iter *it = seq->private;
479 struct mr_table *mrt = it->mrt;
481 seq_printf(seq, "%pI6 %pI6 %-3hd",
482 &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
485 if (it->cache != &mrt->mfc_unres_queue) {
486 seq_printf(seq, " %8lu %8lu %8lu",
487 mfc->_c.mfc_un.res.pkt,
488 mfc->_c.mfc_un.res.bytes,
489 mfc->_c.mfc_un.res.wrong_if);
490 for (n = mfc->_c.mfc_un.res.minvif;
491 n < mfc->_c.mfc_un.res.maxvif; n++) {
492 if (VIF_EXISTS(mrt, n) &&
493 mfc->_c.mfc_un.res.ttls[n] < 255)
496 mfc->_c.mfc_un.res.ttls[n]);
499 /* unresolved mfc_caches don't contain
500 * pkt, bytes and wrong_if values
502 seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
509 static const struct seq_operations ipmr_mfc_seq_ops = {
510 .start = ipmr_mfc_seq_start,
511 .next = mr_mfc_seq_next,
512 .stop = mr_mfc_seq_stop,
513 .show = ipmr_mfc_seq_show,
517 #ifdef CONFIG_IPV6_PIMSM_V2
519 static int pim6_rcv(struct sk_buff *skb)
521 struct pimreghdr *pim;
522 struct ipv6hdr *encap;
523 struct net_device *reg_dev = NULL;
524 struct net *net = dev_net(skb->dev);
525 struct mr_table *mrt;
526 struct flowi6 fl6 = {
527 .flowi6_iif = skb->dev->ifindex,
528 .flowi6_mark = skb->mark,
532 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
535 pim = (struct pimreghdr *)skb_transport_header(skb);
536 if (pim->type != ((PIM_VERSION << 4) | PIM_TYPE_REGISTER) ||
537 (pim->flags & PIM_NULL_REGISTER) ||
538 (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
539 sizeof(*pim), IPPROTO_PIM,
540 csum_partial((void *)pim, sizeof(*pim), 0)) &&
541 csum_fold(skb_checksum(skb, 0, skb->len, 0))))
544 /* check if the inner packet is destined to mcast group */
545 encap = (struct ipv6hdr *)(skb_transport_header(skb) +
548 if (!ipv6_addr_is_multicast(&encap->daddr) ||
549 encap->payload_len == 0 ||
550 ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
553 if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
555 reg_vif_num = mrt->mroute_reg_vif_num;
557 read_lock(&mrt_lock);
558 if (reg_vif_num >= 0)
559 reg_dev = mrt->vif_table[reg_vif_num].dev;
562 read_unlock(&mrt_lock);
567 skb->mac_header = skb->network_header;
568 skb_pull(skb, (u8 *)encap - skb->data);
569 skb_reset_network_header(skb);
570 skb->protocol = htons(ETH_P_IPV6);
571 skb->ip_summed = CHECKSUM_NONE;
573 skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
584 static const struct inet6_protocol pim6_protocol = {
588 /* Service routines creating virtual interfaces: PIMREG */
590 static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
591 struct net_device *dev)
593 struct net *net = dev_net(dev);
594 struct mr_table *mrt;
595 struct flowi6 fl6 = {
596 .flowi6_oif = dev->ifindex,
597 .flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
598 .flowi6_mark = skb->mark,
601 if (!pskb_inet_may_pull(skb))
604 if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
607 read_lock(&mrt_lock);
608 dev->stats.tx_bytes += skb->len;
609 dev->stats.tx_packets++;
610 ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
611 read_unlock(&mrt_lock);
616 dev->stats.tx_errors++;
621 static int reg_vif_get_iflink(const struct net_device *dev)
626 static const struct net_device_ops reg_vif_netdev_ops = {
627 .ndo_start_xmit = reg_vif_xmit,
628 .ndo_get_iflink = reg_vif_get_iflink,
631 static void reg_vif_setup(struct net_device *dev)
633 dev->type = ARPHRD_PIMREG;
634 dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8; /* Ethernet MTU minus encapsulating IPv6 + PIM register headers */
635 dev->flags = IFF_NOARP;
636 dev->netdev_ops = &reg_vif_netdev_ops;
637 dev->needs_free_netdev = true;
638 dev->features |= NETIF_F_NETNS_LOCAL;
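/* The pim6reg device created below is the PIM register interface: packets the
 * stack transmits on it are bounced to the daemon as MRT6MSG_WHOLEPKT reports
 * (reg_vif_xmit above), and pim6_rcv() feeds decapsulated PIM REGISTER
 * payloads back in through it.
 */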
641 static struct net_device *ip6mr_reg_vif(struct net *net, struct mr_table *mrt)
643 struct net_device *dev;
646 if (mrt->id == RT6_TABLE_DFLT)
647 sprintf(name, "pim6reg");
649 sprintf(name, "pim6reg%u", mrt->id);
651 dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
655 dev_net_set(dev, net);
657 if (register_netdevice(dev)) {
662 if (dev_open(dev, NULL))
669 unregister_netdevice(dev);
674 static int call_ip6mr_vif_entry_notifiers(struct net *net,
675 enum fib_event_type event_type,
676 struct vif_device *vif,
677 mifi_t vif_index, u32 tb_id)
679 return mr_call_vif_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
680 vif, vif_index, tb_id,
681 &net->ipv6.ipmr_seq);
684 static int call_ip6mr_mfc_entry_notifiers(struct net *net,
685 enum fib_event_type event_type,
686 struct mfc6_cache *mfc, u32 tb_id)
688 return mr_call_mfc_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
689 &mfc->_c, tb_id, &net->ipv6.ipmr_seq);
692 /* Delete a VIF entry */
693 static int mif6_delete(struct mr_table *mrt, int vifi, int notify,
694 struct list_head *head)
696 struct vif_device *v;
697 struct net_device *dev;
698 struct inet6_dev *in6_dev;
700 if (vifi < 0 || vifi >= mrt->maxvif)
701 return -EADDRNOTAVAIL;
703 v = &mrt->vif_table[vifi];
705 if (VIF_EXISTS(mrt, vifi))
706 call_ip6mr_vif_entry_notifiers(read_pnet(&mrt->net),
707 FIB_EVENT_VIF_DEL, v, vifi,
710 write_lock_bh(&mrt_lock);
715 write_unlock_bh(&mrt_lock);
716 return -EADDRNOTAVAIL;
719 #ifdef CONFIG_IPV6_PIMSM_V2
720 if (vifi == mrt->mroute_reg_vif_num)
721 mrt->mroute_reg_vif_num = -1;
724 if (vifi + 1 == mrt->maxvif) {
726 for (tmp = vifi - 1; tmp >= 0; tmp--) {
727 if (VIF_EXISTS(mrt, tmp))
730 mrt->maxvif = tmp + 1;
733 write_unlock_bh(&mrt_lock);
735 dev_set_allmulti(dev, -1);
737 in6_dev = __in6_dev_get(dev);
739 atomic_dec(&in6_dev->cnf.mc_forwarding);
740 inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
741 NETCONFA_MC_FORWARDING,
742 dev->ifindex, &in6_dev->cnf);
745 if ((v->flags & MIFF_REGISTER) && !notify)
746 unregister_netdevice_queue(dev, head);
752 static inline void ip6mr_cache_free_rcu(struct rcu_head *head)
754 struct mr_mfc *c = container_of(head, struct mr_mfc, rcu);
756 kmem_cache_free(mrt_cachep, (struct mfc6_cache *)c);
759 static inline void ip6mr_cache_free(struct mfc6_cache *c)
761 call_rcu(&c->_c.rcu, ip6mr_cache_free_rcu);
764 /* Destroy an unresolved cache entry, killing queued skbs
765 and reporting error to netlink readers.
768 static void ip6mr_destroy_unres(struct mr_table *mrt, struct mfc6_cache *c)
770 struct net *net = read_pnet(&mrt->net);
773 atomic_dec(&mrt->cache_resolve_queue_len);
775 while ((skb = skb_dequeue(&c->_c.mfc_un.unres.unresolved)) != NULL) {
776 if (ipv6_hdr(skb)->version == 0) {
777 struct nlmsghdr *nlh = skb_pull(skb,
778 sizeof(struct ipv6hdr));
779 nlh->nlmsg_type = NLMSG_ERROR;
780 nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
781 skb_trim(skb, nlh->nlmsg_len);
782 ((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
783 rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
792 /* Timer process for all the unresolved queue. */
794 static void ipmr_do_expire_process(struct mr_table *mrt)
796 unsigned long now = jiffies;
797 unsigned long expires = 10 * HZ;
798 struct mr_mfc *c, *next;
800 list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
801 if (time_after(c->mfc_un.unres.expires, now)) {
803 unsigned long interval = c->mfc_un.unres.expires - now;
804 if (interval < expires)
810 mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
811 ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
814 if (!list_empty(&mrt->mfc_unres_queue))
815 mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
818 static void ipmr_expire_process(struct timer_list *t)
820 struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);
822 if (!spin_trylock(&mfc_unres_lock)) {
823 mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
827 if (!list_empty(&mrt->mfc_unres_queue))
828 ipmr_do_expire_process(mrt);
830 spin_unlock(&mfc_unres_lock);
833 /* Fill the oifs list. Called under write-locked mrt_lock. */
835 static void ip6mr_update_thresholds(struct mr_table *mrt,
836 struct mr_mfc *cache,
841 cache->mfc_un.res.minvif = MAXMIFS;
842 cache->mfc_un.res.maxvif = 0;
843 memset(cache->mfc_un.res.ttls, 255, MAXMIFS);
845 for (vifi = 0; vifi < mrt->maxvif; vifi++) {
846 if (VIF_EXISTS(mrt, vifi) &&
847 ttls[vifi] && ttls[vifi] < 255) {
848 cache->mfc_un.res.ttls[vifi] = ttls[vifi];
849 if (cache->mfc_un.res.minvif > vifi)
850 cache->mfc_un.res.minvif = vifi;
851 if (cache->mfc_un.res.maxvif <= vifi)
852 cache->mfc_un.res.maxvif = vifi + 1;
855 cache->mfc_un.res.lastuse = jiffies;
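/* mif6_add() installs a new virtual interface from the struct mif6ctl passed
 * by the daemon via MRT6_ADD_MIF: it resolves (or creates, for MIFF_REGISTER)
 * the underlying net_device, switches it to allmulti and publishes the entry
 * under mrt_lock.
 */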
858 static int mif6_add(struct net *net, struct mr_table *mrt,
859 struct mif6ctl *vifc, int mrtsock)
861 int vifi = vifc->mif6c_mifi;
862 struct vif_device *v = &mrt->vif_table[vifi];
863 struct net_device *dev;
864 struct inet6_dev *in6_dev;
868 if (VIF_EXISTS(mrt, vifi))
871 switch (vifc->mif6c_flags) {
872 #ifdef CONFIG_IPV6_PIMSM_V2
875 * Special Purpose VIF in PIM
876 * All the packets will be sent to the daemon
878 if (mrt->mroute_reg_vif_num >= 0)
880 dev = ip6mr_reg_vif(net, mrt);
883 err = dev_set_allmulti(dev, 1);
885 unregister_netdevice(dev);
892 dev = dev_get_by_index(net, vifc->mif6c_pifi);
894 return -EADDRNOTAVAIL;
895 err = dev_set_allmulti(dev, 1);
905 in6_dev = __in6_dev_get(dev);
907 atomic_inc(&in6_dev->cnf.mc_forwarding);
908 inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
909 NETCONFA_MC_FORWARDING,
910 dev->ifindex, &in6_dev->cnf);
913 /* Fill in the VIF structures */
914 vif_device_init(v, dev, vifc->vifc_rate_limit, vifc->vifc_threshold,
915 vifc->mif6c_flags | (!mrtsock ? VIFF_STATIC : 0),
918 /* And finish the update by writing the critical data */
919 write_lock_bh(&mrt_lock);
921 #ifdef CONFIG_IPV6_PIMSM_V2
922 if (v->flags & MIFF_REGISTER)
923 mrt->mroute_reg_vif_num = vifi;
925 if (vifi + 1 > mrt->maxvif)
926 mrt->maxvif = vifi + 1;
927 write_unlock_bh(&mrt_lock);
928 call_ip6mr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD,
933 static struct mfc6_cache *ip6mr_cache_find(struct mr_table *mrt,
934 const struct in6_addr *origin,
935 const struct in6_addr *mcastgrp)
937 struct mfc6_cache_cmp_arg arg = {
938 .mf6c_origin = *origin,
939 .mf6c_mcastgrp = *mcastgrp,
942 return mr_mfc_find(mrt, &arg);
945 /* Look for a (*,G) entry */
946 static struct mfc6_cache *ip6mr_cache_find_any(struct mr_table *mrt,
947 struct in6_addr *mcastgrp,
950 struct mfc6_cache_cmp_arg arg = {
951 .mf6c_origin = in6addr_any,
952 .mf6c_mcastgrp = *mcastgrp,
955 if (ipv6_addr_any(mcastgrp))
956 return mr_mfc_find_any_parent(mrt, mifi);
957 return mr_mfc_find_any(mrt, mifi, &arg);
960 /* Look for a (S,G,iif) entry if parent != -1 */
961 static struct mfc6_cache *
962 ip6mr_cache_find_parent(struct mr_table *mrt,
963 const struct in6_addr *origin,
964 const struct in6_addr *mcastgrp,
967 struct mfc6_cache_cmp_arg arg = {
968 .mf6c_origin = *origin,
969 .mf6c_mcastgrp = *mcastgrp,
972 return mr_mfc_find_parent(mrt, &arg, parent);
975 /* Allocate a multicast cache entry */
976 static struct mfc6_cache *ip6mr_cache_alloc(void)
978 struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
981 c->_c.mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
982 c->_c.mfc_un.res.minvif = MAXMIFS;
983 c->_c.free = ip6mr_cache_free_rcu;
984 refcount_set(&c->_c.mfc_un.res.refcount, 1);
988 static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
990 struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
993 skb_queue_head_init(&c->_c.mfc_un.unres.unresolved);
994 c->_c.mfc_un.unres.expires = jiffies + 10 * HZ;
999 /* A cache entry has gone from the unresolved queue into a resolved state. */
1002 static void ip6mr_cache_resolve(struct net *net, struct mr_table *mrt,
1003 struct mfc6_cache *uc, struct mfc6_cache *c)
1005 struct sk_buff *skb;
1008 * Play the pending entries through our router
1011 while ((skb = __skb_dequeue(&uc->_c.mfc_un.unres.unresolved))) {
1012 if (ipv6_hdr(skb)->version == 0) {
1013 struct nlmsghdr *nlh = skb_pull(skb,
1014 sizeof(struct ipv6hdr));
1016 if (mr_fill_mroute(mrt, skb, &c->_c,
1017 nlmsg_data(nlh)) > 0) {
1018 nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
1020 nlh->nlmsg_type = NLMSG_ERROR;
1021 nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
1022 skb_trim(skb, nlh->nlmsg_len);
1023 ((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
1025 rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
1027 ip6_mr_forward(net, mrt, skb->dev, skb, c);
1032 * Bounce a cache query up to pim6sd and netlink.
1034 * Called under mrt_lock.
1037 static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
1038 mifi_t mifi, int assert)
1040 struct sock *mroute6_sk;
1041 struct sk_buff *skb;
1042 struct mrt6msg *msg;
1045 #ifdef CONFIG_IPV6_PIMSM_V2
1046 if (assert == MRT6MSG_WHOLEPKT)
1047 skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
1051 skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);
1056 /* I suppose that internal messages
1057 * do not require checksums */
1059 skb->ip_summed = CHECKSUM_UNNECESSARY;
1061 #ifdef CONFIG_IPV6_PIMSM_V2
1062 if (assert == MRT6MSG_WHOLEPKT) {
1063 /* Ugly, but we have no choice with this interface.
1064 Duplicate old header, fix length etc.
1065 And all this only to mangle msg->im6_msgtype and
1066 to set msg->im6_mbz to "mbz" :-)
1068 skb_push(skb, -skb_network_offset(pkt));
1070 skb_push(skb, sizeof(*msg));
1071 skb_reset_transport_header(skb);
1072 msg = (struct mrt6msg *)skb_transport_header(skb);
1074 msg->im6_msgtype = MRT6MSG_WHOLEPKT;
1075 msg->im6_mif = mrt->mroute_reg_vif_num;
1077 msg->im6_src = ipv6_hdr(pkt)->saddr;
1078 msg->im6_dst = ipv6_hdr(pkt)->daddr;
1080 skb->ip_summed = CHECKSUM_UNNECESSARY;
1085 * Copy the IP header
1088 skb_put(skb, sizeof(struct ipv6hdr));
1089 skb_reset_network_header(skb);
1090 skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));
1095 skb_put(skb, sizeof(*msg));
1096 skb_reset_transport_header(skb);
1097 msg = (struct mrt6msg *)skb_transport_header(skb);
1100 msg->im6_msgtype = assert;
1101 msg->im6_mif = mifi;
1103 msg->im6_src = ipv6_hdr(pkt)->saddr;
1104 msg->im6_dst = ipv6_hdr(pkt)->daddr;
1106 skb_dst_set(skb, dst_clone(skb_dst(pkt)));
1107 skb->ip_summed = CHECKSUM_UNNECESSARY;
1111 mroute6_sk = rcu_dereference(mrt->mroute_sk);
1118 mrt6msg_netlink_event(mrt, skb);
1120 /* Deliver to user space multicast routing algorithms */
1121 ret = sock_queue_rcv_skb(mroute6_sk, skb);
1124 net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
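/* A user-space sketch (illustrative only, not part of this file): the daemon
 * that issued MRT6_INIT receives these reports on its raw ICMPv6 socket and
 * typically reacts to cache misses roughly like
 *
 *	n = recv(mrt_sock, buf, sizeof(buf), 0);
 *	msg = (struct mrt6msg *)buf;
 *	if (msg->im6_msgtype == MRT6MSG_NOCACHE)
 *		install an MFC entry for (im6_src, im6_dst) via MRT6_ADD_MFC;
 */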
1131 /* Queue a packet for resolution. The packet is attached to an unresolved cache entry under mfc_unres_lock. */
1132 static int ip6mr_cache_unresolved(struct mr_table *mrt, mifi_t mifi,
1133 struct sk_buff *skb, struct net_device *dev)
1135 struct mfc6_cache *c;
1139 spin_lock_bh(&mfc_unres_lock);
1140 list_for_each_entry(c, &mrt->mfc_unres_queue, _c.list) {
1141 if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
1142 ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
1150 * Create a new entry if allowable
1153 c = ip6mr_cache_alloc_unres();
1155 spin_unlock_bh(&mfc_unres_lock);
1161 /* Fill in the new cache entry */
1162 c->_c.mfc_parent = -1;
1163 c->mf6c_origin = ipv6_hdr(skb)->saddr;
1164 c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;
1167 * Reflect first query at pim6sd
1169 err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
1171 /* If the report failed, throw the cache entry out. */
1174 spin_unlock_bh(&mfc_unres_lock);
1176 ip6mr_cache_free(c);
1181 atomic_inc(&mrt->cache_resolve_queue_len);
1182 list_add(&c->_c.list, &mrt->mfc_unres_queue);
1183 mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1185 ipmr_do_expire_process(mrt);
1188 /* See if we can append the packet */
1189 if (c->_c.mfc_un.unres.unresolved.qlen > 3) {
1195 skb->skb_iif = dev->ifindex;
1197 skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb);
1201 spin_unlock_bh(&mfc_unres_lock);
1206 * MFC6 cache manipulation by user space
1209 static int ip6mr_mfc_delete(struct mr_table *mrt, struct mf6cctl *mfc,
1212 struct mfc6_cache *c;
1214 /* The entries are added/deleted only under RTNL */
1216 c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr,
1217 &mfc->mf6cc_mcastgrp.sin6_addr, parent);
1221 rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ip6mr_rht_params);
1222 list_del_rcu(&c->_c.list);
1224 call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
1225 FIB_EVENT_ENTRY_DEL, c, mrt->id);
1226 mr6_netlink_event(mrt, c, RTM_DELROUTE);
1227 mr_cache_put(&c->_c);
1231 static int ip6mr_device_event(struct notifier_block *this,
1232 unsigned long event, void *ptr)
1234 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1235 struct net *net = dev_net(dev);
1236 struct mr_table *mrt;
1237 struct vif_device *v;
1240 if (event != NETDEV_UNREGISTER)
1243 ip6mr_for_each_table(mrt, net) {
1244 v = &mrt->vif_table[0];
1245 for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1247 mif6_delete(mrt, ct, 1, NULL);
1254 static unsigned int ip6mr_seq_read(struct net *net)
1258 return net->ipv6.ipmr_seq + ip6mr_rules_seq_read(net);
1261 static int ip6mr_dump(struct net *net, struct notifier_block *nb)
1263 return mr_dump(net, nb, RTNL_FAMILY_IP6MR, ip6mr_rules_dump,
1264 ip6mr_mr_table_iter, &mrt_lock);
1267 static struct notifier_block ip6_mr_notifier = {
1268 .notifier_call = ip6mr_device_event
1271 static const struct fib_notifier_ops ip6mr_notifier_ops_template = {
1272 .family = RTNL_FAMILY_IP6MR,
1273 .fib_seq_read = ip6mr_seq_read,
1274 .fib_dump = ip6mr_dump,
1275 .owner = THIS_MODULE,
1278 static int __net_init ip6mr_notifier_init(struct net *net)
1280 struct fib_notifier_ops *ops;
1282 net->ipv6.ipmr_seq = 0;
1284 ops = fib_notifier_ops_register(&ip6mr_notifier_ops_template, net);
1286 return PTR_ERR(ops);
1288 net->ipv6.ip6mr_notifier_ops = ops;
1293 static void __net_exit ip6mr_notifier_exit(struct net *net)
1295 fib_notifier_ops_unregister(net->ipv6.ip6mr_notifier_ops);
1296 net->ipv6.ip6mr_notifier_ops = NULL;
1299 /* Setup for IP multicast routing */
1300 static int __net_init ip6mr_net_init(struct net *net)
1304 err = ip6mr_notifier_init(net);
1308 err = ip6mr_rules_init(net);
1310 goto ip6mr_rules_fail;
1312 #ifdef CONFIG_PROC_FS
1314 if (!proc_create_net("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_seq_ops,
1315 sizeof(struct mr_vif_iter)))
1317 if (!proc_create_net("ip6_mr_cache", 0, net->proc_net, &ipmr_mfc_seq_ops,
1318 sizeof(struct mr_mfc_iter)))
1319 goto proc_cache_fail;
1324 #ifdef CONFIG_PROC_FS
1326 remove_proc_entry("ip6_mr_vif", net->proc_net);
1328 ip6mr_rules_exit(net);
1331 ip6mr_notifier_exit(net);
1335 static void __net_exit ip6mr_net_exit(struct net *net)
1337 #ifdef CONFIG_PROC_FS
1338 remove_proc_entry("ip6_mr_cache", net->proc_net);
1339 remove_proc_entry("ip6_mr_vif", net->proc_net);
1341 ip6mr_rules_exit(net);
1342 ip6mr_notifier_exit(net);
1345 static struct pernet_operations ip6mr_net_ops = {
1346 .init = ip6mr_net_init,
1347 .exit = ip6mr_net_exit,
1350 int __init ip6_mr_init(void)
1354 mrt_cachep = kmem_cache_create("ip6_mrt_cache",
1355 sizeof(struct mfc6_cache),
1356 0, SLAB_HWCACHE_ALIGN,
1361 err = register_pernet_subsys(&ip6mr_net_ops);
1363 goto reg_pernet_fail;
1365 err = register_netdevice_notifier(&ip6_mr_notifier);
1367 goto reg_notif_fail;
1368 #ifdef CONFIG_IPV6_PIMSM_V2
1369 if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
1370 pr_err("%s: can't add PIM protocol\n", __func__);
1372 goto add_proto_fail;
1375 err = rtnl_register_module(THIS_MODULE, RTNL_FAMILY_IP6MR, RTM_GETROUTE,
1376 NULL, ip6mr_rtm_dumproute, 0);
1380 #ifdef CONFIG_IPV6_PIMSM_V2
1381 inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
1383 unregister_netdevice_notifier(&ip6_mr_notifier);
1386 unregister_pernet_subsys(&ip6mr_net_ops);
1388 kmem_cache_destroy(mrt_cachep);
1392 void ip6_mr_cleanup(void)
1394 rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE);
1395 #ifdef CONFIG_IPV6_PIMSM_V2
1396 inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
1398 unregister_netdevice_notifier(&ip6_mr_notifier);
1399 unregister_pernet_subsys(&ip6mr_net_ops);
1400 kmem_cache_destroy(mrt_cachep);
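/* ip6mr_mfc_add() services MRT6_ADD_MFC(_PROXY): it either updates the oif
 * thresholds of an existing (S,G) entry or allocates a new one, inserts it
 * into the rhashtable, and then replays any packets that were queued on a
 * matching unresolved entry.
 */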
1403 static int ip6mr_mfc_add(struct net *net, struct mr_table *mrt,
1404 struct mf6cctl *mfc, int mrtsock, int parent)
1406 unsigned char ttls[MAXMIFS];
1407 struct mfc6_cache *uc, *c;
1412 if (mfc->mf6cc_parent >= MAXMIFS)
1415 memset(ttls, 255, MAXMIFS);
1416 for (i = 0; i < MAXMIFS; i++) {
1417 if (IF_ISSET(i, &mfc->mf6cc_ifset))
1421 /* The entries are added/deleted only under RTNL */
1423 c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr,
1424 &mfc->mf6cc_mcastgrp.sin6_addr, parent);
1427 write_lock_bh(&mrt_lock);
1428 c->_c.mfc_parent = mfc->mf6cc_parent;
1429 ip6mr_update_thresholds(mrt, &c->_c, ttls);
1431 c->_c.mfc_flags |= MFC_STATIC;
1432 write_unlock_bh(&mrt_lock);
1433 call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE,
1435 mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1439 if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
1440 !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
1443 c = ip6mr_cache_alloc();
1447 c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
1448 c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
1449 c->_c.mfc_parent = mfc->mf6cc_parent;
1450 ip6mr_update_thresholds(mrt, &c->_c, ttls);
1452 c->_c.mfc_flags |= MFC_STATIC;
1454 err = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->_c.mnode,
1457 pr_err("ip6mr: rhtable insert error %d\n", err);
1458 ip6mr_cache_free(c);
1461 list_add_tail_rcu(&c->_c.list, &mrt->mfc_cache_list);
1463 /* Check to see if we resolved a queued entry. If so we
1464 * need to send the queued frames on and tidy up.
1467 spin_lock_bh(&mfc_unres_lock);
1468 list_for_each_entry(_uc, &mrt->mfc_unres_queue, list) {
1469 uc = (struct mfc6_cache *)_uc;
1470 if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
1471 ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
1472 list_del(&_uc->list);
1473 atomic_dec(&mrt->cache_resolve_queue_len);
1478 if (list_empty(&mrt->mfc_unres_queue))
1479 del_timer(&mrt->ipmr_expire_timer);
1480 spin_unlock_bh(&mfc_unres_lock);
1483 ip6mr_cache_resolve(net, mrt, uc, c);
1484 ip6mr_cache_free(uc);
1486 call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD,
1488 mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1493 * Close the multicast socket, and clear the vif tables etc
1496 static void mroute_clean_tables(struct mr_table *mrt, int flags)
1498 struct mr_mfc *c, *tmp;
1502 /* Shut down all active vif entries */
1503 if (flags & (MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC)) {
1504 for (i = 0; i < mrt->maxvif; i++) {
1505 if (((mrt->vif_table[i].flags & VIFF_STATIC) &&
1506 !(flags & MRT6_FLUSH_MIFS_STATIC)) ||
1507 (!(mrt->vif_table[i].flags & VIFF_STATIC) && !(flags & MRT6_FLUSH_MIFS)))
1509 mif6_delete(mrt, i, 0, &list);
1511 unregister_netdevice_many(&list);
1514 /* Wipe the cache */
1515 if (flags & (MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC)) {
1516 list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
1517 if (((c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC_STATIC)) ||
1518 (!(c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC)))
1520 rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params);
1521 list_del_rcu(&c->list);
1522 call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
1523 FIB_EVENT_ENTRY_DEL,
1524 (struct mfc6_cache *)c, mrt->id);
1525 mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
1530 if (flags & MRT6_FLUSH_MFC) {
1531 if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
1532 spin_lock_bh(&mfc_unres_lock);
1533 list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
1535 mr6_netlink_event(mrt, (struct mfc6_cache *)c,
1537 ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
1539 spin_unlock_bh(&mfc_unres_lock);
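/* ip6mr_sk_init() registers the calling socket as the single mroute control
 * socket of this table (MRT6_INIT ends up here) and bumps mc_forwarding;
 * ip6mr_sk_done() undoes this and flushes the non-static MIF and MFC state.
 */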
1544 static int ip6mr_sk_init(struct mr_table *mrt, struct sock *sk)
1547 struct net *net = sock_net(sk);
1550 write_lock_bh(&mrt_lock);
1551 if (rtnl_dereference(mrt->mroute_sk)) {
1554 rcu_assign_pointer(mrt->mroute_sk, sk);
1555 sock_set_flag(sk, SOCK_RCU_FREE);
1556 atomic_inc(&net->ipv6.devconf_all->mc_forwarding);
1558 write_unlock_bh(&mrt_lock);
1561 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
1562 NETCONFA_MC_FORWARDING,
1563 NETCONFA_IFINDEX_ALL,
1564 net->ipv6.devconf_all);
1570 int ip6mr_sk_done(struct sock *sk)
1573 struct net *net = sock_net(sk);
1574 struct mr_table *mrt;
1576 if (sk->sk_type != SOCK_RAW ||
1577 inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1581 ip6mr_for_each_table(mrt, net) {
1582 if (sk == rtnl_dereference(mrt->mroute_sk)) {
1583 write_lock_bh(&mrt_lock);
1584 RCU_INIT_POINTER(mrt->mroute_sk, NULL);
1585 /* Note that mroute_sk had SOCK_RCU_FREE set,
1586 * so the RCU grace period before sk freeing
1587 * is guaranteed by sk_destruct()
1589 atomic_dec(&net->ipv6.devconf_all->mc_forwarding);
1590 write_unlock_bh(&mrt_lock);
1591 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
1592 NETCONFA_MC_FORWARDING,
1593 NETCONFA_IFINDEX_ALL,
1594 net->ipv6.devconf_all);
1596 mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MFC);
1606 bool mroute6_is_socket(struct net *net, struct sk_buff *skb)
1608 struct mr_table *mrt;
1609 struct flowi6 fl6 = {
1610 .flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
1611 .flowi6_oif = skb->dev->ifindex,
1612 .flowi6_mark = skb->mark,
1615 if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
1618 return rcu_access_pointer(mrt->mroute_sk);
1620 EXPORT_SYMBOL(mroute6_is_socket);
1623 * Socket options and virtual interface manipulation. The whole
1624 * virtual interface system is a complete heap, but unfortunately
1625 * that's how BSD mrouted happens to think. Maybe one day with a proper
1626 * MOSPF/PIM router set up we can clean this up.
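/* A minimal user-space sketch of this interface (illustrative only, error
 * handling omitted; "eth0" and the MIF index are example values):
 *
 *	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	int one = 1;
 *	setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &one, sizeof(one));
 *
 *	struct mif6ctl mc = {
 *		.mif6c_mifi = 0,
 *		.mif6c_pifi = if_nametoindex("eth0"),
 *	};
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &mc, sizeof(mc));
 *
 *	struct mf6cctl fc = { .mf6cc_parent = 0, ... };  (S,G) and the oif set
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MFC, &fc, sizeof(fc));
 */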
1629 int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
1631 int ret, parent = 0;
1635 struct net *net = sock_net(sk);
1636 struct mr_table *mrt;
1638 if (sk->sk_type != SOCK_RAW ||
1639 inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1642 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1646 if (optname != MRT6_INIT) {
1647 if (sk != rcu_access_pointer(mrt->mroute_sk) &&
1648 !ns_capable(net->user_ns, CAP_NET_ADMIN))
1654 if (optlen < sizeof(int))
1657 return ip6mr_sk_init(mrt, sk);
1660 return ip6mr_sk_done(sk);
1663 if (optlen < sizeof(vif))
1665 if (copy_from_user(&vif, optval, sizeof(vif)))
1667 if (vif.mif6c_mifi >= MAXMIFS)
1670 ret = mif6_add(net, mrt, &vif,
1671 sk == rtnl_dereference(mrt->mroute_sk));
1676 if (optlen < sizeof(mifi_t))
1678 if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
1681 ret = mif6_delete(mrt, mifi, 0, NULL);
1686 * Manipulate the forwarding caches. These live
1687 * in a sort of kernel/user symbiosis.
1693 case MRT6_ADD_MFC_PROXY:
1694 case MRT6_DEL_MFC_PROXY:
1695 if (optlen < sizeof(mfc))
1697 if (copy_from_user(&mfc, optval, sizeof(mfc)))
1700 parent = mfc.mf6cc_parent;
1702 if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
1703 ret = ip6mr_mfc_delete(mrt, &mfc, parent);
1705 ret = ip6mr_mfc_add(net, mrt, &mfc,
1707 rtnl_dereference(mrt->mroute_sk),
1716 if (optlen != sizeof(flags))
1718 if (get_user(flags, (int __user *)optval))
1721 mroute_clean_tables(mrt, flags);
1727 /* Control PIM assert (activating PIM also activates assert) */
1733 if (optlen != sizeof(v))
1735 if (get_user(v, (int __user *)optval))
1737 mrt->mroute_do_assert = v;
1741 #ifdef CONFIG_IPV6_PIMSM_V2
1746 if (optlen != sizeof(v))
1748 if (get_user(v, (int __user *)optval))
1753 if (v != mrt->mroute_do_pim) {
1754 mrt->mroute_do_pim = v;
1755 mrt->mroute_do_assert = v;
1762 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
1767 if (optlen != sizeof(u32))
1769 if (get_user(v, (u32 __user *)optval))
1771 /* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
1772 if (v != RT_TABLE_DEFAULT && v >= 100000000)
1774 if (sk == rcu_access_pointer(mrt->mroute_sk))
1779 mrt = ip6mr_new_table(net, v);
1783 raw6_sk(sk)->ip6mr_table = v;
1789 /* Spurious command, or MRT6_VERSION which you cannot set. */
1793 return -ENOPROTOOPT;
1798 /* Getsockopt support for the multicast routing system. */
1801 int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
1806 struct net *net = sock_net(sk);
1807 struct mr_table *mrt;
1809 if (sk->sk_type != SOCK_RAW ||
1810 inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1813 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1821 #ifdef CONFIG_IPV6_PIMSM_V2
1823 val = mrt->mroute_do_pim;
1827 val = mrt->mroute_do_assert;
1830 return -ENOPROTOOPT;
1833 if (get_user(olr, optlen))
1836 olr = min_t(int, olr, sizeof(int));
1840 if (put_user(olr, optlen))
1842 if (copy_to_user(optval, &val, olr))
1848 /* The IPv6 multicast ioctl support routines. */
1851 int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
1853 struct sioc_sg_req6 sr;
1854 struct sioc_mif_req6 vr;
1855 struct vif_device *vif;
1856 struct mfc6_cache *c;
1857 struct net *net = sock_net(sk);
1858 struct mr_table *mrt;
1860 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1865 case SIOCGETMIFCNT_IN6:
1866 if (copy_from_user(&vr, arg, sizeof(vr)))
1868 if (vr.mifi >= mrt->maxvif)
1870 vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
1871 read_lock(&mrt_lock);
1872 vif = &mrt->vif_table[vr.mifi];
1873 if (VIF_EXISTS(mrt, vr.mifi)) {
1874 vr.icount = vif->pkt_in;
1875 vr.ocount = vif->pkt_out;
1876 vr.ibytes = vif->bytes_in;
1877 vr.obytes = vif->bytes_out;
1878 read_unlock(&mrt_lock);
1880 if (copy_to_user(arg, &vr, sizeof(vr)))
1884 read_unlock(&mrt_lock);
1885 return -EADDRNOTAVAIL;
1886 case SIOCGETSGCNT_IN6:
1887 if (copy_from_user(&sr, arg, sizeof(sr)))
1891 c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
1893 sr.pktcnt = c->_c.mfc_un.res.pkt;
1894 sr.bytecnt = c->_c.mfc_un.res.bytes;
1895 sr.wrong_if = c->_c.mfc_un.res.wrong_if;
1898 if (copy_to_user(arg, &sr, sizeof(sr)))
1903 return -EADDRNOTAVAIL;
1905 return -ENOIOCTLCMD;
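/* Illustrative use from the daemon side (assumption: sr.src and sr.grp are
 * already filled in): ioctl(mrt_sock, SIOCGETSGCNT_IN6, &sr) returns the
 * per-(S,G) packet, byte and wrong-interface counters maintained above.
 */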
1909 #ifdef CONFIG_COMPAT
1910 struct compat_sioc_sg_req6 {
1911 struct sockaddr_in6 src;
1912 struct sockaddr_in6 grp;
1913 compat_ulong_t pktcnt;
1914 compat_ulong_t bytecnt;
1915 compat_ulong_t wrong_if;
1918 struct compat_sioc_mif_req6 {
1920 compat_ulong_t icount;
1921 compat_ulong_t ocount;
1922 compat_ulong_t ibytes;
1923 compat_ulong_t obytes;
1926 int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1928 struct compat_sioc_sg_req6 sr;
1929 struct compat_sioc_mif_req6 vr;
1930 struct vif_device *vif;
1931 struct mfc6_cache *c;
1932 struct net *net = sock_net(sk);
1933 struct mr_table *mrt;
1935 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1940 case SIOCGETMIFCNT_IN6:
1941 if (copy_from_user(&vr, arg, sizeof(vr)))
1943 if (vr.mifi >= mrt->maxvif)
1945 vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
1946 read_lock(&mrt_lock);
1947 vif = &mrt->vif_table[vr.mifi];
1948 if (VIF_EXISTS(mrt, vr.mifi)) {
1949 vr.icount = vif->pkt_in;
1950 vr.ocount = vif->pkt_out;
1951 vr.ibytes = vif->bytes_in;
1952 vr.obytes = vif->bytes_out;
1953 read_unlock(&mrt_lock);
1955 if (copy_to_user(arg, &vr, sizeof(vr)))
1959 read_unlock(&mrt_lock);
1960 return -EADDRNOTAVAIL;
1961 case SIOCGETSGCNT_IN6:
1962 if (copy_from_user(&sr, arg, sizeof(sr)))
1966 c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
1968 sr.pktcnt = c->_c.mfc_un.res.pkt;
1969 sr.bytecnt = c->_c.mfc_un.res.bytes;
1970 sr.wrong_if = c->_c.mfc_un.res.wrong_if;
1973 if (copy_to_user(arg, &sr, sizeof(sr)))
1978 return -EADDRNOTAVAIL;
1980 return -ENOIOCTLCMD;
1985 static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
1987 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
1988 IPSTATS_MIB_OUTFORWDATAGRAMS);
1989 IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
1990 IPSTATS_MIB_OUTOCTETS, skb->len);
1991 return dst_output(net, sk, skb);
1995 * Processing handlers for ip6mr_forward
1998 static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
1999 struct sk_buff *skb, int vifi)
2001 struct ipv6hdr *ipv6h;
2002 struct vif_device *vif = &mrt->vif_table[vifi];
2003 struct net_device *dev;
2004 struct dst_entry *dst;
2010 #ifdef CONFIG_IPV6_PIMSM_V2
2011 if (vif->flags & MIFF_REGISTER) {
2013 vif->bytes_out += skb->len;
2014 vif->dev->stats.tx_bytes += skb->len;
2015 vif->dev->stats.tx_packets++;
2016 ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
2021 ipv6h = ipv6_hdr(skb);
2023 fl6 = (struct flowi6) {
2024 .flowi6_oif = vif->link,
2025 .daddr = ipv6h->daddr,
2028 dst = ip6_route_output(net, NULL, &fl6);
2035 skb_dst_set(skb, dst);
2038 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets locally
2039 * not only before forwarding, but also after forwarding on all output
2040 * interfaces. Clearly, if the mrouter runs a multicast
2041 * program, that program should receive packets regardless of which
2042 * interface it joined on.
2043 * If we did not do this, the program would have to join on all
2044 * interfaces. On the other hand, a multihomed host (or a router, but
2045 * not an mrouter) cannot join on more than one interface - that would
2046 * result in receiving duplicate packets.
2051 vif->bytes_out += skb->len;
2053 /* We are about to write */
2054 /* XXX: extension headers? */
2055 if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
2058 ipv6h = ipv6_hdr(skb);
2061 IP6CB(skb)->flags |= IP6SKB_FORWARDED;
2063 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
2064 net, NULL, skb, skb->dev, dev,
2065 ip6mr_forward2_finish);
2072 static int ip6mr_find_vif(struct mr_table *mrt, struct net_device *dev)
2076 for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
2077 if (mrt->vif_table[ct].dev == dev)
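/* ip6_mr_forward() below implements the forwarding decision for a resolved
 * entry: verify the packet arrived on the expected parent interface (and
 * possibly raise an MRT6MSG_WRONGMIF assert upcall if not), then clone it to
 * every oif whose TTL threshold the packet's hop limit exceeds.
 */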
2083 static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
2084 struct net_device *dev, struct sk_buff *skb,
2085 struct mfc6_cache *c)
2089 int true_vifi = ip6mr_find_vif(mrt, dev);
2091 vif = c->_c.mfc_parent;
2092 c->_c.mfc_un.res.pkt++;
2093 c->_c.mfc_un.res.bytes += skb->len;
2094 c->_c.mfc_un.res.lastuse = jiffies;
2096 if (ipv6_addr_any(&c->mf6c_origin) && true_vifi >= 0) {
2097 struct mfc6_cache *cache_proxy;
2099 /* For an (*,G) entry, we only check that the incoming
2100 * interface is part of the static tree.
2103 cache_proxy = mr_mfc_find_any_parent(mrt, vif);
2105 cache_proxy->_c.mfc_un.res.ttls[true_vifi] < 255) {
2113 * Wrong interface: drop packet and (maybe) send PIM assert.
2115 if (mrt->vif_table[vif].dev != dev) {
2116 c->_c.mfc_un.res.wrong_if++;
2118 if (true_vifi >= 0 && mrt->mroute_do_assert &&
2119 /* PIM-SM uses asserts when switching from RPT to SPT,
2120 so we cannot check that the packet arrived on an oif.
2121 It is bad, but otherwise we would need to move a pretty
2122 large chunk of pimd into the kernel. Ough... --ANK
2124 (mrt->mroute_do_pim ||
2125 c->_c.mfc_un.res.ttls[true_vifi] < 255) &&
2127 c->_c.mfc_un.res.last_assert +
2128 MFC_ASSERT_THRESH)) {
2129 c->_c.mfc_un.res.last_assert = jiffies;
2130 ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
2136 mrt->vif_table[vif].pkt_in++;
2137 mrt->vif_table[vif].bytes_in += skb->len;
2142 if (ipv6_addr_any(&c->mf6c_origin) &&
2143 ipv6_addr_any(&c->mf6c_mcastgrp)) {
2144 if (true_vifi >= 0 &&
2145 true_vifi != c->_c.mfc_parent &&
2146 ipv6_hdr(skb)->hop_limit >
2147 c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) {
2148 /* It's an (*,*) entry and the packet is not coming from
2149 * the upstream: forward the packet to the upstream interface. */
2152 psend = c->_c.mfc_parent;
2157 for (ct = c->_c.mfc_un.res.maxvif - 1;
2158 ct >= c->_c.mfc_un.res.minvif; ct--) {
2159 /* For (*,G) entry, don't forward to the incoming interface */
2160 if ((!ipv6_addr_any(&c->mf6c_origin) || ct != true_vifi) &&
2161 ipv6_hdr(skb)->hop_limit > c->_c.mfc_un.res.ttls[ct]) {
2163 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2165 ip6mr_forward2(net, mrt, skb2, psend);
2172 ip6mr_forward2(net, mrt, skb, psend);
2182 * Multicast packets for forwarding arrive here
2185 int ip6_mr_input(struct sk_buff *skb)
2187 struct mfc6_cache *cache;
2188 struct net *net = dev_net(skb->dev);
2189 struct mr_table *mrt;
2190 struct flowi6 fl6 = {
2191 .flowi6_iif = skb->dev->ifindex,
2192 .flowi6_mark = skb->mark,
2195 struct net_device *dev;
2197 /* skb->dev passed in is the master dev for vrfs.
2198 * Get the proper interface that does have a vif associated with it.
2201 if (netif_is_l3_master(skb->dev)) {
2202 dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
2209 err = ip6mr_fib_lookup(net, &fl6, &mrt);
2215 read_lock(&mrt_lock);
2216 cache = ip6mr_cache_find(mrt,
2217 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
2219 int vif = ip6mr_find_vif(mrt, dev);
2222 cache = ip6mr_cache_find_any(mrt,
2223 &ipv6_hdr(skb)->daddr,
2228 * No usable cache entry
2233 vif = ip6mr_find_vif(mrt, dev);
2235 int err = ip6mr_cache_unresolved(mrt, vif, skb, dev);
2236 read_unlock(&mrt_lock);
2240 read_unlock(&mrt_lock);
2245 ip6_mr_forward(net, mrt, dev, skb, cache);
2247 read_unlock(&mrt_lock);
2252 int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
2256 struct mr_table *mrt;
2257 struct mfc6_cache *cache;
2258 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
2260 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
2264 read_lock(&mrt_lock);
2265 cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
2266 if (!cache && skb->dev) {
2267 int vif = ip6mr_find_vif(mrt, skb->dev);
2270 cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
2275 struct sk_buff *skb2;
2276 struct ipv6hdr *iph;
2277 struct net_device *dev;
2281 if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
2282 read_unlock(&mrt_lock);
2286 /* really correct? */
2287 skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
2289 read_unlock(&mrt_lock);
2293 NETLINK_CB(skb2).portid = portid;
2294 skb_reset_transport_header(skb2);
2296 skb_put(skb2, sizeof(struct ipv6hdr));
2297 skb_reset_network_header(skb2);
2299 iph = ipv6_hdr(skb2);
2302 iph->flow_lbl[0] = 0;
2303 iph->flow_lbl[1] = 0;
2304 iph->flow_lbl[2] = 0;
2305 iph->payload_len = 0;
2306 iph->nexthdr = IPPROTO_NONE;
2308 iph->saddr = rt->rt6i_src.addr;
2309 iph->daddr = rt->rt6i_dst.addr;
2311 err = ip6mr_cache_unresolved(mrt, vif, skb2, dev);
2312 read_unlock(&mrt_lock);
2317 err = mr_fill_mroute(mrt, skb, &cache->_c, rtm);
2318 read_unlock(&mrt_lock);
2322 static int ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2323 u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
2326 struct nlmsghdr *nlh;
2330 nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
2334 rtm = nlmsg_data(nlh);
2335 rtm->rtm_family = RTNL_FAMILY_IP6MR;
2336 rtm->rtm_dst_len = 128;
2337 rtm->rtm_src_len = 128;
2339 rtm->rtm_table = mrt->id;
2340 if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2341 goto nla_put_failure;
2342 rtm->rtm_type = RTN_MULTICAST;
2343 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2344 if (c->_c.mfc_flags & MFC_STATIC)
2345 rtm->rtm_protocol = RTPROT_STATIC;
2347 rtm->rtm_protocol = RTPROT_MROUTED;
2350 if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
2351 nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
2352 goto nla_put_failure;
2353 err = mr_fill_mroute(mrt, skb, &c->_c, rtm);
2354 /* do not break the dump if cache is unresolved */
2355 if (err < 0 && err != -ENOENT)
2356 goto nla_put_failure;
2358 nlmsg_end(skb, nlh);
2362 nlmsg_cancel(skb, nlh);
2366 static int _ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2367 u32 portid, u32 seq, struct mr_mfc *c,
2370 return ip6mr_fill_mroute(mrt, skb, portid, seq, (struct mfc6_cache *)c,
2374 static int mr6_msgsize(bool unresolved, int maxvif)
2377 NLMSG_ALIGN(sizeof(struct rtmsg))
2378 + nla_total_size(4) /* RTA_TABLE */
2379 + nla_total_size(sizeof(struct in6_addr)) /* RTA_SRC */
2380 + nla_total_size(sizeof(struct in6_addr)) /* RTA_DST */
2385 + nla_total_size(4) /* RTA_IIF */
2386 + nla_total_size(0) /* RTA_MULTIPATH */
2387 + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
2389 + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
2395 static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
2398 struct net *net = read_pnet(&mrt->net);
2399 struct sk_buff *skb;
2402 skb = nlmsg_new(mr6_msgsize(mfc->_c.mfc_parent >= MAXMIFS, mrt->maxvif),
2407 err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
2411 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
2417 rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
2420 static size_t mrt6msg_netlink_msgsize(size_t payloadlen)
2423 NLMSG_ALIGN(sizeof(struct rtgenmsg))
2424 + nla_total_size(1) /* IP6MRA_CREPORT_MSGTYPE */
2425 + nla_total_size(4) /* IP6MRA_CREPORT_MIF_ID */
2426 /* IP6MRA_CREPORT_SRC_ADDR */
2427 + nla_total_size(sizeof(struct in6_addr))
2428 /* IP6MRA_CREPORT_DST_ADDR */
2429 + nla_total_size(sizeof(struct in6_addr))
2430 /* IP6MRA_CREPORT_PKT */
2431 + nla_total_size(payloadlen)
2437 static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt)
2439 struct net *net = read_pnet(&mrt->net);
2440 struct nlmsghdr *nlh;
2441 struct rtgenmsg *rtgenm;
2442 struct mrt6msg *msg;
2443 struct sk_buff *skb;
2447 payloadlen = pkt->len - sizeof(struct mrt6msg);
2448 msg = (struct mrt6msg *)skb_transport_header(pkt);
2450 skb = nlmsg_new(mrt6msg_netlink_msgsize(payloadlen), GFP_ATOMIC);
2454 nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
2455 sizeof(struct rtgenmsg), 0);
2458 rtgenm = nlmsg_data(nlh);
2459 rtgenm->rtgen_family = RTNL_FAMILY_IP6MR;
2460 if (nla_put_u8(skb, IP6MRA_CREPORT_MSGTYPE, msg->im6_msgtype) ||
2461 nla_put_u32(skb, IP6MRA_CREPORT_MIF_ID, msg->im6_mif) ||
2462 nla_put_in6_addr(skb, IP6MRA_CREPORT_SRC_ADDR,
2464 nla_put_in6_addr(skb, IP6MRA_CREPORT_DST_ADDR,
2466 goto nla_put_failure;
2468 nla = nla_reserve(skb, IP6MRA_CREPORT_PKT, payloadlen);
2469 if (!nla || skb_copy_bits(pkt, sizeof(struct mrt6msg),
2470 nla_data(nla), payloadlen))
2471 goto nla_put_failure;
2473 nlmsg_end(skb, nlh);
2475 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE_R, NULL, GFP_ATOMIC);
2479 nlmsg_cancel(skb, nlh);
2482 rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE_R, -ENOBUFS);
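/* ip6mr_rtm_dumproute() answers RTM_GETROUTE dumps for RTNL_FAMILY_IP6MR:
 * with a table filter it dumps a single MR table, otherwise it walks every
 * table via ip6mr_mr_table_iter(), emitting one RTN_MULTICAST route per MFC
 * entry through _ip6mr_fill_mroute().
 */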
2485 static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2487 const struct nlmsghdr *nlh = cb->nlh;
2488 struct fib_dump_filter filter = {};
2491 if (cb->strict_check) {
2492 err = ip_valid_fib_dump_req(sock_net(skb->sk), nlh,
2498 if (filter.table_id) {
2499 struct mr_table *mrt;
2501 mrt = ip6mr_get_table(sock_net(skb->sk), filter.table_id);
2503 if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IP6MR)
2506 NL_SET_ERR_MSG_MOD(cb->extack, "MR table does not exist");
2509 err = mr_table_dump(mrt, skb, cb, _ip6mr_fill_mroute,
2510 &mfc_unres_lock, &filter);
2511 return skb->len ? : err;
2514 return mr_rtm_dumproute(skb, cb, ip6mr_mr_table_iter,
2515 _ip6mr_fill_mroute, &mfc_unres_lock, &filter);