2 * Linux IPv6 multicast routing support for BSD pim6sd
3 * Based on net/ipv4/ipmr.c.
5 * (c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
6 * LSIIT Laboratory, Strasbourg, France
7 * (c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
9 * Copyright (C)2007,2008 USAGI/WIDE Project
10 * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
19 #include <linux/uaccess.h>
20 #include <linux/types.h>
21 #include <linux/sched.h>
22 #include <linux/errno.h>
23 #include <linux/timer.h>
25 #include <linux/kernel.h>
26 #include <linux/fcntl.h>
27 #include <linux/stat.h>
28 #include <linux/socket.h>
29 #include <linux/inet.h>
30 #include <linux/netdevice.h>
31 #include <linux/inetdevice.h>
32 #include <linux/proc_fs.h>
33 #include <linux/seq_file.h>
34 #include <linux/init.h>
35 #include <linux/slab.h>
36 #include <linux/compat.h>
37 #include <net/protocol.h>
38 #include <linux/skbuff.h>
41 #include <linux/notifier.h>
42 #include <linux/if_arp.h>
43 #include <net/checksum.h>
44 #include <net/netlink.h>
45 #include <net/fib_rules.h>
48 #include <net/ip6_route.h>
49 #include <linux/mroute6.h>
50 #include <linux/pim.h>
51 #include <net/addrconf.h>
52 #include <linux/netfilter_ipv6.h>
53 #include <linux/export.h>
54 #include <net/ip6_checksum.h>
55 #include <linux/netconf.h>
/* NOTE(review): damaged extraction — original line numbers are embedded in the
 * text and the enclosing struct headers/braces are missing. These appear to be
 * the per-table state fields of struct mr6_table followed by the start of
 * struct ip6mr_rule; restore from upstream net/ipv6/ip6mr.c before compiling.
 */
/* membership in net->ipv6.mr6_tables (multi-table builds) */
58 struct list_head list;
/* socket that owns this routing table (the pim6sd control socket) */
61 struct sock *mroute6_sk;
/* timer driving expiry of the unresolved-entry queue */
62 struct timer_list ipmr_expire_timer;
/* queue of cache entries awaiting resolution from user space */
63 struct list_head mfc6_unres_queue;
/* hash table of resolved (S,G) forwarding cache entries */
64 struct list_head mfc6_cache_array[MFC6_LINES];
/* virtual interface table, indexed by mifi */
65 struct mif_device vif6_table[MAXMIFS];
/* number of entries currently sitting in mfc6_unres_queue */
67 atomic_t cache_resolve_queue_len;
68 bool mroute_do_assert;
70 #ifdef CONFIG_IPV6_PIMSM_V2
/* mif index of the PIM register vif, -1 if none */
71 int mroute_reg_vif_num;
75 #include <linux/nospec.h>
/* struct ip6mr_rule: fib_rule wrapper for multicast routing policy rules */
78 struct fib_rule common;
/* struct ip6mr_result: lookup output — the table a rule resolved to */
82 struct mr6_table *mrt;
85 /* Big lock, protecting vif table, mrt cache and mroute socket state.
86 Note that the changes are semaphored via rtnl_lock.
/* reader/writer lock: read side on the packet path, write side on config */
89 static DEFINE_RWLOCK(mrt_lock);
92 * Multicast router control variables
/* true when vif slot _idx of table _mrt is bound to a device */
95 #define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)
97 /* Special spinlock for queue of unresolved entries */
98 static DEFINE_SPINLOCK(mfc_unres_lock);
100 /* We return to original Alan's scheme. Hash table of resolved
101 entries is changed only in process context and protected
102 with weak lock mrt_lock. Queue of unresolved entries is protected
103 with strong spinlock mfc_unres_lock.
105 In this case data path is free of exclusive locks at all.
/* slab cache backing struct mfc6_cache allocations */
108 static struct kmem_cache *mrt_cachep __read_mostly;
/* Forward declarations for functions defined later in this file.
 * NOTE(review): extraction has dropped some continuation lines (e.g. the
 * second parameter line of mr6_netlink_event). */
110 static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
111 static void ip6mr_free_table(struct mr6_table *mrt);
113 static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
114 struct sk_buff *skb, struct mfc6_cache *cache);
115 static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
116 mifi_t mifi, int assert);
117 static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
118 struct mfc6_cache *c, struct rtmsg *rtm);
119 static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
121 static void mrt6msg_netlink_event(struct mr6_table *mrt, struct sk_buff *pkt);
122 static int ip6mr_rtm_dumproute(struct sk_buff *skb,
123 struct netlink_callback *cb);
124 static void mroute_clean_tables(struct mr6_table *mrt, bool all);
125 static void ipmr_expire_process(unsigned long arg);
/* Multiple-table variants: iterate/lookup tables registered per-netns.
 * NOTE(review): bodies are truncated by the extraction (the id-match test in
 * ip6mr_get_table and most of ip6mr_fib_lookup are missing). */
127 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
128 #define ip6mr_for_each_table(mrt, net) \
129 list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)
/* Find the mr6_table whose id matches, or NULL. */
131 static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
133 struct mr6_table *mrt;
135 ip6mr_for_each_table(mrt, net) {
/* Resolve the table for a flow via the fib-rules policy engine. */
142 static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
143 struct mr6_table **mrt)
146 struct ip6mr_result res;
147 struct fib_lookup_arg arg = {
/* we do not obtain a reference on the resolved table */
149 .flags = FIB_LOOKUP_NOREF,
152 err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
153 flowi6_to_flowi(flp6), 0, &arg);
/* fib_rules .action callback: map a matched rule to a table (or an error
 * action). NOTE(review): the per-case return statements were dropped by the
 * extraction — upstream returns -ENETUNREACH/-EACCES/-EINVAL respectively. */
160 static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
161 int flags, struct fib_lookup_arg *arg)
163 struct ip6mr_result *res = arg->result;
164 struct mr6_table *mrt;
166 switch (rule->action) {
169 case FR_ACT_UNREACHABLE:
171 case FR_ACT_PROHIBIT:
173 case FR_ACT_BLACKHOLE:
/* FR_ACT_TO_TBL: hand back the referenced multicast table */
178 mrt = ip6mr_get_table(rule->fr_net, rule->table);
/* fib_rules .match callback: every packet matches an ip6mr rule — the
 * selection is done purely by rule priority/table. (Body restored: the
 * extraction dropped the braces and the "return 1;" and left a line-number
 * artifact in the signature.) */
static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
{
	return 1;
}
/* Netlink attribute policy for ip6mr rules.
 * NOTE(review): initializer body and closing "};" dropped by the extraction —
 * upstream contains only FRA_GENERIC_POLICY. */
190 static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
/* fib_rules .configure callback: ip6mr rules carry no private attributes,
 * so there is nothing to parse — always succeed. (Body restored: braces and
 * "return 0;" were dropped by the extraction.) */
static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
				struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}
/* fib_rules .compare callback: rules have no private data, so any two rules
 * with equal generic fields compare equal. (Restored: the second signature
 * line, braces and "return 1;" were dropped by the extraction.) */
static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			      struct nlattr **tb)
{
	return 1;
}
206 static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
207 struct fib_rule_hdr *frh)
/* Template for the per-netns fib_rules_ops; cloned by fib_rules_register()
 * in ip6mr_rules_init(). NOTE(review): closing "};" dropped by extraction. */
215 static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
216 .family = RTNL_FAMILY_IP6MR,
217 .rule_size = sizeof(struct ip6mr_rule),
218 .addr_size = sizeof(struct in6_addr),
219 .action = ip6mr_rule_action,
220 .match = ip6mr_rule_match,
221 .configure = ip6mr_rule_configure,
222 .compare = ip6mr_rule_compare,
223 .fill = ip6mr_rule_fill,
224 .nlgroup = RTNLGRP_IPV6_RULE,
225 .policy = ip6mr_rule_policy,
226 .owner = THIS_MODULE,
/* Per-netns init (multi-table build): register the rules ops, create the
 * default table, and install the catch-all default rule.
 * NOTE(review): error checks and goto-cleanup labels between these lines were
 * dropped by the extraction — the last two statements are the unwind path. */
229 static int __net_init ip6mr_rules_init(struct net *net)
231 struct fib_rules_ops *ops;
232 struct mr6_table *mrt;
235 ops = fib_rules_register(&ip6mr_rules_ops_template, net);
239 INIT_LIST_HEAD(&net->ipv6.mr6_tables);
241 mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
/* lowest-priority rule steering everything to the default table */
247 err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
251 net->ipv6.mr6_rules_ops = ops;
/* error unwind: drop the table, then the ops registration */
256 ip6mr_free_table(mrt);
259 fib_rules_unregister(ops);
/* Per-netns teardown (multi-table build): free every table, then unregister
 * the rules ops. NOTE(review): rtnl_lock()/rtnl_unlock() bracketing was
 * dropped by the extraction. */
263 static void __net_exit ip6mr_rules_exit(struct net *net)
265 struct mr6_table *mrt, *next;
268 list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
269 list_del(&mrt->list);
270 ip6mr_free_table(mrt);
272 fib_rules_unregister(net->ipv6.mr6_rules_ops);
/* Single-table (#else) variants: exactly one table, net->ipv6.mrt6.
 * NOTE(review): the #else itself, braces and some return statements were
 * dropped by the extraction. */
276 #define ip6mr_for_each_table(mrt, net) \
277 for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)
/* id is ignored — there is only the one table */
279 static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
281 return net->ipv6.mrt6;
284 static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
285 struct mr6_table **mrt)
287 *mrt = net->ipv6.mrt6;
291 static int __net_init ip6mr_rules_init(struct net *net)
293 net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
294 return net->ipv6.mrt6 ? 0 : -ENOMEM;
297 static void __net_exit ip6mr_rules_exit(struct net *net)
300 ip6mr_free_table(net->ipv6.mrt6);
301 net->ipv6.mrt6 = NULL;
/* Look up or allocate the mr6_table with the given id: initialize the MFC
 * hash buckets, the unresolved queue and its expiry timer, and (multi-table
 * builds) link the table into the per-netns list.
 * NOTE(review): several lines dropped by extraction (early return when the
 * table exists, NULL check after kzalloc, mrt->id assignment, timer arg). */
306 static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
308 struct mr6_table *mrt;
311 mrt = ip6mr_get_table(net, id);
315 mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
319 write_pnet(&mrt->net, net);
321 /* Forwarding cache */
322 for (i = 0; i < MFC6_LINES; i++)
323 INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]);
325 INIT_LIST_HEAD(&mrt->mfc6_unres_queue);
327 setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
330 #ifdef CONFIG_IPV6_PIMSM_V2
/* no PIM register vif yet */
331 mrt->mroute_reg_vif_num = -1;
333 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
334 list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
339 static void ip6mr_free_table(struct mr6_table *mrt)
341 del_timer_sync(&mrt->ipmr_expire_timer);
342 mroute_clean_tables(mrt, true);
346 #ifdef CONFIG_PROC_FS
/* seq_file iterator state for /proc/net/ip6_mr_cache.
 * NOTE(review): "int ct;" member and closing "};" dropped by extraction. */
348 struct ipmr_mfc_iter {
349 struct seq_net_private p;
350 struct mr6_table *mrt;
/* bucket (or unresolved queue) currently being walked */
351 struct list_head *cache;
/* Position the MFC iterator at entry number 'pos': walk the resolved hash
 * buckets under mrt_lock first, then the unresolved queue under
 * mfc_unres_lock. On success the matching lock is left held (released by
 * ipmr_mfc_seq_stop). NOTE(review): the pos-- match/return lines inside both
 * loops were dropped by the extraction. */
356 static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
357 struct ipmr_mfc_iter *it, loff_t pos)
359 struct mr6_table *mrt = it->mrt;
360 struct mfc6_cache *mfc;
362 read_lock(&mrt_lock);
363 for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
364 it->cache = &mrt->mfc6_cache_array[it->ct];
365 list_for_each_entry(mfc, it->cache, list)
369 read_unlock(&mrt_lock);
371 spin_lock_bh(&mfc_unres_lock);
372 it->cache = &mrt->mfc6_unres_queue;
373 list_for_each_entry(mfc, it->cache, list)
376 spin_unlock_bh(&mfc_unres_lock);
383 * The /proc interfaces to multicast routing /proc/ip6_mr_cache /proc/ip6_mr_vif
/* seq_file iterator state for /proc/net/ip6_mr_vif.
 * NOTE(review): "int ct;" member and closing "};" dropped by extraction. */
386 struct ipmr_vif_iter {
387 struct seq_net_private p;
388 struct mr6_table *mrt;
/* Position the vif iterator at entry 'pos', skipping empty vif slots.
 * Caller holds mrt_lock. NOTE(review): the pos-- countdown and the trailing
 * "return NULL;" were dropped by the extraction. */
392 static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
393 struct ipmr_vif_iter *iter,
396 struct mr6_table *mrt = iter->mrt;
398 for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
399 if (!MIF_EXISTS(mrt, iter->ct))
402 return &mrt->vif6_table[iter->ct];
/* seq_file .start for /proc/net/ip6_mr_vif: resolve the default table, take
 * mrt_lock for the duration of the dump, and return either SEQ_START_TOKEN
 * (*pos == 0) or the pos'th vif. NOTE(review): the NULL-check around
 * ip6mr_get_table and the ": SEQ_START_TOKEN" arm of the ternary were
 * dropped by the extraction. */
407 static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
410 struct ipmr_vif_iter *iter = seq->private;
411 struct net *net = seq_file_net(seq);
412 struct mr6_table *mrt;
414 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
416 return ERR_PTR(-ENOENT);
420 read_lock(&mrt_lock);
421 return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
/* seq_file .next: advance to the next occupied vif slot, or NULL when the
 * table is exhausted. NOTE(review): the ++*pos and trailing "return NULL;"
 * were dropped by the extraction. */
425 static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
427 struct ipmr_vif_iter *iter = seq->private;
428 struct net *net = seq_file_net(seq);
429 struct mr6_table *mrt = iter->mrt;
/* first real entry after the header token */
432 if (v == SEQ_START_TOKEN)
433 return ip6mr_vif_seq_idx(net, iter, 0);
435 while (++iter->ct < mrt->maxvif) {
436 if (!MIF_EXISTS(mrt, iter->ct))
438 return &mrt->vif6_table[iter->ct];
/* seq_file .stop: drop the mrt_lock taken in ip6mr_vif_seq_start(). */
443 static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
446 read_unlock(&mrt_lock);
/* seq_file .show: print the column header for SEQ_START_TOKEN, otherwise one
 * formatted row per vif (index, name, byte/packet counters, flags).
 * NOTE(review): the seq_puts/seq_printf call heads, the vif->flags argument
 * and "return 0;" were dropped by the extraction. */
449 static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
451 struct ipmr_vif_iter *iter = seq->private;
452 struct mr6_table *mrt = iter->mrt;
454 if (v == SEQ_START_TOKEN) {
456 "Interface BytesIn PktsIn BytesOut PktsOut Flags\n");
458 const struct mif_device *vif = v;
459 const char *name = vif->dev ? vif->dev->name : "none";
462 "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
/* pointer difference yields the vif index (%td) */
463 vif - mrt->vif6_table,
464 name, vif->bytes_in, vif->pkt_in,
465 vif->bytes_out, vif->pkt_out,
/* seq_file operations for /proc/net/ip6_mr_vif.
 * NOTE(review): closing "};" dropped by the extraction. */
471 static const struct seq_operations ip6mr_vif_seq_ops = {
472 .start = ip6mr_vif_seq_start,
473 .next = ip6mr_vif_seq_next,
474 .stop = ip6mr_vif_seq_stop,
475 .show = ip6mr_vif_seq_show,
478 static int ip6mr_vif_open(struct inode *inode, struct file *file)
480 return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
481 sizeof(struct ipmr_vif_iter));
/* file_operations for /proc/net/ip6_mr_vif.
 * NOTE(review): .read/.llseek members and closing "};" dropped by the
 * extraction. */
484 static const struct file_operations ip6mr_vif_fops = {
485 .owner = THIS_MODULE,
486 .open = ip6mr_vif_open,
489 .release = seq_release_net,
/* seq_file .start for /proc/net/ip6_mr_cache: resolve the default table and
 * return SEQ_START_TOKEN or the pos'th cache entry (locking is done inside
 * ipmr_mfc_seq_idx). NOTE(review): the NULL check, it->mrt/it->cache setup
 * and the ": SEQ_START_TOKEN" arm were dropped by the extraction. */
492 static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
494 struct ipmr_mfc_iter *it = seq->private;
495 struct net *net = seq_file_net(seq);
496 struct mr6_table *mrt;
498 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
500 return ERR_PTR(-ENOENT);
504 return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
/* seq_file .next for the MFC dump: continue within the current bucket, then
 * advance through the remaining hash buckets, and finally switch to the
 * unresolved queue (swapping mrt_lock for mfc_unres_lock on the way).
 * NOTE(review): ++*pos, several "goto end"/return NULL lines and the final
 * label were dropped by the extraction. */
508 static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
510 struct mfc6_cache *mfc = v;
511 struct ipmr_mfc_iter *it = seq->private;
512 struct net *net = seq_file_net(seq);
513 struct mr6_table *mrt = it->mrt;
517 if (v == SEQ_START_TOKEN)
518 return ipmr_mfc_seq_idx(net, seq->private, 0);
/* more entries left in the current list? */
520 if (mfc->list.next != it->cache)
521 return list_entry(mfc->list.next, struct mfc6_cache, list);
/* already on the unresolved queue: nothing further to show */
523 if (it->cache == &mrt->mfc6_unres_queue)
526 BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]);
528 while (++it->ct < MFC6_LINES) {
529 it->cache = &mrt->mfc6_cache_array[it->ct];
530 if (list_empty(it->cache))
532 return list_first_entry(it->cache, struct mfc6_cache, list);
535 /* exhausted cache_array, show unresolved */
536 read_unlock(&mrt_lock);
537 it->cache = &mrt->mfc6_unres_queue;
540 spin_lock_bh(&mfc_unres_lock);
541 if (!list_empty(it->cache))
542 return list_first_entry(it->cache, struct mfc6_cache, list);
545 spin_unlock_bh(&mfc_unres_lock);
/* seq_file .stop: release whichever lock the iterator still holds —
 * mfc_unres_lock if we ended on the unresolved queue, mrt_lock if we ended
 * inside the resolved hash. */
551 static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
553 struct ipmr_mfc_iter *it = seq->private;
554 struct mr6_table *mrt = it->mrt;
556 if (it->cache == &mrt->mfc6_unres_queue)
557 spin_unlock_bh(&mfc_unres_lock);
558 else if (it->cache == &mrt->mfc6_cache_array[it->ct])
559 read_unlock(&mrt_lock);
/* seq_file .show for the MFC dump: header row for SEQ_START_TOKEN; otherwise
 * group/origin/iif plus — for resolved entries only — packet/byte/wrong-if
 * counters and the per-oif TTL list. Unresolved entries get zeroed counters.
 * NOTE(review): the seq_printf heads, mf6c_parent argument, the oif printf
 * and "return 0;" were dropped by the extraction. */
562 static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
566 if (v == SEQ_START_TOKEN) {
570 "Iif Pkts Bytes Wrong Oifs\n");
572 const struct mfc6_cache *mfc = v;
573 const struct ipmr_mfc_iter *it = seq->private;
574 struct mr6_table *mrt = it->mrt;
576 seq_printf(seq, "%pI6 %pI6 %-3hd",
577 &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
/* resolved entries carry real statistics */
580 if (it->cache != &mrt->mfc6_unres_queue) {
581 seq_printf(seq, " %8lu %8lu %8lu",
583 mfc->mfc_un.res.bytes,
584 mfc->mfc_un.res.wrong_if);
585 for (n = mfc->mfc_un.res.minvif;
586 n < mfc->mfc_un.res.maxvif; n++) {
/* only print oifs that exist and forward (ttl < 255) */
587 if (MIF_EXISTS(mrt, n) &&
588 mfc->mfc_un.res.ttls[n] < 255)
591 n, mfc->mfc_un.res.ttls[n]);
594 /* unresolved mfc_caches don't contain
595 * pkt, bytes and wrong_if values
597 seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
/* seq_file operations for /proc/net/ip6_mr_cache.
 * NOTE(review): closing "};" dropped by the extraction. */
604 static const struct seq_operations ipmr_mfc_seq_ops = {
605 .start = ipmr_mfc_seq_start,
606 .next = ipmr_mfc_seq_next,
607 .stop = ipmr_mfc_seq_stop,
608 .show = ipmr_mfc_seq_show,
611 static int ipmr_mfc_open(struct inode *inode, struct file *file)
613 return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
614 sizeof(struct ipmr_mfc_iter));
/* file_operations for /proc/net/ip6_mr_cache.
 * NOTE(review): .read/.llseek members, closing "};" and the matching #endif
 * for CONFIG_PROC_FS were dropped by the extraction. */
617 static const struct file_operations ip6mr_mfc_fops = {
618 .owner = THIS_MODULE,
619 .open = ipmr_mfc_open,
622 .release = seq_release_net,
626 #ifdef CONFIG_IPV6_PIMSM_V2
/* Receive a PIM register packet: validate the register header and checksum,
 * sanity-check the encapsulated IPv6 packet, look up the owning table's
 * register vif, then strip the outer headers and re-inject the inner packet
 * on that vif via skb_tunnel_rx().
 * NOTE(review): several "goto drop" lines, the reg_dev NULL check, the
 * netif_rx() tail and the drop label were dropped by the extraction. */
628 static int pim6_rcv(struct sk_buff *skb)
630 struct pimreghdr *pim;
631 struct ipv6hdr *encap;
632 struct net_device *reg_dev = NULL;
633 struct net *net = dev_net(skb->dev);
634 struct mr6_table *mrt;
635 struct flowi6 fl6 = {
636 .flowi6_iif = skb->dev->ifindex,
637 .flowi6_mark = skb->mark,
/* need at least the register header plus an inner IPv6 header */
641 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
644 pim = (struct pimreghdr *)skb_transport_header(skb);
/* must be a PIM REGISTER, not a null-register, with a valid checksum
 * (header-only or whole-packet, whichever verifies) */
645 if (pim->type != ((PIM_VERSION << 4) | PIM_TYPE_REGISTER) ||
646 (pim->flags & PIM_NULL_REGISTER) ||
647 (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
648 sizeof(*pim), IPPROTO_PIM,
649 csum_partial((void *)pim, sizeof(*pim), 0)) &&
650 csum_fold(skb_checksum(skb, 0, skb->len, 0))))
653 /* check if the inner packet is destined to mcast group */
654 encap = (struct ipv6hdr *)(skb_transport_header(skb) +
657 if (!ipv6_addr_is_multicast(&encap->daddr) ||
658 encap->payload_len == 0 ||
659 ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
662 if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
664 reg_vif_num = mrt->mroute_reg_vif_num;
/* mrt_lock guards the vif table while we fetch the register device */
666 read_lock(&mrt_lock);
667 if (reg_vif_num >= 0)
668 reg_dev = mrt->vif6_table[reg_vif_num].dev;
671 read_unlock(&mrt_lock);
/* unwrap: make the inner IPv6 packet the new network header */
676 skb->mac_header = skb->network_header;
677 skb_pull(skb, (u8 *)encap - skb->data);
678 skb_reset_network_header(skb);
679 skb->protocol = htons(ETH_P_IPV6);
680 skb->ip_summed = CHECKSUM_NONE;
682 skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
/* inet6 protocol handler for IPPROTO_PIM.
 * NOTE(review): the ".handler = pim6_rcv," member, closing "};" and the
 * #endif were dropped by the extraction. */
693 static const struct inet6_protocol pim6_protocol = {
697 /* Service routines creating virtual interfaces: PIMREG */
/* Transmit hook of the pim6reg pseudo-device: packets routed to the register
 * vif are not sent on a wire — they are reported whole to the user-space
 * daemon (MRT6MSG_WHOLEPKT) and the skb is consumed.
 * NOTE(review): the error branch after ip6mr_fib_lookup, kfree_skb() and the
 * NETDEV_TX_OK return were dropped by the extraction. */
699 static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
700 struct net_device *dev)
702 struct net *net = dev_net(dev);
703 struct mr6_table *mrt;
704 struct flowi6 fl6 = {
705 .flowi6_oif = dev->ifindex,
706 .flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
707 .flowi6_mark = skb->mark,
711 err = ip6mr_fib_lookup(net, &fl6, &mrt);
717 read_lock(&mrt_lock);
718 dev->stats.tx_bytes += skb->len;
719 dev->stats.tx_packets++;
720 ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
721 read_unlock(&mrt_lock);
/* ndo_get_iflink for the pim6reg pseudo-device: it has no underlying link,
 * so report ifindex 0. (Body restored: braces and "return 0;" were dropped
 * by the extraction.) */
static int reg_vif_get_iflink(const struct net_device *dev)
{
	return 0;
}
/* netdev_ops for the pim6reg pseudo-device.
 * NOTE(review): closing "};" dropped by the extraction. */
731 static const struct net_device_ops reg_vif_netdev_ops = {
732 .ndo_start_xmit = reg_vif_xmit,
733 .ndo_get_iflink = reg_vif_get_iflink,
736 static void reg_vif_setup(struct net_device *dev)
738 dev->type = ARPHRD_PIMREG;
739 dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8;
740 dev->flags = IFF_NOARP;
741 dev->netdev_ops = ®_vif_netdev_ops;
742 dev->needs_free_netdev = true;
743 dev->features |= NETIF_F_NETNS_LOCAL;
/* Create and register the pim6reg%u pseudo-device for a table; returns the
 * device or NULL. NOTE(review): the name[] buffer declaration, NULL checks,
 * dev_open() failure path and returns were dropped by the extraction — the
 * final two lines are the failure unwind. */
746 static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
748 struct net_device *dev;
/* default table keeps the historical bare "pim6reg" name */
751 if (mrt->id == RT6_TABLE_DFLT)
752 sprintf(name, "pim6reg");
754 sprintf(name, "pim6reg%u", mrt->id);
756 dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
760 dev_net_set(dev, net);
762 if (register_netdevice(dev)) {
774 unregister_netdevice(dev);
/* Delete vif slot 'vifi' from a table: detach the device under mrt_lock,
 * clear the PIM register vif if it was this one, shrink maxvif, undo the
 * allmulti/mc_forwarding accounting, and (for register vifs) queue the
 * pseudo-device for unregistration.
 * NOTE(review): the "dev = v->dev; v->dev = NULL;" swap, the dev NULL check,
 * dev_put() and "return 0;" were dropped by the extraction. */
783 static int mif6_delete(struct mr6_table *mrt, int vifi, int notify,
784 struct list_head *head)
786 struct mif_device *v;
787 struct net_device *dev;
788 struct inet6_dev *in6_dev;
790 if (vifi < 0 || vifi >= mrt->maxvif)
791 return -EADDRNOTAVAIL;
793 v = &mrt->vif6_table[vifi];
795 write_lock_bh(&mrt_lock);
/* slot already empty */
800 write_unlock_bh(&mrt_lock);
801 return -EADDRNOTAVAIL;
804 #ifdef CONFIG_IPV6_PIMSM_V2
805 if (vifi == mrt->mroute_reg_vif_num)
806 mrt->mroute_reg_vif_num = -1;
/* removing the highest slot: scan down for the new maxvif */
809 if (vifi + 1 == mrt->maxvif) {
811 for (tmp = vifi - 1; tmp >= 0; tmp--) {
812 if (MIF_EXISTS(mrt, tmp))
815 mrt->maxvif = tmp + 1;
818 write_unlock_bh(&mrt_lock);
820 dev_set_allmulti(dev, -1);
822 in6_dev = __in6_dev_get(dev);
824 in6_dev->cnf.mc_forwarding--;
825 inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
826 NETCONFA_MC_FORWARDING,
827 dev->ifindex, &in6_dev->cnf);
/* register vif: tear the pseudo-device down unless the device itself
 * is already going away (notify) */
830 if ((v->flags & MIFF_REGISTER) && !notify)
831 unregister_netdevice_queue(dev, head);
837 static inline void ip6mr_cache_free(struct mfc6_cache *c)
839 kmem_cache_free(mrt_cachep, c);
842 /* Destroy an unresolved cache entry, killing queued skbs
843 and reporting error to netlink readers.
/* NOTE(review): the skb declaration, the kfree_skb() else-branch and the
 * trailing ip6mr_cache_free(c) were dropped by the extraction. */
846 static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
848 struct net *net = read_pnet(&mrt->net);
851 atomic_dec(&mrt->cache_resolve_queue_len);
853 while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
/* version == 0 marks a queued netlink request, not a real packet:
 * answer it with an ETIMEDOUT error message */
854 if (ipv6_hdr(skb)->version == 0) {
855 struct nlmsghdr *nlh = skb_pull(skb,
856 sizeof(struct ipv6hdr));
857 nlh->nlmsg_type = NLMSG_ERROR;
858 nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
859 skb_trim(skb, nlh->nlmsg_len);
860 ((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
861 rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
870 /* Timer process for all the unresolved queue. */
/* Walk the unresolved queue, destroying entries whose resolution window has
 * passed and computing the delay until the next soonest expiry; re-arm the
 * timer if entries remain. Caller holds mfc_unres_lock.
 * NOTE(review): "expires = interval;", "continue;" and list_del(&c->list)
 * were dropped by the extraction. */
872 static void ipmr_do_expire_process(struct mr6_table *mrt)
874 unsigned long now = jiffies;
/* default re-arm interval: 10s */
875 unsigned long expires = 10 * HZ;
876 struct mfc6_cache *c, *next;
878 list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
879 if (time_after(c->mfc_un.unres.expires, now)) {
/* not yet expired — remember the nearest deadline */
881 unsigned long interval = c->mfc_un.unres.expires - now;
882 if (interval < expires)
888 mr6_netlink_event(mrt, c, RTM_DELROUTE);
889 ip6mr_destroy_unres(mrt, c);
892 if (!list_empty(&mrt->mfc6_unres_queue))
893 mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
/* Timer callback: try to take mfc_unres_lock without spinning (we are in
 * timer context); if contended, retry one jiffy later, otherwise run the
 * expiry pass. */
896 static void ipmr_expire_process(unsigned long arg)
898 struct mr6_table *mrt = (struct mr6_table *)arg;
900 if (!spin_trylock(&mfc_unres_lock)) {
901 mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
905 if (!list_empty(&mrt->mfc6_unres_queue))
906 ipmr_do_expire_process(mrt);
908 spin_unlock(&mfc_unres_lock);
911 /* Fill oifs list. It is called under write locked mrt_lock. */
/* Rebuild the per-oif TTL thresholds of a cache entry from a MAXMIFS-sized
 * ttl array: 255 means "do not forward"; minvif/maxvif bound the range of
 * slots worth scanning on the forwarding path. */
913 static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache,
918 cache->mfc_un.res.minvif = MAXMIFS;
919 cache->mfc_un.res.maxvif = 0;
/* start from "forward nowhere" */
920 memset(cache->mfc_un.res.ttls, 255, MAXMIFS);
922 for (vifi = 0; vifi < mrt->maxvif; vifi++) {
923 if (MIF_EXISTS(mrt, vifi) &&
924 ttls[vifi] && ttls[vifi] < 255) {
925 cache->mfc_un.res.ttls[vifi] = ttls[vifi];
926 if (cache->mfc_un.res.minvif > vifi)
927 cache->mfc_un.res.minvif = vifi;
928 if (cache->mfc_un.res.maxvif <= vifi)
929 cache->mfc_un.res.maxvif = vifi + 1;
932 cache->mfc_un.res.lastuse = jiffies;
/* Add a virtual interface from a user-space MIF6CTL request: resolve or
 * create the backing device (register pseudo-device for MIFF_REGISTER),
 * enable allmulti, bump mc_forwarding, then populate the mif_device slot and
 * publish it under mrt_lock.
 * NOTE(review): numerous lines dropped by the extraction — the vifi range
 * check, -EADDRINUSE returns, the default switch case (-EINVAL), dev_put()
 * on error, counter initialisation, "v->dev = dev;" and "return 0;". */
935 static int mif6_add(struct net *net, struct mr6_table *mrt,
936 struct mif6ctl *vifc, int mrtsock)
938 int vifi = vifc->mif6c_mifi;
939 struct mif_device *v = &mrt->vif6_table[vifi];
940 struct net_device *dev;
941 struct inet6_dev *in6_dev;
/* slot already in use */
945 if (MIF_EXISTS(mrt, vifi))
948 switch (vifc->mif6c_flags) {
949 #ifdef CONFIG_IPV6_PIMSM_V2
952 * Special Purpose VIF in PIM
953 * All the packets will be sent to the daemon
/* only one register vif per table */
955 if (mrt->mroute_reg_vif_num >= 0)
957 dev = ip6mr_reg_vif(net, mrt)
960 err = dev_set_allmulti(dev, 1);
962 unregister_netdevice(dev);
/* plain vif backed by an existing device */
969 dev = dev_get_by_index(net, vifc->mif6c_pifi);
971 return -EADDRNOTAVAIL;
972 err = dev_set_allmulti(dev, 1);
982 in6_dev = __in6_dev_get(dev);
984 in6_dev->cnf.mc_forwarding++;
985 inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
986 NETCONFA_MC_FORWARDING,
987 dev->ifindex, &in6_dev->cnf);
991 * Fill in the VIF structures
993 v->rate_limit = vifc->vifc_rate_limit;
994 v->flags = vifc->mif6c_flags;
996 v->flags |= VIFF_STATIC;
997 v->threshold = vifc->vifc_threshold;
1002 v->link = dev->ifindex;
/* register vif reports the underlying link instead */
1003 if (v->flags & MIFF_REGISTER)
1004 v->link = dev_get_iflink(dev);
1006 /* And finish update writing critical data */
1007 write_lock_bh(&mrt_lock);
1009 #ifdef CONFIG_IPV6_PIMSM_V2
1010 if (v->flags & MIFF_REGISTER)
1011 mrt->mroute_reg_vif_num = vifi;
1013 if (vifi + 1 > mrt->maxvif)
1014 mrt->maxvif = vifi + 1;
1015 write_unlock_bh(&mrt_lock);
/* Exact (S,G) lookup in the resolved MFC hash; returns the entry or NULL.
 * NOTE(review): the "return c;" on match and trailing "return NULL;" were
 * dropped by the extraction. */
1019 static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
1020 const struct in6_addr *origin,
1021 const struct in6_addr *mcastgrp)
1023 int line = MFC6_HASH(mcastgrp, origin);
1024 struct mfc6_cache *c;
1026 list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
1027 if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
1028 ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
1034 /* Look for a (*,*,oif) entry */
/* Wildcard lookup: find the (any,any) entry whose TTL allows forwarding on
 * 'mifi'. NOTE(review): the mifi parameter line, "return c;" and trailing
 * "return NULL;" were dropped by the extraction. */
1035 static struct mfc6_cache *ip6mr_cache_find_any_parent(struct mr6_table *mrt,
1038 int line = MFC6_HASH(&in6addr_any, &in6addr_any);
1039 struct mfc6_cache *c;
1041 list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
1042 if (ipv6_addr_any(&c->mf6c_origin) &&
1043 ipv6_addr_any(&c->mf6c_mcastgrp) &&
/* 255 = "do not forward" sentinel */
1044 (c->mfc_un.res.ttls[mifi] < 255))
1050 /* Look for a (*,G) entry */
/* Wildcard (*,G) lookup: a (*,G) entry matches if the mif forwards directly
 * or via the (*,*) proxy entry; falls back to the (*,*) parent lookup.
 * NOTE(review): the mifi parameter line and several return statements were
 * dropped by the extraction. */
1051 static struct mfc6_cache *ip6mr_cache_find_any(struct mr6_table *mrt,
1052 struct in6_addr *mcastgrp,
1055 int line = MFC6_HASH(mcastgrp, &in6addr_any);
1056 struct mfc6_cache *c, *proxy;
/* an any-group query goes straight to the (*,*) parent */
1058 if (ipv6_addr_any(mcastgrp))
1061 list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
1062 if (ipv6_addr_any(&c->mf6c_origin) &&
1063 ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp)) {
1064 if (c->mfc_un.res.ttls[mifi] < 255)
1067 /* It's ok if the mifi is part of the static tree */
1068 proxy = ip6mr_cache_find_any_parent(mrt,
1070 if (proxy && proxy->mfc_un.res.ttls[mifi] < 255)
1075 return ip6mr_cache_find_any_parent(mrt, mifi);
1079 * Allocate a multicast cache entry
/* GFP_KERNEL allocation for a resolved entry; last_assert is pre-aged so the
 * first wrong-iif event reports immediately. NOTE(review): the NULL check
 * and "return c;" were dropped by the extraction. */
1081 static struct mfc6_cache *ip6mr_cache_alloc(void)
1083 struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
1086 c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
1087 c->mfc_un.res.minvif = MAXMIFS;
/* GFP_ATOMIC allocation for an unresolved entry (called from the packet
 * path): initialise its pending-skb queue and a 10s resolution deadline.
 * NOTE(review): the NULL check and "return c;" were dropped by the
 * extraction. */
1091 static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
1093 struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
1096 skb_queue_head_init(&c->mfc_un.unres.unresolved);
1097 c->mfc_un.unres.expires = jiffies + 10 * HZ;
1102 * A cache entry has gone into a resolved state from queued
/* Drain the unresolved entry's pending skbs through the now-resolved entry:
 * queued netlink requests (version == 0) are answered with a filled route or
 * an EMSGSIZE error; real packets are forwarded. */
1105 static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
1106 struct mfc6_cache *uc, struct mfc6_cache *c)
1108 struct sk_buff *skb;
1111 * Play the pending entries through our router
1114 while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
1115 if (ipv6_hdr(skb)->version == 0) {
1116 struct nlmsghdr *nlh = skb_pull(skb,
1117 sizeof(struct ipv6hdr));
1119 if (__ip6mr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
1120 nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
/* route did not fit: convert to a netlink error reply */
1122 nlh->nlmsg_type = NLMSG_ERROR;
1123 nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
1124 skb_trim(skb, nlh->nlmsg_len);
1125 ((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
1127 rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
1129 ip6_mr_forward(net, mrt, skb, c);
1134 * Bounce a cache query up to pim6sd and netlink.
1136 * Called under mrt_lock.
/* Build an MRT6MSG and queue it on the table's control socket: WHOLEPKT
 * (PIM register path) re-wraps the entire packet, NOCACHE/WRONGMIF carry
 * just the inner IPv6 header. Also fans the event out via netlink.
 * NOTE(review): NULL-skb checks, the #else branch, kfree_skb() on error and
 * the return were dropped by the extraction. */
1139 static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
1140 mifi_t mifi, int assert)
1142 struct sk_buff *skb;
1143 struct mrt6msg *msg;
1146 #ifdef CONFIG_IPV6_PIMSM_V2
1147 if (assert == MRT6MSG_WHOLEPKT)
1148 skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
1152 skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);
1157 /* I suppose that internal messages
1158 * do not require checksums */
1160 skb->ip_summed = CHECKSUM_UNNECESSARY;
1162 #ifdef CONFIG_IPV6_PIMSM_V2
1163 if (assert == MRT6MSG_WHOLEPKT) {
1164 /* Ugly, but we have no choice with this interface.
1165 Duplicate old header, fix length etc.
1166 And all this only to mangle msg->im6_msgtype and
1167 to set msg->im6_mbz to "mbz" :-)
1169 __skb_pull(skb, skb_network_offset(pkt));
1171 skb_push(skb, sizeof(*msg));
1172 skb_reset_transport_header(skb);
1173 msg = (struct mrt6msg *)skb_transport_header(skb);
1175 msg->im6_msgtype = MRT6MSG_WHOLEPKT;
1176 msg->im6_mif = mrt->mroute_reg_vif_num;
1178 msg->im6_src = ipv6_hdr(pkt)->saddr;
1179 msg->im6_dst = ipv6_hdr(pkt)->daddr;
1181 skb->ip_summed = CHECKSUM_UNNECESSARY;
1186 * Copy the IP header
1189 skb_put(skb, sizeof(struct ipv6hdr));
1190 skb_reset_network_header(skb);
1191 skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));
1196 skb_put(skb, sizeof(*msg));
1197 skb_reset_transport_header(skb);
1198 msg = (struct mrt6msg *)skb_transport_header(skb);
1201 msg->im6_msgtype = assert;
1202 msg->im6_mif = mifi;
1204 msg->im6_src = ipv6_hdr(pkt)->saddr;
1205 msg->im6_dst = ipv6_hdr(pkt)->daddr;
1207 skb_dst_set(skb, dst_clone(skb_dst(pkt)));
1208 skb->ip_summed = CHECKSUM_UNNECESSARY;
/* no daemon listening: drop (error path lines missing here) */
1211 if (!mrt->mroute6_sk) {
1216 mrt6msg_netlink_event(mrt, skb);
1219 * Deliver to user space multicast routing algorithms
1221 ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
1223 net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
1231 * Queue a packet for resolution. It gets locked cache entry!
/* No resolved route for this packet: find or create an unresolved entry,
 * report the first packet to pim6sd (MRT6MSG_NOCACHE), and queue up to four
 * packets awaiting resolution.
 * NOTE(review): the "found = true" match handling, error returns and the
 * final "return err;" were dropped by the extraction. */
1235 ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb)
1239 struct mfc6_cache *c;
1241 spin_lock_bh(&mfc_unres_lock);
1242 list_for_each_entry(c, &mrt->mfc6_unres_queue, list) {
1243 if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
1244 ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
1252 * Create a new entry if allowable
/* cap the unresolved queue at 10 entries */
1255 if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
1256 (c = ip6mr_cache_alloc_unres()) == NULL) {
1257 spin_unlock_bh(&mfc_unres_lock);
1264 * Fill in the new cache entry
1266 c->mf6c_parent = -1;
1267 c->mf6c_origin = ipv6_hdr(skb)->saddr;
1268 c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;
1271 * Reflect first query at pim6sd
1273 err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
1275 /* If the report failed throw the cache entry
1278 spin_unlock_bh(&mfc_unres_lock);
1280 ip6mr_cache_free(c);
1285 atomic_inc(&mrt->cache_resolve_queue_len);
1286 list_add(&c->list, &mrt->mfc6_unres_queue);
1287 mr6_netlink_event(mrt, c, RTM_NEWROUTE);
/* arm/refresh the expiry timer for the queue */
1289 ipmr_do_expire_process(mrt);
1293 * See if we can append the packet
/* at most 4 packets buffered per unresolved entry */
1295 if (c->mfc_un.unres.unresolved.qlen > 3) {
1299 skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
1303 spin_unlock_bh(&mfc_unres_lock);
1308 * MFC6 cache manipulation by user space
/* Delete the (S,G) entry matching the user request (optionally constrained
 * to a parent vif): unlink under mrt_lock, notify netlink, free.
 * NOTE(review): the "int line;" declaration, list_del(&c->list), "return 0;"
 * and the final -ENOENT return were dropped by the extraction. */
1311 static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc,
1315 struct mfc6_cache *c, *next;
1317 line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
1319 list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
1320 if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
1321 ipv6_addr_equal(&c->mf6c_mcastgrp,
1322 &mfc->mf6cc_mcastgrp.sin6_addr) &&
1323 (parent == -1 || parent == c->mf6c_parent)) {
1324 write_lock_bh(&mrt_lock);
1326 write_unlock_bh(&mrt_lock);
1328 mr6_netlink_event(mrt, c, RTM_DELROUTE);
1329 ip6mr_cache_free(c);
/* Netdevice notifier: on NETDEV_UNREGISTER, remove every vif (in every
 * table) that was bound to the departing device.
 * NOTE(review): the "int ct;" declaration, NOTIFY_DONE returns and the
 * "v->dev == dev" test inside the loop were dropped by the extraction. */
1336 static int ip6mr_device_event(struct notifier_block *this,
1337 unsigned long event, void *ptr)
1339 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1340 struct net *net = dev_net(dev);
1341 struct mr6_table *mrt;
1342 struct mif_device *v;
1345 if (event != NETDEV_UNREGISTER)
1348 ip6mr_for_each_table(mrt, net) {
1349 v = &mrt->vif6_table[0];
1350 for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1352 mif6_delete(mrt, ct, 1, NULL);
1359 static struct notifier_block ip6_mr_notifier = {
1360 .notifier_call = ip6mr_device_event
1364 * Setup for IP multicast routing
/* Per-netns init: set up routing tables/rules, then create the two proc
 * entries. NOTE(review): error checks, goto labels ("fail", "proc_vif_fail")
 * and "return err;"/"return 0;" were dropped by the extraction — the last
 * two statements shown are the unwind path. */
1367 static int __net_init ip6mr_net_init(struct net *net)
1371 err = ip6mr_rules_init(net);
1375 #ifdef CONFIG_PROC_FS
1377 if (!proc_create("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_fops))
1379 if (!proc_create("ip6_mr_cache", 0, net->proc_net, &ip6mr_mfc_fops))
1380 goto proc_cache_fail;
1385 #ifdef CONFIG_PROC_FS
1387 remove_proc_entry("ip6_mr_vif", net->proc_net);
1389 ip6mr_rules_exit(net);
/* Per-netns teardown: remove the proc entries, then free tables/rules.
 * Mirrors ip6mr_net_init in reverse order. */
1395 static void __net_exit ip6mr_net_exit(struct net *net)
1397 #ifdef CONFIG_PROC_FS
1398 remove_proc_entry("ip6_mr_cache", net->proc_net);
1399 remove_proc_entry("ip6_mr_vif", net->proc_net);
1401 ip6mr_rules_exit(net);
1404 static struct pernet_operations ip6mr_net_ops = {
1405 .init = ip6mr_net_init,
1406 .exit = ip6mr_net_exit,
/* Module init: create the MFC slab cache, register the pernet subsystem,
 * the netdevice notifier, the PIM protocol handler (PIMSM_V2) and the
 * netlink RTM_GETROUTE dump handler; unwinds in reverse on failure.
 * NOTE(review): the NULL check after kmem_cache_create, "return 0;", the
 * error-label names and "return err;" were dropped by the extraction. */
1409 int __init ip6_mr_init(void)
1413 mrt_cachep = kmem_cache_create("ip6_mrt_cache",
1414 sizeof(struct mfc6_cache),
1415 0, SLAB_HWCACHE_ALIGN,
1420 err = register_pernet_subsys(&ip6mr_net_ops);
1422 goto reg_pernet_fail;
1424 err = register_netdevice_notifier(&ip6_mr_notifier);
1426 goto reg_notif_fail;
1427 #ifdef CONFIG_IPV6_PIMSM_V2
1428 if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
1429 pr_err("%s: can't add PIM protocol\n", __func__);
1431 goto add_proto_fail;
1434 rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL,
1435 ip6mr_rtm_dumproute, 0);
/* error unwind (labels dropped by the extraction) */
1437 #ifdef CONFIG_IPV6_PIMSM_V2
1439 unregister_netdevice_notifier(&ip6_mr_notifier);
1442 unregister_pernet_subsys(&ip6mr_net_ops);
1444 kmem_cache_destroy(mrt_cachep);
/* Module exit: undo everything ip6_mr_init registered, in reverse order. */
1448 void ip6_mr_cleanup(void)
1450 rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE);
1451 #ifdef CONFIG_IPV6_PIMSM_V2
1452 inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
1454 unregister_netdevice_notifier(&ip6_mr_notifier);
1455 unregister_pernet_subsys(&ip6mr_net_ops);
1456 kmem_cache_destroy(mrt_cachep);
/* Add or update an (S,G) MFC entry from a user-space MF6CCTL request.
 * If an entry already exists, just refresh parent/TTLs under mrt_lock;
 * otherwise allocate a new entry, insert it into the hash, and replay any
 * packets that were queued on a matching unresolved entry.
 * NOTE(review): many lines dropped by the extraction — -ENFILE/-EINVAL/
 * -ENOMEM returns, "found = true" bookkeeping, the mrtsock tests guarding
 * MFC_STATIC, and "return 0;". */
1459 static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
1460 struct mf6cctl *mfc, int mrtsock, int parent)
1464 struct mfc6_cache *uc, *c;
1465 unsigned char ttls[MAXMIFS];
/* parent vif index must fit the table */
1468 if (mfc->mf6cc_parent >= MAXMIFS)
1471 memset(ttls, 255, MAXMIFS);
1472 for (i = 0; i < MAXMIFS; i++) {
1473 if (IF_ISSET(i, &mfc->mf6cc_ifset))
1478 line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
1480 list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
1481 if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
1482 ipv6_addr_equal(&c->mf6c_mcastgrp,
1483 &mfc->mf6cc_mcastgrp.sin6_addr) &&
1484 (parent == -1 || parent == mfc->mf6cc_parent)) {
/* entry exists: update in place */
1491 write_lock_bh(&mrt_lock);
1492 c->mf6c_parent = mfc->mf6cc_parent;
1493 ip6mr_update_thresholds(mrt, c, ttls);
1495 c->mfc_flags |= MFC_STATIC;
1496 write_unlock_bh(&mrt_lock);
1497 mr6_netlink_event(mrt, c, RTM_NEWROUTE);
/* only multicast groups (or the (*,*) wildcard) are acceptable */
1501 if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
1502 !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
1505 c = ip6mr_cache_alloc();
1509 c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
1510 c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
1511 c->mf6c_parent = mfc->mf6cc_parent;
1512 ip6mr_update_thresholds(mrt, c, ttls);
1514 c->mfc_flags |= MFC_STATIC;
1516 write_lock_bh(&mrt_lock);
1517 list_add(&c->list, &mrt->mfc6_cache_array[line]);
1518 write_unlock_bh(&mrt_lock);
1521 * Check to see if we resolved a queued list. If so we
1522 * need to send on the frames and tidy up.
1525 spin_lock_bh(&mfc_unres_lock);
1526 list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) {
1527 if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
1528 ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
1529 list_del(&uc->list);
1530 atomic_dec(&mrt->cache_resolve_queue_len);
/* queue emptied: the expiry timer has nothing left to do */
1535 if (list_empty(&mrt->mfc6_unres_queue))
1536 del_timer(&mrt->ipmr_expire_timer);
1537 spin_unlock_bh(&mfc_unres_lock);
/* replay the buffered packets through the resolved route */
1540 ip6mr_cache_resolve(net, mrt, uc, c);
1541 ip6mr_cache_free(uc);
1543 mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1548 * Close the multicast socket, and clear the vif tables etc
/*
 * mroute_clean_tables - tear down VIFs and MFC entries of a table.
 * @all: when false, entries/VIFs marked STATIC are preserved (used when the
 *       controlling socket closes); when true everything goes (table delete).
 *
 * NOTE(review): `continue` statements, LIST_HEAD(list) and the list_del on
 * the cache entry appear elided from this extract.
 */
1551 static void mroute_clean_tables(struct mr6_table *mrt, bool all)
1555 struct mfc6_cache *c, *next;
1558 * Shut down all active vif entries
1560 for (i = 0; i < mrt->maxvif; i++) {
1561 if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
/* Deleted devices are collected on a local list and unregistered in
 * one batch below. */
1563 mif6_delete(mrt, i, 0, &list);
1565 unregister_netdevice_many(&list);
/* Wipe the resolved-cache hash buckets. */
1570 for (i = 0; i < MFC6_LINES; i++) {
1571 list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
1572 if (!all && (c->mfc_flags & MFC_STATIC))
/* Unlink under mrt_lock (the list_del itself is elided here). */
1574 write_lock_bh(&mrt_lock);
1576 write_unlock_bh(&mrt_lock);
1578 mr6_netlink_event(mrt, c, RTM_DELROUTE);
1579 ip6mr_cache_free(c);
/* Finally drop anything still sitting on the unresolved queue. */
1583 if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
1584 spin_lock_bh(&mfc_unres_lock);
1585 list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
1587 mr6_netlink_event(mrt, c, RTM_DELROUTE);
1588 ip6mr_destroy_unres(mrt, c);
1590 spin_unlock_bh(&mfc_unres_lock);
/*
 * ip6mr_sk_init - register @sk as the controlling mroute6 socket of @mrt.
 *
 * Only one controlling socket is allowed per table; the error return for
 * the already-taken case is elided from this extract (historically -EADDRINUSE).
 * On success mc_forwarding is bumped and a netconf notification is sent.
 */
1594 static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk)
1597 struct net *net = sock_net(sk);
/* mroute6_sk is read under mrt_lock elsewhere, so publish it under the
 * write lock. */
1600 write_lock_bh(&mrt_lock);
1601 if (likely(mrt->mroute6_sk == NULL)) {
1602 mrt->mroute6_sk = sk;
1603 net->ipv6.devconf_all->mc_forwarding++;
1607 write_unlock_bh(&mrt_lock);
/* Tell netconf listeners that IPv6 multicast forwarding toggled on. */
1610 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
1611 NETCONFA_MC_FORWARDING,
1612 NETCONFA_IFINDEX_ALL,
1613 net->ipv6.devconf_all);
/*
 * ip6mr_sk_done - detach @sk from whichever table it controls.
 *
 * Walks every mr6_table in the netns; when @sk is found as the controlling
 * socket it is cleared, mc_forwarding decremented, netconf notified, and
 * the table's non-static state cleaned.  Return value handling and the
 * surrounding rtnl locking are elided from this extract.
 */
1619 int ip6mr_sk_done(struct sock *sk)
1622 struct net *net = sock_net(sk);
1623 struct mr6_table *mrt;
1626 ip6mr_for_each_table(mrt, net) {
1627 if (sk == mrt->mroute6_sk) {
/* Clear the socket pointer under mrt_lock, mirroring ip6mr_sk_init(). */
1628 write_lock_bh(&mrt_lock);
1629 mrt->mroute6_sk = NULL;
1630 net->ipv6.devconf_all->mc_forwarding--;
1631 write_unlock_bh(&mrt_lock);
1632 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
1633 NETCONFA_MC_FORWARDING,
1634 NETCONFA_IFINDEX_ALL,
1635 net->ipv6.devconf_all);
/* false: preserve STATIC VIFs/entries across socket close. */
1637 mroute_clean_tables(mrt, false);
/*
 * mroute6_socket - find the controlling mroute6 socket for @skb's table.
 *
 * Performs a fib-rules lookup keyed on the skb's iif/oif/mark to pick the
 * right mr6_table, then returns its controlling socket (NULL return on
 * lookup failure is elided from this extract).
 */
1647 struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
1649 struct mr6_table *mrt;
1650 struct flowi6 fl6 = {
/* Fall back to loopback when the skb has no recorded input interface. */
1651 .flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
1652 .flowi6_oif = skb->dev->ifindex,
1653 .flowi6_mark = skb->mark,
1656 if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
1659 return mrt->mroute6_sk;
1663 * Socket options and virtual interface manipulation. The whole
1664 * virtual interface system is a complete heap, but unfortunately
1665 * that's how BSD mrouted happens to think. Maybe one day with a proper
1666 * MOSPF/PIM router set up we can clean this up.
/*
 * ip6_mroute_setsockopt - handle MRT6_* socket options from pim6sd/mrouted.
 *
 * Only a raw ICMPv6 socket may issue these, and (except for MRT6_INIT) only
 * the table's controlling socket or a CAP_NET_ADMIN holder.
 *
 * NOTE(review): most `case MRT6_*:` labels, `break`s, copy_from_user error
 * returns and the rtnl locking are elided from this extract; the surviving
 * bodies are annotated with the option they evidently belong to.
 */
1669 int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
1671 int ret, parent = 0;
1675 struct net *net = sock_net(sk);
1676 struct mr6_table *mrt;
/* Must be a raw ICMPv6 socket. */
1678 if (sk->sk_type != SOCK_RAW ||
1679 inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1682 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
/* Permission check for everything but table initialisation. */
1686 if (optname != MRT6_INIT) {
1687 if (sk != mrt->mroute6_sk && !ns_capable(net->user_ns, CAP_NET_ADMIN))
/* MRT6_INIT */
1693 if (optlen < sizeof(int))
1696 return ip6mr_sk_init(mrt, sk);
/* MRT6_DONE */
1699 return ip6mr_sk_done(sk);
/* MRT6_ADD_MIF */
1702 if (optlen < sizeof(vif))
1704 if (copy_from_user(&vif, optval, sizeof(vif)))
1706 if (vif.mif6c_mifi >= MAXMIFS)
1709 ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk);
/* MRT6_DEL_MIF */
1714 if (optlen < sizeof(mifi_t))
1716 if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
1719 ret = mif6_delete(mrt, mifi, 0, NULL);
1724 * Manipulate the forwarding caches. These live
1725 * in a sort of kernel/user symbiosis.
1730 case MRT6_ADD_MFC_PROXY:
1731 case MRT6_DEL_MFC_PROXY:
1732 if (optlen < sizeof(mfc))
1734 if (copy_from_user(&mfc, optval, sizeof(mfc)))
/* Proxy variants match on the supplied parent MIF; the plain
 * ADD/DEL forms leave parent == 0/-1 semantics to the callee
 * (the assignment guard is elided here). */
1737 parent = mfc.mf6cc_parent;
1739 if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
1740 ret = ip6mr_mfc_delete(mrt, &mfc, parent);
1742 ret = ip6mr_mfc_add(net, mrt, &mfc,
1743 sk == mrt->mroute6_sk, parent);
1748 * Control PIM assert (to activate pim will activate assert)
/* MRT6_ASSERT */
1754 if (optlen != sizeof(v))
1756 if (get_user(v, (int __user *)optval))
1758 mrt->mroute_do_assert = v;
1762 #ifdef CONFIG_IPV6_PIMSM_V2
/* MRT6_PIM: enabling PIM implies asserts as well. */
1767 if (optlen != sizeof(v))
1769 if (get_user(v, (int __user *)optval))
1774 if (v != mrt->mroute_do_pim) {
1775 mrt->mroute_do_pim = v;
1776 mrt->mroute_do_assert = v;
1783 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
/* MRT6_TABLE: bind this socket to a specific routing table id. */
1788 if (optlen != sizeof(u32))
1790 if (get_user(v, (u32 __user *)optval))
1792 /* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
1793 if (v != RT_TABLE_DEFAULT && v >= 100000000)
/* Cannot retarget a socket that already controls a table. */
1795 if (sk == mrt->mroute6_sk)
1800 if (!ip6mr_new_table(net, v))
1803 raw6_sk(sk)->ip6mr_table = v;
1809 * Spurious command, or MRT6_VERSION which you cannot
1813 return -ENOPROTOOPT;
1818 * Getsock opt support for the multicast routing system.
/*
 * ip6_mroute_getsockopt - read back MRT6 option state (version/pim/assert).
 *
 * Copies at most sizeof(int) bytes to userspace and writes back the actual
 * length via @optlen.  Case labels and some error returns are elided from
 * this extract.
 */
1821 int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
1826 struct net *net = sock_net(sk);
1827 struct mr6_table *mrt;
/* Same raw-ICMPv6 gate as setsockopt. */
1829 if (sk->sk_type != SOCK_RAW ||
1830 inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1833 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1841 #ifdef CONFIG_IPV6_PIMSM_V2
/* MRT6_PIM */
1843 val = mrt->mroute_do_pim;
/* MRT6_ASSERT */
1847 val = mrt->mroute_do_assert;
1850 return -ENOPROTOOPT;
/* Clamp the user-supplied length to sizeof(int); negative-length check
 * is elided here. */
1853 if (get_user(olr, optlen))
1856 olr = min_t(int, olr, sizeof(int));
1860 if (put_user(olr, optlen))
1862 if (copy_to_user(optval, &val, olr))
1868 * The IP multicast ioctl support routines.
/*
 * ip6mr_ioctl - per-MIF and per-(S,G) statistics queries.
 *
 * SIOCGETMIFCNT_IN6: packet/byte counters of one MIF.
 * SIOCGETSGCNT_IN6:  packet/byte/wrong-if counters of one cache entry.
 * The -EFAULT returns and the `if (c)` guard after the cache lookup are
 * elided from this extract.
 */
1871 int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
1873 struct sioc_sg_req6 sr;
1874 struct sioc_mif_req6 vr;
1875 struct mif_device *vif;
1876 struct mfc6_cache *c;
1877 struct net *net = sock_net(sk);
1878 struct mr6_table *mrt;
1880 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1885 case SIOCGETMIFCNT_IN6:
1886 if (copy_from_user(&vr, arg, sizeof(vr)))
1888 if (vr.mifi >= mrt->maxvif)
/* Sanitize the user index against Spectre-v1 speculation before it
 * is used to index vif6_table. */
1890 vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
1891 read_lock(&mrt_lock);
1892 vif = &mrt->vif6_table[vr.mifi];
1893 if (MIF_EXISTS(mrt, vr.mifi)) {
1894 vr.icount = vif->pkt_in;
1895 vr.ocount = vif->pkt_out;
1896 vr.ibytes = vif->bytes_in;
1897 vr.obytes = vif->bytes_out;
/* Drop the lock before touching userspace memory. */
1898 read_unlock(&mrt_lock);
1900 if (copy_to_user(arg, &vr, sizeof(vr)))
1904 read_unlock(&mrt_lock);
1905 return -EADDRNOTAVAIL;
1906 case SIOCGETSGCNT_IN6:
1907 if (copy_from_user(&sr, arg, sizeof(sr)))
1910 read_lock(&mrt_lock);
1911 c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
/* NOTE(review): the NULL check on `c` sits on an elided line. */
1913 sr.pktcnt = c->mfc_un.res.pkt;
1914 sr.bytecnt = c->mfc_un.res.bytes;
1915 sr.wrong_if = c->mfc_un.res.wrong_if;
1916 read_unlock(&mrt_lock);
1918 if (copy_to_user(arg, &sr, sizeof(sr)))
1922 read_unlock(&mrt_lock);
1923 return -EADDRNOTAVAIL;
1925 return -ENOIOCTLCMD;
1929 #ifdef CONFIG_COMPAT
1930 struct compat_sioc_sg_req6 {
1931 struct sockaddr_in6 src;
1932 struct sockaddr_in6 grp;
1933 compat_ulong_t pktcnt;
1934 compat_ulong_t bytecnt;
1935 compat_ulong_t wrong_if;
/*
 * 32-bit-ABI layout of struct sioc_mif_req6 (the mifi field on the elided
 * line precedes these counters).
 */
1938 struct compat_sioc_mif_req6 {
1940 compat_ulong_t icount;
1941 compat_ulong_t ocount;
1942 compat_ulong_t ibytes;
1943 compat_ulong_t obytes;
/*
 * ip6mr_compat_ioctl - 32-bit compat twin of ip6mr_ioctl().
 *
 * Identical control flow to ip6mr_ioctl() but marshals through the
 * compat_sioc_* structures above.  Keep both functions in sync.
 * -EFAULT returns and the NULL check on the cache lookup are elided
 * from this extract.
 */
1946 int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1948 struct compat_sioc_sg_req6 sr;
1949 struct compat_sioc_mif_req6 vr;
1950 struct mif_device *vif;
1951 struct mfc6_cache *c;
1952 struct net *net = sock_net(sk);
1953 struct mr6_table *mrt;
1955 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1960 case SIOCGETMIFCNT_IN6:
1961 if (copy_from_user(&vr, arg, sizeof(vr)))
1963 if (vr.mifi >= mrt->maxvif)
/* Spectre-v1 hardening, as in the native ioctl path. */
1965 vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
1966 read_lock(&mrt_lock);
1967 vif = &mrt->vif6_table[vr.mifi];
1968 if (MIF_EXISTS(mrt, vr.mifi)) {
1969 vr.icount = vif->pkt_in;
1970 vr.ocount = vif->pkt_out;
1971 vr.ibytes = vif->bytes_in;
1972 vr.obytes = vif->bytes_out;
1973 read_unlock(&mrt_lock);
1975 if (copy_to_user(arg, &vr, sizeof(vr)))
1979 read_unlock(&mrt_lock);
1980 return -EADDRNOTAVAIL;
1981 case SIOCGETSGCNT_IN6:
1982 if (copy_from_user(&sr, arg, sizeof(sr)))
1985 read_lock(&mrt_lock);
1986 c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
/* NOTE(review): NULL check on `c` is on an elided line. */
1988 sr.pktcnt = c->mfc_un.res.pkt;
1989 sr.bytecnt = c->mfc_un.res.bytes;
1990 sr.wrong_if = c->mfc_un.res.wrong_if;
1991 read_unlock(&mrt_lock);
1993 if (copy_to_user(arg, &sr, sizeof(sr)))
1997 read_unlock(&mrt_lock);
1998 return -EADDRNOTAVAIL;
2000 return -ENOIOCTLCMD;
/*
 * ip6mr_forward2_finish - netfilter okfn for the FORWARD hook: bump the
 * forwarded-datagram SNMP counters and hand the skb to dst_output().
 */
2005 static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
2007 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
2008 IPSTATS_MIB_OUTFORWDATAGRAMS);
2009 IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
2010 IPSTATS_MIB_OUTOCTETS, skb->len);
2011 return dst_output(net, sk, skb);
2015 * Processing handlers for ip6mr_forward
/*
 * ip6mr_forward2 - transmit one copy of a multicast packet on MIF @vifi.
 *
 * Register MIFs punt the whole packet to the daemon; real MIFs get a route
 * lookup, hop-limit decrement (elided here) and go through the
 * NF_INET_FORWARD hook.  Error/drop paths (goto out_free etc.) are elided
 * from this extract.
 */
2018 static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
2019 struct sk_buff *skb, struct mfc6_cache *c, int vifi)
2021 struct ipv6hdr *ipv6h;
2022 struct mif_device *vif = &mrt->vif6_table[vifi];
2023 struct net_device *dev;
2024 struct dst_entry *dst;
2030 #ifdef CONFIG_IPV6_PIMSM_V2
/* PIM register MIF: account the packet and report it whole to the
 * user-space daemon instead of transmitting it. */
2031 if (vif->flags & MIFF_REGISTER) {
2033 vif->bytes_out += skb->len;
2034 vif->dev->stats.tx_bytes += skb->len;
2035 vif->dev->stats.tx_packets++;
2036 ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
2041 ipv6h = ipv6_hdr(skb);
/* Route the copy out of the MIF's link. */
2043 fl6 = (struct flowi6) {
2044 .flowi6_oif = vif->link,
2045 .daddr = ipv6h->daddr,
2048 dst = ip6_route_output(net, NULL, &fl6);
2055 skb_dst_set(skb, dst);
2058 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
2059 * not only before forwarding, but after forwarding on all output
2060 * interfaces. It is clear, if mrouter runs a multicasting
2061 * program, it should receive packets not depending to what interface
2062 * program is joined.
2063 * If we will not make it, the program will have to join on all
2064 * interfaces. On the other hand, multihoming host (or router, but
2065 * not mrouter) cannot join to more than one interface - it will
2066 * result in receiving multiple packets.
2071 vif->bytes_out += skb->len;
2073 /* We are about to write */
2074 /* XXX: extension headers? */
/* Make the header private/writable before decrementing hop_limit
 * (the decrement itself is on an elided line). */
2075 if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
2078 ipv6h = ipv6_hdr(skb);
2081 IP6CB(skb)->flags |= IP6SKB_FORWARDED;
2083 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
2084 net, NULL, skb, skb->dev, dev,
2085 ip6mr_forward2_finish);
/*
 * ip6mr_find_vif - map a net_device to its MIF index in @mrt, scanning
 * from the top; the not-found return (negative, per callers' `< 0`
 * checks) is on an elided line.
 */
2092 static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
2096 for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
2097 if (mrt->vif6_table[ct].dev == dev)
/*
 * ip6_mr_forward - replicate a multicast packet to every eligible MIF.
 *
 * Validates the arrival interface against the cache entry's parent (RPF
 * check), optionally raising a PIM assert, then clones the skb once per
 * outgoing MIF whose TTL threshold admits it; the last copy consumes the
 * original skb.  Several gotos/labels (forward, dont_forward) and the
 * wildcard checks around them are elided from this extract.
 */
2103 static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
2104 struct sk_buff *skb, struct mfc6_cache *cache)
2108 int true_vifi = ip6mr_find_vif(mrt, skb->dev);
2110 vif = cache->mf6c_parent;
2111 cache->mfc_un.res.pkt++;
2112 cache->mfc_un.res.bytes += skb->len;
2113 cache->mfc_un.res.lastuse = jiffies;
2115 if (ipv6_addr_any(&cache->mf6c_origin) && true_vifi >= 0) {
2116 struct mfc6_cache *cache_proxy;
2118 /* For an (*,G) entry, we only check that the incoming
2119 * interface is part of the static tree.
2121 cache_proxy = ip6mr_cache_find_any_parent(mrt, vif);
2123 cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
2128 * Wrong interface: drop packet and (maybe) send PIM assert.
2130 if (mrt->vif6_table[vif].dev != skb->dev) {
2131 cache->mfc_un.res.wrong_if++;
2133 if (true_vifi >= 0 && mrt->mroute_do_assert &&
2134 /* pimsm uses asserts, when switching from RPT to SPT,
2135 so that we cannot check that packet arrived on an oif.
2136 It is bad, but otherwise we would need to move pretty
2137 large chunk of pimd to kernel. Ough... --ANK
2139 (mrt->mroute_do_pim ||
2140 cache->mfc_un.res.ttls[true_vifi] < 255) &&
/* Rate-limit asserts to one per MFC_ASSERT_THRESH (the
 * time_after() half of the comparison is elided). */
2142 cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
2143 cache->mfc_un.res.last_assert = jiffies;
2144 ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
/* RPF check passed: account the input MIF. */
2150 mrt->vif6_table[vif].pkt_in++;
2151 mrt->vif6_table[vif].bytes_in += skb->len;
2156 if (ipv6_addr_any(&cache->mf6c_origin) &&
2157 ipv6_addr_any(&cache->mf6c_mcastgrp)) {
2158 if (true_vifi >= 0 &&
2159 true_vifi != cache->mf6c_parent &&
2160 ipv6_hdr(skb)->hop_limit >
2161 cache->mfc_un.res.ttls[cache->mf6c_parent]) {
2162 /* It's an (*,*) entry and the packet is not coming from
2163 * the upstream: forward the packet to the upstream
2166 psend = cache->mf6c_parent;
/* Fan out: clone for every MIF except the last, which reuses the
 * original skb (that final branch is partially elided). */
2171 for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
2172 /* For (*,G) entry, don't forward to the incoming interface */
2173 if ((!ipv6_addr_any(&cache->mf6c_origin) || ct != true_vifi) &&
2174 ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
2176 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2178 ip6mr_forward2(net, mrt, skb2, cache, psend);
2185 ip6mr_forward2(net, mrt, skb, cache, psend);
2195 * Multicast packets for forwarding arrive here
/*
 * ip6_mr_input - entry point for multicast packets needing forwarding.
 *
 * Looks up the (S,G) cache entry (falling back to wildcard (*,G)/(*,*)
 * entries), queues the packet as unresolved when nothing matches, or hands
 * it to ip6_mr_forward().  kfree_skb on the no-MIF path is elided from
 * this extract.
 */
2198 int ip6_mr_input(struct sk_buff *skb)
2200 struct mfc6_cache *cache;
2201 struct net *net = dev_net(skb->dev);
2202 struct mr6_table *mrt;
2203 struct flowi6 fl6 = {
2204 .flowi6_iif = skb->dev->ifindex,
2205 .flowi6_mark = skb->mark,
2209 err = ip6mr_fib_lookup(net, &fl6, &mrt);
/* mrt_lock held across lookup and forward so cache/vif tables are
 * stable. */
2215 read_lock(&mrt_lock);
2216 cache = ip6mr_cache_find(mrt,
2217 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
2219 int vif = ip6mr_find_vif(mrt, skb->dev);
/* Exact (S,G) miss: try the wildcard entries. */
2222 cache = ip6mr_cache_find_any(mrt,
2223 &ipv6_hdr(skb)->daddr,
2228 * No usable cache entry
/* Queue for resolution by the daemon if the input device is a MIF. */
2233 vif = ip6mr_find_vif(mrt, skb->dev);
2235 int err = ip6mr_cache_unresolved(mrt, vif, skb);
2236 read_unlock(&mrt_lock);
2240 read_unlock(&mrt_lock);
2245 ip6_mr_forward(net, mrt, skb, cache);
2247 read_unlock(&mrt_lock);
/*
 * __ip6mr_fill_mroute - append IIF, RTA_MULTIPATH nexthops, stats and
 * expiry attributes of one cache entry to a netlink message.
 *
 * Unresolved entries (parent >= MAXMIFS) only get RTNH_F_UNRESOLVED; the
 * -ENOENT return on that path is elided from this extract.
 */
2253 static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2254 struct mfc6_cache *c, struct rtmsg *rtm)
2256 struct rta_mfc_stats mfcs;
2257 struct nlattr *mp_attr;
2258 struct rtnexthop *nhp;
2259 unsigned long lastuse;
2262 /* If cache is unresolved, don't try to parse IIF and OIF */
2263 if (c->mf6c_parent >= MAXMIFS) {
2264 rtm->rtm_flags |= RTNH_F_UNRESOLVED;
2268 if (MIF_EXISTS(mrt, c->mf6c_parent) &&
2269 nla_put_u32(skb, RTA_IIF, mrt->vif6_table[c->mf6c_parent].dev->ifindex) < 0)
2271 mp_attr = nla_nest_start(skb, RTA_MULTIPATH);
/* One rtnexthop per MIF whose TTL threshold admits forwarding. */
2275 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
2276 if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
2277 nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
/* On reserve failure, unwind the whole nest. */
2279 nla_nest_cancel(skb, mp_attr);
2283 nhp->rtnh_flags = 0;
2284 nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
2285 nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex;
2286 nhp->rtnh_len = sizeof(*nhp);
2290 nla_nest_end(skb, mp_attr);
/* Report 0 rather than a bogus huge value if jiffies lags lastuse. */
2292 lastuse = READ_ONCE(c->mfc_un.res.lastuse);
2293 lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;
2295 mfcs.mfcs_packets = c->mfc_un.res.pkt;
2296 mfcs.mfcs_bytes = c->mfc_un.res.bytes;
2297 mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
2298 if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
2299 nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
2303 rtm->rtm_type = RTN_MULTICAST;
/*
 * ip6mr_get_route - RTM_GETROUTE support for multicast destinations.
 *
 * Tries to fill the reply from the MFC; when no entry exists it fabricates
 * a minimal skb carrying just an IPv6 header and queues it as unresolved so
 * the daemon can install a route.  Several error returns (-ENODEV, -ENOMEM)
 * and header-field assignments (version, hop_limit) are elided from this
 * extract.
 */
2307 int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
2311 struct mr6_table *mrt;
2312 struct mfc6_cache *cache;
2313 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
/* NOTE(review): only the default table is consulted here. */
2315 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
2319 read_lock(&mrt_lock);
2320 cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
2321 if (!cache && skb->dev) {
2322 int vif = ip6mr_find_vif(mrt, skb->dev);
2325 cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
2330 struct sk_buff *skb2;
2331 struct ipv6hdr *iph;
2332 struct net_device *dev;
2336 if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
2337 read_unlock(&mrt_lock);
2341 /* really correct? */
2342 skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
2344 read_unlock(&mrt_lock);
/* Build a skeleton packet: empty payload, IPPROTO_NONE, just enough
 * header for ip6mr_cache_unresolved() to key on (src, dst). */
2348 NETLINK_CB(skb2).portid = portid;
2349 skb_reset_transport_header(skb2);
2351 skb_put(skb2, sizeof(struct ipv6hdr));
2352 skb_reset_network_header(skb2);
2354 iph = ipv6_hdr(skb2);
2357 iph->flow_lbl[0] = 0;
2358 iph->flow_lbl[1] = 0;
2359 iph->flow_lbl[2] = 0;
2360 iph->payload_len = 0;
2361 iph->nexthdr = IPPROTO_NONE;
2363 iph->saddr = rt->rt6i_src.addr;
2364 iph->daddr = rt->rt6i_dst.addr;
2366 err = ip6mr_cache_unresolved(mrt, vif, skb2);
2367 read_unlock(&mrt_lock);
/* Cache hit: mark for notification if requested and fill the reply. */
2372 if (rtm->rtm_flags & RTM_F_NOTIFY)
2373 cache->mfc_flags |= MFC_NOTIFY;
2375 err = __ip6mr_fill_mroute(mrt, skb, cache, rtm);
2376 read_unlock(&mrt_lock);
/*
 * ip6mr_fill_mroute - build a complete RTM_NEWROUTE/RTM_DELROUTE message
 * for one cache entry (header + addresses, then __ip6mr_fill_mroute()
 * for the attribute payload).
 *
 * Returns 0 on success; the -EMSGSIZE returns are on elided lines.
 */
2380 static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2381 u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
2384 struct nlmsghdr *nlh;
2388 nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
2392 rtm = nlmsg_data(nlh);
2393 rtm->rtm_family = RTNL_FAMILY_IP6MR;
2394 rtm->rtm_dst_len = 128;
2395 rtm->rtm_src_len = 128;
/* Table id goes in both the legacy 8-bit field and the u32 attr. */
2397 rtm->rtm_table = mrt->id;
2398 if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2399 goto nla_put_failure;
2400 rtm->rtm_type = RTN_MULTICAST;
2401 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2402 if (c->mfc_flags & MFC_STATIC)
2403 rtm->rtm_protocol = RTPROT_STATIC;
2405 rtm->rtm_protocol = RTPROT_MROUTED;
2408 if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
2409 nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
2410 goto nla_put_failure;
2411 err = __ip6mr_fill_mroute(mrt, skb, c, rtm);
2412 /* do not break the dump if cache is unresolved */
2413 if (err < 0 && err != -ENOENT)
2414 goto nla_put_failure;
2416 nlmsg_end(skb, nlh);
/* Roll back the partially-built message on any attribute failure. */
2420 nlmsg_cancel(skb, nlh);
/*
 * mr6_msgsize - worst-case netlink message size for one route event;
 * the per-attribute nla_total_size() terms mirror what ip6mr_fill_mroute()
 * emits (unresolved entries skip the IIF/multipath/stats part — that
 * conditional split sits on elided lines).
 */
2424 static int mr6_msgsize(bool unresolved, int maxvif)
2427 NLMSG_ALIGN(sizeof(struct rtmsg))
2428 + nla_total_size(4) /* RTA_TABLE */
2429 + nla_total_size(sizeof(struct in6_addr)) /* RTA_SRC */
2430 + nla_total_size(sizeof(struct in6_addr)) /* RTA_DST */
2435 + nla_total_size(4) /* RTA_IIF */
2436 + nla_total_size(0) /* RTA_MULTIPATH */
2437 + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
2439 + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
/*
 * mr6_netlink_event - broadcast an RTM_NEWROUTE/RTM_DELROUTE notification
 * for @mfc to RTNLGRP_IPV6_MROUTE listeners; on failure the error is
 * reported via rtnl_set_sk_err (failure gotos are on elided lines).
 */
2445 static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
2448 struct net *net = read_pnet(&mrt->net);
2449 struct sk_buff *skb;
/* parent >= MAXMIFS means unresolved: smaller message, no nexthops. */
2452 skb = nlmsg_new(mr6_msgsize(mfc->mf6c_parent >= MAXMIFS, mrt->maxvif),
2457 err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
2461 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
2467 rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
/*
 * mrt6msg_netlink_msgsize - worst-case size of an RTM_NEWCACHEREPORT
 * message carrying a @payloadlen-byte packet sample; terms mirror the
 * attributes emitted by mrt6msg_netlink_event().
 */
2470 static size_t mrt6msg_netlink_msgsize(size_t payloadlen)
2473 NLMSG_ALIGN(sizeof(struct rtgenmsg))
2474 + nla_total_size(1) /* IP6MRA_CREPORT_MSGTYPE */
2475 + nla_total_size(4) /* IP6MRA_CREPORT_MIF_ID */
2476 /* IP6MRA_CREPORT_SRC_ADDR */
2477 + nla_total_size(sizeof(struct in6_addr))
2478 /* IP6MRA_CREPORT_DST_ADDR */
2479 + nla_total_size(sizeof(struct in6_addr))
2480 /* IP6MRA_CREPORT_PKT */
2481 + nla_total_size(payloadlen)
/*
 * mrt6msg_netlink_event - mirror a kernel->daemon mrt6msg cache report to
 * RTNLGRP_IPV6_MROUTE_R netlink listeners.
 *
 * @pkt carries a struct mrt6msg at its transport header followed by the
 * sampled packet bytes, which are copied into IP6MRA_CREPORT_PKT.
 * Early-return error paths (alloc failures) are on elided lines.
 */
2487 static void mrt6msg_netlink_event(struct mr6_table *mrt, struct sk_buff *pkt)
2489 struct net *net = read_pnet(&mrt->net);
2490 struct nlmsghdr *nlh;
2491 struct rtgenmsg *rtgenm;
2492 struct mrt6msg *msg;
2493 struct sk_buff *skb;
2497 payloadlen = pkt->len - sizeof(struct mrt6msg);
2498 msg = (struct mrt6msg *)skb_transport_header(pkt);
2500 skb = nlmsg_new(mrt6msg_netlink_msgsize(payloadlen), GFP_ATOMIC);
2504 nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
2505 sizeof(struct rtgenmsg), 0);
2508 rtgenm = nlmsg_data(nlh);
2509 rtgenm->rtgen_family = RTNL_FAMILY_IP6MR;
2510 if (nla_put_u8(skb, IP6MRA_CREPORT_MSGTYPE, msg->im6_msgtype) ||
2511 nla_put_u32(skb, IP6MRA_CREPORT_MIF_ID, msg->im6_mif) ||
2512 nla_put_in6_addr(skb, IP6MRA_CREPORT_SRC_ADDR,
2514 nla_put_in6_addr(skb, IP6MRA_CREPORT_DST_ADDR,
2516 goto nla_put_failure;
/* Copy the packet sample after the fixed attributes. */
2518 nla = nla_reserve(skb, IP6MRA_CREPORT_PKT, payloadlen);
2519 if (!nla || skb_copy_bits(pkt, sizeof(struct mrt6msg),
2520 nla_data(nla), payloadlen))
2521 goto nla_put_failure;
2523 nlmsg_end(skb, nlh);
2525 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE_R, NULL, GFP_ATOMIC);
2529 nlmsg_cancel(skb, nlh);
2532 rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE_R, -ENOBUFS);
2535 static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2537 struct net *net = sock_net(skb->sk);
2538 struct mr6_table *mrt;
2539 struct mfc6_cache *mfc;
2540 unsigned int t = 0, s_t;
2541 unsigned int h = 0, s_h;
2542 unsigned int e = 0, s_e;
2548 read_lock(&mrt_lock);
2549 ip6mr_for_each_table(mrt, net) {
2554 for (h = s_h; h < MFC6_LINES; h++) {
2555 list_for_each_entry(mfc, &mrt->mfc6_cache_array[h], list) {
2558 if (ip6mr_fill_mroute(mrt, skb,
2559 NETLINK_CB(cb->skb).portid,
2569 spin_lock_bh(&mfc_unres_lock);
2570 list_for_each_entry(mfc, &mrt->mfc6_unres_queue, list) {
2573 if (ip6mr_fill_mroute(mrt, skb,
2574 NETLINK_CB(cb->skb).portid,
2578 spin_unlock_bh(&mfc_unres_lock);
2584 spin_unlock_bh(&mfc_unres_lock);
2591 read_unlock(&mrt_lock);