/* IP multicast routing support for mrouted 3.6/3.8
 *
 * (c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 * Linux Consultancy and Custom Driver Development
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Fixes:
 * Michael Chastain	: Incorrect size of copying.
 * Alan Cox		: Added the cache manager code
 * Alan Cox		: Fixed the clone/copy bug and device race.
 * Mike McLagan		: Routing by source
 * Malcolm Beattie	: Buffer handling fixes.
 * Alexey Kuznetsov	: Double buffer free and other fixes.
 * SVR Anand		: Fixed several multicast bugs and problems.
 * Alexey Kuznetsov	: Status, optimisations and more.
 * Brad Parker		: Better behaviour on mrouted upcall
 *			  overflow.
 * Carlos Picoto	: PIMv1 Support
 * Pavlin Ivanov Radoslavov: PIMv2 Registers must checksum only PIM header
 *			  Relax this requirement to work with older peers.
 */
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>
#include <net/nexthop.h>
#include <linux/nospec.h>
struct ipmr_rule {
	struct fib_rule		common;
};

struct ipmr_result {
	struct mr_table		*mrt;
};
/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that the changes are semaphored via rtnl_lock.
 */
static DEFINE_RWLOCK(mrt_lock);
/* Multicast router control variables */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to Alan's original scheme. The hash table of resolved
 * entries is changed only in process context and protected
 * with the weak lock mrt_lock. The queue of unresolved entries is
 * protected with the strong spinlock mfc_unres_lock.
 *
 * In this case the data path is entirely free of exclusive locks.
 */

static struct kmem_cache *mrt_cachep __read_mostly;
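/* Illustrative sketch (not part of the original file): the split the
 * locking comment above describes. Resolved-cache readers run under RCU
 * and take no exclusive lock; only control-plane updaters take mrt_lock
 * for writing. The helpers named below are the real ones in this file:
 *
 *	rcu_read_lock();
 *	c = ipmr_cache_find(mrt, origin, mcastgrp);	// RCU list walk
 *	...use c...
 *	rcu_read_unlock();
 *
 *	write_lock_bh(&mrt_lock);			// process context only
 *	...update vif_table / mfc entry...
 *	write_unlock_bh(&mrt_lock);
 */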
static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);

static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct sk_buff *skb, struct mfc_cache *cache,
			  int local);
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd);
static void mroute_clean_tables(struct mr_table *mrt, bool all);
static void ipmr_expire_process(unsigned long arg);

static bool ipmr_pimsm_enabled(void)
{
	return IS_BUILTIN(CONFIG_IP_PIMSM_V1) || IS_BUILTIN(CONFIG_IP_PIMSM_V2);
}
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	ipmr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}
static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	struct ipmr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};
	int err;

	err = fib_rules_lookup(net->ipv4.mr_rules_ops,
			       flowi4_to_flowi(flp4), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}
static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
			    int flags, struct fib_lookup_arg *arg)
{
	struct ipmr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	mrt = ipmr_get_table(rule->fr_net, rule->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}
static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
	return 1;
}

static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
			       struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			     struct nlattr **tb)
{
	return 1;
}

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			  struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos     = 0;
	return 0;
}
static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
	.family		= RTNL_FAMILY_IPMR,
	.rule_size	= sizeof(struct ipmr_rule),
	.addr_size	= sizeof(u32),
	.action		= ipmr_rule_action,
	.match		= ipmr_rule_match,
	.configure	= ipmr_rule_configure,
	.compare	= ipmr_rule_compare,
	.fill		= ipmr_rule_fill,
	.nlgroup	= RTNLGRP_IPV4_RULE,
	.policy		= ipmr_rule_policy,
	.owner		= THIS_MODULE,
};
static int __net_init ipmr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr_table *mrt;
	int err;

	ops = fib_rules_register(&ipmr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv4.mr_tables);

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (IS_ERR(mrt)) {
		err = PTR_ERR(mrt);
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
	if (err < 0)
		goto err2;

	net->ipv4.mr_rules_ops = ops;
	return 0;

err2:
	ipmr_free_table(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}
static void __net_exit ipmr_rules_exit(struct net *net)
{
	struct mr_table *mrt, *next;

	rtnl_lock();
	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
		list_del(&mrt->list);
		ipmr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv4.mr_rules_ops);
	rtnl_unlock();
}
#else
#define ipmr_for_each_table(mrt, net) \
	for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	return net->ipv4.mrt;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	*mrt = net->ipv4.mrt;
	return 0;
}

static int __net_init ipmr_rules_init(struct net *net)
{
	struct mr_table *mrt;

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (IS_ERR(mrt))
		return PTR_ERR(mrt);
	net->ipv4.mrt = mrt;
	return 0;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	rtnl_lock();
	ipmr_free_table(net->ipv4.mrt);
	net->ipv4.mrt = NULL;
	rtnl_unlock();
}
#endif
static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
	struct mr_table *mrt;
	unsigned int i;

	/* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
	if (id != RT_TABLE_DEFAULT && id >= 1000000000)
		return ERR_PTR(-EINVAL);

	mrt = ipmr_get_table(net, id);
	if (mrt)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (!mrt)
		return ERR_PTR(-ENOMEM);
	write_pnet(&mrt->net, net);
	mrt->id = id;

	/* Forwarding cache */
	for (i = 0; i < MFC_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);

	INIT_LIST_HEAD(&mrt->mfc_unres_queue);

	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
		    (unsigned long)mrt);

	mrt->mroute_reg_vif_num = -1;
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
	return mrt;
}
static void ipmr_free_table(struct mr_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt, true);
	kfree(mrt);
}
/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
	struct net *net = dev_net(dev);

	dev_close(dev);

	dev = __dev_get_by_name(net, "tunl0");
	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
			set_fs(oldfs);
		}
	}
}
/* Initialize ipmr pimreg/tunnel in_device */
static bool ipmr_init_vif_indev(const struct net_device *dev)
{
	struct in_device *in_dev;

	ASSERT_RTNL();

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev)
		return false;
	ipv4_devconf_setall(in_dev);
	neigh_parms_data_state_setall(in_dev->arp_parms);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

	return true;
}
static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
	struct net_device *dev;

	dev = __dev_get_by_name(net, "tunl0");

	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		int err;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
			set_fs(oldfs);
		} else {
			err = -EOPNOTSUPP;
		}
		dev = NULL;

		if (err == 0 &&
		    (dev = __dev_get_by_name(net, p.name)) != NULL) {
			dev->flags |= IFF_MULTICAST;
			if (!ipmr_init_vif_indev(dev))
				goto failure;
			if (dev_open(dev))
				goto failure;
			dev_hold(dev);
		}
	}
	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)

static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi4 fl4 = {
		.flowi4_oif	= dev->ifindex,
		.flowi4_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi4_mark	= skb->mark,
	};
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
static int reg_vif_get_iflink(const struct net_device *dev)
{
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,
};
static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->destructor		= free_netdev;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT_TABLE_DEFAULT)
		sprintf(name, "pimreg");
	else
		sprintf(name, "pimreg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	if (!ipmr_init_vif_indev(dev))
		goto failure;
	if (dev_open(dev))
		goto failure;

	dev_hold(dev);
	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}
/* called with rcu_read_lock() */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
		     unsigned int pimlen)
{
	struct net_device *reg_dev = NULL;
	struct iphdr *encap;

	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
	/* Check that:
	 * a. packet is really sent to a multicast group
	 * b. packet is not a NULL-REGISTER
	 * c. packet is not truncated
	 */
	if (!ipv4_is_multicast(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + pimlen > skb->len)
		return 1;

	read_lock(&mrt_lock);
	if (mrt->mroute_reg_vif_num >= 0)
		reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
	read_unlock(&mrt_lock);

	if (!reg_dev)
		return 1;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	netif_rx(skb);

	return NET_RX_SUCCESS;
}
#else
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	return NULL;
}
#endif
/**
 *	vif_delete - Delete a VIF entry
 *	@notify: Set to 1 if the caller is a notifier_call
 */
static int vif_delete(struct mr_table *mrt, int vifi, int notify,
		      struct list_head *head)
{
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;

	if (vifi + 1 == mrt->maxvif) {
		int tmp;

		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in_dev = __in_dev_get_rtnl(dev);
	if (in_dev) {
		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
		inet_netconf_notify_devconf(dev_net(dev),
					    NETCONFA_MC_FORWARDING,
					    dev->ifindex, &in_dev->cnf);
		ip_rt_multicast_event(in_dev);
	}

	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}
static void ipmr_cache_free_rcu(struct rcu_head *head)
{
	struct mfc_cache *c = container_of(head, struct mfc_cache, rcu);

	kmem_cache_free(mrt_cachep, c);
}

static inline void ipmr_cache_free(struct mfc_cache *c)
{
	call_rcu(&c->rcu, ipmr_cache_free_rcu);
}
/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */
static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	struct nlmsgerr *e;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			e = nlmsg_data(nlh);
			e->error = -ETIMEDOUT;
			memset(&e->msg, 0, sizeof(e->msg));

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			kfree_skb(skb);
		}
	}

	ipmr_cache_free(c);
}
/* Timer process for the unresolved queue. */
static void ipmr_expire_process(unsigned long arg)
{
	struct mr_table *mrt = (struct mr_table *)arg;
	unsigned long now;
	unsigned long expires;
	struct mfc_cache *c, *next;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + HZ / 10);
		return;
	}

	if (list_empty(&mrt->mfc_unres_queue))
		goto out;

	now = jiffies;
	expires = 10 * HZ;

	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mroute_netlink_event(mrt, c, RTM_DELROUTE);
		ipmr_destroy_unres(mrt, c);
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);

out:
	spin_unlock(&mfc_unres_lock);
}
/* Fill oifs list. It is called under write locked mrt_lock. */
static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
				   unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (VIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
	cache->mfc_un.res.lastuse = jiffies;
}
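/* Worked example (illustrative, not from the original file): with
 * ttls = { [1] = 1, [3] = 64 } the loop above yields minvif = 1,
 * maxvif = 4, and res.ttls = { 255, 1, 255, 64, 255, ... }. A packet is
 * later forwarded on vif n only when ip_hdr(skb)->ttl > res.ttls[n]
 * (see the forwarding loop in ip_mr_forward()), so vif 3 needs an
 * incoming TTL of at least 65 and every vif left at 255 is skipped.
 */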
static int vif_add(struct net *net, struct mr_table *mrt,
		   struct vifctl *vifc, int mrtsock)
{
	int vifi = vifc->vifc_vifi;
	struct vif_device *v = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;
	int err;

	/* Is vif busy ? */
	if (VIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->vifc_flags) {
	case VIFF_REGISTER:
		if (!ipmr_pimsm_enabled())
			return -EINVAL;
		/* Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ipmr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
	case VIFF_TUNNEL:
		dev = ipmr_new_tunnel(net, vifc);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			ipmr_del_tunnel(dev, vifc);
			dev_put(dev);
			return err;
		}
		break;
	case VIFF_USE_IFINDEX:
	case 0:
		if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
			dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
			if (dev && !__in_dev_get_rtnl(dev)) {
				dev_put(dev);
				return -EADDRNOTAVAIL;
			}
		} else {
			dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
		}
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev) {
		dev_put(dev);
		return -EADDRNOTAVAIL;
	}
	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
	inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING, dev->ifindex,
				    &in_dev->cnf);
	ip_rt_multicast_event(in_dev);

	/* Fill in the VIF structures */
	v->rate_limit = vifc->vifc_rate_limit;
	v->local = vifc->vifc_lcl_addr.s_addr;
	v->remote = vifc->vifc_rmt_addr.s_addr;
	v->flags = vifc->vifc_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
		v->link = dev_get_iflink(dev);

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
	if (v->flags & VIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
	if (vifi + 1 > mrt->maxvif)
		mrt->maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	return 0;
}
/* called with rcu_read_lock() */
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
					 __be32 origin,
					 __be32 mcastgrp)
{
	int line = MFC_HASH(mcastgrp, origin);
	struct mfc_cache *c;

	list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
			return c;
	}
	return NULL;
}
/* Look for a (*,*,oif) entry */
static struct mfc_cache *ipmr_cache_find_any_parent(struct mr_table *mrt,
						    int vifi)
{
	int line = MFC_HASH(htonl(INADDR_ANY), htonl(INADDR_ANY));
	struct mfc_cache *c;

	list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
		if (c->mfc_origin == htonl(INADDR_ANY) &&
		    c->mfc_mcastgrp == htonl(INADDR_ANY) &&
		    c->mfc_un.res.ttls[vifi] < 255)
			return c;

	return NULL;
}
/* Look for a (*,G) entry */
static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
					     __be32 mcastgrp, int vifi)
{
	int line = MFC_HASH(mcastgrp, htonl(INADDR_ANY));
	struct mfc_cache *c, *proxy;

	if (mcastgrp == htonl(INADDR_ANY))
		goto skip;

	list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
		if (c->mfc_origin == htonl(INADDR_ANY) &&
		    c->mfc_mcastgrp == mcastgrp) {
			if (c->mfc_un.res.ttls[vifi] < 255)
				return c;

			/* It's ok if the vifi is part of the static tree */
			proxy = ipmr_cache_find_any_parent(mrt,
							   c->mfc_parent);
			if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
				return c;
		}

skip:
	return ipmr_cache_find_any_parent(mrt, vifi);
}
/* Allocate a multicast cache entry */
static struct mfc_cache *ipmr_cache_alloc(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);

	if (c) {
		c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
		c->mfc_un.res.minvif = MAXVIFS;
	}
	return c;
}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);

	if (c) {
		skb_queue_head_init(&c->mfc_un.unres.unresolved);
		c->mfc_un.unres.expires = jiffies + 10 * HZ;
	}
	return c;
}
/* A cache entry has gone into a resolved state from queued */
static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
			       struct mfc_cache *uc, struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	/* Play the pending entries through our router */
	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			if (__ipmr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) -
						 (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				e = nlmsg_data(nlh);
				e->error = -EMSGSIZE;
				memset(&e->msg, 0, sizeof(e->msg));
			}

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			ip_mr_forward(net, mrt, skb, c, 0);
		}
	}
}
/* Bounce a cache query up to mrouted. We could use netlink for this but mrouted
 * expects the following bizarre scheme.
 *
 * Called under mrt_lock.
 */
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert)
{
	const int ihl = ip_hdrlen(pkt);
	struct sock *mroute_sk;
	struct igmphdr *igmp;
	struct igmpmsg *msg;
	struct sk_buff *skb;
	int ret;

	if (assert == IGMPMSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
	else
		skb = alloc_skb(128, GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	if (assert == IGMPMSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		 * Duplicate old header, fix ihl, length etc.
		 * And all this only to mangle msg->im_msgtype and
		 * to set msg->im_mbz to "mbz" :-)
		 */
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		msg = (struct igmpmsg *)skb_network_header(skb);
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
		msg->im_msgtype = IGMPMSG_WHOLEPKT;
		msg->im_mbz = 0;
		msg->im_vif = mrt->mroute_reg_vif_num;
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));
	} else {
		/* Copy the IP header */
		skb_set_network_header(skb, skb->len);
		skb_put(skb, ihl);
		skb_copy_to_linear_data(skb, pkt->data, ihl);
		/* Flag to the kernel this is a route add */
		ip_hdr(skb)->protocol = 0;
		msg = (struct igmpmsg *)skb_network_header(skb);
		msg->im_vif = vifi;
		skb_dst_set(skb, dst_clone(skb_dst(pkt)));
		/* Add our header */
		igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
		igmp->type = assert;
		msg->im_msgtype = assert;
		igmp->code = 0;
		ip_hdr(skb)->tot_len = htons(skb->len);	/* Fix the length */
		skb->transport_header = skb->network_header;
	}

	rcu_read_lock();
	mroute_sk = rcu_dereference(mrt->mroute_sk);
	if (!mroute_sk) {
		rcu_read_unlock();
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Deliver to mrouted */
	ret = sock_queue_rcv_skb(mroute_sk, skb);
	rcu_read_unlock();
	if (ret < 0) {
		net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}
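/* Userspace view (illustrative sketch, not part of the original file):
 * mrouted receives the skb built above as a datagram on its raw IGMP
 * socket. The payload starts with a struct igmpmsg overlaid on the
 * copied IP header, so a daemon typically does something like:
 *
 *	char buf[1500];
 *	ssize_t n = recv(mroute_fd, buf, sizeof(buf), 0);
 *	struct igmpmsg *msg = (struct igmpmsg *)buf;
 *	if (n >= (ssize_t)sizeof(*msg) && msg->im_mbz == 0) {
 *		switch (msg->im_msgtype) {
 *		case IGMPMSG_NOCACHE:	// resolve (im_src, im_dst), then MRT_ADD_MFC
 *		case IGMPMSG_WRONGVIF:	// PIM assert handling
 *		case IGMPMSG_WHOLEPKT:	// PIM register handling
 *			break;
 *		}
 *	}
 *
 * im_mbz overlays the IP protocol byte, which the code above zeroes and
 * which is never 0 in a real IGMP packet - that is how the daemon tells
 * an upcall apart from ordinary traffic on the same socket.
 */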
/* Queue a packet for resolution. It gets locked cache entry! */
static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
				 struct sk_buff *skb)
{
	bool found = false;
	int err;
	struct mfc_cache *c;
	const struct iphdr *iph = ip_hdr(skb);

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
		if (c->mfc_mcastgrp == iph->daddr &&
		    c->mfc_origin == iph->saddr) {
			found = true;
			break;
		}
	}

	if (!found) {
		/* Create a new entry if allowable */
		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ipmr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/* Fill in the new cache entry */
		c->mfc_parent	= -1;
		c->mfc_origin	= iph->saddr;
		c->mfc_mcastgrp	= iph->daddr;

		/* Reflect first query at mrouted. */
		err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			 * out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ipmr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->list, &mrt->mfc_unres_queue);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);

		if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
			mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
	}

	/* See if we can append the packet */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}
/* MFC cache manipulation by user space mroute daemon */

static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
{
	int line;
	struct mfc_cache *c, *next;

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
		    (parent == -1 || parent == c->mfc_parent)) {
			list_del_rcu(&c->list);
			mroute_netlink_event(mrt, c, RTM_DELROUTE);
			ipmr_cache_free(c);
			return 0;
		}
	}
	return -ENOENT;
}
static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
			struct mfcctl *mfc, int mrtsock, int parent)
{
	bool found = false;
	int line;
	struct mfc_cache *uc, *c;

	if (mfc->mfcc_parent >= MAXVIFS)
		return -ENFILE;

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
		    (parent == -1 || parent == c->mfc_parent)) {
			found = true;
			break;
		}
	}

	if (found) {
		write_lock_bh(&mrt_lock);
		c->mfc_parent = mfc->mfcc_parent;
		ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	if (mfc->mfcc_mcastgrp.s_addr != htonl(INADDR_ANY) &&
	    !ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
		return -EINVAL;

	c = ipmr_cache_alloc();
	if (!c)
		return -ENOMEM;

	c->mfc_origin = mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
	c->mfc_parent = mfc->mfcc_parent;
	ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	list_add_rcu(&c->list, &mrt->mfc_cache_array[line]);

	/* Check to see if we resolved a queued list. If so we
	 * need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ipmr_cache_resolve(net, mrt, uc, c);
		ipmr_cache_free(uc);
	}
	mroute_netlink_event(mrt, c, RTM_NEWROUTE);
	return 0;
}
/* Close the multicast socket, and clear the vif tables etc */
static void mroute_clean_tables(struct mr_table *mrt, bool all)
{
	int i;
	LIST_HEAD(list);
	struct mfc_cache *c, *next;

	/* Shut down all active vif entries */
	for (i = 0; i < mrt->maxvif; i++) {
		if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
			continue;
		vif_delete(mrt, i, 0, &list);
	}
	unregister_netdevice_many(&list);

	/* Wipe the cache */
	for (i = 0; i < MFC_LINES; i++) {
		list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
			if (!all && (c->mfc_flags & MFC_STATIC))
				continue;
			list_del_rcu(&c->list);
			mroute_netlink_event(mrt, c, RTM_DELROUTE);
			ipmr_cache_free(c);
		}
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
			list_del(&c->list);
			mroute_netlink_event(mrt, c, RTM_DELROUTE);
			ipmr_destroy_unres(mrt, c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}
/* called from ip_ra_control(), before an RCU grace period,
 * we don't need to call synchronize_rcu() here
 */
static void mrtsock_destruct(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	rtnl_lock();
	ipmr_for_each_table(mrt, net) {
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
			inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
			mroute_clean_tables(mrt, false);
		}
	}
	rtnl_unlock();
}
/* Socket options and virtual interface manipulation. The whole
 * virtual interface system is a complete heap, but unfortunately
 * that's how BSD mrouted happens to think. Maybe one day with a proper
 * MOSPF/PIM router set up we can clean this up.
 */

int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
			 unsigned int optlen)
{
	struct net *net = sock_net(sk);
	int val, ret = 0, parent = 0;
	struct mr_table *mrt;
	struct vifctl vif;
	struct mfcctl mfc;
	u32 uval;

	/* There's one exception to the lock - MRT_DONE which needs to unlock */
	rtnl_lock();
	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP) {
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt) {
		ret = -ENOENT;
		goto out_unlock;
	}
	if (optname != MRT_INIT) {
		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
		    !ns_capable(net->user_ns, CAP_NET_ADMIN)) {
			ret = -EACCES;
			goto out_unlock;
		}
	}

	switch (optname) {
	case MRT_INIT:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		if (rtnl_dereference(mrt->mroute_sk)) {
			ret = -EADDRINUSE;
			break;
		}

		ret = ip_ra_control(sk, 1, mrtsock_destruct);
		if (ret == 0) {
			rcu_assign_pointer(mrt->mroute_sk, sk);
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
			inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
		}
		break;
	case MRT_DONE:
		if (sk != rcu_access_pointer(mrt->mroute_sk)) {
			ret = -EACCES;
		} else {
			/* We need to unlock here because mrtsock_destruct takes
			 * care of rtnl itself and we can't change that due to
			 * the IP_ROUTER_ALERT setsockopt which runs without it.
			 */
			rtnl_unlock();
			ret = ip_ra_control(sk, 0, NULL);
			goto out;
		}
		break;
	case MRT_ADD_VIF:
	case MRT_DEL_VIF:
		if (optlen != sizeof(vif)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&vif, optval, sizeof(vif))) {
			ret = -EFAULT;
			break;
		}
		if (vif.vifc_vifi >= MAXVIFS) {
			ret = -ENFILE;
			break;
		}
		if (optname == MRT_ADD_VIF) {
			ret = vif_add(net, mrt, &vif,
				      sk == rtnl_dereference(mrt->mroute_sk));
		} else {
			ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
		}
		break;
	/* Manipulate the forwarding caches. These live
	 * in a sort of kernel/user symbiosis.
	 */
	case MRT_ADD_MFC:
	case MRT_DEL_MFC:
		parent = -1;
		/* fall through */
	case MRT_ADD_MFC_PROXY:
	case MRT_DEL_MFC_PROXY:
		if (optlen != sizeof(mfc)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&mfc, optval, sizeof(mfc))) {
			ret = -EFAULT;
			break;
		}
		if (parent == 0)
			parent = mfc.mfcc_parent;
		if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
			ret = ipmr_mfc_delete(mrt, &mfc, parent);
		else
			ret = ipmr_mfc_add(net, mrt, &mfc,
					   sk == rtnl_dereference(mrt->mroute_sk),
					   parent);
		break;
	/* Control PIM assert. */
	case MRT_ASSERT:
		if (optlen != sizeof(val)) {
			ret = -EINVAL;
			break;
		}
		if (get_user(val, (int __user *)optval)) {
			ret = -EFAULT;
			break;
		}
		mrt->mroute_do_assert = val;
		break;
	case MRT_PIM:
		if (!ipmr_pimsm_enabled()) {
			ret = -ENOPROTOOPT;
			break;
		}
		if (optlen != sizeof(val)) {
			ret = -EINVAL;
			break;
		}
		if (get_user(val, (int __user *)optval)) {
			ret = -EFAULT;
			break;
		}

		val = !!val;
		if (val != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = val;
			mrt->mroute_do_assert = val;
		}
		break;
	case MRT_TABLE:
		if (!IS_BUILTIN(CONFIG_IP_MROUTE_MULTIPLE_TABLES)) {
			ret = -ENOPROTOOPT;
			break;
		}
		if (optlen != sizeof(uval)) {
			ret = -EINVAL;
			break;
		}
		if (get_user(uval, (u32 __user *)optval)) {
			ret = -EFAULT;
			break;
		}

		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			ret = -EBUSY;
		} else {
			mrt = ipmr_new_table(net, uval);
			if (IS_ERR(mrt))
				ret = PTR_ERR(mrt);
			else
				raw_sk(sk)->ipmr_table = uval;
		}
		break;
	/* Spurious command, or MRT_VERSION which you cannot set. */
	default:
		ret = -ENOPROTOOPT;
	}
out_unlock:
	rtnl_unlock();
out:
	return ret;
}
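/* Usage sketch (illustrative, not part of the original file): the
 * minimal daemon-side sequence that drives the options handled above.
 * Error handling is omitted and the addresses/vif numbers are made up.
 *
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	int one = 1;
 *	setsockopt(fd, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 *
 *	struct vifctl vc = { .vifc_vifi = 0, .vifc_threshold = 1 };
 *	vc.vifc_lcl_addr.s_addr = inet_addr("192.0.2.1");
 *	setsockopt(fd, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
 *
 *	struct mfcctl mc = { .mfcc_parent = 0 };
 *	mc.mfcc_origin.s_addr   = inet_addr("192.0.2.9");
 *	mc.mfcc_mcastgrp.s_addr = inet_addr("233.252.0.1");
 *	mc.mfcc_ttls[1] = 1;			// forward on vif 1
 *	setsockopt(fd, IPPROTO_IP, MRT_ADD_MFC, &mc, sizeof(mc));
 *
 * MRT_TABLE, when built in, must be issued before MRT_INIT: the handler
 * above refuses to retarget a socket that is already the active mroute
 * socket (-EBUSY).
 */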
/* Getsock opt support for the multicast routing system. */
int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP)
		return -EOPNOTSUPP;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (optname) {
	case MRT_VERSION:
		val = 0x0305;
		break;
	case MRT_PIM:
		if (!ipmr_pimsm_enabled())
			return -ENOPROTOOPT;
		val = mrt->mroute_do_pim;
		break;
	case MRT_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;
	olr = min_t(unsigned int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;
	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}
/* The IP multicast ioctl support routines. */
int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		/* Sanitize the index, mirroring the compat handler below */
		vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
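/* Usage sketch (illustrative, not part of the original file): reading
 * per-(S,G) counters through the ioctl handled above; addresses made up.
 *
 *	struct sioc_sg_req sr;
 *	sr.src.s_addr = inet_addr("192.0.2.9");
 *	sr.grp.s_addr = inet_addr("233.252.0.1");
 *	if (ioctl(fd, SIOCGETSGCNT, &sr) == 0)
 *		printf("pkts=%lu bytes=%lu wrong_if=%lu\n",
 *		       sr.pktcnt, sr.bytecnt, sr.wrong_if);
 */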
#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req {
	struct in_addr src;
	struct in_addr grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_vif_req {
	vifi_t	vifi;		/* Which iface */
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};
int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req sr;
	struct compat_sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ipmr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				vif_delete(mrt, ct, 1, NULL);
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block ip_mr_notifier = {
	.notifier_call = ipmr_device_event,
};
/* Encapsulate a packet by attaching a valid IPIP header to it.
 * This avoids tunnel drivers and other mess and gives us the speed so
 * important for multicast video.
 */
static void ip_encap(struct net *net, struct sk_buff *skb,
		     __be32 saddr, __be32 daddr)
{
	struct iphdr *iph;
	const struct iphdr *old_iph = ip_hdr(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	iph->version	= 4;
	iph->tos	= old_iph->tos;
	iph->ttl	= old_iph->ttl;
	iph->frag_off	= 0;
	iph->daddr	= daddr;
	iph->saddr	= saddr;
	iph->protocol	= IPPROTO_IPIP;
	iph->ihl	= 5;
	iph->tot_len	= htons(skb->len);
	ip_select_ident(net, skb, NULL);
	ip_send_check(iph);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	nf_reset(skb);
}
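/* Resulting layout (illustrative): after ip_encap() the skb carries a
 * minimal 20-byte outer header with protocol 4 (IPPROTO_IPIP) in front
 * of the untouched inner multicast packet, which the remote "dvmrp%d"
 * tunnel endpoint decapsulates:
 *
 *	+----------------------+---------------------------+
 *	| outer IP, proto = 4  | original multicast packet |
 *	| saddr = vif->local   | (inner IP header + data)  |
 *	| daddr = vif->remote  |                           |
 *	+----------------------+---------------------------+
 */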
static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
				      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);

	IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(net, sk, skb);
}
/* Processing handlers for ipmr_forward */

static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
			    struct sk_buff *skb, struct mfc_cache *c, int vifi)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct vif_device *vif = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct rtable *rt;
	struct flowi4 fl4;
	int    encap = 0;

	if (!vif->dev)
		goto out_free;

	if (vif->flags & VIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
		goto out_free;
	}

	if (vif->flags & VIFF_TUNNEL) {
		rt = ip_route_output_ports(net, &fl4, NULL,
					   vif->remote, vif->local,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
		encap = sizeof(struct iphdr);
	} else {
		rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
	}

	dev = rt->dst.dev;

	if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		 * allow sending ICMP here, so such packets will simply
		 * disappear into a blackhole.
		 */
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		ip_rt_put(rt);
		goto out_free;
	}

	encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;

	if (skb_cow(skb, encap)) {
		ip_rt_put(rt);
		goto out_free;
	}

	vif->pkt_out++;
	vif->bytes_out += skb->len;

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);
	ip_decrease_ttl(ip_hdr(skb));

	/* FIXME: forward and output firewalls used to be called here.
	 * What do we do with netfilter? -- RR
	 */
	if (vif->flags & VIFF_TUNNEL) {
		ip_encap(net, skb, vif->local, vif->remote);
		/* FIXME: extra output firewall step used to be here. --RR */
		vif->dev->stats.tx_packets++;
		vif->dev->stats.tx_bytes += skb->len;
	}

	IPCB(skb)->flags |= IPSKB_FORWARDED;

	/* RFC 1584 teaches that a DVMRP/PIM router must deliver packets
	 * locally not only before forwarding, but also after forwarding on
	 * all output interfaces. Clearly, if an mrouter runs a multicasting
	 * program, it should receive packets regardless of which interface
	 * the program joined on; otherwise the program would have to join
	 * on all interfaces. On the other hand, a multihomed host (or
	 * router, but not mrouter) cannot join on more than one interface -
	 * it would result in receiving multiple copies of the packet.
	 */
	NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
		net, NULL, skb, skb->dev, dev,
		ipmr_forward_finish);
	return;

out_free:
	kfree_skb(skb);
}
static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
		if (mrt->vif_table[ct].dev == dev)
			break;
	}
	return ct;
}
/* "local" means that we should preserve one skb (for local delivery) */
static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct sk_buff *skb, struct mfc_cache *cache,
			  int local)
{
	int psend = -1;
	int vif, ct;
	int true_vifi = ipmr_find_vif(mrt, skb->dev);

	vif = cache->mfc_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;
	cache->mfc_un.res.lastuse = jiffies;

	if (cache->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
		struct mfc_cache *cache_proxy;

		/* For an (*,G) entry, we only check that the incoming
		 * interface is part of the static tree.
		 */
		cache_proxy = ipmr_cache_find_any_parent(mrt, vif);
		if (cache_proxy &&
		    cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
			goto forward;
	}

	/* Wrong interface: drop packet and (maybe) send PIM assert. */
	if (mrt->vif_table[vif].dev != skb->dev) {
		if (rt_is_output_route(skb_rtable(skb))) {
			/* It is our own packet, looped back.
			 * Very complicated situation...
			 *
			 * The best workaround until routing daemons will be
			 * fixed is not to redistribute a packet if it was
			 * sent through the wrong interface. It means that
			 * multicast applications WILL NOT work for
			 * (S,G), which have a default multicast route
			 * pointing to the wrong oif. In any case, it is not
			 * a good idea to use multicasting applications on a
			 * router.
			 */
			goto dont_forward;
		}

		cache->mfc_un.res.wrong_if++;

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* pimsm uses asserts, when switching from RPT to SPT,
		     * so that we cannot check that packet arrived on an oif.
		     * It is bad, but otherwise we would need to move pretty
		     * large chunk of pimd to kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
		}
		goto dont_forward;
	}

forward:
	mrt->vif_table[vif].pkt_in++;
	mrt->vif_table[vif].bytes_in += skb->len;

	/* Forward the frame */
	if (cache->mfc_origin == htonl(INADDR_ANY) &&
	    cache->mfc_mcastgrp == htonl(INADDR_ANY)) {
		if (true_vifi >= 0 &&
		    true_vifi != cache->mfc_parent &&
		    ip_hdr(skb)->ttl >
				cache->mfc_un.res.ttls[cache->mfc_parent]) {
			/* It's an (*,*) entry and the packet is not coming from
			 * the upstream: forward the packet to the upstream
			 * only.
			 */
			psend = cache->mfc_parent;
			goto last_forward;
		}
		goto dont_forward;
	}
	for (ct = cache->mfc_un.res.maxvif - 1;
	     ct >= cache->mfc_un.res.minvif; ct--) {
		/* For (*,G) entry, don't forward to the incoming interface */
		if ((cache->mfc_origin != htonl(INADDR_ANY) ||
		     ct != true_vifi) &&
		    ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

				if (skb2)
					ipmr_queue_xmit(net, mrt, skb2, cache,
							psend);
			}
			psend = ct;
		}
	}
last_forward:
	if (psend != -1) {
		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

			if (skb2)
				ipmr_queue_xmit(net, mrt, skb2, cache, psend);
		} else {
			ipmr_queue_xmit(net, mrt, skb, cache, psend);
			return;
		}
	}

dont_forward:
	if (!local)
		kfree_skb(skb);
}
static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph = ip_hdr(skb);
	struct flowi4 fl4 = {
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowi4_tos = RT_TOS(iph->tos),
		.flowi4_oif = (rt_is_output_route(rt) ?
			       skb->dev->ifindex : 0),
		.flowi4_iif = (rt_is_output_route(rt) ?
			       LOOPBACK_IFINDEX :
			       skb->dev->ifindex),
		.flowi4_mark = skb->mark,
	};
	struct mr_table *mrt;
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err)
		return ERR_PTR(err);
	return mrt;
}
/* Multicast packets for forwarding arrive here
 * Called with rcu_read_lock();
 */
int ip_mr_input(struct sk_buff *skb)
{
	struct mfc_cache *cache;
	struct net *net = dev_net(skb->dev);
	int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
	struct mr_table *mrt;
	struct net_device *dev;

	/* skb->dev passed in is the loX master dev for vrfs.
	 * As there are no vifs associated with loopback devices,
	 * get the proper interface that does have a vif associated with it.
	 */
	dev = skb->dev;
	if (netif_is_l3_master(skb->dev)) {
		dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
		if (!dev) {
			kfree_skb(skb);
			return -ENODEV;
		}
	}

	/* Packet is looped back after forward, it should not be
	 * forwarded second time, but still can be delivered locally.
	 */
	if (IPCB(skb)->flags & IPSKB_FORWARDED)
		goto dont_forward;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt)) {
		kfree_skb(skb);
		return PTR_ERR(mrt);
	}
	if (!local) {
		if (IPCB(skb)->opt.router_alert) {
			if (ip_call_ra_chain(skb))
				return 0;
		} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
			/* IGMPv1 (and broken IGMPv2 implementations such as
			 * Cisco IOS <= 11.2(8)) do not put the router alert
			 * option into IGMP packets destined to routable
			 * groups. It is very bad, because it means
			 * that we can forward NO IGMP messages.
			 */
			struct sock *mroute_sk;

			mroute_sk = rcu_dereference(mrt->mroute_sk);
			if (mroute_sk) {
				nf_reset(skb);
				raw_rcv(mroute_sk, skb);
				return 0;
			}
		}
	}

	/* already under rcu_read_lock() */
	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
	if (!cache) {
		int vif = ipmr_find_vif(mrt, dev);

		if (vif >= 0)
			cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
						    vif);
	}

	/* No usable cache entry */
	if (!cache) {
		int vif;

		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			ip_local_deliver(skb);
			if (!skb2)
				return -ENOBUFS;
			skb = skb2;
		}

		read_lock(&mrt_lock);
		vif = ipmr_find_vif(mrt, dev);
		if (vif >= 0) {
			int err2 = ipmr_cache_unresolved(mrt, vif, skb);
			read_unlock(&mrt_lock);

			return err2;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	read_lock(&mrt_lock);
	ip_mr_forward(net, mrt, skb, cache, local);
	read_unlock(&mrt_lock);

	if (local)
		return ip_local_deliver(skb);

	return 0;

dont_forward:
	if (local)
		return ip_local_deliver(skb);
	kfree_skb(skb);
	return 0;
}
#ifdef CONFIG_IP_PIMSM_V1
/* Handle IGMP messages of PIMv1 */
int pim_rcv_v1(struct sk_buff *skb)
{
	struct igmphdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = igmp_hdr(skb);

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (!mrt->mroute_do_pim ||
	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
		goto drop;

	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif
#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | (PIM_REGISTER)) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm)
{
	struct rta_mfc_stats mfcs;
	struct nlattr *mp_attr;
	struct rtnexthop *nhp;
	unsigned long lastuse;
	int ct;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mfc_parent >= MAXVIFS)
		return -ENOENT;

	if (VIF_EXISTS(mrt, c->mfc_parent) &&
	    nla_put_u32(skb, RTA_IIF, mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
		return -EMSGSIZE;

	if (!(mp_attr = nla_nest_start(skb, RTA_MULTIPATH)))
		return -EMSGSIZE;

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp)))) {
				nla_nest_cancel(skb, mp_attr);
				return -EMSGSIZE;
			}

			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}

	nla_nest_end(skb, mp_attr);

	lastuse = READ_ONCE(c->mfc_un.res.lastuse);
	lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;

	mfcs.mfcs_packets = c->mfc_un.res.pkt;
	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
	    nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
			      RTA_PAD))
		return -EMSGSIZE;

	rtm->rtm_type = RTN_MULTICAST;
	return 1;
}
int ipmr_get_route(struct net *net, struct sk_buff *skb,
		   __be32 saddr, __be32 daddr,
		   struct rtmsg *rtm, int nowait, u32 portid)
{
	struct mfc_cache *cache;
	struct mr_table *mrt;
	int err;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	rcu_read_lock();
	cache = ipmr_cache_find(mrt, saddr, daddr);
	if (!cache && skb->dev) {
		int vif = ipmr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ipmr_cache_find_any(mrt, daddr, vif);
	}
	if (!cache) {
		struct sk_buff *skb2;
		struct iphdr *iph;
		struct net_device *dev;
		int vif = -1;

		if (nowait) {
			rcu_read_unlock();
			return -EAGAIN;
		}

		dev = skb->dev;
		read_lock(&mrt_lock);
		if (dev)
			vif = ipmr_find_vif(mrt, dev);
		if (vif < 0) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENODEV;
		}
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENOMEM;
		}

		NETLINK_CB(skb2).portid = portid;
		skb_push(skb2, sizeof(struct iphdr));
		skb_reset_network_header(skb2);
		iph = ip_hdr(skb2);
		iph->ihl = sizeof(struct iphdr) >> 2;
		iph->saddr = saddr;
		iph->daddr = daddr;
		iph->version = 0;
		err = ipmr_cache_unresolved(mrt, vif, skb2);
		read_unlock(&mrt_lock);
		rcu_read_unlock();
		return err;
	}

	read_lock(&mrt_lock);
	err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
	read_unlock(&mrt_lock);
	rcu_read_unlock();
	return err;
}
static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			    u32 portid, u32 seq, struct mfc_cache *c, int cmd,
			    int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	int err;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family   = RTNL_FAMILY_IPMR;
	rtm->rtm_dst_len  = 32;
	rtm->rtm_src_len  = 32;
	rtm->rtm_tos      = 0;
	rtm->rtm_table    = mrt->id;
	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
		goto nla_put_failure;
	rtm->rtm_type     = RTN_MULTICAST;
	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
	if (c->mfc_flags & MFC_STATIC)
		rtm->rtm_protocol = RTPROT_STATIC;
	else
		rtm->rtm_protocol = RTPROT_MROUTED;
	rtm->rtm_flags    = 0;

	if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) ||
	    nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp))
		goto nla_put_failure;
	err = __ipmr_fill_mroute(mrt, skb, c, rtm);
	/* do not break the dump if cache is unresolved */
	if (err < 0 && err != -ENOENT)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static size_t mroute_msgsize(bool unresolved, int maxvif)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtmsg))
		+ nla_total_size(4)			/* RTA_TABLE */
		+ nla_total_size(4)			/* RTA_SRC */
		+ nla_total_size(4)			/* RTA_DST */
		;

	if (!unresolved)
		len = len
		      + nla_total_size(4)		/* RTA_IIF */
		      + nla_total_size(0)		/* RTA_MULTIPATH */
		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
							/* RTA_MFC_STATS */
		      + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
		      + nla_total_size_64bit(sizeof(u64)) /* RTA_EXPIRES */
		;

	return len;
}
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif),
			GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
	if (err < 0)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);
	return;

errout:
	kfree_skb(skb);
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
}
static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mr_table *mrt;
	struct mfc_cache *mfc;
	unsigned int t = 0, s_t;
	unsigned int h = 0, s_h;
	unsigned int e = 0, s_e;

	s_t = cb->args[0];
	s_h = cb->args[1];
	s_e = cb->args[2];

	rcu_read_lock();
	ipmr_for_each_table(mrt, net) {
		if (t < s_t)
			goto next_table;
		if (t > s_t)
			s_h = 0;
		for (h = s_h; h < MFC_LINES; h++) {
			list_for_each_entry_rcu(mfc, &mrt->mfc_cache_array[h], list) {
				if (e < s_e)
					goto next_entry;
				if (ipmr_fill_mroute(mrt, skb,
						     NETLINK_CB(cb->skb).portid,
						     cb->nlh->nlmsg_seq,
						     mfc, RTM_NEWROUTE,
						     NLM_F_MULTI) < 0)
					goto done;
next_entry:
				e++;
			}
			e = 0;
			s_e = 0;
		}
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
			if (e < s_e)
				goto next_entry2;
			if (ipmr_fill_mroute(mrt, skb,
					     NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     mfc, RTM_NEWROUTE,
					     NLM_F_MULTI) < 0) {
				spin_unlock_bh(&mfc_unres_lock);
				goto done;
			}
next_entry2:
			e++;
		}
		spin_unlock_bh(&mfc_unres_lock);
		e = 0;
		s_e = 0;
		s_h = 0;
next_table:
		t++;
	}
done:
	rcu_read_unlock();

	cb->args[2] = e;
	cb->args[1] = h;
	cb->args[0] = t;

	return skb->len;
}
static const struct nla_policy rtm_ipmr_policy[RTA_MAX + 1] = {
	[RTA_SRC]	= { .type = NLA_U32 },
	[RTA_DST]	= { .type = NLA_U32 },
	[RTA_IIF]	= { .type = NLA_U32 },
	[RTA_TABLE]	= { .type = NLA_U32 },
	[RTA_MULTIPATH]	= { .len = sizeof(struct rtnexthop) },
};
static bool ipmr_rtm_validate_proto(unsigned char rtm_protocol)
{
	switch (rtm_protocol) {
	case RTPROT_STATIC:
	case RTPROT_MROUTED:
		return true;
	default:
		return false;
	}
}
static int ipmr_nla_get_ttls(const struct nlattr *nla, struct mfcctl *mfcc)
{
	struct rtnexthop *rtnh = nla_data(nla);
	int remaining = nla_len(nla), vifi = 0;

	while (rtnh_ok(rtnh, remaining)) {
		mfcc->mfcc_ttls[vifi] = rtnh->rtnh_hops;
		if (++vifi == MAXVIFS)
			break;
		rtnh = rtnh_next(rtnh, &remaining);
	}

	return remaining > 0 ? -EINVAL : vifi;
}
/* returns < 0 on error, 0 for ADD_MFC and 1 for ADD_MFC_PROXY */
static int rtm_to_ipmr_mfcc(struct net *net, struct nlmsghdr *nlh,
			    struct mfcctl *mfcc, int *mrtsock,
			    struct mr_table **mrtret)
{
	struct net_device *dev = NULL;
	u32 tblid = RT_TABLE_DEFAULT;
	struct mr_table *mrt;
	struct nlattr *attr;
	struct rtmsg *rtm;
	int ret, rem;

	ret = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipmr_policy);
	if (ret < 0)
		goto out;
	rtm = nlmsg_data(nlh);

	ret = -EINVAL;
	if (rtm->rtm_family != RTNL_FAMILY_IPMR || rtm->rtm_dst_len != 32 ||
	    rtm->rtm_type != RTN_MULTICAST ||
	    rtm->rtm_scope != RT_SCOPE_UNIVERSE ||
	    !ipmr_rtm_validate_proto(rtm->rtm_protocol))
		goto out;

	memset(mfcc, 0, sizeof(*mfcc));
	mfcc->mfcc_parent = -1;
	ret = 0;
	nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), rem) {
		switch (nla_type(attr)) {
		case RTA_SRC:
			mfcc->mfcc_origin.s_addr = nla_get_be32(attr);
			break;
		case RTA_DST:
			mfcc->mfcc_mcastgrp.s_addr = nla_get_be32(attr);
			break;
		case RTA_IIF:
			dev = __dev_get_by_index(net, nla_get_u32(attr));
			if (!dev) {
				ret = -ENODEV;
				goto out;
			}
			break;
		case RTA_MULTIPATH:
			if (ipmr_nla_get_ttls(attr, mfcc) < 0) {
				ret = -EINVAL;
				goto out;
			}
			break;
		case RTA_PREFSRC:
			ret = 1;
			break;
		case RTA_TABLE:
			tblid = nla_get_u32(attr);
			break;
		}
	}
	mrt = ipmr_get_table(net, tblid);
	if (!mrt) {
		ret = -ENOENT;
		goto out;
	}
	*mrtret = mrt;
	*mrtsock = rtm->rtm_protocol == RTPROT_MROUTED ? 1 : 0;
	if (dev)
		mfcc->mfcc_parent = ipmr_find_vif(mrt, dev);

out:
	return ret;
}
/* takes care of both newroute and delroute */
static int ipmr_rtm_route(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	int ret, mrtsock, parent;
	struct mr_table *tbl;
	struct mfcctl mfcc;

	mrtsock = 0;
	tbl = NULL;
	ret = rtm_to_ipmr_mfcc(net, nlh, &mfcc, &mrtsock, &tbl);
	if (ret < 0)
		return ret;

	parent = ret ? mfcc.mfcc_parent : -1;
	if (nlh->nlmsg_type == RTM_NEWROUTE)
		return ipmr_mfc_add(net, tbl, &mfcc, mrtsock, parent);
	else
		return ipmr_mfc_delete(tbl, &mfcc, parent);
}
#ifdef CONFIG_PROC_FS
/* The /proc interfaces to multicast routing :
 * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
 */
struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	int ct;
};

static struct vif_device *ipmr_vif_seq_idx(struct net *net,
					   struct ipmr_vif_iter *iter,
					   loff_t pos)
{
	struct mr_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif_table[iter->ct];
	}
	return NULL;
}
static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}
static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ipmr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif_table[iter->ct];
	}
	return NULL;
}
static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2Zd %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
			   vif - mrt->vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags, vif->local, vif->remote);
	}
	return 0;
}
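/* Sample output (illustrative; the counter values are made up) as
 * rendered by the show function above:
 *
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote
 *	 0 eth0         1514342    9456   1514342    9456 00000 C0000201 00000000
 *	 1 pimreg             0       0     48216     371 00004 00000000 00000000
 *
 * Flags is the VIFF_* bitmask (00004 = VIFF_REGISTER); Local/Remote are
 * the raw addresses printed in hex.
 */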
static const struct seq_operations ipmr_vif_seq_ops = {
	.start = ipmr_vif_seq_start,
	.next  = ipmr_vif_seq_next,
	.stop  = ipmr_vif_seq_stop,
	.show  = ipmr_vif_seq_show,
};

static int ipmr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ipmr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_vif_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	struct list_head *cache;
	int ct;
};

static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
					  struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr_table *mrt = it->mrt;
	struct mfc_cache *mfc;

	rcu_read_lock();
	for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		list_for_each_entry_rcu(mfc, it->cache, list)
			if (pos-- == 0)
				return mfc;
	}
	rcu_read_unlock();

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	it->cache = NULL;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}
static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = it->mrt;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc_cache, list);

	if (it->cache == &mrt->mfc_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);

	while (++it->ct < MFC_LINES) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		if (list_empty(it->cache))
			continue;
		return list_first_entry(it->cache, struct mfc_cache, list);
	}

	/* exhausted cache_array, show unresolved */
	rcu_read_unlock();
	it->cache = &mrt->mfc_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc_cache, list);

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}
static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == &mrt->mfc_cache_array[it->ct])
		rcu_read_unlock();
}
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
	} else {
		const struct mfc_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		const struct mr_table *mrt = it->mrt;

		seq_printf(seq, "%08X %08X %-3hd",
			   (__force u32) mfc->mfc_mcastgrp,
			   (__force u32) mfc->mfc_origin,
			   mfc->mfc_parent);

		if (it->cache != &mrt->mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
					   " %2d:%-3d",
					   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}
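/* Sample output (illustrative; the values are made up): one resolved
 * entry forwarding to vifs 1 and 2, as formatted by the show function
 * above. Group/Origin are the raw addresses in hex, Iif is mfc_parent,
 * and each oif is printed as vif:ttl-threshold:
 *
 *	Group    Origin   Iif     Pkts    Bytes    Wrong Oifs
 *	E1FC0001 090200C0 0       9456  1514342        0  1:1    2:64
 */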
static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ipmr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_mfc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
#endif
#ifdef CONFIG_IP_PIMSM_V2
static const struct net_protocol pim_protocol = {
	.handler	= pim_rcv,
	.netns_ok	= 1,
};
#endif
/* Setup for IP multicast routing */
static int __net_init ipmr_net_init(struct net *net)
{
	int err;

	err = ipmr_rules_init(net);
	if (err < 0)
		goto fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create("ip_mr_vif", 0, net->proc_net, &ipmr_vif_fops))
		goto proc_vif_fail;
	if (!proc_create("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_fops))
		goto proc_cache_fail;
#endif
	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip_mr_vif", net->proc_net);
proc_vif_fail:
	ipmr_rules_exit(net);
#endif
fail:
	return err;
}
static void __net_exit ipmr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip_mr_cache", net->proc_net);
	remove_proc_entry("ip_mr_vif", net->proc_net);
#endif
	ipmr_rules_exit(net);
}

static struct pernet_operations ipmr_net_ops = {
	.init = ipmr_net_init,
	.exit = ipmr_net_exit,
};
int __init ip_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
				       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
				       NULL);

	err = register_pernet_subsys(&ipmr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IP_PIMSM_V2
	if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
		      NULL, ipmr_rtm_dumproute, NULL);
	rtnl_register(RTNL_FAMILY_IPMR, RTM_NEWROUTE,
		      ipmr_rtm_route, NULL, NULL);
	rtnl_register(RTNL_FAMILY_IPMR, RTM_DELROUTE,
		      ipmr_rtm_route, NULL, NULL);
	return 0;

#ifdef CONFIG_IP_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ipmr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}