/*
 * IP multicast routing support for mrouted 3.6/3.8
 *
 * (c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 * Linux Consultancy and Custom Driver Development
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Michael Chastain : Incorrect size of copying.
 * Alan Cox : Added the cache manager code
 * Alan Cox : Fixed the clone/copy bug and device race.
 * Mike McLagan : Routing by source
 * Malcolm Beattie : Buffer handling fixes.
 * Alexey Kuznetsov : Double buffer free and other fixes.
 * SVR Anand : Fixed several multicast bugs and problems.
 * Alexey Kuznetsov : Status, optimisations and more.
 * Brad Parker : Better behaviour on mrouted upcall overflow.
 * Carlos Picoto : PIMv1 Support
 * Pavlin Ivanov Radoslavov: PIMv2 Registers must checksum only PIM header.
 *   Relax this requirement to work with older peers.
 */
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>
#include <net/nexthop.h>
#include <linux/nospec.h>
struct ipmr_rule {
	struct fib_rule		common;
};

struct ipmr_result {
	struct mr_table		*mrt;
};
/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that the changes are semaphored via rtnl_lock.
 */
static DEFINE_RWLOCK(mrt_lock);

/* Multicast router control variables */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to Alan's original scheme. The hash table of resolved
 * entries is changed only in process context and protected
 * with the weak lock mrt_lock. The queue of unresolved entries is
 * protected with the strong spinlock mfc_unres_lock.
 *
 * This way the data path is entirely free of exclusive locks.
 */
static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);

static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct sk_buff *skb, struct mfc_cache *cache,
			  int local);
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd);
static void mroute_clean_tables(struct mr_table *mrt, bool all);
static void ipmr_expire_process(unsigned long arg);
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	ipmr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	struct ipmr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};
	int err;

	err = fib_rules_lookup(net->ipv4.mr_rules_ops,
			       flowi4_to_flowi(flp4), 0, &arg);
static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
			    int flags, struct fib_lookup_arg *arg)
{
	struct ipmr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_UNREACHABLE:
	case FR_ACT_PROHIBIT:
	case FR_ACT_BLACKHOLE:
	}

	mrt = ipmr_get_table(rule->fr_net, rule->table);

static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)

static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
			       struct fib_rule_hdr *frh, struct nlattr **tb)

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			     struct nlattr **tb)

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			  struct fib_rule_hdr *frh)

static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
	.family		= RTNL_FAMILY_IPMR,
	.rule_size	= sizeof(struct ipmr_rule),
	.addr_size	= sizeof(u32),
	.action		= ipmr_rule_action,
	.match		= ipmr_rule_match,
	.configure	= ipmr_rule_configure,
	.compare	= ipmr_rule_compare,
	.fill		= ipmr_rule_fill,
	.nlgroup	= RTNLGRP_IPV4_RULE,
	.policy		= ipmr_rule_policy,
	.owner		= THIS_MODULE,
};

static int __net_init ipmr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr_table *mrt;

	ops = fib_rules_register(&ipmr_rules_ops_template, net);

	INIT_LIST_HEAD(&net->ipv4.mr_tables);

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);

	err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);

	net->ipv4.mr_rules_ops = ops;

	ipmr_free_table(mrt);

	fib_rules_unregister(ops);
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	struct mr_table *mrt, *next;

	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
		list_del(&mrt->list);
		ipmr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv4.mr_rules_ops);
}
#else
#define ipmr_for_each_table(mrt, net) \
	for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	return net->ipv4.mrt;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	*mrt = net->ipv4.mrt;
	return 0;
}

static int __net_init ipmr_rules_init(struct net *net)
{
	struct mr_table *mrt;

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);

static void __net_exit ipmr_rules_exit(struct net *net)
{
	ipmr_free_table(net->ipv4.mrt);
	net->ipv4.mrt = NULL;
}
#endif
static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
	struct mr_table *mrt;
306 /* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
307 if (id != RT_TABLE_DEFAULT && id >= 1000000000)
308 return ERR_PTR(-EINVAL);
	mrt = ipmr_get_table(net, id);
	if (mrt)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (!mrt)
		return ERR_PTR(-ENOMEM);
	write_pnet(&mrt->net, net);

	/* Forwarding cache */
	for (i = 0; i < MFC_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);

	INIT_LIST_HEAD(&mrt->mfc_unres_queue);

	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
		    (unsigned long)mrt);

	mrt->mroute_reg_vif_num = -1;
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
	return mrt;
}

static void ipmr_free_table(struct mr_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt, true);
	kfree(mrt);
}
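
/* Illustrative only: a userspace daemon selects one of these tables by
 * issuing MRT_TABLE before MRT_INIT on its IGMP raw socket (handled in
 * ip_mroute_setsockopt() below). Socket setup and the table id here are
 * assumptions; this sketch is not compiled as part of the file.
 */
#if 0
	u32 tbl = 77;			/* hypothetical table id */

	setsockopt(s, IPPROTO_IP, MRT_TABLE, &tbl, sizeof(tbl));
	setsockopt(s, IPPROTO_IP, MRT_INIT, &(int){ 1 }, sizeof(int));
#endif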
/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
	struct net *net = dev_net(dev);

	dev = __dev_get_by_name(net, "tunl0");
	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
			set_fs(oldfs);
		}
	}
}

/* Initialize ipmr pimreg/tunnel in_device */
static bool ipmr_init_vif_indev(const struct net_device *dev)
{
	struct in_device *in_dev;

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev)
		return false;
	ipv4_devconf_setall(in_dev);
	neigh_parms_data_state_setall(in_dev->arp_parms);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

	return true;
}

static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
	struct net_device *dev;

	dev = __dev_get_by_name(net, "tunl0");

	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		int err;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
			set_fs(oldfs);
		}

		if (err == 0 &&
		    (dev = __dev_get_by_name(net, p.name)) != NULL) {
427 dev->flags |= IFF_MULTICAST;
428 if (!ipmr_init_vif_indev(dev))
438 unregister_netdevice(dev);
442 #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
443 static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
445 struct net *net = dev_net(dev);
446 struct mr_table *mrt;
447 struct flowi4 fl4 = {
448 .flowi4_oif = dev->ifindex,
449 .flowi4_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
450 .flowi4_mark = skb->mark,
454 err = ipmr_fib_lookup(net, &fl4, &mrt);
460 read_lock(&mrt_lock);
461 dev->stats.tx_bytes += skb->len;
462 dev->stats.tx_packets++;
463 ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
464 read_unlock(&mrt_lock);
469 static int reg_vif_get_iflink(const struct net_device *dev)
474 static const struct net_device_ops reg_vif_netdev_ops = {
475 .ndo_start_xmit = reg_vif_xmit,
476 .ndo_get_iflink = reg_vif_get_iflink,
479 static void reg_vif_setup(struct net_device *dev)
481 dev->type = ARPHRD_PIMREG;
482 dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
483 dev->flags = IFF_NOARP;
484 dev->netdev_ops = ®_vif_netdev_ops;
485 dev->destructor = free_netdev;
486 dev->features |= NETIF_F_NETNS_LOCAL;
489 static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
491 struct net_device *dev;
494 if (mrt->id == RT_TABLE_DEFAULT)
495 sprintf(name, "pimreg");
497 sprintf(name, "pimreg%u", mrt->id);
499 dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
504 dev_net_set(dev, net);
506 if (register_netdevice(dev)) {
511 if (!ipmr_init_vif_indev(dev))
521 unregister_netdevice(dev);
525 /* called with rcu_read_lock() */
526 static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
529 struct net_device *reg_dev = NULL;
532 encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
	/* Check that:
	 * a. packet is really sent to a multicast group
	 * b. packet is not a NULL-REGISTER
	 * c. packet is not truncated
	 */
	if (!ipv4_is_multicast(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + pimlen > skb->len)

	read_lock(&mrt_lock);
	if (mrt->mroute_reg_vif_num >= 0)
		reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
	read_unlock(&mrt_lock);

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	return NET_RX_SUCCESS;
}
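
/* For reference, a PIM register packet as handled above looks roughly
 * like this on the wire (a sketch, not normative for every PIM revision):
 *
 *   outer IP header (protocol 103 = PIM, DR -> RP)
 *   PIM header (type = Register)
 *   inner IP header (the original multicast packet; "encap" above)
 *   payload
 *
 * __pim_rcv() validates the inner header, strips everything up to it and
 * requeues the inner packet on the pimreg device via skb_tunnel_rx().
 */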
#else
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	return NULL;
}
#endif

/**
 *	vif_delete - Delete a VIF entry
 *	@notify: Set to 1, if the caller is a notifier_call
 */
static int vif_delete(struct mr_table *mrt, int vifi, int notify,
		      struct list_head *head)
{
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;

	if (vifi + 1 == mrt->maxvif) {
		int tmp;

		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in_dev = __in_dev_get_rtnl(dev);
	if (in_dev) {
		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
		inet_netconf_notify_devconf(dev_net(dev),
					    NETCONFA_MC_FORWARDING,
					    dev->ifindex, &in_dev->cnf);
		ip_rt_multicast_event(in_dev);
	}

	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}

static void ipmr_cache_free_rcu(struct rcu_head *head)
{
	struct mfc_cache *c = container_of(head, struct mfc_cache, rcu);

	kmem_cache_free(mrt_cachep, c);
}

static inline void ipmr_cache_free(struct mfc_cache *c)
{
	call_rcu(&c->rcu, ipmr_cache_free_rcu);
}

/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */
static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
658 e->error = -ETIMEDOUT;
659 memset(&e->msg, 0, sizeof(e->msg));
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			kfree_skb(skb);
		}
	}

	ipmr_cache_free(c);
}

/* Timer process for the unresolved queue. */
static void ipmr_expire_process(unsigned long arg)
{
	struct mr_table *mrt = (struct mr_table *)arg;
	unsigned long now;
	unsigned long expires;
	struct mfc_cache *c, *next;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10);
		return;
	}

	if (list_empty(&mrt->mfc_unres_queue))
		goto out;

	now = jiffies;
	expires = 10*HZ;

	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mroute_netlink_event(mrt, c, RTM_DELROUTE);
		ipmr_destroy_unres(mrt, c);
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);

out:
	spin_unlock(&mfc_unres_lock);
}

/* Fill oifs list. It is called under write locked mrt_lock. */
static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
				   unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (VIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
	cache->mfc_un.res.lastuse = jiffies;
}
static int vif_add(struct net *net, struct mr_table *mrt,
		   struct vifctl *vifc, int mrtsock)
{
	int vifi = vifc->vifc_vifi;
	struct vif_device *v = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;
	int err;

	/* Is vif busy ? */
	if (VIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->vifc_flags) {
	case VIFF_REGISTER:
		if (!ipmr_pimsm_enabled())
			return -EINVAL;
		/* Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ipmr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
	case VIFF_TUNNEL:
		dev = ipmr_new_tunnel(net, vifc);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err < 0) {
			ipmr_del_tunnel(dev, vifc);
			dev_put(dev);
			return err;
		}
		break;
	case VIFF_USE_IFINDEX:
	case 0:
		if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
			dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
			if (dev && !__in_dev_get_rtnl(dev)) {
				dev_put(dev);
				return -EADDRNOTAVAIL;
			}
		} else {
			dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
		}
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev) {
		dev_put(dev);
		return -EADDRNOTAVAIL;
	}
	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
	inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING, dev->ifindex,
				    &in_dev->cnf);
	ip_rt_multicast_event(in_dev);

	/* Fill in the VIF structures */
	v->rate_limit = vifc->vifc_rate_limit;
	v->local = vifc->vifc_lcl_addr.s_addr;
	v->remote = vifc->vifc_rmt_addr.s_addr;
	v->flags = vifc->vifc_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->link = dev->ifindex;
	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
		v->link = dev_get_iflink(dev);

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
	if (v->flags & VIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
	if (vifi+1 > mrt->maxvif)
		mrt->maxvif = vifi+1;
	write_unlock_bh(&mrt_lock);
	return 0;
}
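
/* A minimal userspace sketch (illustrative, not compiled here) of how a
 * daemon such as mrouted drives vif_add() through the socket API; the
 * address and vif number are made up:
 */
#if 0
	struct vifctl vc;

	memset(&vc, 0, sizeof(vc));
	vc.vifc_vifi = 0;			/* slot in vif_table */
	vc.vifc_flags = 0;			/* plain physical interface */
	vc.vifc_threshold = 1;			/* minimum TTL to forward */
	vc.vifc_rate_limit = 0;
	vc.vifc_lcl_addr.s_addr = inet_addr("192.0.2.1");

	setsockopt(s, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
#endif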
/* called with rcu_read_lock() */
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
					 __be32 origin, __be32 mcastgrp)
{
	int line = MFC_HASH(mcastgrp, origin);
	struct mfc_cache *c;

	list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
			return c;
	}
	return NULL;
}

/* Look for a (*,*,oif) entry */
static struct mfc_cache *ipmr_cache_find_any_parent(struct mr_table *mrt,
						    int vifi)
{
	int line = MFC_HASH(htonl(INADDR_ANY), htonl(INADDR_ANY));
	struct mfc_cache *c;

	list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
		if (c->mfc_origin == htonl(INADDR_ANY) &&
		    c->mfc_mcastgrp == htonl(INADDR_ANY) &&
		    c->mfc_un.res.ttls[vifi] < 255)
			return c;

	return NULL;
}

/* Look for a (*,G) entry */
static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
					     __be32 mcastgrp, int vifi)
{
	int line = MFC_HASH(mcastgrp, htonl(INADDR_ANY));
	struct mfc_cache *c, *proxy;

	if (mcastgrp == htonl(INADDR_ANY))
		goto skip;

	list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
		if (c->mfc_origin == htonl(INADDR_ANY) &&
		    c->mfc_mcastgrp == mcastgrp) {
			if (c->mfc_un.res.ttls[vifi] < 255)
				return c;

			/* It's ok if the vifi is part of the static tree */
			proxy = ipmr_cache_find_any_parent(mrt,
							   c->mfc_parent);
			if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
				return c;
		}

skip:
	return ipmr_cache_find_any_parent(mrt, vifi);
}

/* Allocate a multicast cache entry */
static struct mfc_cache *ipmr_cache_alloc(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);

	if (c) {
		c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
		c->mfc_un.res.minvif = MAXVIFS;
	}
	return c;
}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);

	if (c) {
		skb_queue_head_init(&c->mfc_un.unres.unresolved);
		c->mfc_un.unres.expires = jiffies + 10*HZ;
	}
	return c;
}

/* A cache entry has gone into a resolved state from queued */
static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
			       struct mfc_cache *uc, struct mfc_cache *c)
{
	struct sk_buff *skb;

	/* Play the pending entries through our router */
	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			if (__ipmr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) -
						 (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
937 e->error = -EMSGSIZE;
938 memset(&e->msg, 0, sizeof(e->msg));
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			ip_mr_forward(net, mrt, skb, c, 0);
		}
	}
}

/* Bounce a cache query up to mrouted. We could use netlink for this but mrouted
 * expects the following bizarre scheme.
 *
 * Called under mrt_lock.
 */
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert)
{
	const int ihl = ip_hdrlen(pkt);
	struct sock *mroute_sk;
	struct igmphdr *igmp;
	struct igmpmsg *msg;
	struct sk_buff *skb;
	int ret;

	if (assert == IGMPMSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
	else
		skb = alloc_skb(128, GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	if (assert == IGMPMSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		 * Duplicate old header, fix ihl, length etc.
		 * And all this only to mangle msg->im_msgtype and
		 * to set msg->im_mbz to "mbz" :-)
		 */
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		msg = (struct igmpmsg *)skb_network_header(skb);
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
		msg->im_msgtype = IGMPMSG_WHOLEPKT;
		msg->im_mbz = 0;
		msg->im_vif = mrt->mroute_reg_vif_num;
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));
	} else {
		/* Copy the IP header */
		skb_set_network_header(skb, skb->len);
		skb_put(skb, ihl);
		skb_copy_to_linear_data(skb, pkt->data, ihl);
		/* Flag to the kernel this is a route add */
		ip_hdr(skb)->protocol = 0;
		msg = (struct igmpmsg *)skb_network_header(skb);
		msg->im_vif = vifi;
		skb_dst_set(skb, dst_clone(skb_dst(pkt)));
		/* Add our header */
		igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
		igmp->type = assert;
		msg->im_msgtype = assert;
		igmp->code = 0;
		ip_hdr(skb)->tot_len = htons(skb->len);	/* Fix the length */
		skb->transport_header = skb->network_header;
	}

	rcu_read_lock();
	mroute_sk = rcu_dereference(mrt->mroute_sk);
	if (!mroute_sk) {
		rcu_read_unlock();
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Deliver to mrouted */
	ret = sock_queue_rcv_skb(mroute_sk, skb);
	rcu_read_unlock();
	if (ret < 0) {
		net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}
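
/* The upcall built above reaches the daemon as a fake IGMP packet read
 * from the mroute socket. A sketch of the reader side (illustrative,
 * not compiled; mroute_fd is assumed to be the MRT_INIT'ed IGMP raw
 * socket):
 */
#if 0
	char buf[2048];
	struct igmpmsg *msg = (struct igmpmsg *)buf;
	ssize_t n = recv(mroute_fd, buf, sizeof(buf), 0);

	if (n >= (ssize_t)sizeof(*msg) && msg->im_mbz == 0) {
		switch (msg->im_msgtype) {
		case IGMPMSG_NOCACHE:	/* install an (S,G) route */
			break;
		case IGMPMSG_WRONGVIF:	/* PIM assert candidate */
			break;
		case IGMPMSG_WHOLEPKT:	/* PIM register processing */
			break;
		}
	}
#endif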
/* Queue a packet for resolution. It gets locked cache entry! */
static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
				 struct sk_buff *skb)
{
	bool found = false;
	int err;
	struct mfc_cache *c;
	const struct iphdr *iph = ip_hdr(skb);

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
		if (c->mfc_mcastgrp == iph->daddr &&
		    c->mfc_origin == iph->saddr) {
			found = true;
			break;
		}
	}

	if (!found) {
		/* Create a new entry if allowable */
		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ipmr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/* Fill in the new cache entry */
		c->mfc_parent = -1;
		c->mfc_origin = iph->saddr;
		c->mfc_mcastgrp = iph->daddr;

		/* Reflect first query at mrouted. */
		err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			 * out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ipmr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->list, &mrt->mfc_unres_queue);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);

		if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
			mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
	}

	/* See if we can append the packet */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}

/* MFC cache manipulation by user space mroute daemon */

static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
{
	int line;
	struct mfc_cache *c, *next;

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
		    (parent == -1 || parent == c->mfc_parent)) {
			list_del_rcu(&c->list);
			mroute_netlink_event(mrt, c, RTM_DELROUTE);
			ipmr_cache_free(c);
			return 0;
		}
	}
	return -ENOENT;
}

static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
			struct mfcctl *mfc, int mrtsock, int parent)
{
	bool found = false;
	int line;
	struct mfc_cache *uc, *c;

	if (mfc->mfcc_parent >= MAXVIFS)
		return -ENFILE;

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
		    (parent == -1 || parent == c->mfc_parent)) {
			found = true;
			break;
		}
	}

	if (found) {
		write_lock_bh(&mrt_lock);
		c->mfc_parent = mfc->mfcc_parent;
		ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	if (mfc->mfcc_mcastgrp.s_addr != htonl(INADDR_ANY) &&
	    !ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
		return -EINVAL;

	c = ipmr_cache_alloc();
	if (!c)
		return -ENOMEM;

	c->mfc_origin = mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
	c->mfc_parent = mfc->mfcc_parent;
	ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	list_add_rcu(&c->list, &mrt->mfc_cache_array[line]);

	/* Check to see if we resolved a queued list. If so we
	 * need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ipmr_cache_resolve(net, mrt, uc, c);
		ipmr_cache_free(uc);
	}
	mroute_netlink_event(mrt, c, RTM_NEWROUTE);
	return 0;
}

/* Close the multicast socket, and clear the vif tables etc */
static void mroute_clean_tables(struct mr_table *mrt, bool all)
{
	int i;
	LIST_HEAD(list);
	struct mfc_cache *c, *next;

	/* Shut down all active vif entries */
	for (i = 0; i < mrt->maxvif; i++) {
		if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
			continue;
		vif_delete(mrt, i, 0, &list);
	}
	unregister_netdevice_many(&list);

	/* Wipe the cache */
	for (i = 0; i < MFC_LINES; i++) {
		list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
			if (!all && (c->mfc_flags & MFC_STATIC))
				continue;
			list_del_rcu(&c->list);
			mroute_netlink_event(mrt, c, RTM_DELROUTE);
			ipmr_cache_free(c);
		}
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
			list_del(&c->list);
			mroute_netlink_event(mrt, c, RTM_DELROUTE);
			ipmr_destroy_unres(mrt, c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}
/* Called from ip_ra_control(), before an RCU grace period,
 * so we don't need to call synchronize_rcu() here.
 */
static void mrtsock_destruct(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	rtnl_lock();
	ipmr_for_each_table(mrt, net) {
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
			inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
			mroute_clean_tables(mrt, false);
		}
	}
	rtnl_unlock();
}

/* Socket options and virtual interface manipulation. The whole
 * virtual interface system is a complete heap, but unfortunately
 * that's how BSD mrouted happens to think. Maybe one day with a proper
 * MOSPF/PIM router set up we can clean this up.
 */

int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
			 unsigned int optlen)
{
	struct net *net = sock_net(sk);
	int val, ret = 0, parent = 0;
	struct mr_table *mrt;
	struct vifctl vif;
	struct mfcctl mfc;
	u32 uval;

	/* There's one exception to the lock - MRT_DONE which needs to unlock */
	rtnl_lock();
	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP) {
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt) {
		ret = -ENOENT;
		goto out_unlock;
	}
	if (optname != MRT_INIT) {
		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
		    !ns_capable(net->user_ns, CAP_NET_ADMIN)) {
			ret = -EACCES;
			goto out_unlock;
		}
	}

	switch (optname) {
	case MRT_INIT:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		if (rtnl_dereference(mrt->mroute_sk)) {
			ret = -EADDRINUSE;
			break;
		}

		ret = ip_ra_control(sk, 1, mrtsock_destruct);
		if (ret == 0) {
			rcu_assign_pointer(mrt->mroute_sk, sk);
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
			inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
		}
		break;
	case MRT_DONE:
		if (sk != rcu_access_pointer(mrt->mroute_sk)) {
			ret = -EACCES;
		} else {
			/* We need to unlock here because mrtsock_destruct takes
			 * care of rtnl itself and we can't change that due to
			 * the IP_ROUTER_ALERT setsockopt which runs without it.
			 */
			rtnl_unlock();
			ret = ip_ra_control(sk, 0, NULL);
			goto out;
		}
		break;
	case MRT_ADD_VIF:
	case MRT_DEL_VIF:
		if (optlen != sizeof(vif)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&vif, optval, sizeof(vif))) {
			ret = -EFAULT;
			break;
		}
		if (vif.vifc_vifi >= MAXVIFS) {
			ret = -ENFILE;
			break;
		}
		if (optname == MRT_ADD_VIF) {
			ret = vif_add(net, mrt, &vif,
				      sk == rtnl_dereference(mrt->mroute_sk));
		} else {
			ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
		}
		break;
	/* Manipulate the forwarding caches. These live
	 * in a sort of kernel/user symbiosis.
	 */
	case MRT_ADD_MFC:
	case MRT_DEL_MFC:
		parent = -1;
	case MRT_ADD_MFC_PROXY:
	case MRT_DEL_MFC_PROXY:
		if (optlen != sizeof(mfc)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&mfc, optval, sizeof(mfc))) {
			ret = -EFAULT;
			break;
		}
		if (parent == 0)
			parent = mfc.mfcc_parent;
		if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
			ret = ipmr_mfc_delete(mrt, &mfc, parent);
		else
			ret = ipmr_mfc_add(net, mrt, &mfc,
					   sk == rtnl_dereference(mrt->mroute_sk),
					   parent);
		break;
	/* Control PIM assert. */
	case MRT_ASSERT:
		if (optlen != sizeof(val)) {
			ret = -EINVAL;
			break;
		}
		if (get_user(val, (int __user *)optval)) {
			ret = -EFAULT;
			break;
		}
		mrt->mroute_do_assert = val;
		break;
	case MRT_PIM:
		if (!ipmr_pimsm_enabled()) {
			ret = -ENOPROTOOPT;
			break;
		}
		if (optlen != sizeof(val)) {
			ret = -EINVAL;
			break;
		}
		if (get_user(val, (int __user *)optval)) {
			ret = -EFAULT;
			break;
		}

		if (val != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = val;
			mrt->mroute_do_assert = val;
		}
		break;
	case MRT_TABLE:
		if (!IS_BUILTIN(CONFIG_IP_MROUTE_MULTIPLE_TABLES)) {
			ret = -ENOPROTOOPT;
			break;
		}
		if (optlen != sizeof(uval)) {
			ret = -EINVAL;
			break;
		}
		if (get_user(uval, (u32 __user *)optval)) {
			ret = -EFAULT;
			break;
		}

		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			ret = -EBUSY;
		} else {
			mrt = ipmr_new_table(net, uval);
			if (IS_ERR(mrt))
				ret = PTR_ERR(mrt);
			else
				raw_sk(sk)->ipmr_table = uval;
		}
		break;
	/* Spurious command, or MRT_VERSION which you cannot set. */
	default:
		ret = -ENOPROTOOPT;
	}
out_unlock:
	rtnl_unlock();
out:
	return ret;
}
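
/* A matching userspace sketch (illustrative only, not compiled) for the
 * MFC cases above; addresses and vif numbers are made up:
 */
#if 0
	struct mfcctl mc;

	memset(&mc, 0, sizeof(mc));
	mc.mfcc_origin.s_addr = inet_addr("198.51.100.7");	/* S */
	mc.mfcc_mcastgrp.s_addr = inet_addr("239.1.2.3");	/* G */
	mc.mfcc_parent = 0;		/* iif = vif 0 */
	mc.mfcc_ttls[1] = 1;		/* oif vif 1, forward if TTL > 1 */
	mc.mfcc_ttls[2] = 16;		/* oif vif 2, forward if TTL > 16 */

	setsockopt(s, IPPROTO_IP, MRT_ADD_MFC, &mc, sizeof(mc));
#endif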
/* getsockopt support for the multicast routing system. */
int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP)
		return -EOPNOTSUPP;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (optname) {
	case MRT_VERSION:
		val = 0x0305;
		break;
	case MRT_PIM:
		if (!ipmr_pimsm_enabled())
			return -ENOPROTOOPT;
		val = mrt->mroute_do_pim;
		break;
	case MRT_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;
	olr = min_t(unsigned int, olr, sizeof(int));
	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}
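
/* Illustrative counterpart (not compiled): querying the current mode.
 */
#if 0
	int val;
	socklen_t len = sizeof(val);

	getsockopt(s, IPPROTO_IP, MRT_VERSION, &val, &len);
	getsockopt(s, IPPROTO_IP, MRT_ASSERT, &val, &len);
#endif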
/* The IP multicast ioctl support routines. */
int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
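
/* Illustrative use of the counter ioctls above (not compiled; the
 * addresses are made up):
 */
#if 0
	struct sioc_vif_req vreq = { .vifi = 0 };
	struct sioc_sg_req sgreq = {
		.src.s_addr = inet_addr("198.51.100.7"),
		.grp.s_addr = inet_addr("239.1.2.3"),
	};

	ioctl(s, SIOCGETVIFCNT, &vreq);	/* fills icount/ocount/... */
	ioctl(s, SIOCGETSGCNT, &sgreq);	/* fills pktcnt/bytecnt/... */
#endif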
#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req {
	struct in_addr src;
	struct in_addr grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};
struct compat_sioc_vif_req {
	vifi_t	vifi;		/* Which iface */
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};
int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req sr;
	struct compat_sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif

static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ipmr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				vif_delete(mrt, ct, 1, NULL);
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block ip_mr_notifier = {
	.notifier_call = ipmr_device_event,
};

/* Encapsulate a packet by attaching a valid IPIP header to it.
 * This avoids tunnel drivers and other mess and gives us the speed so
 * important for multicast video.
 */
static void ip_encap(struct net *net, struct sk_buff *skb,
		     __be32 saddr, __be32 daddr)
{
	struct iphdr *iph;
	const struct iphdr *old_iph = ip_hdr(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	iph->version	= 4;
	iph->tos	= old_iph->tos;
	iph->ttl	= old_iph->ttl;
	iph->frag_off	= 0;
	iph->daddr	= daddr;
	iph->saddr	= saddr;
	iph->protocol	= IPPROTO_IPIP;
	iph->ihl	= 5;
	iph->tot_len	= htons(skb->len);
	ip_select_ident(net, skb, NULL);
	ip_send_check(iph);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
}
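
/* Sketch of the frame ip_encap() produces:
 *
 *   new outer IP header:  saddr = vif->local, daddr = vif->remote,
 *                         protocol = IPPROTO_IPIP, tos/ttl copied
 *                         from the inner header
 *   original multicast packet (old_iph onwards), unmodified
 */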
static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
				      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);

	IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(net, sk, skb);
}

/* Processing handlers for ipmr_forward */

static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
			    struct sk_buff *skb, struct mfc_cache *c, int vifi)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct vif_device *vif = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct rtable *rt;
	struct flowi4 fl4;
	int encap = 0;

	if (!vif->dev)
		goto out_free;

	if (vif->flags & VIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
		goto out_free;
	}

	if (vif->flags & VIFF_TUNNEL) {
		rt = ip_route_output_ports(net, &fl4, NULL,
					   vif->remote, vif->local,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
		encap = sizeof(struct iphdr);
	} else {
		rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
	}

	dev = rt->dst.dev;
1723 if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
1724 /* Do not fragment multicasts. Alas, IPv4 does not
1725 * allow to send ICMP, so that packets will disappear
1728 IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
	encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;

	if (skb_cow(skb, encap)) {
		ip_rt_put(rt);
		goto out_free;
	}

	vif->pkt_out++;
	vif->bytes_out += skb->len;

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);
	ip_decrease_ttl(ip_hdr(skb));

	/* FIXME: forward and output firewalls used to be called here.
	 * What do we do with netfilter? -- RR
	 */
	if (vif->flags & VIFF_TUNNEL) {
		ip_encap(net, skb, vif->local, vif->remote);
		/* FIXME: extra output firewall step used to be here. --RR */
		vif->dev->stats.tx_packets++;
		vif->dev->stats.tx_bytes += skb->len;
	}

	IPCB(skb)->flags |= IPSKB_FORWARDED;
	/* RFC1584 teaches that a DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but after forwarding on all output
	 * interfaces. Clearly, if the mrouter runs a multicasting
	 * program, it should receive packets regardless of which interface
	 * the program has joined on.
	 * If we don't do this, the program would have to join on all
	 * interfaces. On the other hand, a multihoming host (or a router, but
	 * not an mrouter) cannot join on more than one interface - it would
	 * result in receiving multiple copies of each packet.
	 */
	NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
		net, NULL, skb, skb->dev, dev,
		ipmr_forward_finish);
	return;

out_free:
	kfree_skb(skb);
}

static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif-1; ct >= 0; ct--) {
		if (mrt->vif_table[ct].dev == dev)
			break;
	}
	return ct;
}

/* "local" means that we should preserve one skb (for local delivery) */
static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct sk_buff *skb, struct mfc_cache *cache,
			  int local)
{
	int psend = -1;
	int vif, ct;
	int true_vifi = ipmr_find_vif(mrt, skb->dev);

	vif = cache->mfc_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;
	cache->mfc_un.res.lastuse = jiffies;

	if (cache->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
		struct mfc_cache *cache_proxy;
		/* For an (*,G) entry, we only check that the incoming
		 * interface is part of the static tree.
		 */
		cache_proxy = ipmr_cache_find_any_parent(mrt, vif);
		if (cache_proxy &&
		    cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
			goto forward;
	}
	/* Wrong interface: drop packet and (maybe) send PIM assert. */
	if (mrt->vif_table[vif].dev != skb->dev) {
		if (rt_is_output_route(skb_rtable(skb))) {
			/* It is our own packet, looped back.
			 * Very complicated situation...
			 *
			 * The best workaround until routing daemons are
			 * fixed is not to redistribute a packet if it was
			 * sent through the wrong interface. It means that
			 * multicast applications WILL NOT work for
			 * (S,G) entries that have a default multicast route
			 * pointing to the wrong oif. In any case, it is not
			 * a good idea to use multicast applications on a
			 * router.
			 */
			goto dont_forward;
		}

		cache->mfc_un.res.wrong_if++;
		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* PIM-SM uses asserts when switching from RPT to SPT,
		     * so we cannot check that the packet arrived on an oif.
		     * That is bad, but otherwise we would need to move a
		     * pretty large chunk of pimd into the kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
		}
1851 mrt->vif_table[vif].pkt_in++;
1852 mrt->vif_table[vif].bytes_in += skb->len;
1854 /* Forward the frame */
1855 if (cache->mfc_origin == htonl(INADDR_ANY) &&
1856 cache->mfc_mcastgrp == htonl(INADDR_ANY)) {
1857 if (true_vifi >= 0 &&
1858 true_vifi != cache->mfc_parent &&
1860 cache->mfc_un.res.ttls[cache->mfc_parent]) {
			/* It's an (*,*) entry and the packet is not coming from
			 * the upstream: forward the packet to the upstream
			 * only.
			 */
			psend = cache->mfc_parent;
			goto last_forward;
		}
		goto dont_forward;
	}
	for (ct = cache->mfc_un.res.maxvif - 1;
	     ct >= cache->mfc_un.res.minvif; ct--) {
		/* For (*,G) entry, don't forward to the incoming interface */
		if ((cache->mfc_origin != htonl(INADDR_ANY) ||
		     ct != true_vifi) &&
		    ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

				if (skb2)
					ipmr_queue_xmit(net, mrt, skb2, cache,
							psend);
			}
			psend = ct;
		}
	}
last_forward:
	if (psend != -1) {
		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

			if (skb2)
				ipmr_queue_xmit(net, mrt, skb2, cache, psend);
		} else {
			ipmr_queue_xmit(net, mrt, skb, cache, psend);
			return;
		}
	}

dont_forward:
	if (!local)
		kfree_skb(skb);
}

static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph = ip_hdr(skb);
	struct flowi4 fl4 = {
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowi4_tos = RT_TOS(iph->tos),
		.flowi4_oif = (rt_is_output_route(rt) ?
			       skb->dev->ifindex : 0),
		.flowi4_iif = (rt_is_output_route(rt) ?
			       LOOPBACK_IFINDEX :
			       skb->dev->ifindex),
		.flowi4_mark = skb->mark,
	};
	struct mr_table *mrt;
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err)
		return ERR_PTR(err);
	return mrt;
}

/* Multicast packets for forwarding arrive here
 * Called with rcu_read_lock();
 */
int ip_mr_input(struct sk_buff *skb)
{
	struct mfc_cache *cache;
	struct net *net = dev_net(skb->dev);
	int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
	struct mr_table *mrt;
	struct net_device *dev;

	/* skb->dev passed in is the loX master dev for vrfs.
	 * As there are no vifs associated with loopback devices,
	 * get the proper interface that does have a vif associated with it.
	 */
	dev = skb->dev;
	if (netif_is_l3_master(skb->dev)) {
		dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
		if (!dev) {
			kfree_skb(skb);
			return -ENODEV;
		}
	}

	/* Packet is looped back after forward, it should not be
	 * forwarded second time, but still can be delivered locally.
	 */
	if (IPCB(skb)->flags & IPSKB_FORWARDED)
		goto dont_forward;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt)) {
		kfree_skb(skb);
		return PTR_ERR(mrt);
	}
	if (!local) {
		if (IPCB(skb)->opt.router_alert) {
			if (ip_call_ra_chain(skb))
				return 0;
		} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
			/* IGMPv1 (and broken IGMPv2 implementations such as
			 * Cisco IOS <= 11.2(8)) do not put the router alert
			 * option into IGMP packets destined to routable
			 * groups. It is very bad, because it means
			 * that we could forward no IGMP messages at all.
			 */
			struct sock *mroute_sk;

			mroute_sk = rcu_dereference(mrt->mroute_sk);
			if (mroute_sk) {
				nf_reset(skb);
				raw_rcv(mroute_sk, skb);
				return 0;
			}
		}
	}

	/* already under rcu_read_lock() */
	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
	if (!cache) {
		int vif = ipmr_find_vif(mrt, dev);

		if (vif >= 0)
			cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
						    vif);
	}

	/* No usable cache entry */
	if (!cache) {
		int vif;

		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			ip_local_deliver(skb);
			if (!skb2)
				return -ENOBUFS;
			skb = skb2;
		}

		read_lock(&mrt_lock);
		vif = ipmr_find_vif(mrt, dev);
		if (vif >= 0) {
			int err2 = ipmr_cache_unresolved(mrt, vif, skb);
			read_unlock(&mrt_lock);

			return err2;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	read_lock(&mrt_lock);
	ip_mr_forward(net, mrt, skb, cache, local);
	read_unlock(&mrt_lock);

	if (local)
		return ip_local_deliver(skb);

	return 0;

dont_forward:
	if (local)
		return ip_local_deliver(skb);
	kfree_skb(skb);
	return 0;
}

#ifdef CONFIG_IP_PIMSM_V1
/* Handle IGMP messages of PIMv1 */
int pim_rcv_v1(struct sk_buff *skb)
{
	struct igmphdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = igmp_hdr(skb);

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (!mrt->mroute_do_pim ||
	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
		goto drop;

	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif

#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | (PIM_REGISTER)) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif

static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm)
{
	struct rta_mfc_stats mfcs;
	struct nlattr *mp_attr;
	struct rtnexthop *nhp;
	unsigned long lastuse;
	int ct;
	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mfc_parent >= MAXVIFS)
		return -ENOENT;
	if (VIF_EXISTS(mrt, c->mfc_parent) &&
	    nla_put_u32(skb, RTA_IIF, mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
		return -EMSGSIZE;

	if (!(mp_attr = nla_nest_start(skb, RTA_MULTIPATH)))
		return -EMSGSIZE;

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp)))) {
				nla_nest_cancel(skb, mp_attr);
				return -EMSGSIZE;
			}

			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}

	nla_nest_end(skb, mp_attr);

	lastuse = READ_ONCE(c->mfc_un.res.lastuse);
	lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;

	mfcs.mfcs_packets = c->mfc_un.res.pkt;
	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
	    nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
			      RTA_PAD))
		return -EMSGSIZE;

	rtm->rtm_type = RTN_MULTICAST;
	return 1;
}

int ipmr_get_route(struct net *net, struct sk_buff *skb,
		   __be32 saddr, __be32 daddr,
		   struct rtmsg *rtm, int nowait, u32 portid)
{
	struct mfc_cache *cache;
	struct mr_table *mrt;
	int err;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	rcu_read_lock();
	cache = ipmr_cache_find(mrt, saddr, daddr);
	if (!cache && skb->dev) {
		int vif = ipmr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ipmr_cache_find_any(mrt, daddr, vif);
	}
	if (!cache) {
		struct sk_buff *skb2;
		struct iphdr *iph;
		struct net_device *dev;
		int vif = -1;

		if (nowait) {
			rcu_read_unlock();
			return -EAGAIN;
		}

		dev = skb->dev;
		read_lock(&mrt_lock);
		if (dev)
			vif = ipmr_find_vif(mrt, dev);
		if (vif < 0) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENODEV;
		}
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENOMEM;
		}

		NETLINK_CB(skb2).portid = portid;
		skb_push(skb2, sizeof(struct iphdr));
		skb_reset_network_header(skb2);
		iph = ip_hdr(skb2);
		iph->ihl = sizeof(struct iphdr) >> 2;
		iph->saddr = saddr;
		iph->daddr = daddr;
		iph->version = 0;
		err = ipmr_cache_unresolved(mrt, vif, skb2);
		read_unlock(&mrt_lock);
		rcu_read_unlock();
		return err;
	}

	read_lock(&mrt_lock);
	err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
	read_unlock(&mrt_lock);
	rcu_read_unlock();
	return err;
}

static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			    u32 portid, u32 seq, struct mfc_cache *c, int cmd,
			    int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	int err;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family   = RTNL_FAMILY_IPMR;
	rtm->rtm_dst_len  = 32;
	rtm->rtm_src_len  = 32;
	rtm->rtm_tos      = 0;
	rtm->rtm_table    = mrt->id;
	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
		goto nla_put_failure;
	rtm->rtm_type     = RTN_MULTICAST;
	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
	if (c->mfc_flags & MFC_STATIC)
		rtm->rtm_protocol = RTPROT_STATIC;
	else
		rtm->rtm_protocol = RTPROT_MROUTED;
	rtm->rtm_flags    = 0;

	if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) ||
	    nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp))
		goto nla_put_failure;
	err = __ipmr_fill_mroute(mrt, skb, c, rtm);
	/* do not break the dump if cache is unresolved */
	if (err < 0 && err != -ENOENT)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static size_t mroute_msgsize(bool unresolved, int maxvif)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtmsg))
		+ nla_total_size(4)			/* RTA_TABLE */
		+ nla_total_size(4)			/* RTA_SRC */
		+ nla_total_size(4)			/* RTA_DST */
		;

	if (!unresolved)
		len = len
		      + nla_total_size(4)		/* RTA_IIF */
		      + nla_total_size(0)		/* RTA_MULTIPATH */
		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
							/* RTA_MFC_STATS */
		      + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
		;

	return len;
}

static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif),
			GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
	if (err < 0)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);
	return;

errout:
	kfree_skb(skb);
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
}

static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mr_table *mrt;
	struct mfc_cache *mfc;
	unsigned int t = 0, s_t;
	unsigned int h = 0, s_h;
	unsigned int e = 0, s_e;

	rcu_read_lock();
	ipmr_for_each_table(mrt, net) {
		for (h = s_h; h < MFC_LINES; h++) {
			list_for_each_entry_rcu(mfc, &mrt->mfc_cache_array[h], list) {
				if (ipmr_fill_mroute(mrt, skb,
						     NETLINK_CB(cb->skb).portid,
						     cb->nlh->nlmsg_seq,
						     mfc, RTM_NEWROUTE,
						     NLM_F_MULTI) < 0)
					goto done;
			}
		}
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
			if (ipmr_fill_mroute(mrt, skb,
					     NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     mfc, RTM_NEWROUTE,
					     NLM_F_MULTI) < 0) {
				spin_unlock_bh(&mfc_unres_lock);
				goto done;
			}
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
done:
	rcu_read_unlock();
	return skb->len;
}

static const struct nla_policy rtm_ipmr_policy[RTA_MAX + 1] = {
	[RTA_SRC]	= { .type = NLA_U32 },
	[RTA_DST]	= { .type = NLA_U32 },
	[RTA_IIF]	= { .type = NLA_U32 },
	[RTA_TABLE]	= { .type = NLA_U32 },
	[RTA_MULTIPATH]	= { .len = sizeof(struct rtnexthop) },
};
static bool ipmr_rtm_validate_proto(unsigned char rtm_protocol)
{
	switch (rtm_protocol) {
	case RTPROT_STATIC:
	case RTPROT_MROUTED:
		return true;
	default:
		return false;
	}
}
static int ipmr_nla_get_ttls(const struct nlattr *nla, struct mfcctl *mfcc)
{
	struct rtnexthop *rtnh = nla_data(nla);
	int remaining = nla_len(nla), vifi = 0;

	while (rtnh_ok(rtnh, remaining)) {
		mfcc->mfcc_ttls[vifi] = rtnh->rtnh_hops;
		if (++vifi == MAXVIFS)
			break;
		rtnh = rtnh_next(rtnh, &remaining);
	}

	return remaining > 0 ? -EINVAL : vifi;
}
/* returns < 0 on error, 0 for ADD_MFC and 1 for ADD_MFC_PROXY */
static int rtm_to_ipmr_mfcc(struct net *net, struct nlmsghdr *nlh,
			    struct mfcctl *mfcc, int *mrtsock,
			    struct mr_table **mrtret)
{
	struct net_device *dev = NULL;
	u32 tblid = RT_TABLE_DEFAULT;
	struct mr_table *mrt;
	struct nlattr *attr;
	struct rtmsg *rtm;
	int ret, rem;

	ret = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipmr_policy);
	if (ret < 0)
		goto out;
	rtm = nlmsg_data(nlh);

	ret = -EINVAL;
	if (rtm->rtm_family != RTNL_FAMILY_IPMR || rtm->rtm_dst_len != 32 ||
	    rtm->rtm_type != RTN_MULTICAST ||
	    rtm->rtm_scope != RT_SCOPE_UNIVERSE ||
	    !ipmr_rtm_validate_proto(rtm->rtm_protocol))
		goto out;

	memset(mfcc, 0, sizeof(*mfcc));
	mfcc->mfcc_parent = -1;
	ret = 0;
	nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), rem) {
		switch (nla_type(attr)) {
		case RTA_SRC:
			mfcc->mfcc_origin.s_addr = nla_get_be32(attr);
			break;
		case RTA_DST:
			mfcc->mfcc_mcastgrp.s_addr = nla_get_be32(attr);
			break;
		case RTA_IIF:
			dev = __dev_get_by_index(net, nla_get_u32(attr));
			if (!dev) {
				ret = -ENODEV;
				goto out;
			}
			break;
		case RTA_MULTIPATH:
			if (ipmr_nla_get_ttls(attr, mfcc) < 0) {
				ret = -EINVAL;
				goto out;
			}
			break;
		case RTA_TABLE:
			tblid = nla_get_u32(attr);
			break;
		}
	}
	mrt = ipmr_get_table(net, tblid);
	if (!mrt) {
		ret = -ENOENT;
		goto out;
	}
	*mrtret = mrt;
	*mrtsock = rtm->rtm_protocol == RTPROT_MROUTED ? 1 : 0;
	if (dev)
		mfcc->mfcc_parent = ipmr_find_vif(mrt, dev);

out:
	return ret;
}

/* takes care of both newroute and delroute */
static int ipmr_rtm_route(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	int ret, mrtsock, parent;
	struct mr_table *tbl;
	struct mfcctl mfcc;

	mrtsock = 0;
	tbl = NULL;
	ret = rtm_to_ipmr_mfcc(net, nlh, &mfcc, &mrtsock, &tbl);
	if (ret < 0)
		return ret;

	parent = ret ? mfcc.mfcc_parent : -1;
	if (nlh->nlmsg_type == RTM_NEWROUTE)
		return ipmr_mfc_add(net, tbl, &mfcc, mrtsock, parent);
	else
		return ipmr_mfc_delete(tbl, &mfcc, parent);
}
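
/* Once ip_mr_init() registers the handlers below, the MFC also becomes
 * visible over rtnetlink: iproute2's "ip mroute show" walks
 * ipmr_rtm_dumproute(), and RTM_NEWROUTE/RTM_DELROUTE messages with
 * rtm_family == RTNL_FAMILY_IPMR are dispatched to ipmr_rtm_route().
 */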
#ifdef CONFIG_PROC_FS
/* The /proc interfaces to multicast routing :
 * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
 */
struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	int ct;
};

static struct vif_device *ipmr_vif_seq_idx(struct net *net,
					   struct ipmr_vif_iter *iter,
					   loff_t pos)
{
	struct mr_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif_table[iter->ct];
	}
	return NULL;
}

static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
		    : SEQ_START_TOKEN;
}
static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ipmr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif_table[iter->ct];
	}
	return NULL;
}

static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2Zd %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
			   vif - mrt->vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags, vif->local, vif->remote);
	}
	return 0;
}
static const struct seq_operations ipmr_vif_seq_ops = {
	.start = ipmr_vif_seq_start,
	.next  = ipmr_vif_seq_next,
	.stop  = ipmr_vif_seq_stop,
	.show  = ipmr_vif_seq_show,
};

static int ipmr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ipmr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_vif_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	struct list_head *cache;
	int ct;
};

static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
					  struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr_table *mrt = it->mrt;
	struct mfc_cache *mfc;

	rcu_read_lock();
	for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		list_for_each_entry_rcu(mfc, it->cache, list)
			if (pos-- == 0)
				return mfc;
	}
	rcu_read_unlock();

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}

static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	it->cache = NULL;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		    : SEQ_START_TOKEN;
}
static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = it->mrt;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc_cache, list);

	if (it->cache == &mrt->mfc_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);

	while (++it->ct < MFC_LINES) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		if (list_empty(it->cache))
			continue;
		return list_first_entry(it->cache, struct mfc_cache, list);
	}

	/* exhausted cache_array, show unresolved */
	rcu_read_unlock();
	it->cache = &mrt->mfc_unres_queue;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc_cache, list);

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == &mrt->mfc_cache_array[it->ct])
		rcu_read_unlock();
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
	} else {
		const struct mfc_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		const struct mr_table *mrt = it->mrt;
		seq_printf(seq, "%08X %08X %-3hd",
			   (__force u32) mfc->mfc_mcastgrp,
			   (__force u32) mfc->mfc_origin,
			   mfc->mfc_parent);
2723 if (it->cache != &mrt->mfc_unres_queue) {
2724 seq_printf(seq, " %8lu %8lu %8lu",
2725 mfc->mfc_un.res.pkt,
2726 mfc->mfc_un.res.bytes,
2727 mfc->mfc_un.res.wrong_if);
2728 for (n = mfc->mfc_un.res.minvif;
2729 n < mfc->mfc_un.res.maxvif; n++) {
2730 if (VIF_EXISTS(mrt, n) &&
2731 mfc->mfc_un.res.ttls[n] < 255)
2734 n, mfc->mfc_un.res.ttls[n]);
2737 /* unresolved mfc_caches don't contain
2738 * pkt, bytes and wrong_if values
2740 seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
2742 seq_putc(seq, '\n');
2747 static const struct seq_operations ipmr_mfc_seq_ops = {
2748 .start = ipmr_mfc_seq_start,
2749 .next = ipmr_mfc_seq_next,
2750 .stop = ipmr_mfc_seq_stop,
2751 .show = ipmr_mfc_seq_show,
2754 static int ipmr_mfc_open(struct inode *inode, struct file *file)
2756 return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
2757 sizeof(struct ipmr_mfc_iter));
2760 static const struct file_operations ipmr_mfc_fops = {
2761 .owner = THIS_MODULE,
2762 .open = ipmr_mfc_open,
2764 .llseek = seq_lseek,
2765 .release = seq_release_net,
#ifdef CONFIG_IP_PIMSM_V2
static const struct net_protocol pim_protocol = {
	.handler	= pim_rcv,
	.netns_ok	= 1,
};
#endif
/* Setup for IP multicast routing */
static int __net_init ipmr_net_init(struct net *net)
{
	int err;

	err = ipmr_rules_init(net);
	if (err < 0)
		goto fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create("ip_mr_vif", 0, net->proc_net, &ipmr_vif_fops))
		goto proc_vif_fail;
	if (!proc_create("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_fops))
		goto proc_cache_fail;
#endif
	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip_mr_vif", net->proc_net);
proc_vif_fail:
	ipmr_rules_exit(net);
#endif
fail:
	return err;
}

static void __net_exit ipmr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip_mr_cache", net->proc_net);
	remove_proc_entry("ip_mr_vif", net->proc_net);
#endif
	ipmr_rules_exit(net);
}

static struct pernet_operations ipmr_net_ops = {
	.init = ipmr_net_init,
	.exit = ipmr_net_exit,
};

int __init ip_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
				       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
				       NULL);

	err = register_pernet_subsys(&ipmr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IP_PIMSM_V2
	if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
		      NULL, ipmr_rtm_dumproute, NULL);
	rtnl_register(RTNL_FAMILY_IPMR, RTM_NEWROUTE,
		      ipmr_rtm_route, NULL, NULL);
	rtnl_register(RTNL_FAMILY_IPMR, RTM_DELROUTE,
		      ipmr_rtm_route, NULL, NULL);
	return 0;

#ifdef CONFIG_IP_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ipmr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}