1 // SPDX-License-Identifier: GPL-2.0
3 #include <linux/igmp.h>
4 #include <linux/kernel.h>
5 #include <linux/netdevice.h>
6 #include <linux/rculist.h>
7 #include <linux/skbuff.h>
8 #include <linux/if_ether.h>
10 #include <net/netlink.h>
11 #include <net/switchdev.h>
12 #if IS_ENABLED(CONFIG_IPV6)
14 #include <net/addrconf.h>
17 #include "br_private.h"
/* Dump the bridge's list of detected multicast router ports as a nested
 * MDBA_ROUTER attribute (one MDBA_ROUTER_PORT nest per router port, with
 * ifindex, remaining router timer, and router type).
 * NOTE(review): this extract is missing lines (gaps in original numbering);
 * error/return paths are not fully visible here.
 */
19 static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
20 struct net_device *dev)
22 struct net_bridge *br = netdev_priv(dev);
23 struct net_bridge_port *p;
24 struct nlattr *nest, *port_nest;
/* Nothing to dump when router discovery is off or no routers were seen. */
26 if (!br->multicast_router || hlist_empty(&br->router_list))
29 nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
/* RCU walk of the router port list; presumably called under rcu_read_lock()
 * by the dump path — confirm against caller. */
33 hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
36 port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
/* ifindex is emitted header-less for legacy compatibility, then the
 * per-port timer and router type as regular attributes. */
39 if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
40 nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
41 br_timer_value(&p->multicast_router_timer)) ||
42 nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
43 p->multicast_router)) {
/* On any put failure, roll back the partially-filled port nest. */
44 nla_nest_cancel(skb, port_nest);
47 nla_nest_end(skb, port_nest);
50 nla_nest_end(skb, nest);
/* Failure path: undo the outer MDBA_ROUTER nest. */
53 nla_nest_cancel(skb, nest);
/* Translate internal port-group flags (MDB_PG_FLAGS_*) into the UAPI
 * br_mdb_entry representation: the permanent bit maps to e->state, the
 * rest are mirrored one-by-one into e->flags (MDB_FLAGS_*).
 */
57 static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
59 e->state = flags & MDB_PG_FLAGS_PERMANENT;
61 if (flags & MDB_PG_FLAGS_OFFLOAD)
62 e->flags |= MDB_FLAGS_OFFLOAD;
63 if (flags & MDB_PG_FLAGS_FAST_LEAVE)
64 e->flags |= MDB_FLAGS_FAST_LEAVE;
65 if (flags & MDB_PG_FLAGS_STAR_EXCL)
66 e->flags |= MDB_FLAGS_STAR_EXCL;
67 if (flags & MDB_PG_FLAGS_BLOCKED)
68 e->flags |= MDB_FLAGS_BLOCKED;
/* Convert a UAPI br_mdb_entry (plus optional parsed MDBE_ATTR_* attributes)
 * into the bridge's internal br_ip key. Handles IPv4, IPv6 (when enabled),
 * and the L2 MAC-address case; an optional MDBE_ATTR_SOURCE fills in the
 * source address for (S,G) entries.
 * NOTE(review): switch/case scaffolding around the proto dispatch is partly
 * missing from this extract.
 */
71 static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip,
72 struct nlattr **mdb_attrs)
74 memset(ip, 0, sizeof(struct br_ip));
76 ip->proto = entry->addr.proto;
79 ip->dst.ip4 = entry->addr.u.ip4;
80 if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
81 ip->src.ip4 = nla_get_in_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
83 #if IS_ENABLED(CONFIG_IPV6)
84 case htons(ETH_P_IPV6):
85 ip->dst.ip6 = entry->addr.u.ip6;
86 if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
87 ip->src.ip6 = nla_get_in6_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
/* Fallback: non-IP (L2) group, keyed by multicast MAC address. */
91 ether_addr_copy(ip->dst.mac_addr, entry->addr.u.mac_addr);
/* Emit the source list of a port group as a nested MDBA_MDB_EATTR_SRC_LIST
 * attribute: one MDBA_MDB_SRCLIST_ENTRY nest per source, carrying the
 * address and the remaining source timer.
 */
96 static int __mdb_fill_srcs(struct sk_buff *skb,
97 struct net_bridge_port_group *p)
99 struct net_bridge_group_src *ent;
100 struct nlattr *nest, *nest_ent;
/* No sources — nothing to emit. */
102 if (hlist_empty(&p->src_list))
105 nest = nla_nest_start(skb, MDBA_MDB_EATTR_SRC_LIST);
/* Walk may run under RCU or under the bridge multicast lock; the lockdep
 * expression below documents the lock that also protects the list. */
109 hlist_for_each_entry_rcu(ent, &p->src_list, node,
110 lockdep_is_held(&p->key.port->br->multicast_lock)) {
111 nest_ent = nla_nest_start(skb, MDBA_MDB_SRCLIST_ENTRY)
113 switch (ent->addr.proto) {
115 case htons(ETH_P_IP):
116 if (nla_put_in_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
117 ent->addr.src.ip4)) {
118 nla_nest_cancel(skb, nest_ent);
122 #if IS_ENABLED(CONFIG_IPV6)
123 case htons(ETH_P_IPV6):
124 if (nla_put_in6_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
125 &ent->addr.src.ip6)) {
126 nla_nest_cancel(skb, nest_ent);
/* presumably the default case for unexpected protocols — cancel entry. */
132 nla_nest_cancel(skb, nest_ent);
135 if (nla_put_u32(skb, MDBA_MDB_SRCATTR_TIMER,
136 br_timer_value(&ent->timer))) {
137 nla_nest_cancel(skb, nest_ent);
140 nla_nest_end(skb, nest_ent);
143 nla_nest_end(skb, nest);
/* Failure path: cancel the whole source-list nest. */
148 nla_nest_cancel(skb, nest);
/* Fill one MDBA_MDB_ENTRY_INFO nest for an MDB entry @mp. When @p is NULL
 * the entry describes the host-joined group (bridge device itself);
 * otherwise it describes port group @p. Emits the legacy br_mdb_entry
 * struct, the group timer, optional source address, protocol, and — for
 * IGMPv3/MLDv2 — the full source list and filter mode.
 * NOTE(review): several lines (ifindex/flags/mtimer selection for the
 * p vs. !p cases, goto labels) are missing from this extract.
 */
152 static int __mdb_fill_info(struct sk_buff *skb,
153 struct net_bridge_mdb_entry *mp,
154 struct net_bridge_port_group *p)
156 bool dump_srcs_mode = false;
157 struct timer_list *mtimer;
158 struct nlattr *nest_ent;
159 struct br_mdb_entry e;
163 memset(&e, 0, sizeof(e));
/* Port-group case: report the member port's ifindex... */
165 ifindex = p->key.port->dev->ifindex;
/* ...host-joined case: report the bridge device's own ifindex. */
169 ifindex = mp->br->dev->ifindex;
173 __mdb_entry_fill_flags(&e, flags);
175 e.vid = mp->addr.vid;
176 if (mp->addr.proto == htons(ETH_P_IP))
177 e.addr.u.ip4 = mp->addr.dst.ip4;
178 #if IS_ENABLED(CONFIG_IPV6)
179 else if (mp->addr.proto == htons(ETH_P_IPV6))
180 e.addr.u.ip6 = mp->addr.dst.ip6;
/* L2 group: copy the destination MAC. */
183 ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
184 e.addr.proto = mp->addr.proto;
185 nest_ent = nla_nest_start_noflag(skb,
186 MDBA_MDB_ENTRY_INFO);
/* Legacy struct goes out header-less, followed by the group timer. */
190 if (nla_put_nohdr(skb, sizeof(e), &e) ||
192 MDBA_MDB_EATTR_TIMER,
193 br_timer_value(mtimer)))
196 switch (mp->addr.proto) {
197 case htons(ETH_P_IP):
/* Source lists only exist with IGMPv3 (resp. MLDv2 below). */
198 dump_srcs_mode = !!(mp->br->multicast_igmp_version == 3);
199 if (mp->addr.src.ip4) {
200 if (nla_put_in_addr(skb, MDBA_MDB_EATTR_SOURCE,
206 #if IS_ENABLED(CONFIG_IPV6)
207 case htons(ETH_P_IPV6):
208 dump_srcs_mode = !!(mp->br->multicast_mld_version == 2);
209 if (!ipv6_addr_any(&mp->addr.src.ip6)) {
210 if (nla_put_in6_addr(skb, MDBA_MDB_EATTR_SOURCE,
218 ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
221 if (nla_put_u8(skb, MDBA_MDB_EATTR_RTPROT, p->rt_protocol))
/* For v3/MLDv2 groups also dump the per-source list and filter mode. */
223 if (dump_srcs_mode &&
224 (__mdb_fill_srcs(skb, p) ||
225 nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE,
229 nla_nest_end(skb, nest_ent);
/* Failure path: roll back the entry nest. */
234 nla_nest_cancel(skb, nest_ent);
/* Dump all MDB entries of bridge @dev into @skb as a nested MDBA_MDB
 * attribute. cb->args[1]/[2] carry the entry/port resume indices so a
 * multi-part dump can continue where the previous skb filled up.
 * NOTE(review): resume-index bookkeeping and skip/out labels are missing
 * from this extract.
 */
238 static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
239 struct net_device *dev)
241 int idx = 0, s_idx = cb->args[1], err = 0, pidx = 0, s_pidx = cb->args[2];
242 struct net_bridge *br = netdev_priv(dev);
243 struct net_bridge_mdb_entry *mp;
244 struct nlattr *nest, *nest2;
/* Nothing to dump while multicast snooping is disabled. */
246 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
249 nest = nla_nest_start_noflag(skb, MDBA_MDB);
253 hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
254 struct net_bridge_port_group *p;
255 struct net_bridge_port_group __rcu **pp;
260 nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
/* The host-joined pseudo-entry is dumped first, once per group. */
266 if (!s_pidx && mp->host_joined) {
267 err = __mdb_fill_info(skb, mp, NULL);
269 nla_nest_cancel(skb, nest2);
/* Then each member port group, under RCU. */
274 for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
281 err = __mdb_fill_info(skb, mp, p);
283 nla_nest_end(skb, nest2);
291 nla_nest_end(skb, nest2);
299 nla_nest_end(skb, nest);
/* Strict validation of an RTM_GETMDB dump request header: the message must
 * be exactly a br_port_msg, with no device filter and no trailing
 * attributes. Used only when cb->strict_check is set.
 */
303 static int br_mdb_valid_dump_req(const struct nlmsghdr *nlh,
304 struct netlink_ext_ack *extack)
306 struct br_port_msg *bpm;
308 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
309 NL_SET_ERR_MSG_MOD(extack, "Invalid header for mdb dump request");
313 bpm = nlmsg_data(nlh);
/* presumably guarded by a bpm->ifindex check on a missing line. */
315 NL_SET_ERR_MSG_MOD(extack, "Filtering by device index is not supported for mdb dump request");
318 if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
319 NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
/* RTM_GETMDB dump handler: iterate all bridge devices in the namespace
 * and, for each, emit one NLM_F_MULTI message containing the MDB entries
 * and the router port list.
 * NOTE(review): device resume index handling and the rcu_read_lock/unlock
 * bracketing are on lines missing from this extract.
 */
326 static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
328 struct net_device *dev;
329 struct net *net = sock_net(skb->sk);
330 struct nlmsghdr *nlh = NULL;
333 if (cb->strict_check) {
334 int err = br_mdb_valid_dump_req(cb->nlh, cb->extack);
/* Record the generation so userspace can detect changes mid-dump. */
344 cb->seq = net->dev_base_seq;
346 for_each_netdev_rcu(net, dev) {
347 if (dev->priv_flags & IFF_EBRIDGE) {
348 struct br_port_msg *bpm;
353 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
354 cb->nlh->nlmsg_seq, RTM_GETMDB,
355 sizeof(*bpm), NLM_F_MULTI);
359 bpm = nlmsg_data(nlh);
360 memset(bpm, 0, sizeof(*bpm));
361 bpm->ifindex = dev->ifindex;
/* A negative return means the skb is full; stop and let the next
 * dump call resume. */
362 if (br_mdb_fill_info(skb, cb, dev) < 0)
364 if (br_rports_fill_info(skb, cb, dev) < 0)
/* Build a single RTM_NEWMDB/RTM_DELMDB notification message for one MDB
 * entry (@mp, optionally port group @pg) into @skb:
 * br_port_msg header, then MDBA_MDB > MDBA_MDB_ENTRY > entry info.
 */
382 static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
383 struct net_device *dev,
384 struct net_bridge_mdb_entry *mp,
385 struct net_bridge_port_group *pg,
388 struct nlmsghdr *nlh;
389 struct br_port_msg *bpm;
390 struct nlattr *nest, *nest2;
392 nlh = nlmsg_put(skb, 0, 0, type, sizeof(*bpm), 0);
396 bpm = nlmsg_data(nlh);
397 memset(bpm, 0, sizeof(*bpm));
398 bpm->family = AF_BRIDGE;
399 bpm->ifindex = dev->ifindex;
400 nest = nla_nest_start_noflag(skb, MDBA_MDB);
403 nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
407 if (__mdb_fill_info(skb, mp, pg))
410 nla_nest_end(skb, nest2);
411 nla_nest_end(skb, nest);
/* Error path: close what was opened, then cancel the whole message. */
416 nla_nest_end(skb, nest);
418 nlmsg_cancel(skb, nlh);
/* Compute an upper bound on the notification message size for port group
 * @pg, so nlmsg_new() can allocate a large-enough skb: base header +
 * legacy entry + timer, plus optional source address, rtprot, group mode
 * and per-source list entries depending on protocol and IGMP/MLD version.
 */
422 static size_t rtnl_mdb_nlmsg_size(struct net_bridge_port_group *pg)
424 size_t nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
425 nla_total_size(sizeof(struct br_mdb_entry)) +
426 nla_total_size(sizeof(u32));
427 struct net_bridge_group_src *ent;
428 size_t addr_size = 0;
433 /* MDBA_MDB_EATTR_RTPROT */
434 nlmsg_size += nla_total_size(sizeof(u8));
436 switch (pg->key.addr.proto) {
437 case htons(ETH_P_IP):
438 /* MDBA_MDB_EATTR_SOURCE */
439 if (pg->key.addr.src.ip4)
440 nlmsg_size += nla_total_size(sizeof(__be32));
/* IGMPv2 groups carry no source list; presumably the missing line
 * bails out ("goto out"-style) before the source accounting. */
441 if (pg->key.port->br->multicast_igmp_version == 2)
443 addr_size = sizeof(__be32);
445 #if IS_ENABLED(CONFIG_IPV6)
446 case htons(ETH_P_IPV6):
447 /* MDBA_MDB_EATTR_SOURCE */
448 if (!ipv6_addr_any(&pg->key.addr.src.ip6))
449 nlmsg_size += nla_total_size(sizeof(struct in6_addr));
/* Same early-out for MLDv1 — confirm against full source. */
450 if (pg->key.port->br->multicast_mld_version == 1)
452 addr_size = sizeof(struct in6_addr);
457 /* MDBA_MDB_EATTR_GROUP_MODE */
458 nlmsg_size += nla_total_size(sizeof(u8));
460 /* MDBA_MDB_EATTR_SRC_LIST nested attr */
461 if (!hlist_empty(&pg->src_list))
462 nlmsg_size += nla_total_size(0);
464 hlist_for_each_entry(ent, &pg->src_list, node) {
465 /* MDBA_MDB_SRCLIST_ENTRY nested attr +
466 * MDBA_MDB_SRCATTR_ADDRESS + MDBA_MDB_SRCATTR_TIMER
467 */
468 nlmsg_size += nla_total_size(0) +
469 nla_total_size(addr_size) +
470 nla_total_size(sizeof(u32));
/* Context passed to the switchdev completion callback (br_mdb_complete)
 * identifying which port/group the deferred offload applied to.
 * NOTE(review): the struct's br_ip member and closing brace are on lines
 * missing from this extract.
 */
476 struct br_mdb_complete_info {
477 struct net_bridge_port *port;
/* Switchdev deferred-add completion callback: on success, find the port
 * group that was offloaded and mark it MDB_PG_FLAGS_OFFLOAD under the
 * bridge multicast lock. @priv is a br_mdb_complete_info allocated in
 * br_mdb_notify(); presumably freed on a line missing from this extract.
 */
481 static void br_mdb_complete(struct net_device *dev, int err, void *priv)
483 struct br_mdb_complete_info *data = priv;
484 struct net_bridge_port_group __rcu **pp;
485 struct net_bridge_port_group *p;
486 struct net_bridge_mdb_entry *mp;
487 struct net_bridge_port *port = data->port;
488 struct net_bridge *br = port->br;
493 spin_lock_bh(&br->multicast_lock);
494 mp = br_mdb_ip_get(br, &data->ip);
/* Walk the group's port list looking for our port's entry. */
497 for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
499 if (p->key.port != port)
501 p->flags |= MDB_PG_FLAGS_OFFLOAD;
504 spin_unlock_bh(&br->multicast_lock);
/* Fill a switchdev MDB object from an MDB entry: IPv4/IPv6 groups are
 * mapped to their multicast MAC address; L2 groups copy the MAC directly.
 * The VLAN id is carried over as-is.
 */
509 static void br_switchdev_mdb_populate(struct switchdev_obj_port_mdb *mdb,
510 const struct net_bridge_mdb_entry *mp)
512 if (mp->addr.proto == htons(ETH_P_IP))
513 ip_eth_mc_map(mp->addr.dst.ip4, mdb->addr);
514 #if IS_ENABLED(CONFIG_IPV6)
515 else if (mp->addr.proto == htons(ETH_P_IPV6))
516 ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb->addr);
/* Non-IP (L2) group. */
519 ether_addr_copy(mdb->addr, mp->addr.dst.mac_addr);
521 mdb->vid = mp->addr.vid;
/* Replay a single MDB object to one switchdev notifier @nb as a
 * SWITCHDEV_PORT_OBJ_ADD event, translating the notifier-chain return
 * into a plain errno.
 */
524 static int br_mdb_replay_one(struct notifier_block *nb, struct net_device *dev,
525 struct switchdev_obj_port_mdb *mdb,
526 struct netlink_ext_ack *extack)
528 struct switchdev_notifier_port_obj_info obj_info = {
537 err = nb->notifier_call(nb, SWITCHDEV_PORT_OBJ_ADD, &obj_info);
538 return notifier_to_errno(err);
/* Allocate (GFP_ATOMIC — called inside an RCU read section) a switchdev
 * MDB object for entry @mp and append it to @mdb_list for later replay
 * from blocking context. Ownership of the allocation passes to the list;
 * it is freed by the caller after replay.
 */
541 static int br_mdb_queue_one(struct list_head *mdb_list,
542 enum switchdev_obj_id id,
543 const struct net_bridge_mdb_entry *mp,
544 struct net_device *orig_dev)
546 struct switchdev_obj_port_mdb *mdb;
548 mdb = kzalloc(sizeof(*mdb), GFP_ATOMIC);
553 mdb->obj.orig_dev = orig_dev;
554 br_switchdev_mdb_populate(mdb, mp);
555 list_add_tail(&mdb->obj.list, mdb_list);
/* Replay the bridge's current MDB state (host-joined groups and the port
 * groups belonging to @dev) to switchdev notifier @nb. Entries are first
 * queued under RCU, then notified from blocking context, then freed.
 * Returns 0 on success or a negative errno.
 */
560 int br_mdb_replay(struct net_device *br_dev, struct net_device *dev,
561 struct notifier_block *nb, struct netlink_ext_ack *extack)
563 struct net_bridge_mdb_entry *mp;
564 struct switchdev_obj *obj, *tmp;
565 struct net_bridge *br;
571 if (!netif_is_bridge_master(br_dev) || !netif_is_bridge_port(dev))
574 br = netdev_priv(br_dev);
576 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
579 /* We cannot walk over br->mdb_list protected just by the rtnl_mutex,
580 * because the write-side protection is br->multicast_lock. But we
581 * need to emulate the [ blocking ] calling context of a regular
582 * switchdev event, so since both br->multicast_lock and RCU read side
583 * critical sections are atomic, we have no choice but to pick the RCU
584 * read side lock, queue up all our events, leave the critical section
585 * and notify switchdev from blocking context.
586 */
589 hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
590 struct net_bridge_port_group __rcu **pp;
591 struct net_bridge_port_group *p;
/* Host-joined groups target the bridge device itself. */
593 if (mp->host_joined) {
594 err = br_mdb_queue_one(&mdb_list,
595 SWITCHDEV_OBJ_ID_HOST_MDB,
/* Only port groups that belong to the port being replayed. */
603 for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
605 if (p->key.port->dev != dev)
608 err = br_mdb_queue_one(&mdb_list,
609 SWITCHDEV_OBJ_ID_PORT_MDB,
/* Outside the RCU section: notify each queued object... */
620 list_for_each_entry(obj, &mdb_list, list) {
621 err = br_mdb_replay_one(nb, dev, SWITCHDEV_OBJ_PORT_MDB(obj),
/* ...then free the queue regardless of outcome. */
628 list_for_each_entry_safe(obj, tmp, &mdb_list, list) {
629 list_del(&obj->list);
630 kfree(SWITCHDEV_OBJ_PORT_MDB(obj));
635 EXPORT_SYMBOL_GPL(br_mdb_replay);
/* Propagate a host-joined group add/delete to one lower device via the
 * deferred switchdev object API (SWITCHDEV_OBJ_ID_HOST_MDB).
 * NOTE(review): the type-based add/del dispatch (switch on RTM_NEWMDB /
 * RTM_DELMDB, presumably) is on lines missing from this extract.
 */
637 static void br_mdb_switchdev_host_port(struct net_device *dev,
638 struct net_device *lower_dev,
639 struct net_bridge_mdb_entry *mp,
642 struct switchdev_obj_port_mdb mdb = {
644 .id = SWITCHDEV_OBJ_ID_HOST_MDB,
645 .flags = SWITCHDEV_F_DEFER,
650 br_switchdev_mdb_populate(&mdb, mp);
654 switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
657 switchdev_port_obj_del(lower_dev, &mdb.obj);
/* Notify every lower device of the bridge about a host-joined group
 * change by fanning out to br_mdb_switchdev_host_port().
 */
662 static void br_mdb_switchdev_host(struct net_device *dev,
663 struct net_bridge_mdb_entry *mp, int type)
665 struct net_device *lower_dev;
666 struct list_head *iter;
668 netdev_for_each_lower_dev(dev, lower_dev, iter)
669 br_mdb_switchdev_host_port(dev, lower_dev, mp, type);
/* Notify userspace (RTNLGRP_MDB netlink) and the underlying hardware
 * (switchdev, deferred) about an MDB change of @type (RTM_NEWMDB /
 * RTM_DELMDB). @pg == NULL means a host-joined group change, which is
 * propagated to all lower devices instead of a single port.
 * NOTE(review): the pg/NULL dispatch and error labels sit on lines missing
 * from this extract.
 */
672 void br_mdb_notify(struct net_device *dev,
673 struct net_bridge_mdb_entry *mp,
674 struct net_bridge_port_group *pg,
677 struct br_mdb_complete_info *complete_info;
678 struct switchdev_obj_port_mdb mdb = {
680 .id = SWITCHDEV_OBJ_ID_PORT_MDB,
681 .flags = SWITCHDEV_F_DEFER,
684 struct net *net = dev_net(dev);
689 br_switchdev_mdb_populate(&mdb, mp);
691 mdb.obj.orig_dev = pg->key.port->dev;
/* Completion context lets br_mdb_complete() flag the group as
 * offloaded once the deferred switchdev add finishes. */
694 complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
697 complete_info->port = pg->key.port;
698 complete_info->ip = mp->addr;
699 mdb.obj.complete_priv = complete_info;
700 mdb.obj.complete = br_mdb_complete;
/* If the add could not even be queued, free the context ourselves. */
701 if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL))
702 kfree(complete_info);
705 switchdev_port_obj_del(pg->key.port->dev, &mdb.obj);
709 br_mdb_switchdev_host(dev, mp, type);
/* Userspace notification. */
712 skb = nlmsg_new(rtnl_mdb_nlmsg_size(pg), GFP_ATOMIC);
716 err = nlmsg_populate_mdb_fill(skb, dev, mp, pg, type);
722 rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
725 rtnl_set_sk_err(net, RTNLGRP_MDB, err);
/* Build a router-port notification: br_port_msg header plus a nested
 * MDBA_ROUTER attribute carrying the router port's ifindex.
 */
728 static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
729 struct net_device *dev,
730 int ifindex, u32 pid,
731 u32 seq, int type, unsigned int flags)
733 struct br_port_msg *bpm;
734 struct nlmsghdr *nlh;
737 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
741 bpm = nlmsg_data(nlh);
742 memset(bpm, 0, sizeof(*bpm));
743 bpm->family = AF_BRIDGE;
744 bpm->ifindex = dev->ifindex;
745 nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
749 if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
752 nla_nest_end(skb, nest);
/* Error path: close the nest, then cancel the whole message. */
757 nla_nest_end(skb, nest);
759 nlmsg_cancel(skb, nlh);
/* Size bound for a router-port notification: header + one u32 ifindex. */
763 static inline size_t rtnl_rtr_nlmsg_size(void)
765 return NLMSG_ALIGN(sizeof(struct br_port_msg))
766 + nla_total_size(sizeof(__u32));
/* Notify RTNLGRP_MDB listeners that @port gained/lost multicast-router
 * status. @port == NULL reports ifindex 0 (the bridge itself).
 */
769 void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
772 struct net *net = dev_net(dev);
777 ifindex = port ? port->dev->ifindex : 0;
778 skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
782 err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
788 rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
792 rtnl_set_sk_err(net, RTNLGRP_MDB, err);
/* Validate a userspace-supplied br_mdb_entry: non-zero ifindex, a sane
 * multicast group address per protocol (IPv4, IPv6, or L2 MAC when
 * proto == 0), a known state, and a valid VLAN id. Sets extack and
 * (on missing lines) returns false on each failure.
 */
795 static bool is_valid_mdb_entry(struct br_mdb_entry *entry,
796 struct netlink_ext_ack *extack)
798 if (entry->ifindex == 0) {
799 NL_SET_ERR_MSG_MOD(extack, "Zero entry ifindex is not allowed");
803 if (entry->addr.proto == htons(ETH_P_IP)) {
/* Must be multicast but not link-local (224.0.0.0/24). */
804 if (!ipv4_is_multicast(entry->addr.u.ip4)) {
805 NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is not multicast");
808 if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
809 NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is local multicast");
812 #if IS_ENABLED(CONFIG_IPV6)
813 } else if (entry->addr.proto == htons(ETH_P_IPV6)) {
/* Reject ff02::1 (all-nodes); flooded anyway, never in the MDB. */
814 if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
815 NL_SET_ERR_MSG_MOD(extack, "IPv6 entry group address is link-local all nodes");
/* proto == 0 denotes an L2 (MAC-keyed) group entry. */
819 } else if (entry->addr.proto == 0) {
821 if (!is_multicast_ether_addr(entry->addr.u.mac_addr)) {
822 NL_SET_ERR_MSG_MOD(extack, "L2 entry group is not multicast");
826 NL_SET_ERR_MSG_MOD(extack, "Unknown entry protocol");
830 if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
831 NL_SET_ERR_MSG_MOD(extack, "Unknown entry state");
834 if (entry->vid >= VLAN_VID_MASK) {
835 NL_SET_ERR_MSG_MOD(extack, "Invalid entry VLAN id");
/* Validate an MDBE_ATTR_SOURCE attribute against the entry's protocol:
 * the length must match the address family and a multicast source
 * address is rejected. Only IPv4/IPv6 entries may carry a source.
 */
842 static bool is_valid_mdb_source(struct nlattr *attr, __be16 proto,
843 struct netlink_ext_ack *extack)
846 case htons(ETH_P_IP):
847 if (nla_len(attr) != sizeof(struct in_addr)) {
848 NL_SET_ERR_MSG_MOD(extack, "IPv4 invalid source address length");
851 if (ipv4_is_multicast(nla_get_in_addr(attr))) {
852 NL_SET_ERR_MSG_MOD(extack, "IPv4 multicast source address is not allowed");
856 #if IS_ENABLED(CONFIG_IPV6)
857 case htons(ETH_P_IPV6): {
860 if (nla_len(attr) != sizeof(struct in6_addr)) {
861 NL_SET_ERR_MSG_MOD(extack, "IPv6 invalid source address length");
864 src = nla_get_in6_addr(attr);
865 if (ipv6_addr_is_multicast(&src)) {
866 NL_SET_ERR_MSG_MOD(extack, "IPv6 multicast source address is not allowed");
/* Any other protocol (including L2) cannot have a source address. */
873 NL_SET_ERR_MSG_MOD(extack, "Invalid protocol used with source address");
/* Netlink policy for MDBA_SET_ENTRY_ATTRS: the source address is a binary
 * blob sized between an IPv4 and an IPv6 address; exact-length/family
 * checking is done later in is_valid_mdb_source().
 */
880 static const struct nla_policy br_mdbe_attrs_pol[MDBE_ATTR_MAX + 1] = {
881 [MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
882 sizeof(struct in_addr),
883 sizeof(struct in6_addr)),
/* Parse and validate an RTM_NEWMDB/RTM_DELMDB request. On success returns
 * the bridge device in *pdev, the validated entry in *pentry, and the
 * parsed per-entry attributes in mdb_attrs (zeroed when absent — see the
 * memset near the end). Requires the target ifindex to name an existing
 * bridge device.
 */
886 static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
887 struct net_device **pdev, struct br_mdb_entry **pentry,
888 struct nlattr **mdb_attrs, struct netlink_ext_ack *extack)
890 struct net *net = sock_net(skb->sk);
891 struct br_mdb_entry *entry;
892 struct br_port_msg *bpm;
893 struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
894 struct net_device *dev;
897 err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
898 MDBA_SET_ENTRY_MAX, NULL, NULL);
902 bpm = nlmsg_data(nlh);
903 if (bpm->ifindex == 0) {
904 NL_SET_ERR_MSG_MOD(extack, "Invalid bridge ifindex");
/* RTNL is held by the caller, so __dev_get_by_index is safe. */
908 dev = __dev_get_by_index(net, bpm->ifindex);
910 NL_SET_ERR_MSG_MOD(extack, "Bridge device doesn't exist");
914 if (!(dev->priv_flags & IFF_EBRIDGE)) {
915 NL_SET_ERR_MSG_MOD(extack, "Device is not a bridge");
921 if (!tb[MDBA_SET_ENTRY]) {
922 NL_SET_ERR_MSG_MOD(extack, "Missing MDBA_SET_ENTRY attribute");
/* Legacy fixed-size struct attribute: exact length required. */
925 if (nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
926 NL_SET_ERR_MSG_MOD(extack, "Invalid MDBA_SET_ENTRY attribute length");
930 entry = nla_data(tb[MDBA_SET_ENTRY]);
931 if (!is_valid_mdb_entry(entry, extack))
935 if (tb[MDBA_SET_ENTRY_ATTRS]) {
936 err = nla_parse_nested(mdb_attrs, MDBE_ATTR_MAX,
937 tb[MDBA_SET_ENTRY_ATTRS],
938 br_mdbe_attrs_pol, extack);
941 if (mdb_attrs[MDBE_ATTR_SOURCE] &&
942 !is_valid_mdb_source(mdb_attrs[MDBE_ATTR_SOURCE],
943 entry->addr.proto, extack))
/* No attributes supplied: presumably the missing line memsets
 * mdb_attrs to NULLs over this size. */
947 sizeof(struct nlattr *) * (MDBE_ATTR_MAX + 1));
/* Core of RTM_NEWMDB: add a host join (@port is presumably NULL in that
 * branch — confirm, the dispatch line is missing) or a static port group
 * for the group described by @entry. Called with br->multicast_lock held
 * (taken in __br_mdb_add). Handles (*,G)/(S,G) cross-linking so forwarding
 * replication stays correct.
 */
953 static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
954 struct br_mdb_entry *entry,
955 struct nlattr **mdb_attrs,
956 struct netlink_ext_ack *extack)
958 struct net_bridge_mdb_entry *mp, *star_mp;
959 struct net_bridge_port_group *p;
960 struct net_bridge_port_group __rcu **pp;
961 struct br_ip group, star_group;
962 unsigned long now = jiffies;
963 unsigned char flags = 0;
967 __mdb_entry_to_br_ip(entry, &group, mdb_attrs);
969 /* host join errors which can happen before creating the group */
971 /* don't allow any flags for host-joined groups */
973 NL_SET_ERR_MSG_MOD(extack, "Flags are not allowed for host groups");
/* Host can only join (*,G) groups, never source-specific ones. */
976 if (!br_multicast_is_star_g(&group)) {
977 NL_SET_ERR_MSG_MOD(extack, "Groups with sources cannot be manually host joined");
982 if (br_group_is_l2(&group) && entry->state != MDB_PERMANENT) {
983 NL_SET_ERR_MSG_MOD(extack, "Only permanent L2 entries allowed");
/* Look up the group; create it if this is the first member. */
987 mp = br_mdb_ip_get(br, &group);
989 mp = br_multicast_new_group(br, &group);
990 err = PTR_ERR_OR_ZERO(mp);
/* Host-join branch. */
997 if (mp->host_joined) {
998 NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host");
1002 br_multicast_host_join(mp, false);
1003 br_mdb_notify(br->dev, mp, NULL, RTM_NEWMDB);
/* Port-group branch: walk the sorted port list to find the insert
 * position and detect duplicates. */
1008 for (pp = &mp->ports;
1009 (p = mlock_dereference(*pp, br)) != NULL;
1011 if (p->key.port == port) {
1012 NL_SET_ERR_MSG_MOD(extack, "Group is already joined by port");
1015 if ((unsigned long)p->key.port < (unsigned long)port)
/* (*,G) defaults to EXCLUDE mode, (S,G) to INCLUDE. */
1019 filter_mode = br_multicast_is_star_g(&group) ? MCAST_EXCLUDE :
1022 if (entry->state == MDB_PERMANENT)
1023 flags |= MDB_PG_FLAGS_PERMANENT;
1025 p = br_multicast_new_port_group(port, &group, *pp, flags, NULL,
1026 filter_mode, RTPROT_STATIC);
1028 NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group");
1031 rcu_assign_pointer(*pp, p);
/* Temporary entries age out via the membership timer. */
1032 if (entry->state == MDB_TEMPORARY)
1033 mod_timer(&p->timer, now + br->multicast_membership_interval);
1034 br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);
1035 /* if we are adding a new EXCLUDE port group (*,G) it needs to be also
1036 * added to all S,G entries for proper replication, if we are adding
1037 * a new INCLUDE port (S,G) then all of *,G EXCLUDE ports need to be
1038 * added to it for proper replication
1039 */
1040 if (br_multicast_should_handle_mode(br, group.proto)) {
1041 switch (filter_mode) {
1043 br_multicast_star_g_handle_mode(p, MCAST_EXCLUDE);
/* (S,G) case: find the matching (*,G) entry and link its
 * EXCLUDE ports into the new group. */
1046 star_group = p->key.addr;
1047 memset(&star_group.src, 0, sizeof(star_group.src));
1048 star_mp = br_mdb_ip_get(br, &star_group);
1050 br_multicast_sg_add_exclude_ports(star_mp, p);
/* Thin wrapper: run br_mdb_add_group() under the bridge multicast lock
 * (BH-disabled, as the lock is also taken from packet processing).
 */
1058 static int __br_mdb_add(struct net *net, struct net_bridge *br,
1059 struct net_bridge_port *p,
1060 struct br_mdb_entry *entry,
1061 struct nlattr **mdb_attrs,
1062 struct netlink_ext_ack *extack)
1066 spin_lock_bh(&br->multicast_lock);
1067 ret = br_mdb_add_group(br, p, entry, mdb_attrs, extack);
1068 spin_unlock_bh(&br->multicast_lock);
/* RTM_NEWMDB doit handler: parse the request, resolve and sanity-check
 * the target bridge/port, then add the entry. With VLAN filtering on and
 * vid 0 in the request, the entry is installed on every VLAN configured
 * on the port (or bridge).
 */
1073 static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
1074 struct netlink_ext_ack *extack)
1076 struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
1077 struct net *net = sock_net(skb->sk);
1078 struct net_bridge_vlan_group *vg;
1079 struct net_bridge_port *p = NULL;
1080 struct net_device *dev, *pdev;
1081 struct br_mdb_entry *entry;
1082 struct net_bridge_vlan *v;
1083 struct net_bridge *br;
1086 err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
1090 br = netdev_priv(dev);
1092 if (!netif_running(br->dev)) {
1093 NL_SET_ERR_MSG_MOD(extack, "Bridge device is not running");
1097 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
1098 NL_SET_ERR_MSG_MOD(extack, "Bridge's multicast processing is disabled");
/* entry->ifindex == br ifindex means a host join on the bridge
 * itself (p stays NULL); otherwise resolve the member port. */
1102 if (entry->ifindex != br->dev->ifindex) {
1103 pdev = __dev_get_by_index(net, entry->ifindex);
1105 NL_SET_ERR_MSG_MOD(extack, "Port net device doesn't exist");
1109 p = br_port_get_rtnl(pdev);
1111 NL_SET_ERR_MSG_MOD(extack, "Net device is not a bridge port");
1116 NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
1119 if (p->state == BR_STATE_DISABLED) {
1120 NL_SET_ERR_MSG_MOD(extack, "Port is in disabled state");
1123 vg = nbp_vlan_group(p);
/* Host-join path uses the bridge's own VLAN group. */
1125 vg = br_vlan_group(br);
1128 /* If vlan filtering is enabled and VLAN is not specified
1129 * install mdb entry on all vlans configured on the port.
1130 */
1131 if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
1132 list_for_each_entry(v, &vg->vlan_list, vlist) {
1133 entry->vid = v->vid;
1134 err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
1139 err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
/* Core of RTM_DELMDB: remove the host join or the port group matching
 * @entry from the MDB, under the bridge multicast lock. Returns 0 when
 * something was deleted (error labels are on lines missing from this
 * extract).
 */
1145 static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry,
1146 struct nlattr **mdb_attrs)
1148 struct net_bridge_mdb_entry *mp;
1149 struct net_bridge_port_group *p;
1150 struct net_bridge_port_group __rcu **pp;
1154 if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
1157 __mdb_entry_to_br_ip(entry, &ip, mdb_attrs);
1159 spin_lock_bh(&br->multicast_lock);
1160 mp = br_mdb_ip_get(br, &ip);
/* Host-leave case: ifindex names the bridge device itself. */
1165 if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) {
1166 br_multicast_host_leave(mp, false);
1168 br_mdb_notify(br->dev, mp, NULL, RTM_DELMDB);
/* If no ports remain, arm the group timer to expire the entry. */
1169 if (!mp->ports && netif_running(br->dev))
1170 mod_timer(&mp->timer, jiffies);
/* Port-group case: find the matching port's entry and delete it. */
1174 for (pp = &mp->ports;
1175 (p = mlock_dereference(*pp, br)) != NULL;
1177 if (!p->key.port || p->key.port->dev->ifindex != entry->ifindex)
1180 if (p->key.port->state == BR_STATE_DISABLED)
1183 br_multicast_del_pg(mp, p, pp);
1189 spin_unlock_bh(&br->multicast_lock);
/* RTM_DELMDB doit handler: parse the request, resolve the bridge and
 * (optionally) the member port, then delete the entry — on every
 * configured VLAN when filtering is enabled and vid 0 was given,
 * mirroring br_mdb_add(). Note the port checks here return plain errors
 * without extack, unlike the add path.
 */
1193 static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
1194 struct netlink_ext_ack *extack)
1196 struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
1197 struct net *net = sock_net(skb->sk);
1198 struct net_bridge_vlan_group *vg;
1199 struct net_bridge_port *p = NULL;
1200 struct net_device *dev, *pdev;
1201 struct br_mdb_entry *entry;
1202 struct net_bridge_vlan *v;
1203 struct net_bridge *br;
1206 err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
1210 br = netdev_priv(dev);
/* Non-bridge ifindex: resolve and validate the member port. */
1212 if (entry->ifindex != br->dev->ifindex) {
1213 pdev = __dev_get_by_index(net, entry->ifindex);
1217 p = br_port_get_rtnl(pdev);
1218 if (!p || p->br != br || p->state == BR_STATE_DISABLED)
1220 vg = nbp_vlan_group(p);
1222 vg = br_vlan_group(br);
1225 /* If vlan filtering is enabled and VLAN is not specified
1226 * delete mdb entry on all vlans configured on the port.
1227 */
1228 if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
1229 list_for_each_entry(v, &vg->vlan_list, vlist) {
1230 entry->vid = v->vid;
1231 err = __br_mdb_del(br, entry, mdb_attrs);
1234 err = __br_mdb_del(br, entry, mdb_attrs);
/* Register the MDB rtnetlink handlers: GETMDB (dump), NEWMDB and DELMDB
 * (doit). Return values are intentionally ignored here; registration
 * failures for PF_BRIDGE at init are not handled.
 */
1240 void br_mdb_init(void)
1242 rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
1243 rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
1244 rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
/* Unregister the three MDB rtnetlink handlers registered in br_mdb_init(). */
1247 void br_mdb_uninit(void)
1249 rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
1250 rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
1251 rtnl_unregister(PF_BRIDGE, RTM_DELMDB);