1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/kernel.h>
3 #include <linux/netdevice.h>
4 #include <linux/rtnetlink.h>
5 #include <linux/slab.h>
6 #include <net/switchdev.h>
8 #include "br_private.h"
9 #include "br_private_tunnel.h"
/* Forward declaration: updates the carrier state of an upper 8021q
 * bridge-binding vlan device when a port vlan changes (defined below).
 */
11 static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid);
/* rhashtable compare callback: an entry matches when its vid equals the
 * u16 key; returns 0 on match per rhashtable convention.
 */
13 static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
16 const struct net_bridge_vlan *vle = ptr;
17 u16 vid = *(u16 *)arg->key;
19 return vle->vid != vid;
/* Hash table parameters shared by the per-bridge and per-port vlan
 * tables; keyed by the 16-bit vid, bounded at VLAN_N_VID entries.
 */
22 static const struct rhashtable_params br_vlan_rht_params = {
23 .head_offset = offsetof(struct net_bridge_vlan, vnode),
24 .key_offset = offsetof(struct net_bridge_vlan, vid),
25 .key_len = sizeof(u16),
27 .max_size = VLAN_N_VID,
28 .obj_cmpfn = br_vlan_cmp,
29 .automatic_shrinking = true,
/* Look up a vlan entry by vid in the given vlan hash table. */
32 static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
34 return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
/* Make vid the group's pvid; presumably returns true only when the pvid
 * actually changed -- body not visible here, TODO confirm against full source.
 */
37 static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
/* Clear the group's pvid if it currently equals vid; presumably returns
 * true on change -- body not visible here, TODO confirm against full source.
 */
48 static bool __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
59 /* return true if anything changed, false otherwise */
60 static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
62 struct net_bridge_vlan_group *vg;
63 u16 old_flags = v->flags;
/* Pick the vlan group this entry belongs to: the bridge's own group for
 * master entries, the port's group otherwise.
 */
66 if (br_vlan_is_master(v))
67 vg = br_vlan_group(v->br);
69 vg = nbp_vlan_group(v->port);
/* Apply (or withdraw) the PVID role for this vid on the group. */
71 if (flags & BRIDGE_VLAN_INFO_PVID)
72 ret = __vlan_add_pvid(vg, v->vid);
74 ret = __vlan_delete_pvid(vg, v->vid);
/* Mirror the UNTAGGED flag from the request into the entry. */
76 if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
77 v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
79 v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
/* Changed if the pvid changed or any flag bit flipped. */
81 return ret || !!(old_flags ^ v->flags);
/* Install v->vid into @dev's vlan rx filter. Tries to offload via
 * switchdev first; on -EOPNOTSUPP falls back to the software 8021q
 * filter (vlan_vid_add). On switchdev success the entry is marked so
 * that deletion skips vlan_vid_del for it.
 */
84 static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
85 struct net_bridge_vlan *v, u16 flags,
86 struct netlink_ext_ack *extack)
90 /* Try switchdev op first. In case it is not supported, fallback to
93 err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
94 if (err == -EOPNOTSUPP)
95 return vlan_vid_add(dev, br->vlan_proto, v->vid);
/* Offloaded by hardware: remember it so __vlan_vid_del() knows. */
96 v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV;
/* Insert @v into its group's vlan_list, keeping the list sorted by vid.
 * Walks backwards from the tail since new vids are typically largest.
 */
100 static void __vlan_add_list(struct net_bridge_vlan *v)
102 struct net_bridge_vlan_group *vg;
103 struct list_head *headp, *hpos;
104 struct net_bridge_vlan *vent;
106 if (br_vlan_is_master(v))
107 vg = br_vlan_group(v->br);
109 vg = nbp_vlan_group(v->port);
111 headp = &vg->vlan_list;
112 list_for_each_prev(hpos, headp) {
113 vent = list_entry(hpos, struct net_bridge_vlan, vlist);
114 if (v->vid < vent->vid)
/* RCU insert so readers can traverse concurrently. */
119 list_add_rcu(&v->vlist, hpos);
/* Unlink @v from its group's ordered vlan list (RCU-safe). */
122 static void __vlan_del_list(struct net_bridge_vlan *v)
124 list_del_rcu(&v->vlist);
/* Remove v->vid from @dev's vlan rx filter: always try the switchdev
 * path; only touch the software 8021q filter when the entry was not
 * added via switchdev. -EOPNOTSUPP from switchdev is not an error.
 */
127 static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
128 const struct net_bridge_vlan *v)
132 /* Try switchdev op first. In case it is not supported, fallback to
135 err = br_switchdev_port_vlan_del(dev, v->vid);
136 if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV))
137 vlan_vid_del(dev, br->vlan_proto, v->vid);
138 return err == -EOPNOTSUPP ? 0 : err;
141 /* Returns a master vlan, if it didn't exist it gets created. In all cases
142 * a reference is taken to the master vlan before returning.
144 static struct net_bridge_vlan *
145 br_vlan_get_master(struct net_bridge *br, u16 vid,
146 struct netlink_ext_ack *extack)
148 struct net_bridge_vlan_group *vg;
149 struct net_bridge_vlan *masterv;
151 vg = br_vlan_group(br);
152 masterv = br_vlan_find(vg, vid);
156 /* missing global ctx, create it now */
157 if (br_vlan_add(br, vid, 0, &changed, extack))
159 masterv = br_vlan_find(vg, vid);
/* br_vlan_add() succeeded, so the entry must exist now. */
160 if (WARN_ON(!masterv))
/* First user of a freshly-created context entry. */
162 refcount_set(&masterv->refcnt, 1);
165 refcount_inc(&masterv->refcnt);
/* RCU callback: free a master vlan entry and its per-cpu stats after
 * all readers are done.
 */
170 static void br_master_vlan_rcu_free(struct rcu_head *rcu)
172 struct net_bridge_vlan *v;
174 v = container_of(rcu, struct net_bridge_vlan, rcu);
175 WARN_ON(!br_vlan_is_master(v));
176 free_percpu(v->stats);
/* Drop a reference on a master vlan; on the last put remove it from the
 * bridge's hash and list and schedule RCU-deferred freeing.
 */
181 static void br_vlan_put_master(struct net_bridge_vlan *masterv)
183 struct net_bridge_vlan_group *vg;
185 if (!br_vlan_is_master(masterv))
188 vg = br_vlan_group(masterv->br);
189 if (refcount_dec_and_test(&masterv->refcnt)) {
190 rhashtable_remove_fast(&vg->vlan_hash,
191 &masterv->vnode, br_vlan_rht_params);
192 __vlan_del_list(masterv);
193 call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
/* RCU callback: free a port vlan entry; per-port stats are owned by the
 * port entry only when BR_VLFLAG_PER_PORT_STATS is set (otherwise the
 * stats pointer is shared with the master vlan).
 */
197 static void nbp_vlan_rcu_free(struct rcu_head *rcu)
199 struct net_bridge_vlan *v;
201 v = container_of(rcu, struct net_bridge_vlan, rcu);
202 WARN_ON(br_vlan_is_master(v));
203 /* if we had per-port stats configured then free them here */
204 if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
205 free_percpu(v->stats);
210 /* This is the shared VLAN add function which works for both ports and bridge
211 * devices. There are four possible calls to this function in terms of the
213 * 1. vlan is being added on a port (no master flags, global entry exists)
214 * 2. vlan is being added on a bridge (both master and brentry flags)
215 * 3. vlan is being added on a port, but a global entry didn't exist which
216 * is being created right now (master flag set, brentry flag unset), the
217 * global entry is used for global per-vlan features, but not for filtering
218 * 4. same as 3 but with both master and brentry flags set so the entry
219 * will be used for filtering in both the port and the bridge
221 static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
222 struct netlink_ext_ack *extack)
224 struct net_bridge_vlan *masterv = NULL;
225 struct net_bridge_port *p = NULL;
226 struct net_bridge_vlan_group *vg;
227 struct net_device *dev;
228 struct net_bridge *br;
/* Resolve bridge/port, device and vlan group for either entry kind. */
231 if (br_vlan_is_master(v)) {
234 vg = br_vlan_group(br);
239 vg = nbp_vlan_group(p);
/* ---- port-entry path ---- */
243 /* Add VLAN to the device filter if it is supported.
244 * This ensures tagged traffic enters the bridge when
245 * promiscuous mode is disabled by br_manage_promisc().
247 err = __vlan_vid_add(dev, br, v, flags, extack);
251 /* need to work on the master vlan too */
252 if (flags & BRIDGE_VLAN_INFO_MASTER) {
253 err = br_vlan_add(br, v->vid,
256 flags | BRIDGE_VLAN_INFO_BRENTRY,
/* Take a reference on (or create) the bridge-global context entry. */
262 masterv = br_vlan_get_master(br, v->vid, extack);
/* Port stats: private per-port counters if the option is on,
 * otherwise share the master vlan's per-cpu stats.
 */
268 if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
269 v->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
274 v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
276 v->stats = masterv->stats;
/* ---- bridge-entry path: offload to hardware if possible ---- */
279 err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
280 if (err && err != -EOPNOTSUPP)
284 /* Add the dev mac and count the vlan only if it's usable */
285 if (br_vlan_should_use(v)) {
286 err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
288 br_err(br, "failed insert local address into bridge forwarding table\n");
/* Publish the entry: hash insert, then apply pvid/untagged flags. */
294 err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
300 __vlan_add_flags(v, flags);
303 nbp_vlan_set_vlan_dev_state(p, v->vid);
/* ---- error unwind: undo in reverse order of the setup above ---- */
308 if (br_vlan_should_use(v)) {
309 br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
315 __vlan_vid_del(dev, br, v);
/* Only free stats we allocated; shared master stats stay. */
317 if (v->stats && masterv->stats != v->stats)
318 free_percpu(v->stats);
321 br_vlan_put_master(masterv);
325 br_switchdev_port_vlan_del(dev, v->vid);
/* Shared vlan delete for port and bridge entries: withdraws pvid,
 * removes the filter/offload, unlinks the entry and drops the master
 * reference. Frees port entries via RCU.
 */
331 static int __vlan_del(struct net_bridge_vlan *v)
333 struct net_bridge_vlan *masterv = v;
334 struct net_bridge_vlan_group *vg;
335 struct net_bridge_port *p = NULL;
338 if (br_vlan_is_master(v)) {
339 vg = br_vlan_group(v->br);
342 vg = nbp_vlan_group(v->port);
/* Stop treating this vid as the group's pvid first. */
346 __vlan_delete_pvid(vg, v->vid);
/* Port entry: remove from the port device's vlan filter. */
348 err = __vlan_vid_del(p->dev, p->br, v);
/* Bridge entry: withdraw the hardware offload. */
352 err = br_switchdev_port_vlan_del(v->br->dev, v->vid);
353 if (err && err != -EOPNOTSUPP)
/* A usable brentry loses its filtering role but may stay alive as
 * a context entry while ports still reference it.
 */
358 if (br_vlan_should_use(v)) {
359 v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
364 vlan_tunnel_info_del(vg, v);
365 rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
368 nbp_vlan_set_vlan_dev_state(p, v->vid);
369 call_rcu(&v->rcu, nbp_vlan_rcu_free);
372 br_vlan_put_master(masterv);
/* Tear down an (already empty) vlan group: hash and tunnel state. */
377 static void __vlan_group_free(struct net_bridge_vlan_group *vg)
379 WARN_ON(!list_empty(&vg->vlan_list));
380 rhashtable_destroy(&vg->vlan_hash);
381 vlan_tunnel_deinit(vg);
/* Delete every vlan in the group, clearing the pvid first. */
385 static void __vlan_flush(struct net_bridge_vlan_group *vg)
387 struct net_bridge_vlan *vlan, *tmp;
389 __vlan_delete_pvid(vg, vg->pvid);
390 list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
/* Egress vlan handling: for a frame that was vlan-filtered at ingress,
 * account tx stats, strip the tag when the vlan is UNTAGGED on this
 * egress, and run tunnel egress handling for BR_VLAN_TUNNEL ports.
 */
394 struct sk_buff *br_handle_vlan(struct net_bridge *br,
395 const struct net_bridge_port *p,
396 struct net_bridge_vlan_group *vg,
399 struct br_vlan_stats *stats;
400 struct net_bridge_vlan *v;
403 /* If this packet was not filtered at input, let it pass */
404 if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
407 /* At this point, we know that the frame was filtered and contains
408 * a valid vlan id. If the vlan id has untagged flag set,
409 * send untagged; otherwise, send tagged.
411 br_vlan_get_tag(skb, &vid);
412 v = br_vlan_find(vg, vid);
413 /* Vlan entry must be configured at this point. The
414 * only exception is the bridge is set in promisc mode and the
415 * packet is destined for the bridge device. In this case
416 * pass the packet as is.
418 if (!v || !br_vlan_should_use(v)) {
419 if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
/* Per-cpu tx accounting, guarded by the stats option. */
426 if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
427 stats = this_cpu_ptr(v->stats);
428 u64_stats_update_begin(&stats->syncp);
429 stats->tx_bytes += skb->len;
431 u64_stats_update_end(&stats->syncp);
434 if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
435 __vlan_hwaccel_clear_tag(skb);
437 if (p && (p->flags & BR_VLAN_TUNNEL) &&
438 br_handle_egress_vlan_tunnel(skb, v)) {
446 /* Called under RCU */
/* Core ingress vlan filtering: normalize the frame's tag (untag sw-only
 * frames, re-tag on protocol mismatch, assign pvid to untagged or
 * priority-tagged frames), then account rx stats for the entry.
 */
447 static bool __allowed_ingress(const struct net_bridge *br,
448 struct net_bridge_vlan_group *vg,
449 struct sk_buff *skb, u16 *vid)
451 struct br_vlan_stats *stats;
452 struct net_bridge_vlan *v;
455 BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
456 /* If vlan tx offload is disabled on bridge device and frame was
457 * sent from vlan device on the bridge device, it does not have
458 * HW accelerated vlan tag.
460 if (unlikely(!skb_vlan_tag_present(skb) &&
461 skb->protocol == br->vlan_proto)) {
462 skb = skb_vlan_untag(skb);
467 if (!br_vlan_get_tag(skb, vid)) {
469 if (skb->vlan_proto != br->vlan_proto) {
470 /* Protocol-mismatch, empty out vlan_tci for new tag */
471 skb_push(skb, ETH_HLEN);
472 skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
473 skb_vlan_tag_get(skb));
477 skb_pull(skb, ETH_HLEN);
478 skb_reset_mac_len(skb);
490 u16 pvid = br_get_pvid(vg);
492 /* Frame had a tag with VID 0 or did not have a tag.
493 * See if pvid is set on this port. That tells us which
494 * vlan untagged or priority-tagged traffic belongs to.
499 /* PVID is set on this port. Any untagged or priority-tagged
500 * ingress frame is considered to belong to this vlan.
504 /* Untagged Frame. */
505 __vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
507 /* Priority-tagged Frame.
508 * At this point, we know that skb->vlan_tci VID
510 * We update only VID field and preserve PCP field.
512 skb->vlan_tci |= pvid;
514 /* if stats are disabled we can avoid the lookup */
515 if (!br_opt_get(br, BROPT_VLAN_STATS_ENABLED))
518 v = br_vlan_find(vg, *vid);
519 if (!v || !br_vlan_should_use(v))
522 if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
523 stats = this_cpu_ptr(v->stats);
524 u64_stats_update_begin(&stats->syncp);
525 stats->rx_bytes += skb->len;
527 u64_stats_update_end(&stats->syncp);
/* Public ingress hook: bypass filtering entirely (and mark the skb as
 * unfiltered) when vlan filtering is disabled on the bridge.
 */
537 bool br_allowed_ingress(const struct net_bridge *br,
538 struct net_bridge_vlan_group *vg, struct sk_buff *skb,
541 /* If VLAN filtering is disabled on the bridge, all packets are
544 if (!br_opt_get(br, BROPT_VLAN_ENABLED)) {
545 BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
549 return __allowed_ingress(br, vg, skb, vid);
552 /* Called under RCU. */
/* Egress check: a vlan-filtered frame may leave through this group only
 * if the group has a usable entry for the frame's vid.
 */
553 bool br_allowed_egress(struct net_bridge_vlan_group *vg,
554 const struct sk_buff *skb)
556 const struct net_bridge_vlan *v;
559 /* If this packet was not filtered at input, let it pass */
560 if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
563 br_vlan_get_tag(skb, &vid);
564 v = br_vlan_find(vg, vid);
565 if (v && br_vlan_should_use(v))
571 /* Called under RCU */
/* Decide whether an fdb entry may be learned for this frame on port @p,
 * resolving *vid (falling back to the port pvid for untagged frames).
 */
572 bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
574 struct net_bridge_vlan_group *vg;
575 struct net_bridge *br = p->br;
577 /* If filtering was disabled at input, let it pass. */
578 if (!br_opt_get(br, BROPT_VLAN_ENABLED))
581 vg = nbp_vlan_group_rcu(p);
582 if (!vg || !vg->num_vlans)
585 if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
589 *vid = br_get_pvid(vg);
596 if (br_vlan_find(vg, *vid))
/* Update an already-existing bridge vlan: re-offload the flags, promote
 * a context-only entry to a real brentry when requested (inserting the
 * bridge mac into the fdb), and apply flag changes.
 */
602 static int br_vlan_add_existing(struct net_bridge *br,
603 struct net_bridge_vlan_group *vg,
604 struct net_bridge_vlan *vlan,
605 u16 flags, bool *changed,
606 struct netlink_ext_ack *extack)
610 err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags, extack);
611 if (err && err != -EOPNOTSUPP)
614 if (!br_vlan_is_brentry(vlan)) {
615 /* Trying to change flags of non-existent bridge vlan */
616 if (!(flags & BRIDGE_VLAN_INFO_BRENTRY)) {
620 /* It was only kept for port vlans, now make it real */
621 err = br_fdb_insert(br, NULL, br->dev->dev_addr,
624 br_err(br, "failed to insert local address into bridge forwarding table\n");
/* The bridge itself now holds a reference on the entry. */
628 refcount_inc(&vlan->refcnt);
629 vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
634 if (__vlan_add_flags(vlan, flags))
/* Undo the offload done above on failure. */
641 br_switchdev_port_vlan_del(br->dev, vlan->vid);
645 /* Must be protected by RTNL.
646 * Must be called with vid in range from 1 to 4094 inclusive.
647 * changed must be true only if the vlan was created or updated
649 int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed,
650 struct netlink_ext_ack *extack)
652 struct net_bridge_vlan_group *vg;
653 struct net_bridge_vlan *vlan;
659 vg = br_vlan_group(br);
660 vlan = br_vlan_find(vg, vid);
662 return br_vlan_add_existing(br, vg, vlan, flags, changed,
/* New bridge entry: allocate it plus its per-cpu stats. */
665 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
669 vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
/* Bridge-owned entries are always "master"; PVID is decided by
 * __vlan_add_flags(), not carried in the stored flags here.
 */
675 vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
676 vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
678 if (flags & BRIDGE_VLAN_INFO_BRENTRY)
679 refcount_set(&vlan->refcnt, 1);
680 ret = __vlan_add(vlan, flags, extack);
682 free_percpu(vlan->stats);
691 /* Must be protected by RTNL.
692 * Must be called with vid in range from 1 to 4094 inclusive.
694 int br_vlan_delete(struct net_bridge *br, u16 vid)
696 struct net_bridge_vlan_group *vg;
697 struct net_bridge_vlan *v;
701 vg = br_vlan_group(br);
702 v = br_vlan_find(vg, vid);
703 if (!v || !br_vlan_is_brentry(v))
/* Purge fdb entries for this vid before deleting the vlan. */
706 br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
707 br_fdb_delete_by_port(br, NULL, vid, 0);
709 vlan_tunnel_info_del(vg, v);
711 return __vlan_del(v);
/* Remove all bridge vlans and free the bridge's vlan group. */
714 void br_vlan_flush(struct net_bridge *br)
716 struct net_bridge_vlan_group *vg;
720 vg = br_vlan_group(br);
/* Unpublish the group before freeing it. */
722 RCU_INIT_POINTER(br->vlgrp, NULL);
724 __vlan_group_free(vg);
/* Find a vlan entry by vid in @vg (NULL-safe via the hash lookup). */
727 struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
732 return br_vlan_lookup(&vg->vlan_hash, vid);
735 /* Must be protected by RTNL. */
/* Pick the STP group address byte depending on vlan filtering and
 * protocol: 01:80:C2:00:00:00 for 802.1Q (or filtering off),
 * 01:80:C2:00:00:08 (Provider Bridge) for 802.1AD. Skipped entirely
 * when the address was set explicitly by the user.
 */
736 static void recalculate_group_addr(struct net_bridge *br)
738 if (br_opt_get(br, BROPT_GROUP_ADDR_SET))
741 spin_lock_bh(&br->lock);
742 if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
743 br->vlan_proto == htons(ETH_P_8021Q)) {
744 /* Bridge Group Address */
745 br->group_addr[5] = 0x00;
746 } else { /* vlan_enabled && ETH_P_8021AD */
747 /* Provider Bridge Group Address */
748 br->group_addr[5] = 0x08;
750 spin_unlock_bh(&br->lock);
753 /* Must be protected by RTNL. */
/* Recompute which link-local group addresses must be forwarded to the
 * bridge, mirroring the protocol choice made above.
 */
754 void br_recalculate_fwd_mask(struct net_bridge *br)
756 if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
757 br->vlan_proto == htons(ETH_P_8021Q))
758 br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
759 else /* vlan_enabled && ETH_P_8021AD */
760 br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
761 ~(1u << br->group_addr[5]);
/* Toggle vlan filtering on the bridge: propagate to switchdev, then
 * update promiscuity, group address and forward mask. No-op when the
 * option already has the requested value.
 */
764 int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
766 struct switchdev_attr attr = {
768 .id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
769 .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
770 .u.vlan_filtering = val,
774 if (br_opt_get(br, BROPT_VLAN_ENABLED) == !!val)
777 err = switchdev_port_attr_set(br->dev, &attr);
778 if (err && err != -EOPNOTSUPP)
781 br_opt_toggle(br, BROPT_VLAN_ENABLED, !!val);
782 br_manage_promisc(br);
783 recalculate_group_addr(br);
784 br_recalculate_fwd_mask(br);
/* sysfs/netlink entry point for the vlan_filtering option. */
789 int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
791 return __br_vlan_filter_toggle(br, val);
/* Exported: report whether vlan filtering is enabled on @dev. */
794 bool br_vlan_enabled(const struct net_device *dev)
796 struct net_bridge *br = netdev_priv(dev);
798 return br_opt_get(br, BROPT_VLAN_ENABLED);
800 EXPORT_SYMBOL_GPL(br_vlan_enabled);
/* Exported: report the bridge's vlan protocol in host byte order. */
802 int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto)
804 struct net_bridge *br = netdev_priv(dev);
806 *p_proto = ntohs(br->vlan_proto);
810 EXPORT_SYMBOL_GPL(br_vlan_get_proto);
/* Switch the bridge vlan protocol (802.1Q <-> 802.1AD): first install
 * all existing port vlans under the new proto in every port's device
 * filter, then flip the proto and remove the old-proto filters. On
 * failure the partially-added new-proto filters are rolled back.
 */
812 int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
815 struct net_bridge_port *p;
816 struct net_bridge_vlan *vlan;
817 struct net_bridge_vlan_group *vg;
820 if (br->vlan_proto == proto)
823 /* Add VLANs for the new proto to the device filter. */
824 list_for_each_entry(p, &br->port_list, list) {
825 vg = nbp_vlan_group(p);
826 list_for_each_entry(vlan, &vg->vlan_list, vlist) {
827 err = vlan_vid_add(p->dev, proto, vlan->vid);
/* Commit the new protocol and refresh derived state. */
833 oldproto = br->vlan_proto;
834 br->vlan_proto = proto;
836 recalculate_group_addr(br);
837 br_recalculate_fwd_mask(br);
839 /* Delete VLANs for the old proto from the device filter. */
840 list_for_each_entry(p, &br->port_list, list) {
841 vg = nbp_vlan_group(p);
842 list_for_each_entry(vlan, &vg->vlan_list, vlist)
843 vlan_vid_del(p->dev, oldproto, vlan->vid);
/* Error unwind: undo new-proto filters on the failing port's
 * already-processed vlans, then on all earlier ports.
 */
849 list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
850 vlan_vid_del(p->dev, proto, vlan->vid);
852 list_for_each_entry_continue_reverse(p, &br->port_list, list) {
853 vg = nbp_vlan_group(p);
854 list_for_each_entry(vlan, &vg->vlan_list, vlist)
855 vlan_vid_del(p->dev, proto, vlan->vid);
/* netlink/sysfs entry point: only 802.1Q and 802.1AD are accepted. */
861 int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
863 if (val != ETH_P_8021Q && val != ETH_P_8021AD)
864 return -EPROTONOSUPPORT;
866 return __br_vlan_set_proto(br, htons(val));
/* Toggle per-vlan statistics accounting. */
869 int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
874 br_opt_toggle(br, BROPT_VLAN_STATS_ENABLED, !!val);
/* Toggle per-port vlan stats; only allowed while no port has vlans
 * configured (checked by walking the port list).
 */
883 int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val)
885 struct net_bridge_port *p;
887 /* allow to change the option if there are no port vlans configured */
888 list_for_each_entry(p, &br->port_list, list) {
889 struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
898 br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val);
/* True when @vid exists in @vg as a usable, untagged entry -- i.e. it
 * still looks exactly like an auto-created default-pvid vlan that it is
 * safe for the bridge to remove/replace.
 */
907 static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
909 struct net_bridge_vlan *v;
914 v = br_vlan_lookup(&vg->vlan_hash, vid);
915 if (v && br_vlan_should_use(v) &&
916 (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
/* Turn off the default pvid: delete the auto-created vlan from the
 * bridge and every port where the user hasn't modified it.
 */
922 static void br_vlan_disable_default_pvid(struct net_bridge *br)
924 struct net_bridge_port *p;
925 u16 pvid = br->default_pvid;
927 /* Disable default_pvid on all ports where it is still
930 if (vlan_default_pvid(br_vlan_group(br), pvid))
931 br_vlan_delete(br, pvid);
933 list_for_each_entry(p, &br->port_list, list) {
934 if (vlan_default_pvid(nbp_vlan_group(p), pvid))
935 nbp_vlan_delete(p, pvid);
938 br->default_pvid = 0;
/* Change the default pvid: add the new pvid vlan (untagged) on the
 * bridge and each port whose config is still the auto-created default,
 * delete the old one, and record per-port changes in a bitmap so a
 * mid-way failure can be fully rolled back.
 */
941 int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
942 struct netlink_ext_ack *extack)
944 const struct net_bridge_vlan *pvent;
945 struct net_bridge_vlan_group *vg;
946 struct net_bridge_port *p;
947 unsigned long *changed;
/* pvid == 0 means "disable default pvid" entirely. */
953 br_vlan_disable_default_pvid(br);
957 changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
961 old_pvid = br->default_pvid;
963 /* Update default_pvid config only if we do not conflict with
964 * user configuration.
966 vg = br_vlan_group(br);
967 pvent = br_vlan_find(vg, pvid);
968 if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
969 (!pvent || !br_vlan_should_use(pvent))) {
970 err = br_vlan_add(br, pvid,
971 BRIDGE_VLAN_INFO_PVID |
972 BRIDGE_VLAN_INFO_UNTAGGED |
973 BRIDGE_VLAN_INFO_BRENTRY,
977 br_vlan_delete(br, old_pvid);
981 list_for_each_entry(p, &br->port_list, list) {
982 /* Update default_pvid config only if we do not conflict with
983 * user configuration.
985 vg = nbp_vlan_group(p);
987 !vlan_default_pvid(vg, old_pvid)) ||
988 br_vlan_find(vg, pvid))
991 err = nbp_vlan_add(p, pvid,
992 BRIDGE_VLAN_INFO_PVID |
993 BRIDGE_VLAN_INFO_UNTAGGED,
997 nbp_vlan_delete(p, old_pvid);
998 set_bit(p->port_no, changed);
1001 br->default_pvid = pvid;
1004 bitmap_free(changed);
/* Error unwind: restore old pvid on every port we touched (bit set
 * in @changed), then on the bridge itself (bit 0).
 */
1008 list_for_each_entry_continue_reverse(p, &br->port_list, list) {
1009 if (!test_bit(p->port_no, changed))
1013 nbp_vlan_add(p, old_pvid,
1014 BRIDGE_VLAN_INFO_PVID |
1015 BRIDGE_VLAN_INFO_UNTAGGED,
1017 nbp_vlan_delete(p, pvid);
1020 if (test_bit(0, changed)) {
1022 br_vlan_add(br, old_pvid,
1023 BRIDGE_VLAN_INFO_PVID |
1024 BRIDGE_VLAN_INFO_UNTAGGED |
1025 BRIDGE_VLAN_INFO_BRENTRY,
1027 br_vlan_delete(br, pvid);
/* netlink/sysfs entry point: validate range and forbid changing the
 * default pvid while vlan filtering is enabled.
 */
1032 int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
1037 if (val >= VLAN_VID_MASK)
1040 if (pvid == br->default_pvid)
1043 /* Only allow default pvid change when filtering is disabled */
1044 if (br_opt_get(br, BROPT_VLAN_ENABLED)) {
1045 pr_info_once("Please disable vlan filtering to change default_pvid\n");
1049 err = __br_vlan_set_default_pvid(br, pvid, NULL);
/* Allocate and publish the bridge's vlan group: hash table, tunnel
 * state, defaults (802.1Q proto, default_pvid 1).
 */
1054 int br_vlan_init(struct net_bridge *br)
1056 struct net_bridge_vlan_group *vg;
1059 vg = kzalloc(sizeof(*vg), GFP_KERNEL);
1062 ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
1065 ret = vlan_tunnel_init(vg);
1067 goto err_tunnel_init;
1068 INIT_LIST_HEAD(&vg->vlan_list);
1069 br->vlan_proto = htons(ETH_P_8021Q);
1070 br->default_pvid = 1;
1071 rcu_assign_pointer(br->vlgrp, vg);
/* Error unwind for the hash table allocation. */
1077 rhashtable_destroy(&vg->vlan_hash);
/* Per-port vlan init: push the bridge's filtering state to switchdev,
 * set up the port's vlan group and, if the bridge has a default pvid,
 * create the port's default pvid vlan.
 */
1084 int nbp_vlan_init(struct net_bridge_port *p, struct netlink_ext_ack *extack)
1086 struct switchdev_attr attr = {
1087 .orig_dev = p->br->dev,
1088 .id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
1089 .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
1090 .u.vlan_filtering = br_opt_get(p->br, BROPT_VLAN_ENABLED),
1092 struct net_bridge_vlan_group *vg;
1095 vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
1099 ret = switchdev_port_attr_set(p->dev, &attr);
1100 if (ret && ret != -EOPNOTSUPP)
1101 goto err_vlan_enabled;
1103 ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
1106 ret = vlan_tunnel_init(vg);
1108 goto err_tunnel_init;
1109 INIT_LIST_HEAD(&vg->vlan_list);
1110 rcu_assign_pointer(p->vlgrp, vg);
1111 if (p->br->default_pvid) {
1114 ret = nbp_vlan_add(p, p->br->default_pvid,
1115 BRIDGE_VLAN_INFO_PVID |
1116 BRIDGE_VLAN_INFO_UNTAGGED,
/* Error unwind: unpublish the group, then tear down in reverse. */
1125 RCU_INIT_POINTER(p->vlgrp, NULL);
1127 vlan_tunnel_deinit(vg);
1129 rhashtable_destroy(&vg->vlan_hash);
1137 /* Must be protected by RTNL.
1138 * Must be called with vid in range from 1 to 4094 inclusive.
1139 * changed must be true only if the vlan was created or updated
1141 int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
1142 bool *changed, struct netlink_ext_ack *extack)
1144 struct net_bridge_vlan *vlan;
1150 vlan = br_vlan_find(nbp_vlan_group(port), vid);
/* Existing port vlan: just re-offload and update the flags. */
1152 /* Pass the flags to the hardware bridge */
1153 ret = br_switchdev_port_vlan_add(port->dev, vid, flags, extack);
1154 if (ret && ret != -EOPNOTSUPP)
1156 *changed = __vlan_add_flags(vlan, flags);
/* New port vlan: allocate and run the shared add path. */
1161 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1167 ret = __vlan_add(vlan, flags, extack);
1176 /* Must be protected by RTNL.
1177 * Must be called with vid in range from 1 to 4094 inclusive.
1179 int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
1181 struct net_bridge_vlan *v;
1185 v = br_vlan_find(nbp_vlan_group(port), vid);
/* Purge fdb entries for this vid on the port before deleting. */
1188 br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
1189 br_fdb_delete_by_port(port->br, port, vid, 0);
1191 return __vlan_del(v);
/* Remove all of a port's vlans and free its vlan group. */
1194 void nbp_vlan_flush(struct net_bridge_port *port)
1196 struct net_bridge_vlan_group *vg;
1200 vg = nbp_vlan_group(port);
1202 RCU_INIT_POINTER(port->vlgrp, NULL);
1204 __vlan_group_free(vg);
/* Sum a vlan's per-cpu counters into @stats, using the u64_stats
 * seqcount retry loop so each cpu's snapshot is consistent.
 */
1207 void br_vlan_get_stats(const struct net_bridge_vlan *v,
1208 struct br_vlan_stats *stats)
1212 memset(stats, 0, sizeof(*stats));
1213 for_each_possible_cpu(i) {
1214 u64 rxpackets, rxbytes, txpackets, txbytes;
1215 struct br_vlan_stats *cpu_stats;
1218 cpu_stats = per_cpu_ptr(v->stats, i);
1220 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
1221 rxpackets = cpu_stats->rx_packets;
1222 rxbytes = cpu_stats->rx_bytes;
1223 txbytes = cpu_stats->tx_bytes;
1224 txpackets = cpu_stats->tx_packets;
1225 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
1227 stats->rx_packets += rxpackets;
1228 stats->rx_bytes += rxbytes;
1229 stats->tx_bytes += txbytes;
1230 stats->tx_packets += txpackets;
/* Exported: pvid lookup under RTNL, for a bridge port or the bridge
 * master device itself.
 */
1234 int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
1236 struct net_bridge_vlan_group *vg;
1237 struct net_bridge_port *p;
1240 p = br_port_get_check_rtnl(dev);
1242 vg = nbp_vlan_group(p);
1243 else if (netif_is_bridge_master(dev))
1244 vg = br_vlan_group(netdev_priv(dev));
1248 *p_pvid = br_get_pvid(vg);
1251 EXPORT_SYMBOL_GPL(br_vlan_get_pvid);
/* Exported: same as above but callable under RCU instead of RTNL. */
1253 int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid)
1255 struct net_bridge_vlan_group *vg;
1256 struct net_bridge_port *p;
1258 p = br_port_get_check_rcu(dev);
1260 vg = nbp_vlan_group_rcu(p);
1261 else if (netif_is_bridge_master(dev))
1262 vg = br_vlan_group_rcu(netdev_priv(dev));
1266 *p_pvid = br_get_pvid(vg);
1269 EXPORT_SYMBOL_GPL(br_vlan_get_pvid_rcu);
/* Exported: fill @p_vinfo with a vlan's flags (adding the PVID flag
 * when the vid is the group's pvid). Under RTNL.
 */
1271 int br_vlan_get_info(const struct net_device *dev, u16 vid,
1272 struct bridge_vlan_info *p_vinfo)
1274 struct net_bridge_vlan_group *vg;
1275 struct net_bridge_vlan *v;
1276 struct net_bridge_port *p;
1279 p = br_port_get_check_rtnl(dev);
1281 vg = nbp_vlan_group(p);
1282 else if (netif_is_bridge_master(dev))
1283 vg = br_vlan_group(netdev_priv(dev));
1287 v = br_vlan_find(vg, vid);
1292 p_vinfo->flags = v->flags;
1293 if (vid == br_get_pvid(vg))
1294 p_vinfo->flags |= BRIDGE_VLAN_INFO_PVID;
1297 EXPORT_SYMBOL_GPL(br_vlan_get_info);
/* True if @dev is an 8021q vlan device with bridge-binding enabled. */
1299 static int br_vlan_is_bind_vlan_dev(const struct net_device *dev)
1301 return is_vlan_dev(dev) &&
1302 !!(vlan_dev_priv(dev)->flags & VLAN_FLAG_BRIDGE_BINDING);
/* netdev_walk callback form of the check above. */
1305 static int br_vlan_is_bind_vlan_dev_fn(struct net_device *dev,
1306 __always_unused void *data)
1308 return br_vlan_is_bind_vlan_dev(dev);
/* True if any upper device of @dev is a bridge-binding vlan device. */
1311 static bool br_vlan_has_upper_bind_vlan_dev(struct net_device *dev)
1316 found = netdev_walk_all_upper_dev_rcu(dev, br_vlan_is_bind_vlan_dev_fn,
/* Walk context for finding the upper vlan device matching a vid. */
1323 struct br_vlan_bind_walk_data {
1325 struct net_device *result;
/* netdev_walk callback: record the bridge-binding vlan device whose
 * vlan_id matches the searched vid.
 */
1328 static int br_vlan_match_bind_vlan_dev_fn(struct net_device *dev,
1331 struct br_vlan_bind_walk_data *data = data_in;
1334 if (br_vlan_is_bind_vlan_dev(dev) &&
1335 vlan_dev_priv(dev)->vlan_id == data->vid) {
/* Find the bridge-binding vlan device stacked on @dev for @vid. */
1343 static struct net_device *
1344 br_vlan_get_upper_bind_vlan_dev(struct net_device *dev, u16 vid)
1346 struct br_vlan_bind_walk_data data = {
1351 netdev_walk_all_upper_dev_rcu(dev, br_vlan_match_bind_vlan_dev_fn,
/* Device counts as up when administratively up with an operative link. */
1358 static bool br_vlan_is_dev_up(const struct net_device *dev)
1360 return !!(dev->flags & IFF_UP) && netif_oper_up(dev);
/* Recompute the carrier state of a bridge-binding vlan device: carrier
 * on only if the bridge has carrier and at least one member port that
 * both carries the vlan and is up.
 */
1363 static void br_vlan_set_vlan_dev_state(const struct net_bridge *br,
1364 struct net_device *vlan_dev)
1366 u16 vid = vlan_dev_priv(vlan_dev)->vlan_id;
1367 struct net_bridge_vlan_group *vg;
1368 struct net_bridge_port *p;
1369 bool has_carrier = false;
1371 if (!netif_carrier_ok(br->dev)) {
1372 netif_carrier_off(vlan_dev);
1376 list_for_each_entry(p, &br->port_list, list) {
1377 vg = nbp_vlan_group(p);
1378 if (br_vlan_find(vg, vid) && br_vlan_is_dev_up(p->dev)) {
1385 netif_carrier_on(vlan_dev);
1387 netif_carrier_off(vlan_dev);
/* Re-evaluate carrier for every bridge-binding vlan device that maps
 * to one of port @p's vlans (called when the port's state changes).
 */
1390 static void br_vlan_set_all_vlan_dev_state(struct net_bridge_port *p)
1392 struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
1393 struct net_bridge_vlan *vlan;
1394 struct net_device *vlan_dev;
1396 list_for_each_entry(vlan, &vg->vlan_list, vlist) {
1397 vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev,
1400 if (br_vlan_is_dev_up(p->dev)) {
1401 if (netif_carrier_ok(p->br->dev))
1402 netif_carrier_on(vlan_dev)
1404 br_vlan_set_vlan_dev_state(p->br, vlan_dev);
/* React to a vlan device being stacked on or removed from the bridge:
 * on link, sync its carrier and enable the bridge-binding option; on
 * unlink, keep the option only if other binding uppers remain.
 */
1410 static void br_vlan_upper_change(struct net_device *dev,
1411 struct net_device *upper_dev,
1414 struct net_bridge *br = netdev_priv(dev);
1416 if (!br_vlan_is_bind_vlan_dev(upper_dev))
1420 br_vlan_set_vlan_dev_state(br, upper_dev);
1421 br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING, true);
1423 br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING,
1424 br_vlan_has_upper_bind_vlan_dev(dev));
/* Walk context carrying the bridge for the link-state walk below. */
1428 struct br_vlan_link_state_walk_data {
1429 struct net_bridge *br;
/* netdev_walk callback: refresh carrier of each binding vlan device. */
1432 static int br_vlan_link_state_change_fn(struct net_device *vlan_dev,
1435 struct br_vlan_link_state_walk_data *data = data_in;
1437 if (br_vlan_is_bind_vlan_dev(vlan_dev))
1438 br_vlan_set_vlan_dev_state(data->br, vlan_dev);
/* Propagate a bridge link-state change to all upper binding vlan
 * devices via an RCU upper-device walk.
 */
1443 static void br_vlan_link_state_change(struct net_device *dev,
1444 struct net_bridge *br)
1446 struct br_vlan_link_state_walk_data data = {
1451 netdev_walk_all_upper_dev_rcu(dev, br_vlan_link_state_change_fn,
1456 /* Must be protected by RTNL. */
/* Refresh the carrier of the binding vlan device for one (port, vid)
 * pair; no-op unless bridge binding is active on the bridge.
 */
1457 static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid)
1459 struct net_device *vlan_dev;
1461 if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
1464 vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev, vid);
1466 br_vlan_set_vlan_dev_state(p->br, vlan_dev);
1469 /* Must be protected by RTNL. */
/* Bridge netdev notifier hook: create/remove the default pvid vlan on
 * register/unregister, track vlan uppers, and push link-state changes
 * to binding vlan devices.
 */
1470 int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
1472 struct netdev_notifier_changeupper_info *info;
1473 struct net_bridge *br = netdev_priv(dev);
1478 case NETDEV_REGISTER:
1479 ret = br_vlan_add(br, br->default_pvid,
1480 BRIDGE_VLAN_INFO_PVID |
1481 BRIDGE_VLAN_INFO_UNTAGGED |
1482 BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
1484 case NETDEV_UNREGISTER:
1485 br_vlan_delete(br, br->default_pvid);
1487 case NETDEV_CHANGEUPPER:
1489 br_vlan_upper_change(dev, info->upper_dev, info->linking);
/* Remaining events only matter with bridge binding active. */
1494 if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING))
1496 br_vlan_link_state_change(dev, br);
1503 /* Must be protected by RTNL. */
/* Port event hook: with bridge binding active, port up/down changes
 * re-evaluate all affected binding vlan devices.
 */
1504 void br_vlan_port_event(struct net_bridge_port *p, unsigned long event)
1506 if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
1513 br_vlan_set_all_vlan_dev_state(p);