GNU Linux-libre 5.4.257-gnu1
net/bridge/br_vlan.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/kernel.h>
3 #include <linux/netdevice.h>
4 #include <linux/rtnetlink.h>
5 #include <linux/slab.h>
6 #include <net/switchdev.h>
7
8 #include "br_private.h"
9 #include "br_private_tunnel.h"
10
11 static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid);
12
13 static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
14                               const void *ptr)
15 {
16         const struct net_bridge_vlan *vle = ptr;
17         u16 vid = *(u16 *)arg->key;
18
19         return vle->vid != vid;
20 }
21
22 static const struct rhashtable_params br_vlan_rht_params = {
23         .head_offset = offsetof(struct net_bridge_vlan, vnode),
24         .key_offset = offsetof(struct net_bridge_vlan, vid),
25         .key_len = sizeof(u16),
26         .nelem_hint = 3,
27         .max_size = VLAN_N_VID,
28         .obj_cmpfn = br_vlan_cmp,
29         .automatic_shrinking = true,
30 };
31
32 static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
33 {
34         return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
35 }
36
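/* vg->pvid is read locklessly in the fast path via br_get_pvid(); the
 * smp_wmb() in the helpers below is intended to pair with the smp_rmb()
 * on that read side, so a new pvid only becomes visible after the writes
 * that preceded it.
 */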
37 static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
38 {
39         if (vg->pvid == vid)
40                 return false;
41
42         smp_wmb();
43         vg->pvid = vid;
44
45         return true;
46 }
47
48 static bool __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
49 {
50         if (vg->pvid != vid)
51                 return false;
52
53         smp_wmb();
54         vg->pvid = 0;
55
56         return true;
57 }
58
59 /* return true if anything changed, false otherwise */
60 static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
61 {
62         struct net_bridge_vlan_group *vg;
63         u16 old_flags = v->flags;
64         bool ret;
65
66         if (br_vlan_is_master(v))
67                 vg = br_vlan_group(v->br);
68         else
69                 vg = nbp_vlan_group(v->port);
70
71         if (flags & BRIDGE_VLAN_INFO_PVID)
72                 ret = __vlan_add_pvid(vg, v->vid);
73         else
74                 ret = __vlan_delete_pvid(vg, v->vid);
75
76         if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
77                 v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
78         else
79                 v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
80
81         return ret || !!(old_flags ^ v->flags);
82 }
83
84 static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
85                           struct net_bridge_vlan *v, u16 flags,
86                           struct netlink_ext_ack *extack)
87 {
88         int err;
89
90         /* Try the switchdev op first. In case it is not supported, fall
91          * back to 8021q add.
92          */
93         err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
94         if (err == -EOPNOTSUPP)
95                 return vlan_vid_add(dev, br->vlan_proto, v->vid);
96         v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV;
97         return err;
98 }
99
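/* Keep vg->vlan_list sorted by vid: walk the list from the tail and insert
 * the new entry after the first vlan whose vid is smaller than or equal to
 * the new one (or at the head if none is).
 */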
100 static void __vlan_add_list(struct net_bridge_vlan *v)
101 {
102         struct net_bridge_vlan_group *vg;
103         struct list_head *headp, *hpos;
104         struct net_bridge_vlan *vent;
105
106         if (br_vlan_is_master(v))
107                 vg = br_vlan_group(v->br);
108         else
109                 vg = nbp_vlan_group(v->port);
110
111         headp = &vg->vlan_list;
112         list_for_each_prev(hpos, headp) {
113                 vent = list_entry(hpos, struct net_bridge_vlan, vlist);
114                 if (v->vid < vent->vid)
115                         continue;
116                 else
117                         break;
118         }
119         list_add_rcu(&v->vlist, hpos);
120 }
121
122 static void __vlan_del_list(struct net_bridge_vlan *v)
123 {
124         list_del_rcu(&v->vlist);
125 }
126
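/* Undo whichever backend __vlan_vid_add() ended up using: the plain 8021q
 * filter entry is only removed when the vlan was not installed through
 * switchdev, and a switchdev -EOPNOTSUPP is treated as success since there
 * was nothing to delete there.
 */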
127 static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
128                           const struct net_bridge_vlan *v)
129 {
130         int err;
131
132         /* Try the switchdev op first. In case it is not supported, fall
133          * back to 8021q del.
134          */
135         err = br_switchdev_port_vlan_del(dev, v->vid);
136         if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV))
137                 vlan_vid_del(dev, br->vlan_proto, v->vid);
138         return err == -EOPNOTSUPP ? 0 : err;
139 }
140
141 /* Returns a master vlan, if it didn't exist it gets created. In all cases
142  * a reference is taken to the master vlan before returning.
143  */
144 static struct net_bridge_vlan *
145 br_vlan_get_master(struct net_bridge *br, u16 vid,
146                    struct netlink_ext_ack *extack)
147 {
148         struct net_bridge_vlan_group *vg;
149         struct net_bridge_vlan *masterv;
150
151         vg = br_vlan_group(br);
152         masterv = br_vlan_find(vg, vid);
153         if (!masterv) {
154                 bool changed;
155
156                 /* missing global ctx, create it now */
157                 if (br_vlan_add(br, vid, 0, &changed, extack))
158                         return NULL;
159                 masterv = br_vlan_find(vg, vid);
160                 if (WARN_ON(!masterv))
161                         return NULL;
162                 refcount_set(&masterv->refcnt, 1);
163                 return masterv;
164         }
165         refcount_inc(&masterv->refcnt);
166
167         return masterv;
168 }
169
170 static void br_master_vlan_rcu_free(struct rcu_head *rcu)
171 {
172         struct net_bridge_vlan *v;
173
174         v = container_of(rcu, struct net_bridge_vlan, rcu);
175         WARN_ON(!br_vlan_is_master(v));
176         free_percpu(v->stats);
177         v->stats = NULL;
178         kfree(v);
179 }
180
181 static void br_vlan_put_master(struct net_bridge_vlan *masterv)
182 {
183         struct net_bridge_vlan_group *vg;
184
185         if (!br_vlan_is_master(masterv))
186                 return;
187
188         vg = br_vlan_group(masterv->br);
189         if (refcount_dec_and_test(&masterv->refcnt)) {
190                 rhashtable_remove_fast(&vg->vlan_hash,
191                                        &masterv->vnode, br_vlan_rht_params);
192                 __vlan_del_list(masterv);
193                 call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
194         }
195 }
196
197 static void nbp_vlan_rcu_free(struct rcu_head *rcu)
198 {
199         struct net_bridge_vlan *v;
200
201         v = container_of(rcu, struct net_bridge_vlan, rcu);
202         WARN_ON(br_vlan_is_master(v));
203         /* if we had per-port stats configured then free them here */
204         if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
205                 free_percpu(v->stats);
206         v->stats = NULL;
207         kfree(v);
208 }
209
210 /* This is the shared VLAN add function which works for both ports and bridge
211  * devices. There are four possible calls to this function in terms of the
212  * vlan entry type:
213  * 1. vlan is being added on a port (no master flags, global entry exists)
214  * 2. vlan is being added on a bridge (both master and brentry flags)
215  * 3. vlan is being added on a port, but a global entry didn't exist which
216  *    is being created right now (master flag set, brentry flag unset), the
217  *    global entry is used for global per-vlan features, but not for filtering
218  * 4. same as 3 but with both master and brentry flags set so the entry
219  *    will be used for filtering in both the port and the bridge
220  */
221 static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
222                       struct netlink_ext_ack *extack)
223 {
224         struct net_bridge_vlan *masterv = NULL;
225         struct net_bridge_port *p = NULL;
226         struct net_bridge_vlan_group *vg;
227         struct net_device *dev;
228         struct net_bridge *br;
229         int err;
230
231         if (br_vlan_is_master(v)) {
232                 br = v->br;
233                 dev = br->dev;
234                 vg = br_vlan_group(br);
235         } else {
236                 p = v->port;
237                 br = p->br;
238                 dev = p->dev;
239                 vg = nbp_vlan_group(p);
240         }
241
242         if (p) {
243                 /* Add VLAN to the device filter if it is supported.
244                  * This ensures tagged traffic enters the bridge when
245                  * promiscuous mode is disabled by br_manage_promisc().
246                  */
247                 err = __vlan_vid_add(dev, br, v, flags, extack);
248                 if (err)
249                         goto out;
250
251                 /* need to work on the master vlan too */
252                 if (flags & BRIDGE_VLAN_INFO_MASTER) {
253                         bool changed;
254
255                         err = br_vlan_add(br, v->vid,
256                                           flags | BRIDGE_VLAN_INFO_BRENTRY,
257                                           &changed, extack);
258                         if (err)
259                                 goto out_filt;
260                 }
261
262                 masterv = br_vlan_get_master(br, v->vid, extack);
263                 if (!masterv) {
264                         err = -ENOMEM;
265                         goto out_filt;
266                 }
267                 v->brvlan = masterv;
268                 if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
269                         v->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
270                         if (!v->stats) {
271                                 err = -ENOMEM;
272                                 goto out_filt;
273                         }
274                         v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
275                 } else {
276                         v->stats = masterv->stats;
277                 }
278         } else {
279                 err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
280                 if (err && err != -EOPNOTSUPP)
281                         goto out;
282         }
283
284         /* Add the dev mac and count the vlan only if it's usable */
285         if (br_vlan_should_use(v)) {
286                 err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
287                 if (err) {
288                         br_err(br, "failed to insert local address into bridge forwarding table\n");
289                         goto out_filt;
290                 }
291                 vg->num_vlans++;
292         }
293
294         err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
295                                             br_vlan_rht_params);
296         if (err)
297                 goto out_fdb_insert;
298
299         __vlan_add_list(v);
300         __vlan_add_flags(v, flags);
301
302         if (p)
303                 nbp_vlan_set_vlan_dev_state(p, v->vid);
304 out:
305         return err;
306
307 out_fdb_insert:
308         if (br_vlan_should_use(v)) {
309                 br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
310                 vg->num_vlans--;
311         }
312
313 out_filt:
314         if (p) {
315                 __vlan_vid_del(dev, br, v);
316                 if (masterv) {
317                         if (v->stats && masterv->stats != v->stats)
318                                 free_percpu(v->stats);
319                         v->stats = NULL;
320
321                         br_vlan_put_master(masterv);
322                         v->brvlan = NULL;
323                 }
324         } else {
325                 br_switchdev_port_vlan_del(dev, v->vid);
326         }
327
328         goto out;
329 }
330
331 static int __vlan_del(struct net_bridge_vlan *v)
332 {
333         struct net_bridge_vlan *masterv = v;
334         struct net_bridge_vlan_group *vg;
335         struct net_bridge_port *p = NULL;
336         int err = 0;
337
338         if (br_vlan_is_master(v)) {
339                 vg = br_vlan_group(v->br);
340         } else {
341                 p = v->port;
342                 vg = nbp_vlan_group(v->port);
343                 masterv = v->brvlan;
344         }
345
346         __vlan_delete_pvid(vg, v->vid);
347         if (p) {
348                 err = __vlan_vid_del(p->dev, p->br, v);
349                 if (err)
350                         goto out;
351         } else {
352                 err = br_switchdev_port_vlan_del(v->br->dev, v->vid);
353                 if (err && err != -EOPNOTSUPP)
354                         goto out;
355                 err = 0;
356         }
357
358         if (br_vlan_should_use(v)) {
359                 v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
360                 vg->num_vlans--;
361         }
362
363         if (masterv != v) {
364                 vlan_tunnel_info_del(vg, v);
365                 rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
366                                        br_vlan_rht_params);
367                 __vlan_del_list(v);
368                 nbp_vlan_set_vlan_dev_state(p, v->vid);
369                 call_rcu(&v->rcu, nbp_vlan_rcu_free);
370         }
371
372         br_vlan_put_master(masterv);
373 out:
374         return err;
375 }
376
377 static void __vlan_group_free(struct net_bridge_vlan_group *vg)
378 {
379         WARN_ON(!list_empty(&vg->vlan_list));
380         rhashtable_destroy(&vg->vlan_hash);
381         vlan_tunnel_deinit(vg);
382         kfree(vg);
383 }
384
385 static void __vlan_flush(struct net_bridge_vlan_group *vg)
386 {
387         struct net_bridge_vlan *vlan, *tmp;
388
389         __vlan_delete_pvid(vg, vg->pvid);
390         list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
391                 __vlan_del(vlan);
392 }
393
394 struct sk_buff *br_handle_vlan(struct net_bridge *br,
395                                const struct net_bridge_port *p,
396                                struct net_bridge_vlan_group *vg,
397                                struct sk_buff *skb)
398 {
399         struct br_vlan_stats *stats;
400         struct net_bridge_vlan *v;
401         u16 vid;
402
403         /* If this packet was not filtered at input, let it pass */
404         if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
405                 goto out;
406
407         /* At this point, we know that the frame was filtered and contains
408          * a valid vlan id.  If the vlan id has untagged flag set,
409          * send untagged; otherwise, send tagged.
410          */
411         br_vlan_get_tag(skb, &vid);
412         v = br_vlan_find(vg, vid);
413         /* Vlan entry must be configured at this point.  The
414          * only exception is when the bridge is set in promisc mode and the
415          * packet is destined for the bridge device.  In this case
416          * pass the packet as is.
417          */
418         if (!v || !br_vlan_should_use(v)) {
419                 if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
420                         goto out;
421                 } else {
422                         kfree_skb(skb);
423                         return NULL;
424                 }
425         }
426         if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
427                 stats = this_cpu_ptr(v->stats);
428                 u64_stats_update_begin(&stats->syncp);
429                 stats->tx_bytes += skb->len;
430                 stats->tx_packets++;
431                 u64_stats_update_end(&stats->syncp);
432         }
433
434         if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
435                 __vlan_hwaccel_clear_tag(skb);
436
437         if (p && (p->flags & BR_VLAN_TUNNEL) &&
438             br_handle_egress_vlan_tunnel(skb, v)) {
439                 kfree_skb(skb);
440                 return NULL;
441         }
442 out:
443         return skb;
444 }
445
446 /* Called under RCU */
447 static bool __allowed_ingress(const struct net_bridge *br,
448                               struct net_bridge_vlan_group *vg,
449                               struct sk_buff *skb, u16 *vid)
450 {
451         struct br_vlan_stats *stats;
452         struct net_bridge_vlan *v;
453         bool tagged;
454
455         BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
456         /* If vlan tx offload is disabled on the bridge device and the frame
457          * was sent from a vlan device on the bridge device, it does not have
458          * an HW accelerated vlan tag.
459          */
460         if (unlikely(!skb_vlan_tag_present(skb) &&
461                      skb->protocol == br->vlan_proto)) {
462                 skb = skb_vlan_untag(skb);
463                 if (unlikely(!skb))
464                         return false;
465         }
466
467         if (!br_vlan_get_tag(skb, vid)) {
468                 /* Tagged frame */
469                 if (skb->vlan_proto != br->vlan_proto) {
470                         /* Protocol-mismatch, empty out vlan_tci for new tag */
471                         skb_push(skb, ETH_HLEN);
472                         skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
473                                                         skb_vlan_tag_get(skb));
474                         if (unlikely(!skb))
475                                 return false;
476
477                         skb_pull(skb, ETH_HLEN);
478                         skb_reset_mac_len(skb);
479                         *vid = 0;
480                         tagged = false;
481                 } else {
482                         tagged = true;
483                 }
484         } else {
485                 /* Untagged frame */
486                 tagged = false;
487         }
488
489         if (!*vid) {
490                 u16 pvid = br_get_pvid(vg);
491
492                 /* Frame had a tag with VID 0 or did not have a tag.
493                  * See if pvid is set on this port.  That tells us which
494                  * vlan untagged or priority-tagged traffic belongs to.
495                  */
496                 if (!pvid)
497                         goto drop;
498
499                 /* PVID is set on this port.  Any untagged or priority-tagged
500                  * ingress frame is considered to belong to this vlan.
501                  */
502                 *vid = pvid;
503                 if (likely(!tagged))
504                         /* Untagged Frame. */
505                         __vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
506                 else
507                         /* Priority-tagged Frame.
508                          * At this point, we know that skb->vlan_tci VID
509                          * field was 0.
510                          * We update only VID field and preserve PCP field.
511                          */
512                         skb->vlan_tci |= pvid;
513
514                 /* if stats are disabled we can avoid the lookup */
515                 if (!br_opt_get(br, BROPT_VLAN_STATS_ENABLED))
516                         return true;
517         }
518         v = br_vlan_find(vg, *vid);
519         if (!v || !br_vlan_should_use(v))
520                 goto drop;
521
522         if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
523                 stats = this_cpu_ptr(v->stats);
524                 u64_stats_update_begin(&stats->syncp);
525                 stats->rx_bytes += skb->len;
526                 stats->rx_packets++;
527                 u64_stats_update_end(&stats->syncp);
528         }
529
530         return true;
531
532 drop:
533         kfree_skb(skb);
534         return false;
535 }
536
537 bool br_allowed_ingress(const struct net_bridge *br,
538                         struct net_bridge_vlan_group *vg, struct sk_buff *skb,
539                         u16 *vid)
540 {
541         /* If VLAN filtering is disabled on the bridge, all packets are
542          * permitted.
543          */
544         if (!br_opt_get(br, BROPT_VLAN_ENABLED)) {
545                 BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
546                 return true;
547         }
548
549         return __allowed_ingress(br, vg, skb, vid);
550 }
551
552 /* Called under RCU. */
553 bool br_allowed_egress(struct net_bridge_vlan_group *vg,
554                        const struct sk_buff *skb)
555 {
556         const struct net_bridge_vlan *v;
557         u16 vid;
558
559         /* If this packet was not filtered at input, let it pass */
560         if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
561                 return true;
562
563         br_vlan_get_tag(skb, &vid);
564         v = br_vlan_find(vg, vid);
565         if (v && br_vlan_should_use(v))
566                 return true;
567
568         return false;
569 }
570
571 /* Called under RCU */
572 bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
573 {
574         struct net_bridge_vlan_group *vg;
575         struct net_bridge *br = p->br;
576
577         /* If filtering was disabled at input, let it pass. */
578         if (!br_opt_get(br, BROPT_VLAN_ENABLED))
579                 return true;
580
581         vg = nbp_vlan_group_rcu(p);
582         if (!vg || !vg->num_vlans)
583                 return false;
584
585         if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
586                 *vid = 0;
587
588         if (!*vid) {
589                 *vid = br_get_pvid(vg);
590                 if (!*vid)
591                         return false;
592
593                 return true;
594         }
595
596         if (br_vlan_find(vg, *vid))
597                 return true;
598
599         return false;
600 }
601
602 static int br_vlan_add_existing(struct net_bridge *br,
603                                 struct net_bridge_vlan_group *vg,
604                                 struct net_bridge_vlan *vlan,
605                                 u16 flags, bool *changed,
606                                 struct netlink_ext_ack *extack)
607 {
608         int err;
609
610         err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags, extack);
611         if (err && err != -EOPNOTSUPP)
612                 return err;
613
614         if (!br_vlan_is_brentry(vlan)) {
615                 /* Trying to change flags of non-existent bridge vlan */
616                 if (!(flags & BRIDGE_VLAN_INFO_BRENTRY)) {
617                         err = -EINVAL;
618                         goto err_flags;
619                 }
620                 /* It was only kept for port vlans, now make it real */
621                 err = br_fdb_insert(br, NULL, br->dev->dev_addr,
622                                     vlan->vid);
623                 if (err) {
624                         br_err(br, "failed to insert local address into bridge forwarding table\n");
625                         goto err_fdb_insert;
626                 }
627
628                 refcount_inc(&vlan->refcnt);
629                 vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
630                 vg->num_vlans++;
631                 *changed = true;
632         }
633
634         if (__vlan_add_flags(vlan, flags))
635                 *changed = true;
636
637         return 0;
638
639 err_fdb_insert:
640 err_flags:
641         br_switchdev_port_vlan_del(br->dev, vlan->vid);
642         return err;
643 }
644
645 /* Must be protected by RTNL.
646  * Must be called with vid in range from 1 to 4094 inclusive.
647  * changed must be true only if the vlan was created or updated
648  */
649 int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed,
650                 struct netlink_ext_ack *extack)
651 {
652         struct net_bridge_vlan_group *vg;
653         struct net_bridge_vlan *vlan;
654         int ret;
655
656         ASSERT_RTNL();
657
658         *changed = false;
659         vg = br_vlan_group(br);
660         vlan = br_vlan_find(vg, vid);
661         if (vlan)
662                 return br_vlan_add_existing(br, vg, vlan, flags, changed,
663                                             extack);
664
665         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
666         if (!vlan)
667                 return -ENOMEM;
668
669         vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
670         if (!vlan->stats) {
671                 kfree(vlan);
672                 return -ENOMEM;
673         }
674         vlan->vid = vid;
675         vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
676         vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
677         vlan->br = br;
678         if (flags & BRIDGE_VLAN_INFO_BRENTRY)
679                 refcount_set(&vlan->refcnt, 1);
680         ret = __vlan_add(vlan, flags, extack);
681         if (ret) {
682                 free_percpu(vlan->stats);
683                 kfree(vlan);
684         } else {
685                 *changed = true;
686         }
687
688         return ret;
689 }
690
691 /* Must be protected by RTNL.
692  * Must be called with vid in range from 1 to 4094 inclusive.
693  */
694 int br_vlan_delete(struct net_bridge *br, u16 vid)
695 {
696         struct net_bridge_vlan_group *vg;
697         struct net_bridge_vlan *v;
698
699         ASSERT_RTNL();
700
701         vg = br_vlan_group(br);
702         v = br_vlan_find(vg, vid);
703         if (!v || !br_vlan_is_brentry(v))
704                 return -ENOENT;
705
706         br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
707         br_fdb_delete_by_port(br, NULL, vid, 0);
708
709         vlan_tunnel_info_del(vg, v);
710
711         return __vlan_del(v);
712 }
713
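/* Tear down the bridge's vlan group: delete every vlan, unpublish the group
 * pointer and wait out an RCU grace period so lockless readers are done
 * with it before the group itself is freed.
 */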
714 void br_vlan_flush(struct net_bridge *br)
715 {
716         struct net_bridge_vlan_group *vg;
717
718         ASSERT_RTNL();
719
720         vg = br_vlan_group(br);
721         __vlan_flush(vg);
722         RCU_INIT_POINTER(br->vlgrp, NULL);
723         synchronize_rcu();
724         __vlan_group_free(vg);
725 }
726
727 struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
728 {
729         if (!vg)
730                 return NULL;
731
732         return br_vlan_lookup(&vg->vlan_hash, vid);
733 }
734
735 /* Must be protected by RTNL. */
736 static void recalculate_group_addr(struct net_bridge *br)
737 {
738         if (br_opt_get(br, BROPT_GROUP_ADDR_SET))
739                 return;
740
741         spin_lock_bh(&br->lock);
742         if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
743             br->vlan_proto == htons(ETH_P_8021Q)) {
744                 /* Bridge Group Address */
745                 br->group_addr[5] = 0x00;
746         } else { /* vlan_enabled && ETH_P_8021AD */
747                 /* Provider Bridge Group Address */
748                 br->group_addr[5] = 0x08;
749         }
750         spin_unlock_bh(&br->lock);
751 }
752
753 /* Must be protected by RTNL. */
754 void br_recalculate_fwd_mask(struct net_bridge *br)
755 {
756         if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
757             br->vlan_proto == htons(ETH_P_8021Q))
758                 br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
759         else /* vlan_enabled && ETH_P_8021AD */
760                 br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
761                                               ~(1u << br->group_addr[5]);
762 }
763
764 int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
765 {
766         struct switchdev_attr attr = {
767                 .orig_dev = br->dev,
768                 .id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
769                 .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
770                 .u.vlan_filtering = val,
771         };
772         int err;
773
774         if (br_opt_get(br, BROPT_VLAN_ENABLED) == !!val)
775                 return 0;
776
777         err = switchdev_port_attr_set(br->dev, &attr);
778         if (err && err != -EOPNOTSUPP)
779                 return err;
780
781         br_opt_toggle(br, BROPT_VLAN_ENABLED, !!val);
782         br_manage_promisc(br);
783         recalculate_group_addr(br);
784         br_recalculate_fwd_mask(br);
785
786         return 0;
787 }
788
789 int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
790 {
791         return __br_vlan_filter_toggle(br, val);
792 }
793
794 bool br_vlan_enabled(const struct net_device *dev)
795 {
796         struct net_bridge *br = netdev_priv(dev);
797
798         return br_opt_get(br, BROPT_VLAN_ENABLED);
799 }
800 EXPORT_SYMBOL_GPL(br_vlan_enabled);
801
802 int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto)
803 {
804         struct net_bridge *br = netdev_priv(dev);
805
806         *p_proto = ntohs(br->vlan_proto);
807
808         return 0;
809 }
810 EXPORT_SYMBOL_GPL(br_vlan_get_proto);
811
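/* Switching the filtering protocol (802.1Q <-> 802.1ad) is done in two
 * passes: every existing port vlan is first added to the ports' device
 * filters under the new protocol, and only then is br->vlan_proto flipped
 * and the old-protocol filter entries removed.  On failure the partially
 * added new-protocol entries are unwound.
 */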
812 int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
813 {
814         int err = 0;
815         struct net_bridge_port *p;
816         struct net_bridge_vlan *vlan;
817         struct net_bridge_vlan_group *vg;
818         __be16 oldproto;
819
820         if (br->vlan_proto == proto)
821                 return 0;
822
823         /* Add VLANs for the new proto to the device filter. */
824         list_for_each_entry(p, &br->port_list, list) {
825                 vg = nbp_vlan_group(p);
826                 list_for_each_entry(vlan, &vg->vlan_list, vlist) {
827                         err = vlan_vid_add(p->dev, proto, vlan->vid);
828                         if (err)
829                                 goto err_filt;
830                 }
831         }
832
833         oldproto = br->vlan_proto;
834         br->vlan_proto = proto;
835
836         recalculate_group_addr(br);
837         br_recalculate_fwd_mask(br);
838
839         /* Delete VLANs for the old proto from the device filter. */
840         list_for_each_entry(p, &br->port_list, list) {
841                 vg = nbp_vlan_group(p);
842                 list_for_each_entry(vlan, &vg->vlan_list, vlist)
843                         vlan_vid_del(p->dev, oldproto, vlan->vid);
844         }
845
846         return 0;
847
848 err_filt:
849         list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
850                 vlan_vid_del(p->dev, proto, vlan->vid);
851
852         list_for_each_entry_continue_reverse(p, &br->port_list, list) {
853                 vg = nbp_vlan_group(p);
854                 list_for_each_entry(vlan, &vg->vlan_list, vlist)
855                         vlan_vid_del(p->dev, proto, vlan->vid);
856         }
857
858         return err;
859 }
860
861 int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
862 {
863         if (val != ETH_P_8021Q && val != ETH_P_8021AD)
864                 return -EPROTONOSUPPORT;
865
866         return __br_vlan_set_proto(br, htons(val));
867 }
868
869 int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
870 {
871         switch (val) {
872         case 0:
873         case 1:
874                 br_opt_toggle(br, BROPT_VLAN_STATS_ENABLED, !!val);
875                 break;
876         default:
877                 return -EINVAL;
878         }
879
880         return 0;
881 }
882
883 int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val)
884 {
885         struct net_bridge_port *p;
886
887         /* only allow changing the option if there are no port vlans configured */
888         list_for_each_entry(p, &br->port_list, list) {
889                 struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
890
891                 if (vg->num_vlans)
892                         return -EBUSY;
893         }
894
895         switch (val) {
896         case 0:
897         case 1:
898                 br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val);
899                 break;
900         default:
901                 return -EINVAL;
902         }
903
904         return 0;
905 }
906
907 static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
908 {
909         struct net_bridge_vlan *v;
910
911         if (vid != vg->pvid)
912                 return false;
913
914         v = br_vlan_lookup(&vg->vlan_hash, vid);
915         if (v && br_vlan_should_use(v) &&
916             (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
917                 return true;
918
919         return false;
920 }
921
922 static void br_vlan_disable_default_pvid(struct net_bridge *br)
923 {
924         struct net_bridge_port *p;
925         u16 pvid = br->default_pvid;
926
927         /* Disable default_pvid on all ports where it is still
928          * configured.
929          */
930         if (vlan_default_pvid(br_vlan_group(br), pvid))
931                 br_vlan_delete(br, pvid);
932
933         list_for_each_entry(p, &br->port_list, list) {
934                 if (vlan_default_pvid(nbp_vlan_group(p), pvid))
935                         nbp_vlan_delete(p, pvid);
936         }
937
938         br->default_pvid = 0;
939 }
940
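/* Move default_pvid to @pvid on the bridge and on every port that still
 * uses the old default as an untagged pvid.  The @changed bitmap records
 * what was actually modified (bit 0 for the bridge itself, bit port_no for
 * each port) so the error path can roll back only those entries.
 */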
941 int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
942                                struct netlink_ext_ack *extack)
943 {
944         const struct net_bridge_vlan *pvent;
945         struct net_bridge_vlan_group *vg;
946         struct net_bridge_port *p;
947         unsigned long *changed;
948         bool vlchange;
949         u16 old_pvid;
950         int err = 0;
951
952         if (!pvid) {
953                 br_vlan_disable_default_pvid(br);
954                 return 0;
955         }
956
957         changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
958         if (!changed)
959                 return -ENOMEM;
960
961         old_pvid = br->default_pvid;
962
963         /* Update default_pvid config only if we do not conflict with
964          * user configuration.
965          */
966         vg = br_vlan_group(br);
967         pvent = br_vlan_find(vg, pvid);
968         if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
969             (!pvent || !br_vlan_should_use(pvent))) {
970                 err = br_vlan_add(br, pvid,
971                                   BRIDGE_VLAN_INFO_PVID |
972                                   BRIDGE_VLAN_INFO_UNTAGGED |
973                                   BRIDGE_VLAN_INFO_BRENTRY,
974                                   &vlchange, extack);
975                 if (err)
976                         goto out;
977                 br_vlan_delete(br, old_pvid);
978                 set_bit(0, changed);
979         }
980
981         list_for_each_entry(p, &br->port_list, list) {
982                 /* Update default_pvid config only if we do not conflict with
983                  * user configuration.
984                  */
985                 vg = nbp_vlan_group(p);
986                 if ((old_pvid &&
987                      !vlan_default_pvid(vg, old_pvid)) ||
988                     br_vlan_find(vg, pvid))
989                         continue;
990
991                 err = nbp_vlan_add(p, pvid,
992                                    BRIDGE_VLAN_INFO_PVID |
993                                    BRIDGE_VLAN_INFO_UNTAGGED,
994                                    &vlchange, extack);
995                 if (err)
996                         goto err_port;
997                 nbp_vlan_delete(p, old_pvid);
998                 set_bit(p->port_no, changed);
999         }
1000
1001         br->default_pvid = pvid;
1002
1003 out:
1004         bitmap_free(changed);
1005         return err;
1006
1007 err_port:
1008         list_for_each_entry_continue_reverse(p, &br->port_list, list) {
1009                 if (!test_bit(p->port_no, changed))
1010                         continue;
1011
1012                 if (old_pvid)
1013                         nbp_vlan_add(p, old_pvid,
1014                                      BRIDGE_VLAN_INFO_PVID |
1015                                      BRIDGE_VLAN_INFO_UNTAGGED,
1016                                      &vlchange, NULL);
1017                 nbp_vlan_delete(p, pvid);
1018         }
1019
1020         if (test_bit(0, changed)) {
1021                 if (old_pvid)
1022                         br_vlan_add(br, old_pvid,
1023                                     BRIDGE_VLAN_INFO_PVID |
1024                                     BRIDGE_VLAN_INFO_UNTAGGED |
1025                                     BRIDGE_VLAN_INFO_BRENTRY,
1026                                     &vlchange, NULL);
1027                 br_vlan_delete(br, pvid);
1028         }
1029         goto out;
1030 }
1031
1032 int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
1033 {
1034         u16 pvid = val;
1035         int err = 0;
1036
1037         if (val >= VLAN_VID_MASK)
1038                 return -EINVAL;
1039
1040         if (pvid == br->default_pvid)
1041                 goto out;
1042
1043         /* Only allow default pvid change when filtering is disabled */
1044         if (br_opt_get(br, BROPT_VLAN_ENABLED)) {
1045                 pr_info_once("Please disable vlan filtering to change default_pvid\n");
1046                 err = -EPERM;
1047                 goto out;
1048         }
1049         err = __br_vlan_set_default_pvid(br, pvid, NULL);
1050 out:
1051         return err;
1052 }
1053
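/* Per-bridge vlan state: an empty vlan group, 802.1Q as the filtering
 * protocol and vlan 1 as the default pvid, which later gets installed on
 * the bridge itself (NETDEV_REGISTER in br_vlan_bridge_event()) and on
 * every port (nbp_vlan_init()).
 */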
1054 int br_vlan_init(struct net_bridge *br)
1055 {
1056         struct net_bridge_vlan_group *vg;
1057         int ret = -ENOMEM;
1058
1059         vg = kzalloc(sizeof(*vg), GFP_KERNEL);
1060         if (!vg)
1061                 goto out;
1062         ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
1063         if (ret)
1064                 goto err_rhtbl;
1065         ret = vlan_tunnel_init(vg);
1066         if (ret)
1067                 goto err_tunnel_init;
1068         INIT_LIST_HEAD(&vg->vlan_list);
1069         br->vlan_proto = htons(ETH_P_8021Q);
1070         br->default_pvid = 1;
1071         rcu_assign_pointer(br->vlgrp, vg);
1072
1073 out:
1074         return ret;
1075
1076 err_tunnel_init:
1077         rhashtable_destroy(&vg->vlan_hash);
1078 err_rhtbl:
1079         kfree(vg);
1080
1081         goto out;
1082 }
1083
1084 int nbp_vlan_init(struct net_bridge_port *p, struct netlink_ext_ack *extack)
1085 {
1086         struct switchdev_attr attr = {
1087                 .orig_dev = p->br->dev,
1088                 .id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
1089                 .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
1090                 .u.vlan_filtering = br_opt_get(p->br, BROPT_VLAN_ENABLED),
1091         };
1092         struct net_bridge_vlan_group *vg;
1093         int ret = -ENOMEM;
1094
1095         vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
1096         if (!vg)
1097                 goto out;
1098
1099         ret = switchdev_port_attr_set(p->dev, &attr);
1100         if (ret && ret != -EOPNOTSUPP)
1101                 goto err_vlan_enabled;
1102
1103         ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
1104         if (ret)
1105                 goto err_rhtbl;
1106         ret = vlan_tunnel_init(vg);
1107         if (ret)
1108                 goto err_tunnel_init;
1109         INIT_LIST_HEAD(&vg->vlan_list);
1110         rcu_assign_pointer(p->vlgrp, vg);
1111         if (p->br->default_pvid) {
1112                 bool changed;
1113
1114                 ret = nbp_vlan_add(p, p->br->default_pvid,
1115                                    BRIDGE_VLAN_INFO_PVID |
1116                                    BRIDGE_VLAN_INFO_UNTAGGED,
1117                                    &changed, extack);
1118                 if (ret)
1119                         goto err_vlan_add;
1120         }
1121 out:
1122         return ret;
1123
1124 err_vlan_add:
1125         RCU_INIT_POINTER(p->vlgrp, NULL);
1126         synchronize_rcu();
1127         vlan_tunnel_deinit(vg);
1128 err_tunnel_init:
1129         rhashtable_destroy(&vg->vlan_hash);
1130 err_rhtbl:
1131 err_vlan_enabled:
1132         kfree(vg);
1133
1134         goto out;
1135 }
1136
1137 /* Must be protected by RTNL.
1138  * Must be called with vid in range from 1 to 4094 inclusive.
1139  * changed must be true only if the vlan was created or updated
1140  */
1141 int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
1142                  bool *changed, struct netlink_ext_ack *extack)
1143 {
1144         struct net_bridge_vlan *vlan;
1145         int ret;
1146
1147         ASSERT_RTNL();
1148
1149         *changed = false;
1150         vlan = br_vlan_find(nbp_vlan_group(port), vid);
1151         if (vlan) {
1152                 /* Pass the flags to the hardware bridge */
1153                 ret = br_switchdev_port_vlan_add(port->dev, vid, flags, extack);
1154                 if (ret && ret != -EOPNOTSUPP)
1155                         return ret;
1156                 *changed = __vlan_add_flags(vlan, flags);
1157
1158                 return 0;
1159         }
1160
1161         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1162         if (!vlan)
1163                 return -ENOMEM;
1164
1165         vlan->vid = vid;
1166         vlan->port = port;
1167         ret = __vlan_add(vlan, flags, extack);
1168         if (ret)
1169                 kfree(vlan);
1170         else
1171                 *changed = true;
1172
1173         return ret;
1174 }
1175
1176 /* Must be protected by RTNL.
1177  * Must be called with vid in range from 1 to 4094 inclusive.
1178  */
1179 int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
1180 {
1181         struct net_bridge_vlan *v;
1182
1183         ASSERT_RTNL();
1184
1185         v = br_vlan_find(nbp_vlan_group(port), vid);
1186         if (!v)
1187                 return -ENOENT;
1188         br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
1189         br_fdb_delete_by_port(port->br, port, vid, 0);
1190
1191         return __vlan_del(v);
1192 }
1193
1194 void nbp_vlan_flush(struct net_bridge_port *port)
1195 {
1196         struct net_bridge_vlan_group *vg;
1197
1198         ASSERT_RTNL();
1199
1200         vg = nbp_vlan_group(port);
1201         __vlan_flush(vg);
1202         RCU_INIT_POINTER(port->vlgrp, NULL);
1203         synchronize_rcu();
1204         __vlan_group_free(vg);
1205 }
1206
1207 void br_vlan_get_stats(const struct net_bridge_vlan *v,
1208                        struct br_vlan_stats *stats)
1209 {
1210         int i;
1211
1212         memset(stats, 0, sizeof(*stats));
1213         for_each_possible_cpu(i) {
1214                 u64 rxpackets, rxbytes, txpackets, txbytes;
1215                 struct br_vlan_stats *cpu_stats;
1216                 unsigned int start;
1217
1218                 cpu_stats = per_cpu_ptr(v->stats, i);
1219                 do {
1220                         start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
1221                         rxpackets = cpu_stats->rx_packets;
1222                         rxbytes = cpu_stats->rx_bytes;
1223                         txbytes = cpu_stats->tx_bytes;
1224                         txpackets = cpu_stats->tx_packets;
1225                 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
1226
1227                 stats->rx_packets += rxpackets;
1228                 stats->rx_bytes += rxbytes;
1229                 stats->tx_bytes += txbytes;
1230                 stats->tx_packets += txpackets;
1231         }
1232 }
1233
1234 int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
1235 {
1236         struct net_bridge_vlan_group *vg;
1237         struct net_bridge_port *p;
1238
1239         ASSERT_RTNL();
1240         p = br_port_get_check_rtnl(dev);
1241         if (p)
1242                 vg = nbp_vlan_group(p);
1243         else if (netif_is_bridge_master(dev))
1244                 vg = br_vlan_group(netdev_priv(dev));
1245         else
1246                 return -EINVAL;
1247
1248         *p_pvid = br_get_pvid(vg);
1249         return 0;
1250 }
1251 EXPORT_SYMBOL_GPL(br_vlan_get_pvid);
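/* br_vlan_get_pvid() above must be called with rtnl held; the _rcu variant
 * below is for callers already inside an rcu_read_lock() section.  Both
 * report a pvid of 0 when none is configured.
 */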
1252
1253 int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid)
1254 {
1255         struct net_bridge_vlan_group *vg;
1256         struct net_bridge_port *p;
1257
1258         p = br_port_get_check_rcu(dev);
1259         if (p)
1260                 vg = nbp_vlan_group_rcu(p);
1261         else if (netif_is_bridge_master(dev))
1262                 vg = br_vlan_group_rcu(netdev_priv(dev));
1263         else
1264                 return -EINVAL;
1265
1266         *p_pvid = br_get_pvid(vg);
1267         return 0;
1268 }
1269 EXPORT_SYMBOL_GPL(br_vlan_get_pvid_rcu);
1270
1271 int br_vlan_get_info(const struct net_device *dev, u16 vid,
1272                      struct bridge_vlan_info *p_vinfo)
1273 {
1274         struct net_bridge_vlan_group *vg;
1275         struct net_bridge_vlan *v;
1276         struct net_bridge_port *p;
1277
1278         ASSERT_RTNL();
1279         p = br_port_get_check_rtnl(dev);
1280         if (p)
1281                 vg = nbp_vlan_group(p);
1282         else if (netif_is_bridge_master(dev))
1283                 vg = br_vlan_group(netdev_priv(dev));
1284         else
1285                 return -EINVAL;
1286
1287         v = br_vlan_find(vg, vid);
1288         if (!v)
1289                 return -ENOENT;
1290
1291         p_vinfo->vid = vid;
1292         p_vinfo->flags = v->flags;
1293         if (vid == br_get_pvid(vg))
1294                 p_vinfo->flags |= BRIDGE_VLAN_INFO_PVID;
1295         return 0;
1296 }
1297 EXPORT_SYMBOL_GPL(br_vlan_get_info);
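/* Illustrative sketch of a caller, e.g. a switchdev driver running under
 * rtnl, using br_vlan_get_info() to test whether a vid is configured as
 * egress-untagged on a bridge port.  "brport_dev", "vid" and
 * handle_untagged_vid() are hypothetical placeholders:
 *
 *	struct bridge_vlan_info vinfo;
 *
 *	if (!br_vlan_get_info(brport_dev, vid, &vinfo) &&
 *	    (vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED))
 *		handle_untagged_vid(brport_dev, vid);
 */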
1298
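/* vlan "bridge binding" support: a vlan device created on top of the bridge
 * with VLAN_FLAG_BRIDGE_BINDING has its carrier follow the state of the
 * bridge ports that carry its vlan id (and the bridge device itself).  The
 * helpers below locate such upper vlan devices and update their link state
 * when ports, carrier or vlan membership change.
 */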
1299 static int br_vlan_is_bind_vlan_dev(const struct net_device *dev)
1300 {
1301         return is_vlan_dev(dev) &&
1302                 !!(vlan_dev_priv(dev)->flags & VLAN_FLAG_BRIDGE_BINDING);
1303 }
1304
1305 static int br_vlan_is_bind_vlan_dev_fn(struct net_device *dev,
1306                                        __always_unused void *data)
1307 {
1308         return br_vlan_is_bind_vlan_dev(dev);
1309 }
1310
1311 static bool br_vlan_has_upper_bind_vlan_dev(struct net_device *dev)
1312 {
1313         int found;
1314
1315         rcu_read_lock();
1316         found = netdev_walk_all_upper_dev_rcu(dev, br_vlan_is_bind_vlan_dev_fn,
1317                                               NULL);
1318         rcu_read_unlock();
1319
1320         return !!found;
1321 }
1322
1323 struct br_vlan_bind_walk_data {
1324         u16 vid;
1325         struct net_device *result;
1326 };
1327
1328 static int br_vlan_match_bind_vlan_dev_fn(struct net_device *dev,
1329                                           void *data_in)
1330 {
1331         struct br_vlan_bind_walk_data *data = data_in;
1332         int found = 0;
1333
1334         if (br_vlan_is_bind_vlan_dev(dev) &&
1335             vlan_dev_priv(dev)->vlan_id == data->vid) {
1336                 data->result = dev;
1337                 found = 1;
1338         }
1339
1340         return found;
1341 }
1342
1343 static struct net_device *
1344 br_vlan_get_upper_bind_vlan_dev(struct net_device *dev, u16 vid)
1345 {
1346         struct br_vlan_bind_walk_data data = {
1347                 .vid = vid,
1348         };
1349
1350         rcu_read_lock();
1351         netdev_walk_all_upper_dev_rcu(dev, br_vlan_match_bind_vlan_dev_fn,
1352                                       &data);
1353         rcu_read_unlock();
1354
1355         return data.result;
1356 }
1357
1358 static bool br_vlan_is_dev_up(const struct net_device *dev)
1359 {
1360         return  !!(dev->flags & IFF_UP) && netif_oper_up(dev);
1361 }
1362
1363 static void br_vlan_set_vlan_dev_state(const struct net_bridge *br,
1364                                        struct net_device *vlan_dev)
1365 {
1366         u16 vid = vlan_dev_priv(vlan_dev)->vlan_id;
1367         struct net_bridge_vlan_group *vg;
1368         struct net_bridge_port *p;
1369         bool has_carrier = false;
1370
1371         if (!netif_carrier_ok(br->dev)) {
1372                 netif_carrier_off(vlan_dev);
1373                 return;
1374         }
1375
1376         list_for_each_entry(p, &br->port_list, list) {
1377                 vg = nbp_vlan_group(p);
1378                 if (br_vlan_find(vg, vid) && br_vlan_is_dev_up(p->dev)) {
1379                         has_carrier = true;
1380                         break;
1381                 }
1382         }
1383
1384         if (has_carrier)
1385                 netif_carrier_on(vlan_dev);
1386         else
1387                 netif_carrier_off(vlan_dev);
1388 }
1389
1390 static void br_vlan_set_all_vlan_dev_state(struct net_bridge_port *p)
1391 {
1392         struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
1393         struct net_bridge_vlan *vlan;
1394         struct net_device *vlan_dev;
1395
1396         list_for_each_entry(vlan, &vg->vlan_list, vlist) {
1397                 vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev,
1398                                                            vlan->vid);
1399                 if (vlan_dev) {
1400                         if (br_vlan_is_dev_up(p->dev)) {
1401                                 if (netif_carrier_ok(p->br->dev))
1402                                         netif_carrier_on(vlan_dev);
1403                         } else {
1404                                 br_vlan_set_vlan_dev_state(p->br, vlan_dev);
1405                         }
1406                 }
1407         }
1408 }
1409
1410 static void br_vlan_upper_change(struct net_device *dev,
1411                                  struct net_device *upper_dev,
1412                                  bool linking)
1413 {
1414         struct net_bridge *br = netdev_priv(dev);
1415
1416         if (!br_vlan_is_bind_vlan_dev(upper_dev))
1417                 return;
1418
1419         if (linking) {
1420                 br_vlan_set_vlan_dev_state(br, upper_dev);
1421                 br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING, true);
1422         } else {
1423                 br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING,
1424                               br_vlan_has_upper_bind_vlan_dev(dev));
1425         }
1426 }
1427
1428 struct br_vlan_link_state_walk_data {
1429         struct net_bridge *br;
1430 };
1431
1432 static int br_vlan_link_state_change_fn(struct net_device *vlan_dev,
1433                                         void *data_in)
1434 {
1435         struct br_vlan_link_state_walk_data *data = data_in;
1436
1437         if (br_vlan_is_bind_vlan_dev(vlan_dev))
1438                 br_vlan_set_vlan_dev_state(data->br, vlan_dev);
1439
1440         return 0;
1441 }
1442
1443 static void br_vlan_link_state_change(struct net_device *dev,
1444                                       struct net_bridge *br)
1445 {
1446         struct br_vlan_link_state_walk_data data = {
1447                 .br = br
1448         };
1449
1450         rcu_read_lock();
1451         netdev_walk_all_upper_dev_rcu(dev, br_vlan_link_state_change_fn,
1452                                       &data);
1453         rcu_read_unlock();
1454 }
1455
1456 /* Must be protected by RTNL. */
1457 static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid)
1458 {
1459         struct net_device *vlan_dev;
1460
1461         if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
1462                 return;
1463
1464         vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev, vid);
1465         if (vlan_dev)
1466                 br_vlan_set_vlan_dev_state(p->br, vlan_dev);
1467 }
1468
1469 /* Must be protected by RTNL. */
1470 int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
1471 {
1472         struct netdev_notifier_changeupper_info *info;
1473         struct net_bridge *br = netdev_priv(dev);
1474         bool changed;
1475         int ret = 0;
1476
1477         switch (event) {
1478         case NETDEV_REGISTER:
1479                 ret = br_vlan_add(br, br->default_pvid,
1480                                   BRIDGE_VLAN_INFO_PVID |
1481                                   BRIDGE_VLAN_INFO_UNTAGGED |
1482                                   BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
1483                 break;
1484         case NETDEV_UNREGISTER:
1485                 br_vlan_delete(br, br->default_pvid);
1486                 break;
1487         case NETDEV_CHANGEUPPER:
1488                 info = ptr;
1489                 br_vlan_upper_change(dev, info->upper_dev, info->linking);
1490                 break;
1491
1492         case NETDEV_CHANGE:
1493         case NETDEV_UP:
1494                 if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING))
1495                         break;
1496                 br_vlan_link_state_change(dev, br);
1497                 break;
1498         }
1499
1500         return ret;
1501 }
1502
1503 /* Must be protected by RTNL. */
1504 void br_vlan_port_event(struct net_bridge_port *p, unsigned long event)
1505 {
1506         if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
1507                 return;
1508
1509         switch (event) {
1510         case NETDEV_CHANGE:
1511         case NETDEV_DOWN:
1512         case NETDEV_UP:
1513                 br_vlan_set_all_vlan_dev_state(p);
1514                 break;
1515         }
1516 }